Compare commits

...

191 Commits

Author SHA1 Message Date
ea9667889c docs: update STATUS.md and DEFERRED.md - nearly all deferred items complete
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 17:31:05 +09:00
6b6d581b71 feat(renderer): add soft shadows, BLAS tracker, RT fallback, light probes, light volumes
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 17:28:37 +09:00
be290bd6e0 feat(audio): add async loader, effect chain
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 17:28:31 +09:00
a50f79e4fc docs: update STATUS.md and DEFERRED.md with G-Buffer compression, stencil, half-res SSGI, bilateral bloom, fade curves, dynamic groups, audio bus
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 17:24:51 +09:00
aafebff478 feat(audio): add fade curves, dynamic mix groups, audio bus routing
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 17:23:48 +09:00
bc2880d41c feat(renderer): add stencil optimization, half-res SSGI, bilateral bloom
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 17:21:44 +09:00
025bf4d0b9 feat(renderer): add G-Buffer compression with octahedral normals and depth reconstruction
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 17:21:05 +09:00
7375b15fcf chore: update Cargo.lock
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 16:42:21 +09:00
37addfdf03 docs: update STATUS.md and DEFERRED.md with RT reflections, RT AO, RT point shadows
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 16:38:15 +09:00
039dbe0d09 feat(renderer): add RT reflections, RT AO, and RT point/spot shadows
Add three new compute shader modules extending the existing RT shadow system:
- RT Reflections: screen-space reflection ray marching from G-Buffer
- RT Ambient Occlusion: hemisphere sampling with cosine-weighted directions
- RT Point/Spot Shadow: placeholder infrastructure for point/spot light shadows

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 16:37:25 +09:00
9a5d3b7b97 docs: update STATUS.md and DEFERRED.md with occlusion and progressive JPEG
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 16:34:29 +09:00
cc842d7c13 feat(renderer): add Progressive JPEG detection and scan parsing
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 16:33:52 +09:00
84dc7aeb20 feat(audio): add occlusion simulation with low-pass filter
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 16:33:04 +09:00
d7b5fdde31 docs: update STATUS.md and DEFERRED.md with HRTF, reverb, lag compensation, encryption
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 16:32:02 +09:00
96efe113b2 feat(audio): add Reverb (Schroeder) and Echo effects
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 16:31:19 +09:00
98d40d6520 feat(net): add packet encryption and auth token
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 16:28:21 +09:00
6beafc6949 feat(net): add lag compensation with history rewind and interpolation
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 16:27:58 +09:00
28b24226e7 feat(audio): add simplified HRTF with ITD and ILD
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 16:27:05 +09:00
8685d7c4aa docs: update STATUS.md and DEFERRED.md with motion blur and DOF
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 16:25:57 +09:00
447473598a feat(renderer): add motion blur compute shader
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:57:05 +09:00
831365a622 docs: update STATUS.md and DEFERRED.md with ray vs mesh, convex hull, navmesh serialization
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:50:31 +09:00
0f08c65a1e feat(physics): add ConvexHull collider with GJK support function
Add ConvexHull struct storing vertices with a support function that
returns the farthest point in a given direction, enabling GJK/EPA
collision detection. Update all Collider match arms across the physics
crate (collision, raycast, integrator, solver) to handle the new variant.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:49:38 +09:00
1b5da4d0d5 feat(physics): add ray vs mesh raycasting with MeshCollider
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:48:57 +09:00
07d7475410 feat(ai): add NavMesh binary serialization
Add serialize/deserialize methods directly on NavMesh with a proper
binary header (magic "VNAV" + version u32) for format validation.
Includes tests for roundtrip, invalid magic, empty mesh, header format,
and unsupported version.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:46:38 +09:00
d4ef4cf1ce docs: update STATUS.md and DEFERRED.md with TAA and SSR
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:42:11 +09:00
764ee96ec1 feat(renderer): add screen space reflections compute shader
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:41:29 +09:00
41c7f9607e feat(renderer): add TAA with Halton jitter and neighborhood clamping
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:40:53 +09:00
d321c0695c docs: update STATUS.md and DEFERRED.md with bilateral blur and temporal accumulation
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:29:42 +09:00
1f855b7bf6 feat(renderer): add temporal accumulation compute shader for SSGI
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:28:21 +09:00
1ea2d340e6 feat(renderer): add bilateral blur compute shader for SSGI denoising
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:28:20 +09:00
a60a25e9ba docs: add SSGI quality (bilateral blur + temporal accumulation) design spec
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:25:26 +09:00
e4879b6a31 docs: update STATUS.md and DEFERRED.md with GPU instancing
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:14:45 +09:00
17ea3f4856 feat(renderer): add GPU instancing with instance buffer and pipeline
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:14:12 +09:00
c5f6511fc2 docs: add GPU instancing design spec
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:12:11 +09:00
2703e73ef0 docs: update STATUS.md and DEFERRED.md with auto exposure
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:09:31 +09:00
7dbd94ebab feat(renderer): add auto exposure with compute luminance and adaptation
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:08:50 +09:00
72b517efb2 docs: add auto exposure design spec
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:04:20 +09:00
5f21ec0afe docs: update STATUS.md and DEFERRED.md with Changed filter
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:01:32 +09:00
bbb11d9d47 feat(ecs): add tick-based change detection with query_changed
Add per-component tick tracking to SparseSet. Insert and get_mut mark
the current tick; increment_tick advances it. World gains query_changed
to find entities whose component changed this tick, and clear_changed
to advance all storages at end of frame.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 15:00:50 +09:00
c6ac2ded81 docs: add Changed filter design spec
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:57:44 +09:00
f79889cbf1 docs: update STATUS.md and DEFERRED.md with transparent forward pass
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:53:03 +09:00
afb95c9fb1 feat(renderer): add forward transparency pass with alpha blending
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:52:17 +09:00
ef8c39b5ae docs: add transparent objects design spec
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:49:44 +09:00
43411b6fd9 docs: update STATUS.md and DEFERRED.md with AudioSource and NavAgent ECS components
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:44:40 +09:00
5b3b06c318 feat(ai): add NavAgent ECS component
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:43:59 +09:00
6121530bfe feat(audio): add AudioSource ECS component
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:43:48 +09:00
ce1a79cab6 docs: add ECS integration (Audio + AI) design spec
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:42:46 +09:00
1d38434aec docs: update STATUS.md and DEFERRED.md with glTF animation parser
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:39:10 +09:00
0cc6df15a3 feat(renderer): extend glTF parser with nodes, skins, animations support
Add GltfNode, GltfSkin, GltfAnimation, GltfChannel structs and parsing
for skeletal animation data. Extend GltfMesh with JOINTS_0/WEIGHTS_0
attribute extraction. All existing tests pass plus 4 new tests.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:38:25 +09:00
ce69c81eca docs: add glTF animation/skin parser extension design spec
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:35:23 +09:00
b2fd3988f5 docs: update STATUS.md and DEFERRED.md with TTF font support
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:14:48 +09:00
cccc54c438 feat(editor): integrate TTF font into UiContext with draw_text helper
Add ttf_font field to UiContext with draw_text and ttf_text_width
helpers that use TTF when available, falling back to bitmap font.
Load system TTF font (arial/consola/malgun) in editor_demo on startup.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:14:05 +09:00
58bce839fe feat(editor): add TtfFont unified interface with glyph caching
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:12:02 +09:00
94e7f6262e feat(editor): add GlyphCache on-demand atlas manager
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:10:16 +09:00
3b0a65ed17 feat(editor): add glyph rasterizer with bezier flattening and scanline fill
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:09:58 +09:00
e008178316 feat(editor): add self-implemented TTF parser with cmap, glyf, hmtx support
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:07:52 +09:00
74974dbff0 docs: add TTF font implementation plan
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:04:43 +09:00
53505ee9b7 docs: add TTF font design spec
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 14:03:11 +09:00
295944d237 docs: update STATUS.md and DEFERRED.md with asset browser
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 13:38:51 +09:00
587c75f6c2 feat(editor): integrate asset browser into editor_demo
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 13:38:11 +09:00
0857446d74 feat(editor): add AssetBrowser with directory navigation and UI panel
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 13:36:23 +09:00
b65585b739 docs: add asset browser implementation plan
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 11:43:29 +09:00
4ef6e83710 docs: add asset browser design spec
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 11:42:41 +09:00
58cbdf8400 docs: update STATUS.md and DEFERRED.md with entity inspector
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 11:00:40 +09:00
b965d78835 feat(editor): integrate hierarchy and inspector panels into editor_demo
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:59:55 +09:00
65b86c293c feat(editor): add inspector_panel with Transform, Tag, Parent editing
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:57:23 +09:00
7fbc88b86f feat(editor): add hierarchy_panel with entity tree display and selection
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:55:18 +09:00
8ecf883ef6 docs: add entity inspector implementation plan
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:50:34 +09:00
e14a53c3fa docs: fix entity inspector spec from review
- Add copy-out borrow pattern for Transform editing
- Add tag_buffer staging string for Tag editing
- Add count_nodes helper, highlight color

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:48:57 +09:00
4d5fc5e44c docs: add entity inspector design spec
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:47:14 +09:00
34e257e887 docs: update STATUS.md and DEFERRED.md with scene viewport
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:41:31 +09:00
14fe532432 feat(editor): integrate 3D viewport into editor_demo
Render actual 3D geometry (cubes + ground plane) in the Viewport panel
using the existing mesh_shader.wgsl Blinn-Phong pipeline with orbit
camera controls (left-drag to orbit, middle-drag to pan, scroll to zoom).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:40:41 +09:00
9f42300109 feat(editor): add ViewportRenderer blit pipeline and shader
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:35:15 +09:00
6ef248f76d feat(editor): add OrbitCamera with orbit, zoom, pan controls
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:33:10 +09:00
ae20590f6e feat(editor): add ViewportTexture offscreen render target
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:31:28 +09:00
d93253dfb1 docs: add scene viewport implementation plan
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:26:21 +09:00
fed47e9242 docs: fix scene viewport spec from review
- Add voltex_math/voltex_renderer dependencies
- Fix color format to Rgba8Unorm (linear, prevent double gamma)
- Correct bind group layout to match mesh_shader.wgsl
- Add vec3 padding requirement for Rust uniform structs
- Per-frame bind group creation for viewport renderer
- Pan degenerate right vector guard

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:22:30 +09:00
8468f3cce2 docs: add scene viewport design spec
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:20:07 +09:00
c19dc6421a docs: update STATUS.md and DEFERRED.md with docking system
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:11:26 +09:00
f7ef228b49 feat(editor): integrate docking into editor_demo
Add full-frame-cycle integration test to dock.rs (16 tests total) and
update editor_demo to use DockTree layout instead of a single panel.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 10:09:53 +09:00
1571aa5f97 feat(editor): add draw_chrome for tab bars and split lines
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 09:57:18 +09:00
36fedb48bf feat(editor): add update method with tab click and resize handling
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 09:54:57 +09:00
a642f8ef7e fix(editor): address code review issues in dock module
- debug_assert → assert for empty tabs invariant
- tabs.len() - 1 → saturating_sub(1) to prevent underflow
- Add #[allow(dead_code)] for scaffolded structs/constants

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 09:52:53 +09:00
14784c731c feat(editor): add dock tree data model and layout algorithm
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 09:49:53 +09:00
a69554eede docs: add editor docking implementation plan
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 09:43:08 +09:00
867f8a0aa0 docs: fix docking spec issues from review
- Add panel name registry (names Vec)
- Cache layout results for draw_chrome
- Track prev_mouse_down for click detection
- Add invariants (non-empty tabs, active clamping, resize priority)
- Add LeafLayout and ResizeState structs

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 09:37:18 +09:00
eb454800a9 docs: add editor docking system design spec
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 09:34:45 +09:00
622c91a954 docs: update STATUS.md and DEFERRED.md with all completed deferred items
Test count: 255 → 485. Major additions across all crates:
- Renderer: JPG, glTF, ORM/Emissive, CSM, Point/Spot Shadow, Frustum Culling, SH, GPU BRDF
- ECS: JSON/Binary scene, ComponentRegistry, query filters, scheduler
- Physics: Angular dynamics, Sequential Impulse, Sleep, CCD, BVH refit, ray/triangle
- Asset: Async loader, FileWatcher, hot reload
- Audio: OGG/Vorbis, 24/32-bit WAV, Doppler
- AI: NavMesh builder, Funnel, obstacle avoidance
- Net: Reliability, snapshots, interpolation
- Script: Table interop, coroutines, sandbox, hot reload
- Editor: Text input, scroll, drag & drop

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 07:38:37 +09:00
f522bf10ac feat(script): add Lua table interop, coroutines, sandbox, hot reload
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 07:33:00 +09:00
63e59c0544 feat(editor): add text input, scroll panel, drag-and-drop widgets
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 07:32:55 +09:00
9f5f2df07c feat(ai): add navmesh builder, funnel algorithm, dynamic obstacle avoidance
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 07:15:40 +09:00
1c2a8466e7 feat(audio): add OGG/Vorbis decoder, 24/32-bit WAV, Doppler effect
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-26 07:15:38 +09:00
0ef750de69 feat(net): add reliability layer, state sync, and client interpolation
- ReliableChannel: sequence numbers, ACK, retransmission, RTT estimation
- OrderedChannel: in-order delivery with out-of-order buffering
- Snapshot serialization with delta compression (per-field bitmask)
- InterpolationBuffer: linear interpolation between server snapshots
- New packet types: Reliable, Ack, Snapshot, SnapshotDelta

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 21:03:52 +09:00
dccea21bfe feat(ai): add funnel string-pulling and navmesh serialization
Add Simple Stupid Funnel (SSF) algorithm for optimal path smoothing through
triangle corridors. Refactor A* to expose find_path_triangles for triangle
index paths. Add binary serialize/deserialize for NavMesh and shared_edge
query method.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 21:00:35 +09:00
1081fb472f feat(renderer): improve IBL with Hosek-Wilkie sky, SH irradiance, GPU BRDF LUT
- Hosek-Wilkie inspired procedural sky (Rayleigh/Mie scattering, sun disk)
- L2 Spherical Harmonics irradiance (9 coefficients, CPU computation)
- SH evaluation in shader replaces sample_environment for diffuse IBL
- GPU compute BRDF LUT (Rg16Float, higher precision than CPU Rgba8Unorm)
- SkyParams (sun_direction, turbidity) in ShadowUniform

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 20:58:28 +09:00
abd6f5cf6e feat(physics): add angular dynamics, sequential impulse solver, sleep system, BVH improvements, CCD, and ray extensions
- Angular velocity integration with diagonal inertia tensor (sphere/box/capsule)
- Angular impulse in collision solver (torque from off-center contacts)
- Sequential impulse solver with configurable iterations (default 4)
- Sleep/island system: bodies sleep after velocity threshold timeout, wake on collision
- Ray vs triangle intersection (Möller–Trumbore algorithm)
- raycast_all returning all hits sorted by distance
- BVH query_pairs replaced N^2 brute force with recursive tree traversal
- BVH query_ray for accelerated raycasting
- BVH refit for incremental AABB updates
- Swept sphere vs AABB continuous collision detection (CCD)
- Updated lib.rs exports for all new public APIs

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 20:57:55 +09:00
1b0e12e824 feat(renderer): add CSM, point/spot shadows, and frustum light culling
- CascadedShadowMap: 2-cascade directional shadows with frustum-based splits
- PointShadowMap: cube depth texture with 6-face rendering
- SpotShadowMap: perspective shadow map from spot light cone
- Frustum light culling: Gribb-Hartmann plane extraction + sphere tests
- Mat4::inverse() for frustum corner computation

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 20:55:43 +09:00
a7497f6045 docs: add Phase 4b-5 deferred items spec (shadows, IBL, physics)
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 20:50:24 +09:00
6bc77cb777 feat(renderer): add ORM and emissive texture map support to PBR pipeline
- Extended bind group 1: albedo + normal + ORM + emissive (8 bindings)
- pbr_shader.wgsl: ORM sampling (R=AO, G=roughness, B=metallic) + emissive
- deferred_gbuffer.wgsl: ORM + emissive luminance in material_data.w
- deferred_lighting.wgsl: emissive contribution from G-Buffer
- All 5 PBR examples updated with default ORM/emissive textures
- Backward compatible: old 4-binding layout preserved

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 20:41:30 +09:00
164eead5ec feat(ecs): add JSON/binary scene serialization and component registry
- Mini JSON writer/parser in voltex_ecs (no renderer dependency)
- ComponentRegistry with register/find/register_defaults
- serialize_scene_json/deserialize_scene_json with hex-encoded components
- serialize_scene_binary/deserialize_scene_binary (VSCN binary format)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 20:38:56 +09:00
f4b1174e13 feat(asset): add async loading, file watcher, and hot reload support
- FileWatcher: mtime-based polling change detection
- AssetLoader: background thread loading via channels
- replace_in_place on AssetStorage for hot reload
- LoadState enum: Loading/Ready/Failed

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 20:34:54 +09:00
c478e2433d docs: add implementation plans for scene serialization, async loading, PBR textures
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 20:24:19 +09:00
389cbdb063 docs: add Phase 3b-4a deferred items spec (serialization, async load, PBR textures)
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 20:21:17 +09:00
df2082f532 docs: update STATUS.md and DEFERRED.md for completed Phase 2-3a items
Mark PNG/JPG/glTF, query3/4, query filters, scheduler,
Capsule/GJK, Coulomb friction, Lua engine API as completed.
Update test count from 255 to 324.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 20:16:37 +09:00
a9f5b11f69 feat(renderer): add glTF 2.0 / GLB parser with self-contained JSON parser
- Mini JSON parser (no external deps) for glTF support
- GLB binary format: header, JSON chunk, BIN chunk
- Embedded base64 buffer URI support
- Accessor/BufferView extraction (position, normal, uv, tangent, indices)
- PBR material extraction (baseColor, metallic, roughness)
- Auto compute_tangents when not provided

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 20:14:04 +09:00
2d80a218c5 feat(renderer): add Baseline JPEG decoder
Self-contained Huffman/IDCT/MCU/YCbCr decoder.
Supports SOF0, 4:4:4/4:2:2/4:2:0 subsampling, grayscale,
restart markers. API matches parse_png pattern.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 20:06:07 +09:00
a080f0608b feat(ecs): add query filters (with/without) and system scheduler
- has_component<T> helper on World
- query_with/query_without for single component + filter
- query2_with/query2_without for 2-component + filter
- System trait with blanket impl for FnMut(&mut World)
- Ordered Scheduler (add/run_all)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 20:05:02 +09:00
8abba16137 docs: add implementation plans for JPG decoder, glTF parser, ECS filters/scheduler
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 19:58:56 +09:00
dc6aa950e3 docs: add Phase 2-3a deferred items spec (JPG, glTF, ECS filters/scheduler)
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 19:51:50 +09:00
c930d4705d feat: add PNG decoder, query3/4, Capsule+GJK/EPA, friction, Lua engine API
- PNG decoder with self-contained Deflate decompressor (RGB/RGBA)
- ECS query3 and query4 for multi-component queries
- Capsule collider + GJK/EPA narrow phase for convex shapes
- Coulomb friction in physics solver
- Lua engine API bindings (spawn, position, entity_count)

Tests: 255 → 286 (+31)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 18:31:03 +09:00
7a5e06bd24 feat(script): add Lua engine API bindings (spawn, position, entity_count)
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 18:30:26 +09:00
3d985ba803 feat(physics): add Coulomb friction to collision response
Add friction coefficient to RigidBody (default 0.5) and implement
tangential impulse clamping in resolve_collisions using Coulomb's law,
including resting-contact friction for sliding bodies.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 18:28:44 +09:00
c196648a2e feat(physics): add GJK/EPA narrow phase for convex shape collision
Implement GJK (Gilbert-Johnson-Keerthi) overlap detection and EPA
(Expanding Polytope Algorithm) penetration resolution for arbitrary
convex collider pairs. Used as the fallback narrow phase for any
combination involving Capsule, while Sphere/Box specialized routines
are preserved for performance.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 18:25:26 +09:00
534838b7b9 feat(physics): add Capsule collider variant
Add Capsule { radius, half_height } to the Collider enum, with AABB
computation and ray-vs-capsule intersection support. The capsule is
oriented along the Y axis (cylinder + two hemisphere caps).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 18:25:20 +09:00
1707728094 feat(ecs): add query3 and query4 for multi-component queries
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 18:19:50 +09:00
803df19305 feat(renderer): add PNG decoder with filter reconstruction
Parse PNG files (RGB and RGBA, 8-bit) with full filter reconstruction
(None, Sub, Up, Average, Paeth). Uses the self-contained deflate
decompressor for IDAT chunk decompression.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 18:18:12 +09:00
051eba85aa feat(renderer): add self-contained deflate decompressor
Implement RFC 1951 Deflate decompression with zlib wrapper handling.
Supports stored blocks, fixed Huffman codes, and dynamic Huffman codes
with LZ77 back-reference decoding.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 18:17:59 +09:00
e8a79e112c feat: complete Voltex Engine + Survivor game
11 crates, 13 examples (including survivor_game), 255 tests
Phase 1-8 fully implemented + playable survival game demo

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 17:49:24 +09:00
63d5ae2c25 feat(game): add HUD, audio SFX, and game over/restart
Add IMGUI HUD overlay showing HP, wave, score, and enemy count.
Display centered Game Over panel with final score and restart prompt.
Add procedural sine-wave audio clips for shooting (440Hz) and enemy
kills (220Hz) via voltex_audio. Player blinks white during invincibility.
Press R to restart after game over.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 17:46:41 +09:00
d17732fcd5 feat(game): add collision detection, damage, and scoring
Projectiles destroy enemies on contact (radius check 0.55). Enemies
damage the player on contact with 1s invincibility window. Score +100
per kill. Game over when HP reaches 0. update() now returns FrameEvents
so the caller can trigger audio/visual feedback.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 17:46:34 +09:00
a7cd14f413 feat(game): add enemy spawning with wave system and chase AI
Red sphere enemies spawn at arena edges via a wave system that
escalates each round. Enemies use direct seek steering to chase
the player. Both shadow and color passes render enemies.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 17:41:34 +09:00
5496525a7f feat(game): add projectile shooting toward mouse aim
Left click fires small yellow sphere projectiles from the player toward
the mouse cursor position on the XZ plane. Includes fire cooldown (0.2s),
projectile lifetime (2.0s), and ray-plane intersection for mouse aiming.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 17:35:46 +09:00
2f60ace70a feat(game): add player movement with WASD and camera follow
Add blue sphere as the player with WASD movement on XZ plane,
clamped to arena bounds. Camera now tracks player position.
Pre-allocate dynamic UBO buffers for 100 entities to support
future enemies and projectiles.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 17:27:11 +09:00
83c97faed0 feat(game): add survivor_game with arena and quarter-view camera
Static arena with 20x20 floor and 4 box obstacles rendered using the
forward PBR pipeline with shadow mapping. Fixed quarter-view camera
at (0, 15, 10) looking at origin.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 17:00:55 +09:00
3897bb0510 fix(editor): improve button colors for visible hover/active feedback
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 15:51:12 +09:00
038398962e fix(editor): replace placeholder font with real 8x12 ASCII bitmap glyphs
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 15:36:19 +09:00
586792b774 docs: add Phase 8-4 editor UI status, spec, and deferred items
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 15:26:34 +09:00
2cfb721359 feat(editor): add editor_demo example with IMGUI widgets
Interactive demo showing panel, text, button, slider, and checkbox
widgets rendered via the new UiRenderer pipeline. Uses winit event
loop with mouse input forwarded to UiContext.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 15:23:31 +09:00
dbaff58a3f feat(editor): add UI renderer pipeline and shader
Add UiRenderer with wgpu render pipeline for 2D UI overlay rendering.
Includes WGSL shader with orthographic projection, alpha blending,
and R8Unorm font atlas sampling. Font pixel (0,0) set to white for
solid-color rect rendering via UV (0,0).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 15:21:18 +09:00
19db4dd390 feat(editor): add voltex_editor crate with IMGUI core (font, draw_list, widgets)
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 15:10:53 +09:00
87b9b7c1bd docs: add Phase 8-3 Lua scripting status, spec, and deferred items
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 14:42:21 +09:00
6c3ef334fb feat(script): add voltex_script crate with Lua 5.4 FFI and safe wrapper
Compiles Lua 5.4 from source via cc crate (excluding lua.c/luac.c).
Provides ffi bindings, LuaState safe wrapper, and default engine bindings.
All 9 tests pass (exec, globals, register_fn, voltex_print).

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 14:40:53 +09:00
cde271139f docs: add Phase 8-2 networking status, spec, plan, and deferred items
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 14:31:14 +09:00
566990b7af feat(net): add UDP server/client with connection management
Adds NetSocket (non-blocking UdpSocket wrapper with local_addr), NetServer
(connection tracking via HashMap, poll/broadcast/send_to_client), and NetClient
(connect/poll/send/disconnect lifecycle). Includes an integration test on
127.0.0.1:0 that validates ClientConnected, Connected, and UserData receipt
end-to-end with 50ms sleeps to ensure UDP packet delivery.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 14:29:41 +09:00
4519c5c4a6 feat(net): add voltex_net crate with packet serialization
Introduces the voltex_net crate (no external dependencies) with a binary
Packet protocol over UDP. Supports 6 variants (Connect, Accept, Disconnect,
Ping, Pong, UserData) with a 4-byte header (type_id u8, payload_len u16 LE,
reserved u8) and per-variant payload encoding. Includes 7 unit tests covering
all roundtrips and invalid-type error handling.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 14:29:27 +09:00
e28690b24a docs: add Phase 8-1 AI system status, spec, plan, and deferred items
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 14:20:50 +09:00
acaad86aee feat(ai): add steering behaviors (seek, flee, arrive, wander, follow_path)
Implements SteeringAgent and five steering behaviors. truncate() clamps
force magnitude. follow_path() advances waypoints within waypoint_radius
and uses arrive() for the final waypoint. 6 passing tests covering all
behaviors including deceleration and path advancement.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 14:18:50 +09:00
5d0fc9d8d1 feat(ai): add A* pathfinding on NavMesh triangle graph
Implements find_path() using A* over triangle adjacency with XZ heuristic.
Path starts at the given start point, passes through intermediate triangle
centers, and ends at the goal point. Returns None when points are outside
the mesh or no connection exists. 5 passing tests.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 14:18:45 +09:00
49957435d7 feat(ai): add voltex_ai crate with NavMesh (manual triangle mesh)
Introduces the voltex_ai crate with NavMesh, NavTriangle structs and
XZ-plane barycentric point-in-triangle helper. Includes find_triangle,
triangle_center, and edge_midpoint utilities with 4 passing tests.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 14:18:40 +09:00
cddf9540dd fix(renderer): fix RT shadow shader syntax, experimental features, and R32Float sampler compatibility
- Add 'enable wgpu_ray_query' to RT shadow shader
- Fix RayDesc struct constructor syntax and rayQueryGetCommittedIntersection API
- Enable ExperimentalFeatures in GpuContext for RT
- Change RT shadow texture binding to non-filterable (R32Float)
- Use textureLoad instead of textureSample for RT shadow in lighting pass

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 14:06:20 +09:00
b8334ea361 docs: add Phase 7-4 post processing status, spec, plan, and deferred items
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 13:55:09 +09:00
6c9fc0cbf1 feat(renderer): add HDR + Bloom + ACES tonemap to deferred_demo
Lighting pass now renders to Rgba16Float HDR target. A 5-level bloom
mip chain (downsample with bright-extract threshold, then additive
upsample) feeds into an ACES tonemap pass that composites the final
LDR output to the swapchain surface. All resources resize correctly.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 13:53:36 +09:00
4debec43e7 feat(renderer): add bloom/tonemap pipelines and convert lighting to HDR output
- deferred_pipeline.rs: add bloom_bind_group_layout, create_bloom_downsample_pipeline (NO blend),
  create_bloom_upsample_pipeline (additive One+One blend), tonemap_bind_group_layout, create_tonemap_pipeline
- deferred_lighting.wgsl: remove Reinhard tonemap + gamma correction; output raw HDR linear colour
- lib.rs: export new pipeline functions

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 13:49:32 +09:00
6d30151be1 feat(renderer): add bloom and tonemap shaders
- bloom_shader.wgsl: fullscreen vertex + fs_downsample (5-tap box + bright extract) + fs_upsample (9-tap tent)
- tonemap_shader.wgsl: fullscreen vertex + fs_main (HDR+bloom combine, exposure, ACES, gamma 1/2.2)

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 13:48:20 +09:00
6c938999e4 feat(renderer): add HDR target, Bloom resources, and ACES tonemap
- Add hdr.rs with HdrTarget (Rgba16Float render target) and HDR_FORMAT constant
- Add bloom.rs with BloomResources (5-level mip chain), BloomUniform, and mip_sizes()
- Add tonemap.rs with TonemapUniform and CPU-side aces_tonemap() for testing
- Export all new types from lib.rs
- 33 tests passing (26 existing + 3 bloom + 4 tonemap)

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 13:47:19 +09:00
ba610f48dc docs: add Phase 7-1 through 7-3 specs and plans
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 13:25:11 +09:00
643a329338 docs: add Phase 7-3 RT shadows status and deferred items
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 13:25:00 +09:00
a5c0179793 feat(renderer): add hardware RT shadows to deferred_demo
Integrate BLAS/TLAS acceleration structures and RT shadow compute pass
into the deferred rendering demo. Adds GpuContext::new_with_features()
for requesting EXPERIMENTAL_RAY_QUERY, Mesh::new_with_usage() for
BLAS_INPUT buffer flags, and extends the lighting shadow bind group
to 9 entries (shadow map + IBL + SSGI + RT shadow).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 13:23:34 +09:00
b556cdd768 feat(renderer): add RT shadow compute pipeline and integrate into lighting pass
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 13:14:28 +09:00
3a311e14af feat(renderer): add RT shadow resources and compute shader
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 13:14:22 +09:00
e2424bf8c9 feat(renderer): add BLAS/TLAS acceleration structure management for RT
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 13:14:06 +09:00
71045d8603 docs: add Phase 7-2 SSGI status and deferred items
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 12:10:21 +09:00
5198e8606f feat(renderer): add SSGI pass to deferred_demo (AO + color bleeding)
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 12:08:50 +09:00
3248cd3529 feat(renderer): integrate SSGI output into deferred lighting pass
Adds t_ssgi/s_ssgi bindings (group 2, bindings 5-6) to
lighting_shadow_bind_group_layout and deferred_lighting.wgsl. Replaces the
simple ambient term with SSGI-modulated ambient: ao*ssgi_ao applied to IBL
diffuse+specular, plus ssgi_indirect for indirect color bleeding.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 12:05:08 +09:00
eea6194d86 feat(renderer): add SSGI shader and pipeline for screen-space GI
Adds ssgi_shader.wgsl (fullscreen pass: view-space TBN from noise, 64-sample
hemisphere loop with occlusion + color bleeding, outputs vec4(ao, indirect_rgb)).
Adds ssgi_gbuffer_bind_group_layout, ssgi_data_bind_group_layout, and
create_ssgi_pipeline to deferred_pipeline.rs. Exports all three from lib.rs.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 12:05:02 +09:00
3d0657885b feat(renderer): add SSGI resources with hemisphere kernel and noise texture
Adds ssgi.rs with SsgiUniform (repr C, Pod), SsgiResources (output texture,
kernel buffer, 4x4 noise texture, uniform buffer), hemisphere kernel generator
(64 samples, z>=0, center-biased), deterministic noise data (16 rotation
vectors), and 3 unit tests. Exports SsgiResources, SsgiUniform,
SSGI_OUTPUT_FORMAT from lib.rs.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 12:04:56 +09:00
3839aade62 docs: add Phase 7-1 deferred rendering status and deferred items
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 11:55:37 +09:00
49bfa31639 feat(renderer): add deferred_demo example with multi-light deferred rendering
Demonstrates two-pass deferred rendering: G-Buffer pass writes position,
normal, albedo, and material to 4 render targets; lighting pass composites
with 8 animated orbiting point lights plus a directional light using a
fullscreen triangle.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 11:54:01 +09:00
a9b263bf57 feat(renderer): add deferred rendering pipeline (G-Buffer + Lighting pass)
Implements deferred_pipeline.rs with pipeline creation functions:
gbuffer_camera_bind_group_layout (dynamic offset), create_gbuffer_pipeline
(MeshVertex input, 4 color targets), lighting_gbuffer_bind_group_layout
(non-filterable position + filterable rest + NonFiltering sampler),
lighting_lights_bind_group_layout, lighting_shadow_bind_group_layout, and
create_lighting_pipeline (FullscreenVertex, no depth stencil).
All wgpu 28.0 fields (immediate_size, multiview_mask, cache) included.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 11:49:45 +09:00
f6912be248 feat(renderer): add deferred lighting pass shader with Cook-Torrance BRDF
Implements deferred_lighting.wgsl for the fullscreen lighting pass:
reads G-Buffer textures, runs the identical Cook-Torrance BRDF functions
(distribution_ggx, geometry_smith, fresnel_schlick, attenuation, etc.)
from pbr_shader.wgsl, computes multi-light contribution with PCF shadow
and IBL, then applies Reinhard tonemapping and gamma correction.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 11:49:38 +09:00
72fa401420 feat(renderer): add G-Buffer pass shader for deferred rendering
Implements deferred_gbuffer.wgsl for the geometry pass: samples albedo
and normal map textures, applies TBN normal mapping, and writes world
position, encoded normal, albedo, and material parameters (metallic/
roughness/ao) to 4 separate G-Buffer render targets.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 11:49:32 +09:00
03b1419b17 feat(renderer): add GBuffer and fullscreen triangle for deferred rendering
Introduces GBuffer struct with 4 render target TextureViews (position/
normal/albedo/material) plus depth, and a fullscreen oversized triangle
for screen-space passes. Exports format constants and create helpers.
Updates lib.rs with new module declarations and re-exports.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 11:49:27 +09:00
2b3e3a6a5e docs: add Phase 5-1 through 6-3 specs, plans, and Cargo.lock
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 11:37:16 +09:00
0991f74275 docs: add Phase 6-3 mixer status and deferred items
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 11:36:24 +09:00
26e4d7fa4f feat(audio): integrate mixer groups into mixing pipeline and AudioSystem
Wires MixerState into mix_sounds (group × master volume applied per sound)
and exposes SetGroupVolume/FadeGroup commands through AudioSystem.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 11:35:05 +09:00
261e52a752 feat(audio): add MixGroup, GroupState, and MixerState with fade support
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 11:32:33 +09:00
56c5aff483 docs: add Phase 6-2 3D audio status and deferred items
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 11:28:40 +09:00
7150924d5d feat(audio): add play_3d and set_listener to AudioSystem
Add Play3d and SetListener variants to AudioCommand, expose play_3d
and set_listener methods on AudioSystem, initialize a Listener in
the Windows audio thread, and handle the new commands in the match.
Update mix_sounds call to pass the listener.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 11:27:34 +09:00
bd044c6653 feat(audio): integrate spatial 3D audio into mixing pipeline
Add SpatialParams field to PlayingSound, new_3d constructor, and
listener parameter to mix_sounds. Compute per-channel attenuation
and stereo panning when spatial params are present; 2D sounds are
unchanged. Add three new tests: spatial_2d_unchanged,
spatial_far_away_silent, and spatial_right_panning.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 11:27:29 +09:00
4436382baf feat(audio): add 3D audio spatial functions (distance attenuation, stereo panning)
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 11:24:55 +09:00
7ba3d5758b docs: add Phase 6-1 audio system status and deferred items
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 11:13:18 +09:00
a1a90ae4f8 feat(audio): add audio_demo example with sine wave playback
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 11:11:55 +09:00
6de5681707 feat(audio): add AudioSystem with WASAPI audio thread
Introduces AudioCommand enum (Play, Stop, SetVolume, StopAll, Shutdown)
and AudioSystem that spawns a dedicated audio thread. On Windows the
thread drives WasapiDevice with a 5ms mix-and-write loop; on other
platforms it runs in silent null mode. lib.rs exports wasapi (windows)
and audio_system modules with AudioSystem re-exported at crate root.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 11:10:28 +09:00
4cda9d54f3 feat(audio): add WASAPI FFI bindings for Windows audio output
Implements COM vtable structs for IMMDeviceEnumerator, IMMDevice,
IAudioClient, and IAudioRenderClient with correct IUnknown base layout.
WasapiDevice handles COM init, default endpoint activation, mix format
detection (float/i16), shared-mode Initialize (50ms buffer), and
write_samples with GetCurrentPadding/GetBuffer/ReleaseBuffer.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 11:10:19 +09:00
f52186f732 feat(audio): add mixing functions with volume, looping, and channel conversion
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 10:59:17 +09:00
f0646c34eb feat(audio): add WAV parser with PCM 16-bit support
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 10:59:13 +09:00
dc12715279 feat(audio): add voltex_audio crate with AudioClip type
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 10:59:10 +09:00
75ec3b308f docs: add Phase 5-3 raycasting status and deferred items
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 10:49:19 +09:00
67273834d6 feat(physics): add BVH-accelerated raycast with ECS integration
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 10:47:52 +09:00
3f12c4661c feat(physics): add ray intersection tests (AABB, sphere, box)
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 10:46:29 +09:00
d3eead53cf feat(math): add Ray type with direction normalization 2026-03-25 10:45:06 +09:00
102304760e docs: add Phase 5-2 rigid body simulation status and deferred items
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 10:40:13 +09:00
b516984015 feat(physics): add impulse collision response and physics_step
Implements resolve_collisions() with impulse-based velocity correction and
Baumgarte-style position correction, plus physics_step() combining integrate,
detect_collisions, and resolve_collisions.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 10:38:42 +09:00
b8c3b6422c feat(physics): add Semi-implicit Euler integration
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 10:36:17 +09:00
9bdb502f8f feat(physics): add RigidBody component and PhysicsConfig 2026-03-25 10:35:16 +09:00
7d91e204cc docs: add Phase 5-1 collision detection status and deferred items
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 10:27:13 +09:00
c1c84cdbff feat(physics): add detect_collisions ECS integration
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 10:17:26 +09:00
0570d3c4ba feat(physics): add BVH tree for broad phase collision detection
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 10:15:24 +09:00
74694315a6 feat(physics): add narrow phase collision detection (sphere-sphere, sphere-box, box-box)
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 10:14:05 +09:00
cc24b19f93 feat(physics): add voltex_physics crate with Collider and ContactPoint
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 10:12:22 +09:00
8d3855c1a7 feat(math): add AABB type with intersection, merge, and containment
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-25 10:10:27 +09:00
306 changed files with 84876 additions and 100 deletions

View File

@@ -0,0 +1,55 @@
{
"permissions": {
"allow": [
"Bash(export PATH=\"$HOME/.cargo/bin:$PATH\")",
"Bash(cargo test:*)",
"Bash(where link.exe)",
"Bash(rustup show:*)",
"Bash(cargo clean:*)",
"Read(//c/Program Files/Microsoft Visual Studio/**)",
"Read(//c/Program Files \\(x86\\)/Microsoft Visual Studio/**)",
"Read(//c//**)",
"Read(//c/Users/SSAFY/miniforge3/Library/bin/**)",
"Read(//c/Users/SSAFY/miniforge3/Library/mingw-w64/**)",
"Read(//c/Users/SSAFY/miniforge3/**)",
"Bash(ls C:/Users/SSAFY/miniforge3/Library/bin/ld*)",
"Bash(ls C:/Users/SSAFY/miniforge3/Library/mingw-w64/bin/ld*)",
"Bash(conda install:*)",
"Bash(C:/Users/SSAFY/miniforge3/Scripts/conda.exe install:*)",
"Bash(export PATH=$HOME/.cargo/bin:$PATH)",
"Read(//c/Users/SSAFY/Desktop/projects/voltex/**)",
"Bash(export PATH=\"$HOME/.cargo/bin:/c/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.44.35207/bin/Hostx64/x64:$PATH\")",
"Bash(cmd //C \"call \\\\\"C:\\\\\\\\Program Files\\\\\\\\Microsoft Visual Studio\\\\\\\\2022\\\\\\\\Community\\\\\\\\VC\\\\\\\\Auxiliary\\\\\\\\Build\\\\\\\\vcvarsall.bat\\\\\" x64 >nul 2>&1 && echo LIB=%LIB% && echo INCLUDE=%INCLUDE%\")",
"Bash(ls '/c/Program Files \\(x86\\)/Windows Kits/')",
"Bash(find '/c/Program Files \\(x86\\)/Windows Kits' -name kernel32.lib)",
"Bash(find '/c/Program Files \\(x86\\)/Windows Kits/10' -name kernel32.lib)",
"Bash(ls:*)",
"Bash(export PATH=\"$HOME/.cargo/bin:$PATH\" MSVC_LIB=\"/c/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.44.35207/lib/x64\" SDK_UM=\"/c/Program Files \\(x86\\)/Windows Kits/10/Lib/10.0.26100.0/um/x64\" SDK_UCRT=\"/c/Program Files \\(x86\\)/Windows Kits/10/Lib/10.0.26100.0/ucrt/x64\" export LIB=\"$MSVC_LIB;$SDK_UM;$SDK_UCRT\" __NEW_LINE_dfaf9ba8c4cc16a9__ MSVC_INC=\"/c/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.44.35207/include\" SDK_INC=\"/c/Program Files \\(x86\\)/Windows Kits/10/Include/10.0.26100.0/ucrt\" SDK_UM_INC=\"/c/Program Files \\(x86\\)/Windows Kits/10/Include/10.0.26100.0/um\" SDK_SHARED_INC=\"/c/Program Files \\(x86\\)/Windows Kits/10/Include/10.0.26100.0/shared\" export INCLUDE=\"$MSVC_INC;$SDK_INC;$SDK_UM_INC;$SDK_SHARED_INC\" __NEW_LINE_dfaf9ba8c4cc16a9__ export PATH=\"/c/Program Files/Microsoft Visual Studio/2022/Community/VC/Tools/MSVC/14.44.35207/bin/Hostx64/x64:$PATH\" __NEW_LINE_dfaf9ba8c4cc16a9__ cargo test --workspace)",
"Bash(find '/c/Program Files \\(x86\\)/Windows Kits' -name dbghelp.lib)",
"Bash(find /c/Users/SSAFY/Desktop/projects/voltex -name *.rs -path */examples*)",
"Bash(git add:*)",
"Bash(git commit:*)",
"Bash(cargo build:*)",
"Bash(cargo run:*)",
"Bash(cargo doc:*)",
"Bash(cargo metadata:*)",
"Bash(python3 -c \"import sys,json; d=json.load\\(sys.stdin\\); pkgs=[p for p in d[''''packages''''] if p[''''name'''']==''''wgpu'''']; feats=pkgs[0][''''features''''] if pkgs else {}; print\\([f for f in feats if ''''ray'''' in f.lower\\(\\)]\\)\")",
"Read(//c/Users/SSAFY/Desktop/projects/voltex/$HOME/.cargo/registry/src/*/wgpu-28.*/**)",
"Bash(find /c/Users/SSAFY/.cargo/registry/src -name *.rs -path *wgpu_types*)",
"Bash(xargs grep:*)",
"Bash(find /c/Users/SSAFY/.cargo/registry/src -name *.rs)",
"Bash(find /c/Users/SSAFY/.cargo/registry/src -name *.rs -path *wgpu*)",
"Bash(find /c/Users/SSAFY/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wgpu-28.0.0/src -name *.rs)",
"Bash(grep -r ExperimentalFeatures C:/Users/SSAFY/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/wgpu-core-*/src/)",
"Bash(curl -sL https://www.lua.org/ftp/lua-5.4.7.tar.gz -o /tmp/lua-5.4.7.tar.gz)",
"Read(//tmp/**)",
"Bash(mkdir -p C:/Users/SSAFY/Desktop/projects/voltex/crates/voltex_script/lua)",
"Read(//c/tmp/**)",
"Bash(tar xzf:*)",
"Bash(cp lua-5.4.7/src/*.c lua-5.4.7/src/*.h C:/Users/SSAFY/Desktop/projects/voltex/crates/voltex_script/lua/)",
"Bash(cd:*)",
"Bash(git status:*)",
"Bash(git push:*)"
]
}
}

103
Cargo.lock generated
View File

@@ -180,6 +180,13 @@ version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
[[package]]
name = "audio_demo"
version = "0.1.0"
dependencies = [
"voltex_audio",
]
[[package]]
name = "autocfg"
version = "1.5.0"
@@ -425,6 +432,21 @@ version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f27ae1dd37df86211c42e150270f82743308803d90a6f6e6651cd730d5e1732f"
[[package]]
name = "deferred_demo"
version = "0.1.0"
dependencies = [
"bytemuck",
"env_logger",
"log",
"pollster",
"voltex_math",
"voltex_platform",
"voltex_renderer",
"wgpu",
"winit",
]
[[package]]
name = "dispatch"
version = "0.2.0"
@@ -461,6 +483,23 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8b14ccef22fc6f5a8f4d7d768562a182c04ce9a3b3157b91390b52ddfdf1a76"
[[package]]
name = "editor_demo"
version = "0.1.0"
dependencies = [
"bytemuck",
"env_logger",
"log",
"pollster",
"voltex_ecs",
"voltex_editor",
"voltex_math",
"voltex_platform",
"voltex_renderer",
"wgpu",
"winit",
]
[[package]]
name = "env_filter"
version = "1.0.1"
@@ -1840,6 +1879,23 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6637bab7722d379c8b41ba849228d680cc12d0a45ba1fa2b48f2a30577a06731"
[[package]]
name = "survivor_game"
version = "0.1.0"
dependencies = [
"bytemuck",
"env_logger",
"log",
"pollster",
"voltex_audio",
"voltex_editor",
"voltex_math",
"voltex_platform",
"voltex_renderer",
"wgpu",
"winit",
]
[[package]]
name = "syn"
version = "2.0.117"
@@ -2022,10 +2078,25 @@ version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "voltex_ai"
version = "0.1.0"
dependencies = [
"voltex_ecs",
"voltex_math",
]
[[package]]
name = "voltex_asset"
version = "0.1.0"
[[package]]
name = "voltex_audio"
version = "0.1.0"
dependencies = [
"voltex_math",
]
[[package]]
name = "voltex_ecs"
version = "0.1.0"
@@ -2033,10 +2104,33 @@ dependencies = [
"voltex_math",
]
[[package]]
name = "voltex_editor"
version = "0.1.0"
dependencies = [
"bytemuck",
"voltex_ecs",
"voltex_math",
"voltex_renderer",
"wgpu",
]
[[package]]
name = "voltex_math"
version = "0.1.0"
[[package]]
name = "voltex_net"
version = "0.1.0"
[[package]]
name = "voltex_physics"
version = "0.1.0"
dependencies = [
"voltex_ecs",
"voltex_math",
]
[[package]]
name = "voltex_platform"
version = "0.1.0"
@@ -2059,6 +2153,15 @@ dependencies = [
"winit",
]
[[package]]
name = "voltex_script"
version = "0.1.0"
dependencies = [
"cc",
"voltex_ecs",
"voltex_math",
]
[[package]]
name = "walkdir"
version = "2.5.0"

View File

@@ -15,6 +15,16 @@ members = [
"examples/multi_light_demo",
"examples/shadow_demo",
"examples/ibl_demo",
"examples/deferred_demo",
"crates/voltex_physics",
"crates/voltex_audio",
"examples/audio_demo",
"crates/voltex_ai",
"crates/voltex_net",
"crates/voltex_script",
"crates/voltex_editor",
"examples/editor_demo",
"examples/survivor_game",
]
[workspace.dependencies]
@@ -23,6 +33,12 @@ voltex_platform = { path = "crates/voltex_platform" }
voltex_renderer = { path = "crates/voltex_renderer" }
voltex_ecs = { path = "crates/voltex_ecs" }
voltex_asset = { path = "crates/voltex_asset" }
voltex_physics = { path = "crates/voltex_physics" }
voltex_audio = { path = "crates/voltex_audio" }
voltex_ai = { path = "crates/voltex_ai" }
voltex_net = { path = "crates/voltex_net" }
voltex_script = { path = "crates/voltex_script" }
voltex_editor = { path = "crates/voltex_editor" }
wgpu = "28.0"
winit = "0.30"
bytemuck = { version = "1", features = ["derive"] }

View File

@@ -0,0 +1,8 @@
[package]
name = "voltex_ai"
version = "0.1.0"
edition = "2021"
[dependencies]
voltex_math.workspace = true
voltex_ecs.workspace = true

View File

@@ -0,0 +1,9 @@
//! voltex_ai: AI toolkit crate — navigation mesh, A* pathfinding,
//! steering behaviors, and the `NavAgent` ECS component.
pub mod nav_agent;
pub mod navmesh;
pub mod pathfinding;
pub mod steering;
// Re-export the primary types/functions at the crate root for ergonomic use.
pub use nav_agent::NavAgent;
pub use navmesh::{NavMesh, NavTriangle};
pub use pathfinding::find_path;
pub use steering::{SteeringAgent, seek, flee, arrive, wander, follow_path};

View File

@@ -0,0 +1,85 @@
use voltex_math::Vec3;
/// ECS component for AI pathfinding navigation.
#[derive(Debug, Clone)]
pub struct NavAgent {
    /// Destination the agent is steering toward; `None` when idle.
    pub target: Option<Vec3>,
    /// Movement speed (presumably world units per second — confirm against the movement system).
    pub speed: f32,
    /// Waypoints computed by the pathfinder, to be walked in order.
    pub path: Vec<Vec3>,
    /// Index into `path` of the waypoint currently being pursued.
    pub current_waypoint: usize,
    /// Arrival flag; reset to false by `set_target`/`clear_target`
    /// (presumably set true by the movement system on arrival — verify against caller).
    pub reached: bool,
}
impl NavAgent {
pub fn new(speed: f32) -> Self {
NavAgent {
target: None,
speed,
path: Vec::new(),
current_waypoint: 0,
reached: false,
}
}
pub fn set_target(&mut self, target: Vec3) {
self.target = Some(target);
self.path.clear();
self.current_waypoint = 0;
self.reached = false;
}
pub fn clear_target(&mut self) {
self.target = None;
self.path.clear();
self.current_waypoint = 0;
self.reached = false;
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Approximate equality for f32 comparisons in these tests.
    fn close(a: f32, b: f32) -> bool {
        (a - b).abs() < 1e-6
    }

    #[test]
    fn test_new_defaults() {
        let agent = NavAgent::new(5.0);
        assert!(close(agent.speed, 5.0));
        assert!(agent.target.is_none());
        assert!(agent.path.is_empty());
        assert_eq!(agent.current_waypoint, 0);
        assert!(!agent.reached);
    }

    #[test]
    fn test_set_target() {
        let mut agent = NavAgent::new(3.0);
        agent.set_target(Vec3::new(10.0, 0.0, 5.0));
        let dest = agent.target.expect("target must be set");
        assert!(close(dest.x, 10.0));
        assert!(!agent.reached);
    }

    #[test]
    fn test_set_target_clears_path() {
        let mut agent = NavAgent::new(3.0);
        agent.path = vec![Vec3::new(1.0, 0.0, 0.0), Vec3::new(2.0, 0.0, 0.0)];
        agent.current_waypoint = 1;
        agent.set_target(Vec3::new(5.0, 0.0, 5.0));
        assert!(agent.path.is_empty());
        assert_eq!(agent.current_waypoint, 0);
    }

    #[test]
    fn test_clear_target() {
        let mut agent = NavAgent::new(3.0);
        agent.set_target(Vec3::new(10.0, 0.0, 5.0));
        agent.path = vec![Vec3::new(1.0, 0.0, 0.0)];
        agent.reached = true;
        agent.clear_target();
        assert!(agent.target.is_none());
        assert!(agent.path.is_empty());
        assert_eq!(agent.current_waypoint, 0);
        assert!(!agent.reached);
    }
}

View File

@@ -0,0 +1,497 @@
use voltex_math::Vec3;
/// A triangle in the navigation mesh, defined by vertex indices and neighbor triangle indices.
#[derive(Debug, Clone)]
pub struct NavTriangle {
    /// Indices into NavMesh::vertices
    pub indices: [usize; 3],
    /// Neighboring triangle indices (None if no neighbor on that edge).
    /// Slot k is the neighbor across edge k, where edge 0 joins indices[0]-[1],
    /// edge 1 joins indices[1]-[2], and edge 2 joins indices[2]-[0]
    /// (the same convention as NavMesh::edge_midpoint).
    pub neighbors: [Option<usize>; 3],
}
/// Navigation mesh composed of triangles for pathfinding.
pub struct NavMesh {
    /// Shared vertex pool referenced by triangle indices.
    pub vertices: Vec<Vec3>,
    /// Triangle list with per-edge adjacency links.
    pub triangles: Vec<NavTriangle>,
}
impl NavMesh {
pub fn new(vertices: Vec<Vec3>, triangles: Vec<NavTriangle>) -> Self {
Self { vertices, triangles }
}
/// Find the index of the triangle that contains `point` (XZ plane test).
/// Returns None if the point is outside all triangles.
pub fn find_triangle(&self, point: Vec3) -> Option<usize> {
for (i, tri) in self.triangles.iter().enumerate() {
let a = self.vertices[tri.indices[0]];
let b = self.vertices[tri.indices[1]];
let c = self.vertices[tri.indices[2]];
if point_in_triangle_xz(point, a, b, c) {
return Some(i);
}
}
None
}
/// Return the centroid of triangle `tri_idx` in 3D space.
pub fn triangle_center(&self, tri_idx: usize) -> Vec3 {
let tri = &self.triangles[tri_idx];
let a = self.vertices[tri.indices[0]];
let b = self.vertices[tri.indices[1]];
let c = self.vertices[tri.indices[2]];
Vec3::new(
(a.x + b.x + c.x) / 3.0,
(a.y + b.y + c.y) / 3.0,
(a.z + b.z + c.z) / 3.0,
)
}
/// Return the midpoint of the shared edge between `tri_idx` and neighbor slot `edge_idx` (0, 1, or 2).
/// Edge 0 is between vertex indices[0] and indices[1],
/// Edge 1 is between indices[1] and indices[2],
/// Edge 2 is between indices[2] and indices[0].
pub fn edge_midpoint(&self, tri_idx: usize, edge_idx: usize) -> Vec3 {
let tri = &self.triangles[tri_idx];
let (i0, i1) = match edge_idx {
0 => (tri.indices[0], tri.indices[1]),
1 => (tri.indices[1], tri.indices[2]),
2 => (tri.indices[2], tri.indices[0]),
_ => panic!("edge_idx must be 0, 1, or 2"),
};
let a = self.vertices[i0];
let b = self.vertices[i1];
Vec3::new(
(a.x + b.x) / 2.0,
(a.y + b.y) / 2.0,
(a.z + b.z) / 2.0,
)
}
/// Serialize the NavMesh to a binary format with header.
///
/// Format:
/// - Magic: "VNAV" (4 bytes)
/// - Version: u32 (little-endian)
/// - num_vertices: u32
/// - vertices: [f32; 3] * num_vertices
/// - num_triangles: u32
/// - triangles: for each triangle: indices [u32; 3] + neighbors [i32; 3] (-1 for None)
pub fn serialize(&self) -> Vec<u8> {
let mut data = Vec::new();
// Magic
data.extend_from_slice(b"VNAV");
// Version
data.extend_from_slice(&1u32.to_le_bytes());
// Vertices
data.extend_from_slice(&(self.vertices.len() as u32).to_le_bytes());
for v in &self.vertices {
data.extend_from_slice(&v.x.to_le_bytes());
data.extend_from_slice(&v.y.to_le_bytes());
data.extend_from_slice(&v.z.to_le_bytes());
}
// Triangles
data.extend_from_slice(&(self.triangles.len() as u32).to_le_bytes());
for tri in &self.triangles {
for &idx in &tri.indices {
data.extend_from_slice(&(idx as u32).to_le_bytes());
}
for &nb in &tri.neighbors {
let val: i32 = match nb {
Some(i) => i as i32,
None => -1,
};
data.extend_from_slice(&val.to_le_bytes());
}
}
data
}
/// Deserialize a NavMesh from binary data with header validation.
///
/// Expects the layout produced by `serialize`: "VNAV" magic, u32 version
/// (must be 1), then the vertex and triangle tables.
///
/// # Errors
/// Returns a descriptive `String` if the buffer is truncated, the magic is
/// wrong, the version is unsupported, or a count exceeds the remaining data.
pub fn deserialize(data: &[u8]) -> Result<Self, String> {
    if data.len() < 8 {
        return Err("data too short for header".to_string());
    }
    // Validate magic
    if &data[0..4] != b"VNAV" {
        return Err(format!(
            "invalid magic: expected VNAV, got {:?}",
            &data[0..4]
        ));
    }
    // Read version
    let version = u32::from_le_bytes([data[4], data[5], data[6], data[7]]);
    if version != 1 {
        return Err(format!("unsupported version: {}", version));
    }
    // Delegate rest to offset-based reading. A single bounds-checked
    // 4-byte reader is shared by the u32/i32/f32 field readers instead of
    // duplicating the bounds check three times.
    let mut offset = 8usize;
    let read4 = |off: &mut usize| -> Result<[u8; 4], String> {
        if *off + 4 > data.len() {
            return Err("unexpected end of data".to_string());
        }
        let bytes = [data[*off], data[*off + 1], data[*off + 2], data[*off + 3]];
        *off += 4;
        Ok(bytes)
    };
    let read_u32 = |off: &mut usize| read4(off).map(u32::from_le_bytes);
    let read_i32 = |off: &mut usize| read4(off).map(i32::from_le_bytes);
    let read_f32 = |off: &mut usize| read4(off).map(f32::from_le_bytes);
    let vc = read_u32(&mut offset)? as usize;
    // Guard the allocation: a corrupt or malicious count must not trigger a
    // huge reservation. Each vertex occupies 12 bytes, so the remaining
    // buffer length bounds the plausible count.
    if vc.checked_mul(12).map_or(true, |need| need > data.len() - offset) {
        return Err("vertex count exceeds remaining data".to_string());
    }
    let mut vertices = Vec::with_capacity(vc);
    for _ in 0..vc {
        let x = read_f32(&mut offset)?;
        let y = read_f32(&mut offset)?;
        let z = read_f32(&mut offset)?;
        vertices.push(Vec3::new(x, y, z));
    }
    let tc = read_u32(&mut offset)? as usize;
    // Same guard for triangles: 24 bytes each (3 x u32 + 3 x i32).
    if tc.checked_mul(24).map_or(true, |need| need > data.len() - offset) {
        return Err("triangle count exceeds remaining data".to_string());
    }
    let mut triangles = Vec::with_capacity(tc);
    for _ in 0..tc {
        let i0 = read_u32(&mut offset)? as usize;
        let i1 = read_u32(&mut offset)? as usize;
        let i2 = read_u32(&mut offset)? as usize;
        let n0 = read_i32(&mut offset)?;
        let n1 = read_i32(&mut offset)?;
        let n2 = read_i32(&mut offset)?;
        // Any negative value decodes to None (serializer writes -1).
        let to_opt = |v: i32| -> Option<usize> {
            if v < 0 { None } else { Some(v as usize) }
        };
        triangles.push(NavTriangle {
            indices: [i0, i1, i2],
            neighbors: [to_opt(n0), to_opt(n1), to_opt(n2)],
        });
    }
    Ok(NavMesh::new(vertices, triangles))
}
/// Return the shared edge (portal) vertices between two adjacent triangles.
/// Returns (left, right) vertices of the portal edge as seen from `from_tri` looking toward `to_tri`.
/// Returns None if the triangles are not adjacent.
pub fn shared_edge(&self, from_tri: usize, to_tri: usize) -> Option<(Vec3, Vec3)> {
    let tri = &self.triangles[from_tri];
    // Edge k runs from vertex k to vertex (k + 1) % 3; the neighbor array
    // is indexed by the same edge numbering.
    (0..3)
        .find(|&edge| tri.neighbors[edge] == Some(to_tri))
        .map(|edge| {
            let a = tri.indices[edge];
            let b = tri.indices[(edge + 1) % 3];
            (self.vertices[a], self.vertices[b])
        })
}
}
/// Test whether `point` lies inside or on the triangle (a, b, c) using XZ barycentric coordinates.
pub fn point_in_triangle_xz(point: Vec3, a: Vec3, b: Vec3, c: Vec3) -> bool {
    // Barycentric solve on the XZ plane; the Y component is ignored.
    let denom = (b.z - c.z) * (a.x - c.x) + (c.x - b.x) * (a.z - c.z);
    if denom.abs() < f32::EPSILON {
        // Degenerate (zero-area) triangle: nothing can lie inside it.
        return false;
    }
    let u = ((b.z - c.z) * (point.x - c.x) + (c.x - b.x) * (point.z - c.z)) / denom;
    let v = ((c.z - a.z) * (point.x - c.x) + (a.x - c.x) * (point.z - c.z)) / denom;
    let w = 1.0 - u - v;
    // Inside (or exactly on an edge) iff all three weights are non-negative.
    u >= 0.0 && v >= 0.0 && w >= 0.0
}
/// Serialize a NavMesh to binary format.
/// Format: vertex_count(u32) + vertices(f32*3 each) + triangle_count(u32) + triangles(indices u32*3 + neighbors i32*3 each)
pub fn serialize_navmesh(navmesh: &NavMesh) -> Vec<u8> {
    // Output size is exactly computable (two u32 counts, 12 bytes per
    // vertex, 24 bytes per triangle) — preallocate to avoid regrowth.
    let mut data = Vec::with_capacity(
        4 + navmesh.vertices.len() * 12 + 4 + navmesh.triangles.len() * 24,
    );
    // Vertex count
    let vc = navmesh.vertices.len() as u32;
    data.extend_from_slice(&vc.to_le_bytes());
    // Vertices
    for v in &navmesh.vertices {
        data.extend_from_slice(&v.x.to_le_bytes());
        data.extend_from_slice(&v.y.to_le_bytes());
        data.extend_from_slice(&v.z.to_le_bytes());
    }
    // Triangle count
    let tc = navmesh.triangles.len() as u32;
    data.extend_from_slice(&tc.to_le_bytes());
    // Triangles: indices(u32*3) + neighbors(i32*3, -1 for None)
    for tri in &navmesh.triangles {
        for &idx in &tri.indices {
            data.extend_from_slice(&(idx as u32).to_le_bytes());
        }
        for &nb in &tri.neighbors {
            // -1 is the on-disk sentinel for a missing neighbor.
            let val: i32 = match nb {
                Some(i) => i as i32,
                None => -1,
            };
            data.extend_from_slice(&val.to_le_bytes());
        }
    }
    data
}
/// Deserialize a NavMesh from binary data.
///
/// Expects the headerless layout produced by `serialize_navmesh`.
///
/// # Errors
/// Returns a descriptive `String` if the buffer is truncated or a count
/// exceeds the remaining data.
pub fn deserialize_navmesh(data: &[u8]) -> Result<NavMesh, String> {
    let mut offset = 0usize;
    // Single bounds-checked 4-byte reader shared by all field readers,
    // instead of three copies of the same bounds check.
    let read4 = |off: &mut usize| -> Result<[u8; 4], String> {
        if *off + 4 > data.len() {
            return Err("unexpected end of data".to_string());
        }
        let bytes = [data[*off], data[*off + 1], data[*off + 2], data[*off + 3]];
        *off += 4;
        Ok(bytes)
    };
    let read_u32 = |off: &mut usize| read4(off).map(u32::from_le_bytes);
    let read_i32 = |off: &mut usize| read4(off).map(i32::from_le_bytes);
    let read_f32 = |off: &mut usize| read4(off).map(f32::from_le_bytes);
    let vc = read_u32(&mut offset)? as usize;
    // Bound the allocation by the remaining buffer so a corrupt count can't
    // force a huge reservation (12 bytes per vertex).
    if vc.checked_mul(12).map_or(true, |need| need > data.len() - offset) {
        return Err("vertex count exceeds remaining data".to_string());
    }
    let mut vertices = Vec::with_capacity(vc);
    for _ in 0..vc {
        let x = read_f32(&mut offset)?;
        let y = read_f32(&mut offset)?;
        let z = read_f32(&mut offset)?;
        vertices.push(Vec3::new(x, y, z));
    }
    let tc = read_u32(&mut offset)? as usize;
    // Same guard for triangles (24 bytes each).
    if tc.checked_mul(24).map_or(true, |need| need > data.len() - offset) {
        return Err("triangle count exceeds remaining data".to_string());
    }
    let mut triangles = Vec::with_capacity(tc);
    for _ in 0..tc {
        let i0 = read_u32(&mut offset)? as usize;
        let i1 = read_u32(&mut offset)? as usize;
        let i2 = read_u32(&mut offset)? as usize;
        let n0 = read_i32(&mut offset)?;
        let n1 = read_i32(&mut offset)?;
        let n2 = read_i32(&mut offset)?;
        // Any negative value decodes to None (serializer writes -1).
        let to_opt = |v: i32| -> Option<usize> {
            if v < 0 { None } else { Some(v as usize) }
        };
        triangles.push(NavTriangle {
            indices: [i0, i1, i2],
            neighbors: [to_opt(n0), to_opt(n1), to_opt(n2)],
        });
    }
    Ok(NavMesh::new(vertices, triangles))
}
#[cfg(test)]
mod tests {
    use super::*;
    // Fixture: two coplanar triangles sharing the edge v1-v2.
    fn make_simple_navmesh() -> NavMesh {
        // Two triangles sharing an edge:
        // v0=(0,0,0), v1=(2,0,0), v2=(1,0,2), v3=(3,0,2)
        // Tri 0: v0, v1, v2 (left triangle)
        // Tri 1: v1, v3, v2 (right triangle)
        let vertices = vec![
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(2.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 2.0),
            Vec3::new(3.0, 0.0, 2.0),
        ];
        let triangles = vec![
            NavTriangle {
                indices: [0, 1, 2],
                neighbors: [None, Some(1), None],
            },
            NavTriangle {
                indices: [1, 3, 2],
                neighbors: [None, None, Some(0)],
            },
        ];
        NavMesh::new(vertices, triangles)
    }
    #[test]
    fn test_find_triangle_inside() {
        let nm = make_simple_navmesh();
        // Center of tri 0 is roughly (1, 0, 0.67)
        let p = Vec3::new(1.0, 0.0, 0.5);
        let result = nm.find_triangle(p);
        assert_eq!(result, Some(0));
    }
    #[test]
    fn test_find_triangle_outside() {
        let nm = make_simple_navmesh();
        // Point far outside the mesh
        let p = Vec3::new(10.0, 0.0, 10.0);
        let result = nm.find_triangle(p);
        assert_eq!(result, None);
    }
    #[test]
    fn test_triangle_center() {
        let nm = make_simple_navmesh();
        let center = nm.triangle_center(0);
        // (0+2+1)/3 = 1.0, (0+0+0)/3 = 0.0, (0+0+2)/3 = 0.667
        assert!((center.x - 1.0).abs() < 1e-5);
        assert!((center.y - 0.0).abs() < 1e-5);
        assert!((center.z - (2.0 / 3.0)).abs() < 1e-5);
    }
    #[test]
    fn test_edge_midpoint() {
        let nm = make_simple_navmesh();
        // Edge 1 of tri 0 is between indices[1]=v1=(2,0,0) and indices[2]=v2=(1,0,2)
        let mid = nm.edge_midpoint(0, 1);
        assert!((mid.x - 1.5).abs() < 1e-5);
        assert!((mid.y - 0.0).abs() < 1e-5);
        assert!((mid.z - 1.0).abs() < 1e-5);
    }
    #[test]
    fn test_shared_edge() {
        let nm = make_simple_navmesh();
        // Tri 0 edge 1 connects to Tri 1: shared edge is v1=(2,0,0) and v2=(1,0,2)
        let (left, right) = nm.shared_edge(0, 1).expect("should find shared edge");
        assert_eq!(left, Vec3::new(2.0, 0.0, 0.0));
        assert_eq!(right, Vec3::new(1.0, 0.0, 2.0));
    }
    #[test]
    fn test_shared_edge_not_adjacent() {
        let nm = make_simple_navmesh();
        // Tri 0 is not adjacent to itself via neighbors
        assert!(nm.shared_edge(0, 0).is_none());
    }
    // Free-function serializer round-trip (headerless format).
    #[test]
    fn test_serialize_roundtrip() {
        let nm = make_simple_navmesh();
        let data = serialize_navmesh(&nm);
        let nm2 = deserialize_navmesh(&data).expect("should deserialize");
        assert_eq!(nm2.vertices.len(), nm.vertices.len());
        assert_eq!(nm2.triangles.len(), nm.triangles.len());
        for (a, b) in nm.vertices.iter().zip(nm2.vertices.iter()) {
            assert!((a.x - b.x).abs() < 1e-6);
            assert!((a.y - b.y).abs() < 1e-6);
            assert!((a.z - b.z).abs() < 1e-6);
        }
        for (a, b) in nm.triangles.iter().zip(nm2.triangles.iter()) {
            assert_eq!(a.indices, b.indices);
            assert_eq!(a.neighbors, b.neighbors);
        }
    }
    #[test]
    fn test_deserialize_empty_data() {
        let result = deserialize_navmesh(&[]);
        assert!(result.is_err());
    }
    // Method-based round-trip (VNAV header + version).
    #[test]
    fn test_method_serialize_deserialize_roundtrip() {
        let nm = make_simple_navmesh();
        let data = nm.serialize();
        let restored = NavMesh::deserialize(&data).expect("should deserialize");
        assert_eq!(nm.vertices.len(), restored.vertices.len());
        assert_eq!(nm.triangles.len(), restored.triangles.len());
        for (a, b) in nm.vertices.iter().zip(restored.vertices.iter()) {
            assert!((a.x - b.x).abs() < 1e-6);
            assert!((a.y - b.y).abs() < 1e-6);
            assert!((a.z - b.z).abs() < 1e-6);
        }
        for (a, b) in nm.triangles.iter().zip(restored.triangles.iter()) {
            assert_eq!(a.indices, b.indices);
            assert_eq!(a.neighbors, b.neighbors);
        }
    }
    #[test]
    fn test_deserialize_invalid_magic() {
        let data = b"XXXX\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
        assert!(NavMesh::deserialize(data).is_err());
    }
    #[test]
    fn test_deserialize_too_short() {
        let data = b"VNA";
        assert!(NavMesh::deserialize(data).is_err());
    }
    #[test]
    fn test_serialize_empty_navmesh() {
        let nm = NavMesh::new(vec![], vec![]);
        let data = nm.serialize();
        let restored = NavMesh::deserialize(&data).expect("should deserialize empty");
        assert!(restored.vertices.is_empty());
        assert!(restored.triangles.is_empty());
    }
    #[test]
    fn test_serialize_header_format() {
        let nm = NavMesh::new(vec![], vec![]);
        let data = nm.serialize();
        // Check magic
        assert_eq!(&data[0..4], b"VNAV");
        // Check version = 1
        let version = u32::from_le_bytes([data[4], data[5], data[6], data[7]]);
        assert_eq!(version, 1);
    }
    #[test]
    fn test_deserialize_unsupported_version() {
        // Valid magic, bogus version, empty vertex/triangle tables.
        let mut data = Vec::new();
        data.extend_from_slice(b"VNAV");
        data.extend_from_slice(&99u32.to_le_bytes());
        data.extend_from_slice(&0u32.to_le_bytes());
        data.extend_from_slice(&0u32.to_le_bytes());
        assert!(NavMesh::deserialize(&data).is_err());
    }
}

View File

@@ -0,0 +1,651 @@
use voltex_math::Vec3;
use crate::navmesh::{NavMesh, NavTriangle};
/// Configuration for navmesh generation.
///
/// Defaults (see the `Default` impl) suit a roughly human-sized agent;
/// all distances are in world units.
pub struct NavMeshBuilder {
    /// XZ voxel size (default 0.3)
    pub cell_size: f32,
    /// Y voxel size (default 0.2)
    pub cell_height: f32,
    /// Minimum clearance height for walkable areas (default 2.0)
    pub agent_height: f32,
    /// Agent capsule radius used to erode walkable areas (default 0.5)
    pub agent_radius: f32,
    /// Maximum walkable slope angle in degrees (default 45.0)
    pub max_slope: f32,
}
impl Default for NavMeshBuilder {
    /// Default settings; values mirror the per-field docs on the struct.
    fn default() -> Self {
        Self {
            cell_size: 0.3,
            cell_height: 0.2,
            agent_height: 2.0,
            agent_radius: 0.5,
            max_slope: 45.0,
        }
    }
}
/// Heightfield: a 2D grid of min/max height spans for voxelized geometry.
///
/// `cells` is row-major (`z * width + x`, see `cell_index`).
pub struct Heightfield {
    pub width: usize, // number of cells along X
    pub depth: usize, // number of cells along Z
    // World-space origin of cell (0, 0).
    pub min_x: f32,
    pub min_z: f32,
    pub cell_size: f32,
    pub cell_height: f32,
    /// For each cell (x, z), store (min_y, max_y). None if empty.
    pub cells: Vec<Option<(f32, f32)>>,
}
impl Heightfield {
    /// Row-major index of cell (x, z) into `cells`.
    pub fn cell_index(&self, x: usize, z: usize) -> usize {
        x + z * self.width
    }
}
/// Map of walkable cells. True if the cell is walkable.
///
/// Grid layout and origin match the `Heightfield` it was derived from;
/// indexing is row-major (`z * width + x`, see `cell_index`).
pub struct WalkableMap {
    pub width: usize,
    pub depth: usize,
    pub min_x: f32,
    pub min_z: f32,
    pub cell_size: f32,
    pub walkable: Vec<bool>,
    /// Height at each walkable cell (top of walkable surface).
    pub heights: Vec<f32>,
}
impl WalkableMap {
    /// Row-major index of cell (x, z) into `walkable` / `heights`.
    pub fn cell_index(&self, x: usize, z: usize) -> usize {
        x + z * self.width
    }
}
/// A region of connected walkable cells (flood-fill result).
///
/// Grid layout matches the source `WalkableMap`; `heights` is copied from it.
pub struct RegionMap {
    pub width: usize,
    pub depth: usize,
    pub min_x: f32,
    pub min_z: f32,
    pub cell_size: f32,
    /// Region ID per cell. 0 = not walkable, 1+ = region ID.
    pub regions: Vec<u32>,
    pub heights: Vec<f32>,
    pub num_regions: u32,
}
impl NavMeshBuilder {
    /// Create a builder with the default settings (see `Default`).
    pub fn new() -> Self {
        Self::default()
    }
    /// Step 1: Rasterize triangles into a heightfield grid.
    ///
    /// Grid dimensions come from the XZ bounding box of `vertices` divided
    /// by `cell_size`. Each cell covered by a triangle accumulates the
    /// (min_y, max_y) span of surface heights sampled at the cell center.
    pub fn voxelize(&self, vertices: &[Vec3], indices: &[u32]) -> Heightfield {
        // Find bounding box
        let mut min_x = f32::INFINITY;
        let mut max_x = f32::NEG_INFINITY;
        let mut min_y = f32::INFINITY;
        let mut max_y = f32::NEG_INFINITY;
        let mut min_z = f32::INFINITY;
        let mut max_z = f32::NEG_INFINITY;
        for v in vertices {
            min_x = min_x.min(v.x);
            max_x = max_x.max(v.x);
            min_y = min_y.min(v.y);
            max_y = max_y.max(v.y);
            min_z = min_z.min(v.z);
            max_z = max_z.max(v.z);
        }
        // +1 so geometry landing exactly on the max edge still has a cell.
        let width = ((max_x - min_x) / self.cell_size).ceil() as usize + 1;
        let depth = ((max_z - min_z) / self.cell_size).ceil() as usize + 1;
        let mut cells: Vec<Option<(f32, f32)>> = vec![None; width * depth];
        // Rasterize each triangle
        for tri_i in (0..indices.len()).step_by(3) {
            // Guard against a trailing partial triangle in the index buffer.
            if tri_i + 2 >= indices.len() {
                break;
            }
            let v0 = vertices[indices[tri_i] as usize];
            let v1 = vertices[indices[tri_i + 1] as usize];
            let v2 = vertices[indices[tri_i + 2] as usize];
            // Find bounding box of triangle in grid coords
            let tri_min_x = v0.x.min(v1.x).min(v2.x);
            let tri_max_x = v0.x.max(v1.x).max(v2.x);
            let tri_min_z = v0.z.min(v1.z).min(v2.z);
            let tri_max_z = v0.z.max(v1.z).max(v2.z);
            let gx0 = ((tri_min_x - min_x) / self.cell_size).floor() as isize;
            let gx1 = ((tri_max_x - min_x) / self.cell_size).ceil() as isize;
            let gz0 = ((tri_min_z - min_z) / self.cell_size).floor() as isize;
            let gz1 = ((tri_max_z - min_z) / self.cell_size).ceil() as isize;
            // Clamp the triangle's cell range to the grid.
            let gx0 = gx0.max(0) as usize;
            let gx1 = (gx1 as usize).min(width - 1);
            let gz0 = gz0.max(0) as usize;
            let gz1 = (gz1 as usize).min(depth - 1);
            for gz in gz0..=gz1 {
                for gx in gx0..=gx1 {
                    let cx = min_x + (gx as f32 + 0.5) * self.cell_size;
                    let cz = min_z + (gz as f32 + 0.5) * self.cell_size;
                    // Check if cell center is inside triangle (XZ).
                    // The loose test (half-cell margin) makes coverage
                    // conservative at triangle borders.
                    let p = Vec3::new(cx, 0.0, cz);
                    if point_in_triangle_xz_loose(p, v0, v1, v2, self.cell_size * 0.5) {
                        // Interpolate Y at this XZ point
                        let y = interpolate_y(v0, v1, v2, cx, cz);
                        let idx = gz * width + gx;
                        // Widen the existing span, or start a new one.
                        match &mut cells[idx] {
                            Some((ref mut lo, ref mut hi)) => {
                                *lo = lo.min(y);
                                *hi = hi.max(y);
                            }
                            None => {
                                cells[idx] = Some((y, y));
                            }
                        }
                    }
                }
            }
        }
        Heightfield {
            width,
            depth,
            min_x,
            min_z,
            cell_size: self.cell_size,
            cell_height: self.cell_height,
            cells,
        }
    }
    /// Step 2: Mark walkable cells based on slope and agent height clearance.
    ///
    /// A cell is walkable when it has geometry and passes the slope test;
    /// walkable cells near a non-walkable border are then eroded away by
    /// `agent_radius` so the agent's body fits.
    pub fn mark_walkable(&self, hf: &Heightfield) -> WalkableMap {
        let max_slope_cos = (self.max_slope * std::f32::consts::PI / 180.0).cos();
        let n = hf.width * hf.depth;
        let mut walkable = vec![false; n];
        let mut heights = vec![0.0f32; n];
        for z in 0..hf.depth {
            for x in 0..hf.width {
                let idx = z * hf.width + x;
                if let Some((_lo, hi)) = hf.cells[idx] {
                    // Check slope by comparing height differences with neighbors
                    let slope_ok = self.check_slope(hf, x, z, max_slope_cos);
                    // Check clearance: for simplicity, if cell has geometry, assume clearance
                    // unless there's a cell above within agent_height (not implemented for simple case)
                    if slope_ok {
                        walkable[idx] = true;
                        heights[idx] = hi;
                    }
                }
            }
        }
        // Erode by agent radius: remove walkable cells too close to non-walkable.
        // NOTE: the neighborhood scanned is a square (Chebyshev distance), so
        // corners erode slightly more than a true disc of agent_radius would.
        let erosion_cells = (self.agent_radius / hf.cell_size).ceil() as usize;
        if erosion_cells > 0 {
            let mut eroded = walkable.clone();
            for z in 0..hf.depth {
                for x in 0..hf.width {
                    let idx = z * hf.width + x;
                    if !walkable[idx] {
                        continue;
                    }
                    // Check if near boundary of walkable area
                    let mut near_edge = false;
                    for dz in 0..=erosion_cells {
                        for dx in 0..=erosion_cells {
                            if dx == 0 && dz == 0 {
                                continue;
                            }
                            // Check all 4 quadrants
                            let checks: [(isize, isize); 4] = [
                                (dx as isize, dz as isize),
                                (-(dx as isize), dz as isize),
                                (dx as isize, -(dz as isize)),
                                (-(dx as isize), -(dz as isize)),
                            ];
                            for (ddx, ddz) in checks {
                                let nx = x as isize + ddx;
                                let nz = z as isize + ddz;
                                // Cells outside the grid count as non-walkable.
                                if nx < 0 || nz < 0 || nx >= hf.width as isize || nz >= hf.depth as isize {
                                    near_edge = true;
                                    break;
                                }
                                let ni = nz as usize * hf.width + nx as usize;
                                if !walkable[ni] {
                                    near_edge = true;
                                    break;
                                }
                            }
                            if near_edge {
                                break;
                            }
                        }
                        if near_edge {
                            break;
                        }
                    }
                    if near_edge {
                        eroded[idx] = false;
                    }
                }
            }
            return WalkableMap {
                width: hf.width,
                depth: hf.depth,
                min_x: hf.min_x,
                min_z: hf.min_z,
                cell_size: hf.cell_size,
                walkable: eroded,
                heights,
            };
        }
        WalkableMap {
            width: hf.width,
            depth: hf.depth,
            min_x: hf.min_x,
            min_z: hf.min_z,
            cell_size: hf.cell_size,
            walkable,
            heights,
        }
    }
    /// Check if the slope at cell (x, z) is walkable.
    ///
    /// Estimates the slope toward each of the four direct neighbors from the
    /// height difference over one cell; rejects the cell if any neighbor's
    /// slope is steeper than `max_slope` (compared via cosine).
    fn check_slope(&self, hf: &Heightfield, x: usize, z: usize, max_slope_cos: f32) -> bool {
        let idx = z * hf.width + x;
        let h = match hf.cells[idx] {
            Some((_, hi)) => hi,
            None => return false,
        };
        // Compare with direct neighbors to estimate slope
        let neighbors: [(isize, isize); 4] = [(1, 0), (-1, 0), (0, 1), (0, -1)];
        for (dx, dz) in neighbors {
            let nx = x as isize + dx;
            let nz = z as isize + dz;
            if nx < 0 || nz < 0 || nx >= hf.width as isize || nz >= hf.depth as isize {
                continue;
            }
            let ni = nz as usize * hf.width + nx as usize;
            if let Some((_, nh)) = hf.cells[ni] {
                let dy = (nh - h).abs();
                let dist = hf.cell_size;
                // slope angle: atan(dy/dist), check cos of that angle
                let slope_len = (dy * dy + dist * dist).sqrt();
                let cos_angle = dist / slope_len;
                if cos_angle < max_slope_cos {
                    return false;
                }
            }
        }
        true
    }
    /// Step 3: Flood-fill connected walkable areas into regions.
    ///
    /// 4-connected flood fill; region IDs start at 1 (0 means not walkable).
    pub fn build_regions(&self, wm: &WalkableMap) -> RegionMap {
        let n = wm.width * wm.depth;
        let mut regions = vec![0u32; n];
        let mut current_region = 0u32;
        for z in 0..wm.depth {
            for x in 0..wm.width {
                let idx = z * wm.width + x;
                if wm.walkable[idx] && regions[idx] == 0 {
                    current_region += 1;
                    // Flood fill (iterative, explicit stack — no recursion depth limit)
                    let mut stack = vec![(x, z)];
                    regions[idx] = current_region;
                    while let Some((cx, cz)) = stack.pop() {
                        let neighbors: [(isize, isize); 4] = [(1, 0), (-1, 0), (0, 1), (0, -1)];
                        for (dx, dz) in neighbors {
                            let nx = cx as isize + dx;
                            let nz = cz as isize + dz;
                            if nx < 0 || nz < 0 || nx >= wm.width as isize || nz >= wm.depth as isize {
                                continue;
                            }
                            let ni = nz as usize * wm.width + nx as usize;
                            if wm.walkable[ni] && regions[ni] == 0 {
                                regions[ni] = current_region;
                                stack.push((nx as usize, nz as usize));
                            }
                        }
                    }
                }
            }
        }
        RegionMap {
            width: wm.width,
            depth: wm.depth,
            min_x: wm.min_x,
            min_z: wm.min_z,
            cell_size: wm.cell_size,
            regions,
            heights: wm.heights.clone(),
            num_regions: current_region,
        }
    }
    /// Steps 4-5 combined: Convert walkable grid cells directly into a NavMesh.
    /// Each walkable cell becomes a quad (2 triangles), with adjacency computed.
    ///
    /// Vertices are NOT shared between cells; adjacency is tracked purely via
    /// the triangle `neighbors` arrays.
    pub fn triangulate(&self, rm: &RegionMap) -> NavMesh {
        let mut vertices = Vec::new();
        let mut triangles = Vec::new();
        // For each walkable cell, create 4 vertices and 2 triangles.
        // Map from cell (x,z) -> (tri_a_idx, tri_b_idx) for adjacency lookup.
        let n = rm.width * rm.depth;
        // cell_tri_map[cell_idx] = Some((tri_a_idx, tri_b_idx)) or None
        let mut cell_tri_map: Vec<Option<(usize, usize)>> = vec![None; n];
        for z in 0..rm.depth {
            for x in 0..rm.width {
                let idx = z * rm.width + x;
                if rm.regions[idx] == 0 {
                    continue;
                }
                // Flat quad at the cell's walkable height.
                let h = rm.heights[idx];
                let x0 = rm.min_x + x as f32 * rm.cell_size;
                let x1 = x0 + rm.cell_size;
                let z0 = rm.min_z + z as f32 * rm.cell_size;
                let z1 = z0 + rm.cell_size;
                let vi = vertices.len();
                vertices.push(Vec3::new(x0, h, z0)); // vi+0: bottom-left
                vertices.push(Vec3::new(x1, h, z0)); // vi+1: bottom-right
                vertices.push(Vec3::new(x1, h, z1)); // vi+2: top-right
                vertices.push(Vec3::new(x0, h, z1)); // vi+3: top-left
                let ta = triangles.len();
                // Triangle A: bottom-left, bottom-right, top-right (vi+0, vi+1, vi+2)
                triangles.push(NavTriangle {
                    indices: [vi, vi + 1, vi + 2],
                    neighbors: [None, None, None], // filled in later
                });
                // Triangle B: bottom-left, top-right, top-left (vi+0, vi+2, vi+3)
                triangles.push(NavTriangle {
                    indices: [vi, vi + 2, vi + 3],
                    neighbors: [None, None, None],
                });
                // Internal adjacency: A and B share edge (vi+0, vi+2)
                // For A: edge 2 is (vi+2 -> vi+0) — indices[2] to indices[0]
                // For B: edge 0 is (vi+0 -> vi+2) — indices[0] to indices[1]
                triangles[ta].neighbors[2] = Some(ta + 1); // A's edge2 -> B
                triangles[ta + 1].neighbors[0] = Some(ta); // B's edge0 -> A
                cell_tri_map[idx] = Some((ta, ta + 1));
            }
        }
        // Now compute inter-cell adjacency
        // Cell layout:
        // Tri A: (v0, v1, v2) = (BL, BR, TR)
        //   edge 0: BL->BR (bottom edge, connects to cell below z-1)
        //   edge 1: BR->TR (right edge, connects to cell at x+1)
        //   edge 2: TR->BL (diagonal, internal, already connected to B)
        // Tri B: (v0, v2, v3) = (BL, TR, TL)
        //   edge 0: BL->TR (diagonal, internal, already connected to A)
        //   edge 1: TR->TL (top edge, connects to cell above z+1)
        //   edge 2: TL->BL (left edge, connects to cell at x-1)
        // Only bottom/right links are made here; each link is written from
        // both sides, so top/left are covered by the neighbor's iteration.
        for z in 0..rm.depth {
            for x in 0..rm.width {
                let idx = z * rm.width + x;
                if cell_tri_map[idx].is_none() {
                    continue;
                }
                let (ta, tb) = cell_tri_map[idx].unwrap();
                // Bottom neighbor (z-1): A's edge 0 connects to neighbor's B edge 1 (TR->TL = top)
                if z > 0 {
                    let ni = (z - 1) * rm.width + x;
                    if let Some((_, nb_tb)) = cell_tri_map[ni] {
                        triangles[ta].neighbors[0] = Some(nb_tb);
                        triangles[nb_tb].neighbors[1] = Some(ta);
                    }
                }
                // Right neighbor (x+1): A's edge 1 connects to neighbor's B edge 2 (TL->BL = left)
                if x + 1 < rm.width {
                    let ni = z * rm.width + (x + 1);
                    if let Some((_, nb_tb)) = cell_tri_map[ni] {
                        triangles[ta].neighbors[1] = Some(nb_tb);
                        triangles[nb_tb].neighbors[2] = Some(ta);
                    }
                }
            }
        }
        NavMesh::new(vertices, triangles)
    }
    /// Full pipeline: voxelize, mark walkable, build regions, triangulate.
    pub fn build(&self, vertices: &[Vec3], indices: &[u32]) -> NavMesh {
        let hf = self.voxelize(vertices, indices);
        let wm = self.mark_walkable(&hf);
        let rm = self.build_regions(&wm);
        self.triangulate(&rm)
    }
}
/// Loose point-in-triangle test on XZ plane, with a tolerance margin.
///
/// The barycentric slack is normalized by the longest edge length so that
/// `margin` roughly corresponds to a world-space distance.
fn point_in_triangle_xz_loose(point: Vec3, a: Vec3, b: Vec3, c: Vec3, margin: f32) -> bool {
    let denom = (b.z - c.z) * (a.x - c.x) + (c.x - b.x) * (a.z - c.z);
    if denom.abs() < f32::EPSILON {
        // Degenerate: check if point is near the line segment
        return false;
    }
    let px = point.x;
    let pz = point.z;
    let u = ((b.z - c.z) * (px - c.x) + (c.x - b.x) * (pz - c.z)) / denom;
    let v = ((c.z - a.z) * (px - c.x) + (a.x - c.x) * (pz - c.z)) / denom;
    let w = 1.0 - u - v;
    // Longest edge (floored at 0.001 to avoid division by ~zero).
    let longest = (a - b).length().max((b - c).length()).max((c - a).length()).max(0.001);
    let slack = margin / longest;
    u >= -slack && v >= -slack && w >= -slack
}
/// Interpolate Y height at (px, pz) on the plane defined by triangle (a, b, c).
fn interpolate_y(a: Vec3, b: Vec3, c: Vec3, px: f32, pz: f32) -> f32 {
    let denom = (b.z - c.z) * (a.x - c.x) + (c.x - b.x) * (a.z - c.z);
    if denom.abs() < f32::EPSILON {
        // Degenerate triangle: fall back to the mean corner height.
        return (a.y + b.y + c.y) / 3.0;
    }
    // Barycentric weights from the XZ projection.
    let u = ((b.z - c.z) * (px - c.x) + (c.x - b.x) * (pz - c.z)) / denom;
    let v = ((c.z - a.z) * (px - c.x) + (a.x - c.x) * (pz - c.z)) / denom;
    let w = 1.0 - u - v;
    // Blend the corner heights with the barycentric weights.
    u * a.y + v * b.y + w * c.y
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Create a simple flat plane (10x10, y=0) as 2 triangles.
    fn flat_plane_geometry() -> (Vec<Vec3>, Vec<u32>) {
        let vertices = vec![
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(10.0, 0.0, 0.0),
            Vec3::new(10.0, 0.0, 10.0),
            Vec3::new(0.0, 0.0, 10.0),
        ];
        let indices = vec![0, 1, 2, 0, 2, 3];
        (vertices, indices)
    }
    /// Create a plane with a box obstacle (hole) in the middle.
    /// The plane is 10x10 with a 2x2 hole at center (4-6, 4-6).
    fn plane_with_obstacle() -> (Vec<Vec3>, Vec<u32>) {
        // Build geometry as a grid of quads, skipping the obstacle region.
        // Use 1.0 unit grid cells for simplicity.
        let mut vertices = Vec::new();
        let mut indices = Vec::new();
        // 10x10 grid of 1x1 quads, skip 4<=x<6 && 4<=z<6
        for z in 0..10 {
            for x in 0..10 {
                if x >= 4 && x < 6 && z >= 4 && z < 6 {
                    continue; // obstacle
                }
                let vi = vertices.len() as u32;
                let fx = x as f32;
                let fz = z as f32;
                // Quad corners, then two CCW triangles.
                vertices.push(Vec3::new(fx, 0.0, fz));
                vertices.push(Vec3::new(fx + 1.0, 0.0, fz));
                vertices.push(Vec3::new(fx + 1.0, 0.0, fz + 1.0));
                vertices.push(Vec3::new(fx, 0.0, fz + 1.0));
                indices.push(vi);
                indices.push(vi + 1);
                indices.push(vi + 2);
                indices.push(vi);
                indices.push(vi + 2);
                indices.push(vi + 3);
            }
        }
        (vertices, indices)
    }
    #[test]
    fn test_voxelize_flat_plane() {
        let builder = NavMeshBuilder {
            cell_size: 1.0,
            cell_height: 0.2,
            agent_height: 2.0,
            agent_radius: 0.0, // no erosion for simple test
            max_slope: 45.0,
        };
        let (verts, idxs) = flat_plane_geometry();
        let hf = builder.voxelize(&verts, &idxs);
        // Should have cells covering the 10x10 area
        assert!(hf.width > 0);
        assert!(hf.depth > 0);
        // Check that some cells are populated
        let populated = hf.cells.iter().filter(|c| c.is_some()).count();
        assert!(populated > 0, "some cells should be populated");
    }
    #[test]
    fn test_flat_plane_single_region() {
        let builder = NavMeshBuilder {
            cell_size: 1.0,
            cell_height: 0.2,
            agent_height: 2.0,
            agent_radius: 0.0,
            max_slope: 45.0,
        };
        let (verts, idxs) = flat_plane_geometry();
        let hf = builder.voxelize(&verts, &idxs);
        let wm = builder.mark_walkable(&hf);
        let rm = builder.build_regions(&wm);
        assert_eq!(rm.num_regions, 1, "flat plane should be a single region");
    }
    #[test]
    fn test_flat_plane_builds_navmesh() {
        let builder = NavMeshBuilder {
            cell_size: 1.0,
            cell_height: 0.2,
            agent_height: 2.0,
            agent_radius: 0.0,
            max_slope: 45.0,
        };
        let (verts, idxs) = flat_plane_geometry();
        let nm = builder.build(&verts, &idxs);
        assert!(!nm.vertices.is_empty(), "navmesh should have vertices");
        assert!(!nm.triangles.is_empty(), "navmesh should have triangles");
        // Should be able to find a triangle at center of the plane
        let center = Vec3::new(5.0, 0.0, 5.0);
        assert!(nm.find_triangle(center).is_some(), "should find triangle at center");
    }
    #[test]
    fn test_obstacle_path_around() {
        let builder = NavMeshBuilder {
            cell_size: 1.0,
            cell_height: 0.2,
            agent_height: 2.0,
            agent_radius: 0.0,
            max_slope: 45.0,
        };
        let (verts, idxs) = plane_with_obstacle();
        let nm = builder.build(&verts, &idxs);
        // Start at (1, 0, 5) and goal at (9, 0, 5) — must go around obstacle
        let start = Vec3::new(1.5, 0.0, 5.5);
        let goal = Vec3::new(8.5, 0.0, 5.5);
        use crate::pathfinding::find_path;
        let path = find_path(&nm, start, goal);
        assert!(path.is_some(), "should find path around obstacle");
    }
    #[test]
    fn test_slope_walkable_unwalkable() {
        // Create a steep ramp: triangle from (0,0,0) to (1,0,0) to (0.5, 5, 1)
        // Slope angle = atan(5/1) ≈ 78.7 degrees — should be unwalkable at max_slope=45
        let builder = NavMeshBuilder {
            cell_size: 0.2,
            cell_height: 0.2,
            agent_height: 2.0,
            agent_radius: 0.0,
            max_slope: 45.0,
        };
        let vertices = vec![
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(2.0, 0.0, 0.0),
            Vec3::new(1.0, 10.0, 2.0),
        ];
        let indices = vec![0, 1, 2];
        let hf = builder.voxelize(&vertices, &indices);
        let wm = builder.mark_walkable(&hf);
        // Most interior cells should be unwalkable due to steep slope
        let walkable_count = wm.walkable.iter().filter(|&&w| w).count();
        // The bottom edge cells might be walkable (flat), but interior should not
        // Just check that not all cells are walkable
        let total_cells = hf.cells.iter().filter(|c| c.is_some()).count();
        assert!(
            walkable_count < total_cells || total_cells <= 1,
            "steep slope should make most cells unwalkable (walkable={}, total={})",
            walkable_count, total_cells
        );
    }
    #[test]
    fn test_navmesh_adjacency() {
        let builder = NavMeshBuilder {
            cell_size: 1.0,
            cell_height: 0.2,
            agent_height: 2.0,
            agent_radius: 0.0,
            max_slope: 45.0,
        };
        let (verts, idxs) = flat_plane_geometry();
        let nm = builder.build(&verts, &idxs);
        // Check that some triangles have neighbors
        let has_neighbors = nm.triangles.iter().any(|t| t.neighbors.iter().any(|n| n.is_some()));
        assert!(has_neighbors, "navmesh triangles should have adjacency");
    }
}

View File

@@ -0,0 +1,176 @@
use voltex_math::Vec3;
/// A dynamic obstacle represented as a position and radius.
#[derive(Debug, Clone)]
pub struct DynamicObstacle {
    // World-space center of the obstacle.
    pub position: Vec3,
    // Collision radius; combined with the agent's radius during avoidance.
    pub radius: f32,
}
/// Compute avoidance steering force using velocity obstacle approach.
///
/// Projects the agent's velocity forward by `look_ahead` distance and checks
/// for circle intersections with obstacles. Returns a steering force
/// perpendicular to the approach direction to avoid the nearest threatening
/// obstacle, or zero when nothing threatens the agent.
pub fn avoid_obstacles(
    agent_pos: Vec3,
    agent_vel: Vec3,
    agent_radius: f32,
    obstacles: &[DynamicObstacle],
    look_ahead: f32,
) -> Vec3 {
    let speed = agent_vel.length();
    if speed < f32::EPSILON {
        // Stationary agent: no velocity ray to test against.
        return Vec3::ZERO;
    }
    let heading = agent_vel * (1.0 / speed);
    let mut best_proj = f32::INFINITY;
    let mut steering = Vec3::ZERO;
    for obstacle in obstacles {
        let offset = obstacle.position - agent_pos;
        let reach = agent_radius + obstacle.radius;
        // Distance along the velocity ray to the obstacle's closest approach.
        let along = offset.dot(heading);
        if along < 0.0 || along > look_ahead {
            // Behind the agent or beyond the look-ahead horizon.
            continue;
        }
        // Perpendicular offset from the ray to the obstacle center
        // (XZ only — these are ground agents).
        let on_ray = agent_pos + heading * along;
        let side = obstacle.position - on_ray;
        let side_sq = side.x * side.x + side.z * side.z;
        if side_sq >= reach * reach {
            // The ray clears the combined radius — no collision.
            continue;
        }
        // Keep only the nearest threat along the ray.
        if along < best_proj {
            best_proj = along;
            let lateral = Vec3::new(side.x, 0.0, side.z);
            let lat_len = lateral.length();
            if lat_len > f32::EPSILON {
                // Steer directly away from the obstacle; closer threats
                // (smaller `along`) produce a stronger push.
                let steer_dir = lateral * (-1.0 / lat_len);
                let strength = 1.0 - (along / look_ahead);
                steering = steer_dir * strength * speed;
            } else {
                // Dead-center approach: pick an arbitrary XZ perpendicular.
                let perp = Vec3::new(-heading.z, 0.0, heading.x);
                steering = perp * speed;
            }
        }
    }
    steering
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_no_obstacle_zero_force() {
        let force = avoid_obstacles(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            0.5,
            &[],
            5.0,
        );
        assert!(force.length() < 1e-6, "no obstacles should give zero force");
    }
    #[test]
    fn test_obstacle_behind_zero_force() {
        let obs = DynamicObstacle {
            position: Vec3::new(-3.0, 0.0, 0.0),
            radius: 1.0,
        };
        let force = avoid_obstacles(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            0.5,
            &[obs],
            5.0,
        );
        assert!(force.length() < 1e-6, "obstacle behind should give zero force");
    }
    #[test]
    fn test_obstacle_ahead_lateral_force() {
        let obs = DynamicObstacle {
            position: Vec3::new(3.0, 0.0, 0.5), // slightly to the right
            radius: 1.0,
        };
        let force = avoid_obstacles(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(2.0, 0.0, 0.0), // moving in +X
            0.5,
            &[obs],
            5.0,
        );
        assert!(force.length() > 0.1, "obstacle ahead should give non-zero force");
        // Force should push away from obstacle (obstacle is at +Z, force should be -Z)
        assert!(force.z < 0.0, "force should push away from obstacle (negative Z)");
    }
    #[test]
    fn test_obstacle_far_away_zero_force() {
        let obs = DynamicObstacle {
            position: Vec3::new(3.0, 0.0, 10.0), // far to the side
            radius: 1.0,
        };
        let force = avoid_obstacles(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            0.5,
            &[obs],
            5.0,
        );
        assert!(force.length() < 1e-6, "distant obstacle should give zero force");
    }
    #[test]
    fn test_obstacle_beyond_lookahead_zero_force() {
        let obs = DynamicObstacle {
            position: Vec3::new(10.0, 0.0, 0.0),
            radius: 1.0,
        };
        let force = avoid_obstacles(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            0.5,
            &[obs],
            5.0, // look_ahead is only 5
        );
        assert!(force.length() < 1e-6, "obstacle beyond lookahead should give zero force");
    }
    #[test]
    fn test_zero_velocity_zero_force() {
        let obs = DynamicObstacle {
            position: Vec3::new(3.0, 0.0, 0.0),
            radius: 1.0,
        };
        let force = avoid_obstacles(
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::ZERO,
            0.5,
            &[obs],
            5.0,
        );
        assert!(force.length() < 1e-6, "zero velocity should give zero force");
    }
}

View File

@@ -0,0 +1,436 @@
use std::collections::BinaryHeap;
use std::cmp::Ordering;
use voltex_math::Vec3;
use crate::navmesh::NavMesh;
/// Node for A* priority queue (min-heap by f_cost, then g_cost).
#[derive(Debug, Clone)]
struct AStarNode {
    // Index of the triangle this node represents.
    tri_idx: usize,
    // Cost accumulated along the path from the start triangle.
    g_cost: f32,
    // g_cost plus the heuristic distance to the goal.
    f_cost: f32,
    // Triangle we arrived from; None for the start node.
    parent: Option<usize>,
}
impl PartialEq for AStarNode {
    /// Two nodes are equal when both cost components match; `tri_idx` and
    /// `parent` are intentionally ignored, matching the heap ordering.
    fn eq(&self, other: &Self) -> bool {
        (self.f_cost, self.g_cost) == (other.f_cost, other.g_cost)
    }
}
// Marker impl required by `Ord`; equality semantics come from PartialEq above.
impl Eq for AStarNode {}
// BinaryHeap is a max-heap; we negate costs to get min-heap behavior.
impl PartialOrd for AStarNode {
    // Delegates to the total order defined by `Ord` below.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for AStarNode {
    /// Reversed ordering so the heap pops the lowest f_cost first, breaking
    /// ties by lowest g_cost. NaN costs compare as Equal rather than panicking.
    fn cmp(&self, other: &Self) -> Ordering {
        let by_f = other.f_cost.partial_cmp(&self.f_cost).unwrap_or(Ordering::Equal);
        match by_f {
            Ordering::Equal => other.g_cost.partial_cmp(&self.g_cost).unwrap_or(Ordering::Equal),
            ord => ord,
        }
    }
}
/// XZ-plane distance between two points, ignoring the Y component.
/// Used as the A* edge cost and admissible heuristic.
pub fn distance_xz(a: Vec3, b: Vec3) -> f32 {
    let dx = a.x - b.x;
    let dz = a.z - b.z;
    f32::sqrt(dx * dx + dz * dz)
}
/// Run A* on the NavMesh and return the sequence of triangle indices from start to goal.
/// Returns None if either point is outside the mesh or no path exists.
pub fn find_path_triangles(navmesh: &NavMesh, start: Vec3, goal: Vec3) -> Option<Vec<usize>> {
// Locate the triangles containing the endpoints; `?` bails with None if
// either point is off-mesh.
let start_tri = navmesh.find_triangle(start)?;
let goal_tri = navmesh.find_triangle(goal)?;
if start_tri == goal_tri {
return Some(vec![start_tri]);
}
let n = navmesh.triangles.len();
// Best-known g-cost per triangle; INFINITY means "not yet reached".
let mut g_costs = vec![f32::INFINITY; n];
// Back-pointers for path reconstruction, finalized when a node is popped.
let mut parents: Vec<Option<usize>> = vec![None; n];
// Closed set: a triangle is settled the first time it is popped.
let mut visited = vec![false; n];
let goal_center = navmesh.triangle_center(goal_tri);
g_costs[start_tri] = 0.0;
let start_center = navmesh.triangle_center(start_tri);
let h = distance_xz(start_center, goal_center);
let mut open = BinaryHeap::new();
open.push(AStarNode {
tri_idx: start_tri,
g_cost: 0.0,
f_cost: h,
parent: None,
});
while let Some(node) = open.pop() {
let idx = node.tri_idx;
// Stale heap entries (superseded by a cheaper push) are skipped here
// instead of being removed from the heap.
if visited[idx] {
continue;
}
visited[idx] = true;
// The parent recorded with the popped (cheapest) entry is final.
parents[idx] = node.parent;
if idx == goal_tri {
// Walk the parent chain back to the start, then reverse it.
let mut tri_path = Vec::new();
let mut cur = idx;
loop {
tri_path.push(cur);
match parents[cur] {
Some(p) => cur = p,
None => break,
}
}
tri_path.reverse();
return Some(tri_path);
}
let tri = &navmesh.triangles[idx];
let current_center = navmesh.triangle_center(idx);
// Relax each of the (up to three) adjacent triangles.
for neighbor_opt in &tri.neighbors {
if let Some(nb_idx) = *neighbor_opt {
if visited[nb_idx] {
continue;
}
// Edge cost: XZ distance between triangle centers.
let nb_center = navmesh.triangle_center(nb_idx);
let tentative_g = g_costs[idx] + distance_xz(current_center, nb_center);
if tentative_g < g_costs[nb_idx] {
g_costs[nb_idx] = tentative_g;
let h_nb = distance_xz(nb_center, goal_center);
open.push(AStarNode {
tri_idx: nb_idx,
g_cost: tentative_g,
f_cost: tentative_g + h_nb,
parent: Some(idx),
});
}
}
}
}
// Open set exhausted without reaching the goal: disconnected regions.
None
}
/// Find a path from `start` to `goal` on the given NavMesh using A*.
///
/// Returns Some(path) where path[0] == start, path[last] == goal, and
/// intermediate points are triangle centers. Returns None if either
/// point is outside the mesh or no path exists.
pub fn find_path(navmesh: &NavMesh, start: Vec3, goal: Vec3) -> Option<Vec<Vec3>> {
    let tri_path = find_path_triangles(navmesh, start, goal)?;
    // Both endpoints share one triangle: the straight segment suffices.
    if tri_path.len() == 1 {
        return Some(vec![start, goal]);
    }
    // start, then the centers of the interior corridor triangles, then goal.
    let mut waypoints = vec![start];
    waypoints.extend(
        tri_path[1..tri_path.len() - 1]
            .iter()
            .map(|&ti| navmesh.triangle_center(ti)),
    );
    waypoints.push(goal);
    Some(waypoints)
}
/// 2D cross product on the XZ plane: the Y component of (b - a) x (c - a).
/// Positive/negative sign tells which side of segment a→b the point c lies on.
fn cross_xz(a: Vec3, b: Vec3, c: Vec3) -> f32 {
    let ab = (b.x - a.x, b.z - a.z);
    let ac = (c.x - a.x, c.z - a.z);
    ab.0 * ac.1 - ab.1 * ac.0
}
/// Simple Stupid Funnel (SSF) algorithm for string-pulling a triangle corridor path.
///
/// Given the sequence of triangle indices from A*, produces an optimal path with
/// waypoints at portal edge corners where the path turns.
///
/// Fix over the previous revision: after the apex moves to a portal corner the
/// scan restarts at the portal just past the new apex (`apex_idx + 1`), as the
/// standard SSF formulation requires. The old code continued from the current
/// portal `i`, skipping every portal between the apex and `i`, which could drop
/// waypoints on corridors with more than one turn.
pub fn funnel_smooth(path_triangles: &[usize], navmesh: &NavMesh, start: Vec3, end: Vec3) -> Vec<Vec3> {
    // Trivial corridors (empty or a single triangle) need no string-pulling.
    if path_triangles.len() <= 1 {
        return vec![start, end];
    }
    // Build portal list: shared edges between consecutive triangles.
    let mut portals_left = Vec::new();
    let mut portals_right = Vec::new();
    for pair in path_triangles.windows(2) {
        if let Some((left, right)) = navmesh.shared_edge(pair[0], pair[1]) {
            portals_left.push(left);
            portals_right.push(right);
        }
    }
    // Add end point as the final portal (degenerate portal: both sides = end).
    portals_left.push(end);
    portals_right.push(end);
    let mut path = vec![start];
    let mut apex = start;
    let mut left = start;
    let mut right = start;
    let mut apex_idx: usize = 0;
    let mut left_idx: usize = 0;
    let mut right_idx: usize = 0;
    let n = portals_left.len();
    // Manual index loop so the scan can be rewound after an apex move.
    let mut i = 0;
    while i < n {
        let pl = portals_left[i];
        let pr = portals_right[i];
        // Update right vertex
        if cross_xz(apex, right, pr) <= 0.0 {
            if apex == right || cross_xz(apex, left, pr) > 0.0 {
                // Tighten the funnel
                right = pr;
                right_idx = i;
            } else {
                // Right over left: left becomes new apex
                if left != apex {
                    path.push(left);
                }
                apex = left;
                apex_idx = left_idx;
                left = apex;
                right = apex;
                left_idx = apex_idx;
                right_idx = apex_idx;
                // Restart scan from the portal just past the new apex.
                i = apex_idx + 1;
                continue;
            }
        }
        // Update left vertex
        if cross_xz(apex, left, pl) >= 0.0 {
            if apex == left || cross_xz(apex, right, pl) < 0.0 {
                // Tighten the funnel
                left = pl;
                left_idx = i;
            } else {
                // Left over right: right becomes new apex
                if right != apex {
                    path.push(right);
                }
                apex = right;
                apex_idx = right_idx;
                left = apex;
                right = apex;
                left_idx = apex_idx;
                right_idx = apex_idx;
                // Restart scan from the portal just past the new apex.
                i = apex_idx + 1;
                continue;
            }
        }
        i += 1;
    }
    // Add end point if not already there (compared on XZ only).
    match path.last() {
        Some(&last)
            if (last.x - end.x).abs() <= 1e-6 && (last.z - end.z).abs() <= 1e-6 => {}
        _ => path.push(end),
    }
    path
}
#[cfg(test)]
mod tests {
use super::*;
use crate::navmesh::{NavMesh, NavTriangle};
/// Build a 3-triangle strip along Z axis:
/// v0=(0,0,0), v1=(2,0,0), v2=(0,0,2), v3=(2,0,2), v4=(0,0,4), v5=(2,0,4)
/// Tri0: v0,v1,v2 — Tri1: v1,v3,v2 — Tri2: v2,v3,v4... wait, need a strip
///
/// Simpler strip:
/// Tri0: (0,0,0),(2,0,0),(1,0,2) center ~(1,0,0.67)
/// Tri1: (2,0,0),(3,0,2),(1,0,2) center ~(2,0,1.33)
/// Tri2: (1,0,2),(3,0,2),(2,0,4) center ~(2,0,2.67)
fn make_strip() -> NavMesh {
let vertices = vec![
Vec3::new(0.0, 0.0, 0.0), // 0
Vec3::new(2.0, 0.0, 0.0), // 1
Vec3::new(1.0, 0.0, 2.0), // 2
Vec3::new(3.0, 0.0, 2.0), // 3
Vec3::new(2.0, 0.0, 4.0), // 4
];
// Neighbor arrays link the strip: tri0 <-> tri1 <-> tri2.
let triangles = vec![
NavTriangle { indices: [0, 1, 2], neighbors: [None, Some(1), None] },
NavTriangle { indices: [1, 3, 2], neighbors: [None, Some(2), Some(0)] },
NavTriangle { indices: [2, 3, 4], neighbors: [Some(1), None, None] },
];
NavMesh::new(vertices, triangles)
}
fn make_disconnected() -> NavMesh {
// Two separate triangles not connected
let vertices = vec![
Vec3::new(0.0, 0.0, 0.0),
Vec3::new(2.0, 0.0, 0.0),
Vec3::new(1.0, 0.0, 2.0),
Vec3::new(10.0, 0.0, 10.0),
Vec3::new(12.0, 0.0, 10.0),
Vec3::new(11.0, 0.0, 12.0),
];
let triangles = vec![
NavTriangle { indices: [0, 1, 2], neighbors: [None, None, None] },
NavTriangle { indices: [3, 4, 5], neighbors: [None, None, None] },
];
NavMesh::new(vertices, triangles)
}
#[test]
fn test_path_same_triangle() {
// Both endpoints inside tri0: expect the direct 2-point path.
let nm = make_strip();
let start = Vec3::new(0.5, 0.0, 0.5);
let goal = Vec3::new(1.5, 0.0, 0.5);
let path = find_path(&nm, start, goal).expect("should find path");
assert_eq!(path.len(), 2);
assert_eq!(path[0], start);
assert_eq!(path[path.len() - 1], goal);
}
#[test]
fn test_path_adjacent_triangles() {
let nm = make_strip();
// start in tri0, goal in tri1
let start = Vec3::new(0.8, 0.0, 0.5);
let goal = Vec3::new(2.5, 0.0, 1.5);
let path = find_path(&nm, start, goal).expect("should find path");
assert!(path.len() >= 2);
assert_eq!(path[0], start);
assert_eq!(path[path.len() - 1], goal);
}
#[test]
fn test_path_three_triangle_strip() {
let nm = make_strip();
// start in tri0, goal in tri2
let start = Vec3::new(0.8, 0.0, 0.5);
let goal = Vec3::new(2.0, 0.0, 3.5);
let path = find_path(&nm, start, goal).expect("should find path");
assert!(path.len() >= 3, "path through 3 triangles should have at least 3 points");
assert_eq!(path[0], start);
assert_eq!(path[path.len() - 1], goal);
}
#[test]
fn test_path_outside_returns_none() {
let nm = make_strip();
// start is outside the mesh
let start = Vec3::new(100.0, 0.0, 100.0);
let goal = Vec3::new(0.8, 0.0, 0.5);
let result = find_path(&nm, start, goal);
assert!(result.is_none());
}
#[test]
fn test_path_disconnected_returns_none() {
// No shared edge between the two islands: A* must exhaust and fail.
let nm = make_disconnected();
let start = Vec3::new(0.8, 0.0, 0.5);
let goal = Vec3::new(11.0, 0.0, 10.5);
let result = find_path(&nm, start, goal);
assert!(result.is_none());
}
#[test]
fn test_find_path_triangles_same() {
let nm = make_strip();
let start = Vec3::new(0.5, 0.0, 0.5);
let goal = Vec3::new(1.5, 0.0, 0.5);
let tris = find_path_triangles(&nm, start, goal).expect("should find path");
assert_eq!(tris.len(), 1);
}
#[test]
fn test_find_path_triangles_strip() {
let nm = make_strip();
let start = Vec3::new(0.8, 0.0, 0.5);
let goal = Vec3::new(2.0, 0.0, 3.5);
let tris = find_path_triangles(&nm, start, goal).expect("should find path");
assert_eq!(tris.len(), 3);
assert_eq!(tris[0], 0);
assert_eq!(tris[2], 2);
}
#[test]
fn test_funnel_straight_path() {
// Straight corridor: path should be just start and end (2 points)
let nm = make_strip();
let start = Vec3::new(1.0, 0.0, 0.5);
let end = Vec3::new(2.0, 0.0, 3.5);
let tris = find_path_triangles(&nm, start, end).expect("should find path");
let smoothed = funnel_smooth(&tris, &nm, start, end);
assert!(smoothed.len() >= 2, "funnel path should have at least 2 points, got {}", smoothed.len());
assert_eq!(smoothed[0], start);
assert_eq!(smoothed[smoothed.len() - 1], end);
}
#[test]
fn test_funnel_same_triangle() {
let nm = make_strip();
let start = Vec3::new(0.5, 0.0, 0.5);
let end = Vec3::new(1.5, 0.0, 0.5);
let smoothed = funnel_smooth(&[0], &nm, start, end);
assert_eq!(smoothed.len(), 2);
assert_eq!(smoothed[0], start);
assert_eq!(smoothed[1], end);
}
#[test]
fn test_funnel_l_shaped_path() {
// Build an L-shaped navmesh to force a turn
// Tri0: (0,0,0),(2,0,0),(0,0,2) - bottom left
// Tri1: (2,0,0),(2,0,2),(0,0,2) - top right of first square
// Tri2: (2,0,0),(4,0,0),(2,0,2) - extends right
// Tri3: (2,0,2),(4,0,0),(4,0,2) - top right
// Tri4: (2,0,2),(4,0,2),(2,0,4) - goes up
// This makes an L shape going right then up
let vertices = vec![
Vec3::new(0.0, 0.0, 0.0), // 0
Vec3::new(2.0, 0.0, 0.0), // 1
Vec3::new(0.0, 0.0, 2.0), // 2
Vec3::new(2.0, 0.0, 2.0), // 3
Vec3::new(4.0, 0.0, 0.0), // 4
Vec3::new(4.0, 0.0, 2.0), // 5
Vec3::new(2.0, 0.0, 4.0), // 6
];
let triangles = vec![
NavTriangle { indices: [0, 1, 2], neighbors: [Some(1), None, None] }, // 0
NavTriangle { indices: [1, 3, 2], neighbors: [Some(2), None, Some(0)] }, // 1
NavTriangle { indices: [1, 4, 3], neighbors: [None, Some(3), Some(1)] }, // 2
NavTriangle { indices: [4, 5, 3], neighbors: [None, None, Some(2)] }, // 3 -- not used in L path
NavTriangle { indices: [3, 5, 6], neighbors: [None, None, None] }, // 4
];
let nm = NavMesh::new(vertices, triangles);
let start = Vec3::new(0.3, 0.0, 0.3);
let end = Vec3::new(3.5, 0.0, 0.5);
// A* may or may not reach the goal depending on triangle lookup; only
// assert funnel invariants when a corridor exists.
let tris = find_path_triangles(&nm, start, end);
if let Some(tris) = tris {
let smoothed = funnel_smooth(&tris, &nm, start, end);
assert!(smoothed.len() >= 2);
assert_eq!(smoothed[0], start);
assert_eq!(smoothed[smoothed.len() - 1], end);
}
}
}

View File

@@ -0,0 +1,232 @@
use voltex_math::Vec3;
/// An agent that can be steered through the world.
#[derive(Debug, Clone)]
pub struct SteeringAgent {
// Current world-space position.
pub position: Vec3,
// Current velocity. Units presumably world units per second — TODO confirm.
pub velocity: Vec3,
// Maximum speed used to scale "desired velocity" in the behaviors.
pub max_speed: f32,
// Maximum magnitude of any steering force the behaviors return.
pub max_force: f32,
}
impl SteeringAgent {
/// Create an agent at `position` with zero initial velocity and the given
/// speed/force limits.
pub fn new(position: Vec3, max_speed: f32, max_force: f32) -> Self {
Self {
position,
velocity: Vec3::ZERO,
max_speed,
max_force,
}
}
}
/// Clamp vector `v` so its magnitude never exceeds `max_len`.
/// Vectors already within the limit pass through unchanged.
pub fn truncate(v: Vec3, max_len: f32) -> Vec3 {
    // Compare squared lengths to avoid a sqrt in the common in-range case.
    let limit_sq = max_len * max_len;
    if v.length_squared() > limit_sq {
        v.normalize() * max_len
    } else {
        v
    }
}
/// Seek steering behavior: accelerate toward `target` at max speed.
/// Returns the steering force to apply (zero when already at the target).
pub fn seek(agent: &SteeringAgent, target: Vec3) -> Vec3 {
    let offset = target - agent.position;
    // Degenerate case: already on top of the target.
    if offset.length() < f32::EPSILON {
        return Vec3::ZERO;
    }
    let desired_velocity = offset.normalize() * agent.max_speed;
    truncate(desired_velocity - agent.velocity, agent.max_force)
}
/// Flee steering behavior: accelerate directly away from `threat` at max speed.
/// Returns the steering force to apply (zero when exactly on the threat).
pub fn flee(agent: &SteeringAgent, threat: Vec3) -> Vec3 {
    let offset = agent.position - threat;
    // Degenerate case: agent and threat coincide, no defined direction.
    if offset.length() < f32::EPSILON {
        return Vec3::ZERO;
    }
    let desired_velocity = offset.normalize() * agent.max_speed;
    truncate(desired_velocity - agent.velocity, agent.max_force)
}
/// Arrive steering behavior: seek `target`, scaling speed down linearly once
/// inside `slow_radius` so the agent decelerates instead of overshooting.
/// Returns the steering force to apply.
pub fn arrive(agent: &SteeringAgent, target: Vec3, slow_radius: f32) -> Vec3 {
    let offset = target - agent.position;
    let dist = offset.length();
    if dist < f32::EPSILON {
        return Vec3::ZERO;
    }
    // Full speed outside the slow radius; linear ramp to zero inside it.
    let speed = if dist < slow_radius {
        agent.max_speed * (dist / slow_radius)
    } else {
        agent.max_speed
    };
    let desired_velocity = offset.normalize() * speed;
    truncate(desired_velocity - agent.velocity, agent.max_force)
}
/// Wander steering behavior: steer toward a point on a circle projected ahead
/// of the agent.
/// `wander_radius` is the radius of the wander circle,
/// `wander_distance` is how far ahead the circle is projected,
/// `angle` is the current angle on the wander circle (in radians).
/// Returns the steering force to apply.
pub fn wander(agent: &SteeringAgent, wander_radius: f32, wander_distance: f32, angle: f32) -> Vec3 {
    // Heading comes from velocity; fall back to +Z when nearly stationary.
    let speed = agent.velocity.length();
    let heading = if speed > f32::EPSILON {
        agent.velocity.normalize()
    } else {
        Vec3::Z
    };
    // Project the wander circle in front of the agent.
    let center = agent.position + heading * wander_distance;
    // Pick the circle point for `angle` in the XZ plane (Y is unchanged).
    let target = Vec3::new(
        center.x + angle.cos() * wander_radius,
        center.y,
        center.z + angle.sin() * wander_radius,
    );
    seek(agent, target)
}
/// Follow path steering behavior.
/// Seeks toward the current waypoint; advances to the next when within
/// `waypoint_radius`. Uses `arrive` for the final waypoint so the agent
/// decelerates at the end of the path.
///
/// Returns (steering_force, updated_current_waypoint_index).
pub fn follow_path(
    agent: &SteeringAgent,
    path: &[Vec3],
    current_waypoint: usize,
    waypoint_radius: f32,
) -> (Vec3, usize) {
    if path.is_empty() {
        return (Vec3::ZERO, 0);
    }
    // Clamp an out-of-range index to the last waypoint.
    let mut idx = current_waypoint.min(path.len() - 1);
    // Advance when close enough to the current waypoint and one remains.
    let close = (path[idx] - agent.position).length() < waypoint_radius;
    if close && idx + 1 < path.len() {
        idx += 1;
    }
    // Arrive at the final waypoint, seek any intermediate one.
    let force = if idx == path.len() - 1 {
        arrive(agent, path[idx], waypoint_radius)
    } else {
        seek(agent, path[idx])
    };
    (force, idx)
}
#[cfg(test)]
mod tests {
use super::*;
// Convenience: agent on the XZ plane with max_speed 5 and max_force 10.
fn agent_at(x: f32, z: f32) -> SteeringAgent {
SteeringAgent::new(Vec3::new(x, 0.0, z), 5.0, 10.0)
}
#[test]
fn test_seek_direction() {
let agent = agent_at(0.0, 0.0);
let target = Vec3::new(1.0, 0.0, 0.0);
let force = seek(&agent, target);
// Force should be in the +X direction
assert!(force.x > 0.0, "seek force x should be positive");
assert!(force.z.abs() < 1e-4, "seek force z should be ~0");
}
#[test]
fn test_flee_direction() {
let agent = agent_at(0.0, 0.0);
let threat = Vec3::new(1.0, 0.0, 0.0);
let force = flee(&agent, threat);
// Force should be in the -X direction (away from threat)
assert!(force.x < 0.0, "flee force x should be negative");
assert!(force.z.abs() < 1e-4, "flee force z should be ~0");
}
#[test]
fn test_arrive_deceleration() {
let mut agent = agent_at(0.0, 0.0);
// Give agent some velocity toward target
agent.velocity = Vec3::new(5.0, 0.0, 0.0);
let target = Vec3::new(2.0, 0.0, 0.0); // within slow_radius=5
let slow_radius = 5.0;
let force = arrive(&agent, target, slow_radius);
// The desired speed is reduced (dist/slow_radius * max_speed = 2/5 * 5 = 2)
// desired velocity = (1,0,0)*2, current velocity = (5,0,0)
// steering = (2-5, 0, 0) = (-3, 0, 0) — a braking force
assert!(force.x < 0.0, "arrive should produce braking force when inside slow_radius");
}
#[test]
fn test_arrive_at_target() {
let agent = agent_at(0.0, 0.0);
let target = Vec3::new(0.0, 0.0, 0.0); // same position
let force = arrive(&agent, target, 1.0);
// At target, force should be zero
assert!(force.length() < 1e-4, "force at target should be zero");
}
#[test]
fn test_follow_path_advance() {
// Path: (0,0,0) -> (5,0,0) -> (10,0,0)
let path = vec![
Vec3::new(0.0, 0.0, 0.0),
Vec3::new(5.0, 0.0, 0.0),
Vec3::new(10.0, 0.0, 0.0),
];
// Agent is very close to waypoint 1
let agent = SteeringAgent::new(Vec3::new(4.9, 0.0, 0.0), 5.0, 10.0);
let waypoint_radius = 0.5;
let (_, next_wp) = follow_path(&agent, &path, 1, waypoint_radius);
// Should advance to waypoint 2
assert_eq!(next_wp, 2, "should advance to waypoint 2 when close to waypoint 1");
}
#[test]
fn test_follow_path_last_arrives() {
// Path: single-segment, agent near last waypoint
let path = vec![
Vec3::new(0.0, 0.0, 0.0),
Vec3::new(10.0, 0.0, 0.0),
];
let agent = SteeringAgent::new(Vec3::new(9.0, 0.0, 0.0), 5.0, 10.0);
let waypoint_radius = 2.0;
let (force, wp_idx) = follow_path(&agent, &path, 1, waypoint_radius);
// Still at waypoint 1 (the last one); force should be arrive (decelerating)
assert_eq!(wp_idx, 1);
// arrive should produce a deceleration toward (10,0,0)
// agent has zero velocity, dist=1, slow_radius=2 → desired_speed=1/2*5=2.5 in +X
// steering = desired - velocity = (2.5,0,0) - (0,0,0) = (2.5,0,0)
assert!(force.x > 0.0, "arrive force should be toward last waypoint");
}
}

View File

@@ -1,7 +1,11 @@
pub mod handle;
pub mod storage;
pub mod assets;
pub mod watcher;
pub mod loader;
pub use handle::Handle;
pub use storage::AssetStorage;
pub use assets::Assets;
pub use watcher::FileWatcher;
pub use loader::{AssetLoader, LoadState};

View File

@@ -0,0 +1,300 @@
use std::any::{Any, TypeId};
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread::{self, JoinHandle};
use crate::assets::Assets;
use crate::handle::Handle;
/// Progress of one background asset load, as reported by `AssetLoader::state`.
#[derive(Debug)]
pub enum LoadState {
// Queued or in flight on the worker thread; also returned for unknown handles.
Loading,
// Parsed and inserted into Assets.
Ready,
// File read or parse failed; carries the error message.
Failed(String),
}
/// Work item sent to the loader's worker thread.
struct LoadRequest {
// Unique load id, matched back up in LoadResult.
id: u64,
// File to read on the worker thread.
path: PathBuf,
// Type-erased parser: raw bytes -> boxed asset or error message.
parse: Box<dyn FnOnce(&[u8]) -> Result<Box<dyn Any + Send>, String> + Send>,
}
/// Outcome sent back from the worker thread for one LoadRequest.
struct LoadResult {
// Matches LoadRequest::id.
id: u64,
// Parsed (still type-erased) asset, or the failure message.
result: Result<Box<dyn Any + Send>, String>,
}
/// Book-keeping for one in-flight (or finished) load, keyed by load id.
struct PendingEntry {
// Current lifecycle state of this load.
state: PendingState,
// Handle id/generation currently associated with this load (provisional
// until the asset is inserted into Assets, then remapped to the real slot).
handle_id: u32,
handle_gen: u32,
// TypeId of the asset type T, part of the handle lookup key.
type_id: TypeId,
/// Type-erased inserter: takes (assets, boxed_any) and inserts the asset,
/// returning the actual handle (id, gen) that was assigned.
inserter: Option<Box<dyn FnOnce(&mut Assets, Box<dyn Any + Send>) -> (u32, u32)>>,
}
/// Internal lifecycle state mirrored into the public LoadState on query.
enum PendingState {
Loading,
Failed(String),
Ready,
}
/// Background asset loader: reads and parses files on a worker thread,
/// then hands finished assets to `Assets` on the main thread via
/// `process_loaded`.
pub struct AssetLoader {
// Request channel to the worker; dropping it (set to None) stops the worker.
request_tx: Option<Sender<LoadRequest>>,
// Completed results from the worker, drained by process_loaded.
result_rx: Receiver<LoadResult>,
// Worker thread handle, joined in shutdown().
thread: Option<JoinHandle<()>>,
// Monotonically increasing load id.
next_id: u64,
// In-flight and finished loads keyed by load id.
pending: HashMap<u64, PendingEntry>,
// Map from (type_id, handle_id, handle_gen) to load_id for state lookups
handle_to_load: HashMap<(TypeId, u32, u32), u64>,
}
impl AssetLoader {
/// Spawn the worker thread and wire up the request/result channels.
/// The worker exits when the request sender is dropped (see `shutdown`).
pub fn new() -> Self {
let (request_tx, request_rx) = channel::<LoadRequest>();
let (result_tx, result_rx) = channel::<LoadResult>();
let thread = thread::spawn(move || {
// recv() errors once all senders are dropped, ending the loop.
while let Ok(req) = request_rx.recv() {
let result = match std::fs::read(&req.path) {
Ok(data) => (req.parse)(&data),
Err(e) => Err(format!("Failed to read {}: {}", req.path.display(), e)),
};
// Send failure means the loader was dropped; nothing to do.
let _ = result_tx.send(LoadResult {
id: req.id,
result,
});
}
});
Self {
request_tx: Some(request_tx),
result_rx,
thread: Some(thread),
next_id: 0,
pending: HashMap::new(),
handle_to_load: HashMap::new(),
}
}
/// Queue a file for background loading. Returns a handle immediately.
///
/// The handle becomes valid (pointing to real data) after `process_loaded`
/// inserts the completed asset into Assets. Until then, `state()` returns
/// `LoadState::Loading`.
///
/// **Important:** The returned handle's id/generation are provisional.
/// After `process_loaded`, the handle is updated internally to match the
/// actual slot in Assets. Since we pre-allocate using the load id, the
/// actual handle assigned by `Assets::insert` may differ. We remap it.
pub fn load<T, F>(&mut self, path: PathBuf, parse_fn: F) -> Handle<T>
where
T: Send + 'static,
F: FnOnce(&[u8]) -> Result<T, String> + Send + 'static,
{
let id = self.next_id;
self.next_id += 1;
// We use the load id as a provisional handle id.
// The real handle is assigned when the asset is inserted into Assets.
// NOTE(review): `id as u32` truncates for ids above u32::MAX.
let handle_id = id as u32;
let handle_gen = 0u32;
let handle = Handle::new(handle_id, handle_gen);
let type_id = TypeId::of::<T>();
// Create a type-erased inserter closure that knows how to downcast
// Box<dyn Any + Send> back to T and insert it into Assets.
let inserter: Box<dyn FnOnce(&mut Assets, Box<dyn Any + Send>) -> (u32, u32)> =
Box::new(|assets: &mut Assets, boxed: Box<dyn Any + Send>| {
let asset = *boxed.downcast::<T>().expect("type mismatch in loader");
let real_handle = assets.insert(asset);
(real_handle.id, real_handle.generation)
});
self.pending.insert(
id,
PendingEntry {
state: PendingState::Loading,
handle_id,
handle_gen,
type_id,
inserter: Some(inserter),
},
);
self.handle_to_load
.insert((type_id, handle_id, handle_gen), id);
// Wrap parse_fn to erase the type
let boxed_parse: Box<
dyn FnOnce(&[u8]) -> Result<Box<dyn Any + Send>, String> + Send,
> = Box::new(move |data: &[u8]| {
parse_fn(data).map(|v| Box::new(v) as Box<dyn Any + Send>)
});
// Silently dropped after shutdown() (request_tx is None).
if let Some(tx) = &self.request_tx {
let _ = tx.send(LoadRequest {
id,
path,
parse: boxed_parse,
});
}
handle
}
/// Check the load state for a given handle.
/// NOTE(review): unknown handles also report `Loading`, so callers cannot
/// distinguish "never queued" from "in flight".
pub fn state<T: 'static>(&self, handle: &Handle<T>) -> LoadState {
let type_id = TypeId::of::<T>();
if let Some(&load_id) =
self.handle_to_load
.get(&(type_id, handle.id, handle.generation))
{
if let Some(entry) = self.pending.get(&load_id) {
return match &entry.state {
PendingState::Loading => LoadState::Loading,
PendingState::Failed(e) => LoadState::Failed(e.clone()),
PendingState::Ready => LoadState::Ready,
};
}
}
LoadState::Loading
}
/// Drain completed loads from the worker thread and insert them into Assets.
///
/// Call this once per frame on the main thread.
pub fn process_loaded(&mut self, assets: &mut Assets) {
// Collect results first
let mut results = Vec::new();
while let Ok(result) = self.result_rx.try_recv() {
results.push(result);
}
for result in results {
if let Some(entry) = self.pending.get_mut(&result.id) {
match result.result {
Ok(boxed_asset) => {
// Take the inserter out and use it to insert into Assets
if let Some(inserter) = entry.inserter.take() {
let (real_id, real_gen) = inserter(assets, boxed_asset);
// Update the handle mapping if the real handle differs
let old_key =
(entry.type_id, entry.handle_id, entry.handle_gen);
if real_id != entry.handle_id || real_gen != entry.handle_gen {
// Remove old mapping, add new one
self.handle_to_load.remove(&old_key);
entry.handle_id = real_id;
entry.handle_gen = real_gen;
self.handle_to_load
.insert((entry.type_id, real_id, real_gen), result.id);
}
}
entry.state = PendingState::Ready;
}
Err(e) => {
entry.state = PendingState::Failed(e);
}
}
}
}
}
/// Stop the worker thread and wait for it to exit.
pub fn shutdown(mut self) {
// Drop the sender to signal the worker to stop
self.request_tx = None;
if let Some(thread) = self.thread.take() {
let _ = thread.join();
}
}
}
impl Default for AssetLoader {
/// Equivalent to `AssetLoader::new()` — spawns a fresh worker thread.
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs;
use std::time::Duration;
#[test]
fn test_load_state_initial() {
// Immediately after load() the state must be Loading, regardless of
// whether the worker has finished yet.
let mut loader = AssetLoader::new();
let dir = std::env::temp_dir().join("voltex_loader_test_1");
let _ = fs::create_dir_all(&dir);
let path = dir.join("test.txt");
fs::write(&path, "hello world").unwrap();
let handle: Handle<String> = loader.load(path.clone(), |data| {
Ok(String::from_utf8_lossy(data).to_string())
});
assert!(matches!(
loader.state::<String>(&handle),
LoadState::Loading
));
let _ = fs::remove_dir_all(&dir);
loader.shutdown();
}
#[test]
fn test_load_and_process() {
let mut loader = AssetLoader::new();
let dir = std::env::temp_dir().join("voltex_loader_test_2");
let _ = fs::create_dir_all(&dir);
let path = dir.join("data.txt");
fs::write(&path, "content123").unwrap();
let handle: Handle<String> = loader.load(path.clone(), |data| {
Ok(String::from_utf8_lossy(data).to_string())
});
// Give the worker time to read and parse before draining results.
std::thread::sleep(Duration::from_millis(200));
let mut assets = Assets::new();
loader.process_loaded(&mut assets);
assert!(matches!(
loader.state::<String>(&handle),
LoadState::Ready
));
// The handle returned by load() is provisional. After process_loaded,
// the real handle may have different id/gen. We need to look up
// the actual handle. Since this is the first insert, it should be (0, 0).
// But our provisional handle is also (0, 0), so it should match.
let val = assets.get(handle).unwrap();
assert_eq!(val, "content123");
let _ = fs::remove_dir_all(&dir);
loader.shutdown();
}
#[test]
fn test_load_nonexistent_fails() {
// A read failure on the worker must surface as LoadState::Failed.
let mut loader = AssetLoader::new();
let handle: Handle<String> = loader.load(
PathBuf::from("/nonexistent/file.txt"),
|data| Ok(String::from_utf8_lossy(data).to_string()),
);
std::thread::sleep(Duration::from_millis(200));
let mut assets = Assets::new();
loader.process_loaded(&mut assets);
assert!(matches!(
loader.state::<String>(&handle),
LoadState::Failed(_)
));
loader.shutdown();
}
}

View File

@@ -103,6 +103,18 @@ impl<T> AssetStorage<T> {
.unwrap_or(0)
}
/// Replace the asset data without changing generation or ref_count.
/// Used for hot reload — existing handles remain valid.
///
/// Returns `true` when the handle was live (slot occupied and generation
/// matched) and the asset was swapped; `false` for stale or out-of-range
/// handles.
pub fn replace_in_place(&mut self, handle: Handle<T>, new_asset: T) -> bool {
    match self.entries.get_mut(handle.id as usize) {
        Some(Some(entry)) if entry.generation == handle.generation => {
            entry.asset = new_asset;
            true
        }
        _ => false,
    }
}
pub fn iter(&self) -> impl Iterator<Item = (Handle<T>, &T)> {
self.entries
.iter()
@@ -211,6 +223,27 @@ mod tests {
assert_eq!(storage.get(h2).unwrap().verts, 9);
}
#[test]
fn replace_in_place() {
// Hot-reload path: data swapped, handle/generation/ref_count untouched.
let mut storage: AssetStorage<Mesh> = AssetStorage::new();
let h = storage.insert(Mesh { verts: 3 });
assert!(storage.replace_in_place(h, Mesh { verts: 99 }));
assert_eq!(storage.get(h).unwrap().verts, 99);
// Same handle still works — generation unchanged
assert_eq!(storage.ref_count(h), 1);
}
#[test]
fn replace_in_place_stale_handle() {
// Releasing then re-inserting reuses the slot with a bumped generation,
// so the old handle must be rejected.
let mut storage: AssetStorage<Mesh> = AssetStorage::new();
let h = storage.insert(Mesh { verts: 3 });
storage.release(h);
let h2 = storage.insert(Mesh { verts: 10 });
// h is stale, replace should fail
assert!(!storage.replace_in_place(h, Mesh { verts: 99 }));
assert_eq!(storage.get(h2).unwrap().verts, 10);
}
#[test]
fn iter() {
let mut storage: AssetStorage<Mesh> = AssetStorage::new();

View File

@@ -0,0 +1,114 @@
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::time::{Duration, Instant, SystemTime};
/// Polling-based file watcher for hot reload: reports paths whose
/// modification time changed between polls.
pub struct FileWatcher {
    /// Watched paths mapped to the last observed mtime.
    /// `None` means "not yet polled" or "unreadable at the last poll".
    watched: HashMap<PathBuf, Option<SystemTime>>,
    /// Minimum time between filesystem polls.
    poll_interval: Duration,
    /// Instant of the last completed poll.
    last_poll: Instant,
}

impl FileWatcher {
    /// Create a watcher that polls the filesystem at most once per
    /// `poll_interval`.
    ///
    /// `last_poll` is back-dated by one interval so the very first
    /// `poll_changes` call performs a real poll. `checked_sub` is used
    /// because `Instant - Duration` panics when the result is not
    /// representable (e.g. a very large interval shortly after boot);
    /// in that rare case we fall back to `now`, which only delays the
    /// first poll by one interval instead of panicking.
    pub fn new(poll_interval: Duration) -> Self {
        let now = Instant::now();
        Self {
            watched: HashMap::new(),
            poll_interval,
            last_poll: now.checked_sub(poll_interval).unwrap_or(now),
        }
    }
    /// Register a path for watching. The first poll records its mtime
    /// without reporting a change.
    pub fn watch(&mut self, path: PathBuf) {
        // Store None initially — first poll will record the mtime without reporting change
        self.watched.insert(path, None);
    }
    /// Stop watching a path.
    pub fn unwatch(&mut self, path: &Path) {
        self.watched.remove(path);
    }
    /// Return the watched paths whose mtime changed since the previous poll.
    ///
    /// Rate-limited: returns an empty Vec when called again before
    /// `poll_interval` has elapsed.
    pub fn poll_changes(&mut self) -> Vec<PathBuf> {
        let now = Instant::now();
        if now.duration_since(self.last_poll) < self.poll_interval {
            return Vec::new();
        }
        self.last_poll = now;
        let mut changed = Vec::new();
        for (path, last_mtime) in &mut self.watched {
            // `None` when the file is missing or its metadata is unreadable.
            let current = std::fs::metadata(path)
                .ok()
                .and_then(|m| m.modified().ok());
            if let Some(prev) = last_mtime {
                // We have a previous mtime — any difference (including
                // deletion, where `current` is None) counts as a change.
                if current != Some(*prev) {
                    changed.push(path.clone());
                }
            }
            // else: first poll, just record mtime, don't report
            *last_mtime = current;
        }
        changed
    }
    /// Number of currently watched paths.
    pub fn watched_count(&self) -> usize {
        self.watched.len()
    }
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs;
#[test]
fn test_watch_and_poll_no_changes() {
// Zero interval so every poll_changes call actually polls.
let mut watcher = FileWatcher::new(Duration::from_millis(0));
let dir = std::env::temp_dir().join("voltex_watcher_test_1");
let _ = fs::create_dir_all(&dir);
let path = dir.join("test.txt");
fs::write(&path, "hello").unwrap();
watcher.watch(path.clone());
// First poll — should not report as changed (just registered)
let changes = watcher.poll_changes();
assert!(changes.is_empty());
// Second poll without modification — still no changes
let changes = watcher.poll_changes();
assert!(changes.is_empty());
let _ = fs::remove_dir_all(&dir);
}
#[test]
fn test_detect_file_change() {
let dir = std::env::temp_dir().join("voltex_watcher_test_2");
let _ = fs::create_dir_all(&dir);
let path = dir.join("test2.txt");
fs::write(&path, "v1").unwrap();
let mut watcher = FileWatcher::new(Duration::from_millis(0));
watcher.watch(path.clone());
let _ = watcher.poll_changes(); // register initial mtime
// Modify file
// Sleep so the rewrite gets a distinct mtime on coarse-granularity
// filesystems.
std::thread::sleep(Duration::from_millis(50));
fs::write(&path, "v2 with more data").unwrap();
let changes = watcher.poll_changes();
assert!(changes.contains(&path));
let _ = fs::remove_dir_all(&dir);
}
#[test]
fn test_unwatch() {
let mut watcher = FileWatcher::new(Duration::from_millis(0));
let path = PathBuf::from("/nonexistent/test.txt");
watcher.watch(path.clone());
assert_eq!(watcher.watched_count(), 1);
watcher.unwatch(&path);
assert_eq!(watcher.watched_count(), 0);
assert!(watcher.poll_changes().is_empty());
}
}

View File

@@ -0,0 +1,7 @@
[package]
name = "voltex_audio"
version = "0.1.0"
edition = "2021"
[dependencies]
voltex_math.workspace = true

View File

@@ -0,0 +1,91 @@
use std::sync::{Arc, Mutex};
use std::path::PathBuf;
/// Load lifecycle of a queued audio clip.
#[derive(Debug, Clone, PartialEq)]
pub enum LoadState {
    Pending,
    Loading,
    Loaded,
    Error(String),
}

/// Loads audio clips in the background, spawning one thread per request.
pub struct AsyncAudioLoader {
    // (clip_id, path, state) entries shared with the worker threads.
    pending: Arc<Mutex<Vec<(u32, PathBuf, LoadState)>>>,
}

impl AsyncAudioLoader {
    /// Create an empty loader with no queued clips.
    pub fn new() -> Self {
        AsyncAudioLoader { pending: Arc::new(Mutex::new(Vec::new())) }
    }
    /// Set the state of the entry matching `clip_id`, if one exists.
    fn set_state(shared: &Arc<Mutex<Vec<(u32, PathBuf, LoadState)>>>, clip_id: u32, state: LoadState) {
        let mut entries = shared.lock().unwrap();
        if let Some(entry) = entries.iter_mut().find(|(id, _, _)| *id == clip_id) {
            entry.2 = state;
        }
    }
    /// Queue a clip for async loading. Returns immediately.
    pub fn load(&self, clip_id: u32, path: PathBuf) {
        self.pending
            .lock()
            .unwrap()
            .push((clip_id, path.clone(), LoadState::Pending));
        let shared = Arc::clone(&self.pending);
        std::thread::spawn(move || {
            Self::set_state(&shared, clip_id, LoadState::Loading);
            // Simulate load (read file); the bytes are discarded for now.
            let outcome = match std::fs::read(&path) {
                Ok(_data) => LoadState::Loaded,
                Err(e) => LoadState::Error(e.to_string()),
            };
            Self::set_state(&shared, clip_id, outcome);
        });
    }
    /// Current state of a queued clip; unknown ids report `Error("not found")`.
    pub fn state(&self, clip_id: u32) -> LoadState {
        let entries = self.pending.lock().unwrap();
        entries
            .iter()
            .find(|(id, _, _)| *id == clip_id)
            .map(|(_, _, s)| s.clone())
            .unwrap_or(LoadState::Error("not found".to_string()))
    }
    /// Ids of every clip whose load has completed successfully.
    pub fn poll_completed(&self) -> Vec<u32> {
        let entries = self.pending.lock().unwrap();
        entries
            .iter()
            .filter(|(_, _, s)| *s == LoadState::Loaded)
            .map(|(id, _, _)| *id)
            .collect()
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_new() {
// A fresh loader has nothing queued or completed.
let loader = AsyncAudioLoader::new();
assert!(loader.poll_completed().is_empty());
}
#[test]
fn test_load_nonexistent() {
// Read failure on the worker thread surfaces as Error.
let loader = AsyncAudioLoader::new();
loader.load(1, PathBuf::from("/nonexistent/path.wav"));
// Give the spawned worker time to finish.
std::thread::sleep(std::time::Duration::from_millis(100));
let state = loader.state(1);
assert!(matches!(state, LoadState::Error(_)));
}
#[test]
fn test_load_existing() {
let dir = std::env::temp_dir().join("voltex_async_test");
let _ = std::fs::create_dir_all(&dir);
std::fs::write(dir.join("test.wav"), b"RIFF").unwrap();
let loader = AsyncAudioLoader::new();
loader.load(42, dir.join("test.wav"));
// Give the spawned worker time to finish.
std::thread::sleep(std::time::Duration::from_millis(200));
assert_eq!(loader.state(42), LoadState::Loaded);
let _ = std::fs::remove_dir_all(&dir);
}
}

View File

@@ -0,0 +1,82 @@
/// Audio bus: mixes multiple input signals.
pub struct AudioBus {
    /// Inputs feeding this bus, each with its own gain.
    pub inputs: Vec<BusInput>,
    /// Gain applied to the mixed sum before output.
    pub output_gain: f32,
}

/// One input connection to an [`AudioBus`].
#[derive(Debug, Clone)]
pub struct BusInput {
    /// Identifier of the source feeding this input.
    pub source_id: u32,
    /// Linear gain applied to this input's samples.
    pub gain: f32,
}

impl AudioBus {
    /// Create an empty bus with unity output gain.
    pub fn new() -> Self {
        AudioBus { inputs: Vec::new(), output_gain: 1.0 }
    }

    /// Connect `source_id` to this bus with the given linear `gain`.
    pub fn add_input(&mut self, source_id: u32, gain: f32) {
        self.inputs.push(BusInput { source_id, gain });
    }

    /// Disconnect every input coming from `source_id`.
    pub fn remove_input(&mut self, source_id: u32) {
        self.inputs.retain(|i| i.source_id != source_id);
    }

    /// Mix samples from all inputs. `samples` pairs a source id with its
    /// current sample value; inputs without a matching sample contribute 0.
    pub fn mix(&self, samples: &[(u32, f32)]) -> f32 {
        let sum: f32 = self
            .inputs
            .iter()
            .filter_map(|input| {
                samples
                    .iter()
                    .find(|(id, _)| *id == input.source_id)
                    .map(|(_, sample)| sample * input.gain)
            })
            .sum();
        sum * self.output_gain
    }
}

impl Default for AudioBus {
    /// Same as [`AudioBus::new`]; satisfies `clippy::new_without_default`.
    fn default() -> Self {
        Self::new()
    }
}
// Unit tests covering empty mixing, gain scaling, summing, removal, and
// output gain of `AudioBus`.
#[cfg(test)]
mod tests {
    use super::*;

    // An empty bus outputs silence.
    #[test]
    fn test_empty_bus() {
        let bus = AudioBus::new();
        assert!((bus.mix(&[]) - 0.0).abs() < 1e-6);
    }

    // A single input is scaled by its input gain.
    #[test]
    fn test_single_input() {
        let mut bus = AudioBus::new();
        bus.add_input(1, 0.5);
        let out = bus.mix(&[(1, 1.0)]);
        assert!((out - 0.5).abs() < 1e-6);
    }

    // Multiple inputs sum linearly.
    #[test]
    fn test_multiple_inputs() {
        let mut bus = AudioBus::new();
        bus.add_input(1, 1.0);
        bus.add_input(2, 1.0);
        let out = bus.mix(&[(1, 0.3), (2, 0.5)]);
        assert!((out - 0.8).abs() < 1e-6);
    }

    // remove_input drops every connection for the given source.
    #[test]
    fn test_remove_input() {
        let mut bus = AudioBus::new();
        bus.add_input(1, 1.0);
        bus.add_input(2, 1.0);
        bus.remove_input(1);
        assert_eq!(bus.inputs.len(), 1);
    }

    // output_gain scales the mixed sum.
    #[test]
    fn test_output_gain() {
        let mut bus = AudioBus::new();
        bus.output_gain = 0.5;
        bus.add_input(1, 1.0);
        let out = bus.mix(&[(1, 1.0)]);
        assert!((out - 0.5).abs() < 1e-6);
    }
}

View File

@@ -0,0 +1,55 @@
/// A loaded audio clip stored as normalized f32 PCM samples.
#[derive(Debug)]
pub struct AudioClip {
    /// Interleaved PCM samples in the range [-1.0, 1.0].
    pub samples: Vec<f32>,
    /// Number of samples per second (e.g. 44100).
    pub sample_rate: u32,
    /// Number of audio channels (1 = mono, 2 = stereo).
    pub channels: u16,
}

impl AudioClip {
    /// Create a new AudioClip from raw f32 samples.
    pub fn new(samples: Vec<f32>, sample_rate: u32, channels: u16) -> Self {
        Self {
            samples,
            sample_rate,
            channels,
        }
    }

    /// Total number of multi-channel frames.
    /// A frame contains one sample per channel.
    ///
    /// Returns 0 for a malformed clip with zero channels instead of
    /// panicking on integer division by zero.
    pub fn frame_count(&self) -> usize {
        if self.channels == 0 {
            return 0;
        }
        self.samples.len() / self.channels as usize
    }

    /// Duration in seconds.
    ///
    /// Returns 0.0 when the sample rate is zero, avoiding a NaN/inf result
    /// for malformed clips.
    pub fn duration(&self) -> f32 {
        if self.sample_rate == 0 {
            return 0.0;
        }
        self.frame_count() as f32 / self.sample_rate as f32
    }
}
// Unit tests for frame/duration math on mono and stereo clips.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn mono_clip() {
        // 44100 Hz, 1 channel, 1 second of silence
        let samples = vec![0.0f32; 44100];
        let clip = AudioClip::new(samples, 44100, 1);
        assert_eq!(clip.frame_count(), 44100);
        assert!((clip.duration() - 1.0).abs() < 1e-6);
    }

    #[test]
    fn stereo_clip() {
        // 44100 Hz, 2 channels, 0.5 seconds
        let samples = vec![0.0f32; 44100]; // 44100 interleaved samples = 22050 frames
        let clip = AudioClip::new(samples, 44100, 2);
        assert_eq!(clip.frame_count(), 22050);
        assert!((clip.duration() - 0.5).abs() < 1e-6);
    }
}

View File

@@ -0,0 +1,78 @@
/// ECS component that attaches audio playback to an entity.
#[derive(Debug, Clone)]
pub struct AudioSource {
    /// Id of the clip this source plays.
    pub clip_id: u32,
    /// Linear volume multiplier (1.0 = full).
    pub volume: f32,
    /// Whether 3D spatialization is applied.
    pub spatial: bool,
    /// Whether playback restarts when the clip ends.
    pub looping: bool,
    /// Whether the source is currently playing.
    pub playing: bool,
    /// Cleared by `play()`; presumably set by the playback system once a
    /// one-shot finishes — confirm against the system that drives sources.
    pub played_once: bool,
}

impl AudioSource {
    /// Create a source for `clip_id` with defaults: full volume,
    /// non-spatial, non-looping, stopped.
    pub fn new(clip_id: u32) -> Self {
        AudioSource {
            clip_id,
            volume: 1.0,
            spatial: false,
            looping: false,
            playing: false,
            played_once: false,
        }
    }

    /// Builder: enable 3D spatialization.
    pub fn spatial(mut self) -> Self {
        self.spatial = true;
        self
    }

    /// Builder: enable looping playback.
    pub fn looping(mut self) -> Self {
        self.looping = true;
        self
    }

    /// Start (or restart) playback; clears the one-shot completion flag.
    pub fn play(&mut self) {
        self.playing = true;
        self.played_once = false;
    }

    /// Stop playback; `played_once` is left untouched.
    pub fn stop(&mut self) {
        self.playing = false;
    }
}
// Unit tests for AudioSource defaults, the builder methods, and the
// play/stop state machine.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_new_defaults() {
        let src = AudioSource::new(42);
        assert_eq!(src.clip_id, 42);
        assert!((src.volume - 1.0).abs() < 1e-6);
        assert!(!src.spatial);
        assert!(!src.looping);
        assert!(!src.playing);
        assert!(!src.played_once);
    }

    #[test]
    fn test_builder_pattern() {
        let src = AudioSource::new(1).spatial().looping();
        assert!(src.spatial);
        assert!(src.looping);
    }

    #[test]
    fn test_play_stop() {
        let mut src = AudioSource::new(1);
        src.play();
        assert!(src.playing);
        assert!(!src.played_once);
        src.played_once = true;
        src.play(); // re-play should reset played_once
        assert!(!src.played_once);
        src.stop();
        assert!(!src.playing);
    }

    #[test]
    fn test_volume() {
        let mut src = AudioSource::new(1);
        src.volume = 0.5;
        assert!((src.volume - 0.5).abs() < 1e-6);
    }
}

View File

@@ -0,0 +1,303 @@
//! AudioSystem: high-level audio playback management with a dedicated audio thread.
use std::sync::{Arc, mpsc};
use std::thread;
use crate::{AudioClip, PlayingSound, mix_sounds};
use crate::spatial::{Listener, SpatialParams};
use crate::mix_group::{MixGroup, MixerState};
// ---------------------------------------------------------------------------
// AudioCommand
// ---------------------------------------------------------------------------
/// Commands sent to the audio thread.
pub enum AudioCommand {
    /// Start playing a clip.
    Play {
        /// Index into the clips vec handed to `AudioSystem::new`.
        clip_index: usize,
        /// Linear volume multiplier.
        volume: f32,
        /// Restart from the beginning when the clip ends.
        looping: bool,
    },
    /// Start playing a clip with 3D spatial parameters.
    Play3d {
        clip_index: usize,
        volume: f32,
        looping: bool,
        /// Position/attenuation parameters used by the spatializer.
        spatial: SpatialParams,
    },
    /// Update the listener position and orientation.
    SetListener {
        position: voltex_math::Vec3,
        forward: voltex_math::Vec3,
        right: voltex_math::Vec3,
    },
    /// Stop all instances of a clip.
    Stop { clip_index: usize },
    /// Change volume of all playing instances of a clip.
    SetVolume { clip_index: usize, volume: f32 },
    /// Stop all currently playing sounds.
    StopAll,
    /// Set volume for a mix group immediately.
    SetGroupVolume { group: MixGroup, volume: f32 },
    /// Fade a mix group to a target volume over a duration (seconds).
    FadeGroup { group: MixGroup, target: f32, duration: f32 },
    /// Shut down the audio thread.
    Shutdown,
}
// ---------------------------------------------------------------------------
// AudioSystem
// ---------------------------------------------------------------------------
/// Manages audio playback via a dedicated OS-level thread.
pub struct AudioSystem {
    /// Command channel into the audio thread.
    sender: mpsc::Sender<AudioCommand>,
}

impl AudioSystem {
    /// Create an AudioSystem with the given pre-loaded clips.
    ///
    /// Spawns a background audio thread that owns the clips and drives the
    /// WASAPI device (on Windows) or runs in silent mode (other platforms).
    pub fn new(clips: Vec<AudioClip>) -> Result<Self, String> {
        let shared_clips = Arc::new(clips);
        let (command_tx, command_rx) = mpsc::channel::<AudioCommand>();
        let thread_clips = Arc::clone(&shared_clips);
        thread::Builder::new()
            .name("voltex_audio_thread".to_string())
            .spawn(move || audio_thread(command_rx, thread_clips))
            .map_err(|e| format!("Failed to spawn audio thread: {}", e))?;
        Ok(AudioSystem { sender: command_tx })
    }

    /// Send a command, ignoring the error from an already-stopped thread.
    fn send(&self, cmd: AudioCommand) {
        let _ = self.sender.send(cmd);
    }

    /// Start playing a clip at the given volume and looping setting.
    pub fn play(&self, clip_index: usize, volume: f32, looping: bool) {
        self.send(AudioCommand::Play { clip_index, volume, looping });
    }

    /// Start playing a clip with 3D spatial audio parameters.
    pub fn play_3d(&self, clip_index: usize, volume: f32, looping: bool, spatial: SpatialParams) {
        self.send(AudioCommand::Play3d { clip_index, volume, looping, spatial });
    }

    /// Update the listener position and orientation for 3D audio.
    pub fn set_listener(&self, position: voltex_math::Vec3, forward: voltex_math::Vec3, right: voltex_math::Vec3) {
        self.send(AudioCommand::SetListener { position, forward, right });
    }

    /// Stop all instances of the specified clip.
    pub fn stop(&self, clip_index: usize) {
        self.send(AudioCommand::Stop { clip_index });
    }

    /// Set the volume for all playing instances of a clip.
    pub fn set_volume(&self, clip_index: usize, volume: f32) {
        self.send(AudioCommand::SetVolume { clip_index, volume });
    }

    /// Stop all currently playing sounds.
    pub fn stop_all(&self) {
        self.send(AudioCommand::StopAll);
    }

    /// Set volume for a mix group immediately.
    pub fn set_group_volume(&self, group: MixGroup, volume: f32) {
        self.send(AudioCommand::SetGroupVolume { group, volume });
    }

    /// Fade a mix group to a target volume over `duration` seconds.
    pub fn fade_group(&self, group: MixGroup, target: f32, duration: f32) {
        self.send(AudioCommand::FadeGroup { group, target, duration });
    }
}

impl Drop for AudioSystem {
    /// Ask the audio thread to shut down; ignored if it already exited.
    fn drop(&mut self) {
        let _ = self.sender.send(AudioCommand::Shutdown);
    }
}
// ---------------------------------------------------------------------------
// Audio thread implementation
// ---------------------------------------------------------------------------
/// Main audio thread function.
///
/// On Windows, initializes WasapiDevice and drives playback.
/// On other platforms, runs in a silent "null" mode (processes commands only).
fn audio_thread(rx: mpsc::Receiver<AudioCommand>, clips: Arc<Vec<AudioClip>>) {
    // The two cfg conditions are disjoint: exactly one call is compiled in.
    #[cfg(target_os = "windows")]
    audio_thread_windows(rx, clips);
    #[cfg(not(target_os = "windows"))]
    audio_thread_null(rx, clips);
}
// ---------------------------------------------------------------------------
// Windows implementation
// ---------------------------------------------------------------------------
#[cfg(target_os = "windows")]
fn audio_thread_windows(rx: mpsc::Receiver<AudioCommand>, clips: Arc<Vec<AudioClip>>) {
    use crate::wasapi::WasapiDevice;
    // Fall back to the silent loop if the device cannot be opened, so the
    // command channel is still drained and senders never observe a drop.
    let device = match WasapiDevice::new() {
        Ok(d) => d,
        Err(e) => {
            eprintln!("[voltex_audio] WASAPI init failed: {}. Running in silent mode.", e);
            audio_thread_null(rx, clips);
            return;
        }
    };
    let device_sample_rate = device.sample_rate;
    let device_channels = device.channels;
    let buffer_frames = device.buffer_frames() as usize;
    // Active voices, the reusable interleaved mix buffer, and 3D/mixer state.
    let mut playing: Vec<PlayingSound> = Vec::new();
    let mut output: Vec<f32> = Vec::new();
    let mut listener = Listener::default();
    let mut mixer = MixerState::new();
    loop {
        // Advance mixer fades (~5 ms per iteration, matching the sleep below).
        mixer.tick(0.005);
        // Process all pending commands (non-blocking)
        loop {
            match rx.try_recv() {
                Ok(cmd) => {
                    match cmd {
                        AudioCommand::Play { clip_index, volume, looping } => {
                            playing.push(PlayingSound::new(clip_index, volume, looping));
                        }
                        AudioCommand::Play3d { clip_index, volume, looping, spatial } => {
                            playing.push(PlayingSound::new_3d(clip_index, volume, looping, spatial));
                        }
                        AudioCommand::SetListener { position, forward, right } => {
                            listener = Listener { position, forward, right };
                        }
                        AudioCommand::Stop { clip_index } => {
                            // Removes every instance of the clip, not just one.
                            playing.retain(|s| s.clip_index != clip_index);
                        }
                        AudioCommand::SetVolume { clip_index, volume } => {
                            for s in playing.iter_mut() {
                                if s.clip_index == clip_index {
                                    s.volume = volume;
                                }
                            }
                        }
                        AudioCommand::StopAll => {
                            playing.clear();
                        }
                        AudioCommand::SetGroupVolume { group, volume } => {
                            mixer.set_volume(group, volume);
                        }
                        AudioCommand::FadeGroup { group, target, duration } => {
                            mixer.fade(group, target, duration);
                        }
                        AudioCommand::Shutdown => {
                            return;
                        }
                    }
                }
                Err(mpsc::TryRecvError::Empty) => break,
                // All senders gone: the owning AudioSystem was dropped.
                Err(mpsc::TryRecvError::Disconnected) => return,
            }
        }
        // Mix and write audio. Note: `output` stays non-empty after the first
        // mix, so this branch keeps running (writing zeros) once all sounds
        // have finished — equivalent to the silence branch below.
        if !playing.is_empty() || !output.is_empty() {
            mix_sounds(
                &mut output,
                &mut playing,
                &clips,
                device_sample_rate,
                device_channels,
                buffer_frames,
                &listener,
                &mixer,
            );
            if let Err(e) = device.write_samples(&output) {
                eprintln!("[voltex_audio] write_samples error: {}", e);
            }
        } else {
            // Write silence to keep the device fed before the first sound.
            let silence_len = buffer_frames * device_channels as usize;
            let silence = vec![0.0f32; silence_len];
            let _ = device.write_samples(&silence);
        }
        // Sleep ~5 ms between iterations
        thread::sleep(std::time::Duration::from_millis(5));
    }
}
// ---------------------------------------------------------------------------
// Null (non-Windows) implementation
// ---------------------------------------------------------------------------
/// Silent fallback: consume commands without producing any audio.
///
/// Used on non-Windows platforms, and on Windows when WASAPI init fails.
/// The two previous `#[cfg]`-gated copies were byte-identical forwards with
/// the same signature, so a single unconditional definition replaces both.
fn audio_thread_null(rx: mpsc::Receiver<AudioCommand>, clips: Arc<Vec<AudioClip>>) {
    audio_thread_null_impl(rx, clips);
}
/// Block on the command channel forever, discarding every command, until a
/// `Shutdown` arrives or all senders disconnect.
fn audio_thread_null_impl(rx: mpsc::Receiver<AudioCommand>, _clips: Arc<Vec<AudioClip>>) {
    // recv() blocks; the loop exits on channel disconnect (Err) as well.
    while let Ok(cmd) = rx.recv() {
        if matches!(cmd, AudioCommand::Shutdown) {
            return;
        }
    }
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
// Smoke tests: the system must construct, accept commands, and shut down
// cleanly via Drop (the thread runs in null mode on CI).
#[cfg(test)]
mod tests {
    use super::*;
    use crate::AudioClip;

    // One second of mono silence at 44.1 kHz.
    fn silence_clip() -> AudioClip {
        AudioClip::new(vec![0.0f32; 44100], 44100, 1)
    }

    #[test]
    fn create_and_drop() {
        // AudioSystem should be created and dropped without panicking.
        // The audio thread is spawned but will run in null mode in CI.
        let _sys = AudioSystem::new(vec![silence_clip()])
            .expect("AudioSystem::new failed");
        // Drop happens here, Shutdown command is sent automatically.
    }

    #[test]
    fn send_commands() {
        let sys = AudioSystem::new(vec![silence_clip(), silence_clip()])
            .expect("AudioSystem::new failed");
        sys.play(0, 1.0, false);
        sys.play(1, 0.5, true);
        sys.set_volume(0, 0.8);
        sys.stop(0);
        sys.stop_all();
        // All sends must succeed without panic.
    }
}

View File

@@ -0,0 +1,101 @@
use std::collections::HashMap;
/// Dynamic audio mix group system.
pub struct MixGroupManager {
    /// Group configs keyed by group name; always contains "Master".
    groups: HashMap<String, MixGroupConfig>,
}

/// Configuration for a single named mix group.
#[derive(Debug, Clone)]
pub struct MixGroupConfig {
    /// Group name (duplicates the map key for convenience).
    pub name: String,
    /// Own volume, clamped to [0, 1] by `set_volume`.
    pub volume: f32,
    /// When true the group and all its descendants are silenced.
    pub muted: bool,
    /// Parent group name; `None` only for the root "Master" group.
    pub parent: Option<String>,
}

impl MixGroupManager {
    /// Create a manager that always contains the root "Master" group.
    pub fn new() -> Self {
        let mut mgr = MixGroupManager { groups: HashMap::new() };
        mgr.add_group("Master", None);
        mgr
    }

    /// Add a group. Returns false if the name already exists or the named
    /// parent does not exist. Because a parent must exist before its
    /// children, the parent graph can never contain a cycle.
    pub fn add_group(&mut self, name: &str, parent: Option<&str>) -> bool {
        if self.groups.contains_key(name) {
            return false;
        }
        if let Some(p) = parent {
            if !self.groups.contains_key(p) {
                return false;
            }
        }
        self.groups.insert(
            name.to_string(),
            MixGroupConfig {
                name: name.to_string(),
                volume: 1.0,
                muted: false,
                parent: parent.map(|s| s.to_string()),
            },
        );
        true
    }

    /// Remove a group; "Master" can never be removed. Children of a removed
    /// group are left in place — their volume chain simply stops at the
    /// missing parent (see `effective_volume`).
    pub fn remove_group(&mut self, name: &str) -> bool {
        if name == "Master" {
            return false; // can't remove master
        }
        self.groups.remove(name).is_some()
    }

    /// Set a group's own volume, clamped to [0, 1]. Unknown names are ignored.
    pub fn set_volume(&mut self, name: &str, volume: f32) {
        if let Some(g) = self.groups.get_mut(name) {
            g.volume = volume.clamp(0.0, 1.0);
        }
    }

    /// Mute or unmute a group. Unknown names are ignored.
    ///
    /// Previously `muted` was private state with no setter, making the flag
    /// unreachable from outside this module; this accessor makes it usable.
    pub fn set_muted(&mut self, name: &str, muted: bool) {
        if let Some(g) = self.groups.get_mut(name) {
            g.muted = muted;
        }
    }

    /// Volume of `name` multiplied up its parent chain; 0.0 if the group or
    /// any ancestor is muted. Depth is capped at 10 as a safety net.
    pub fn effective_volume(&self, name: &str) -> f32 {
        let mut vol = 1.0;
        let mut current = name;
        for _ in 0..10 {
            if let Some(g) = self.groups.get(current) {
                if g.muted {
                    return 0.0;
                }
                vol *= g.volume;
                match g.parent {
                    Some(ref p) => current = p,
                    None => break,
                }
            } else {
                break;
            }
        }
        vol
    }

    /// Number of groups, including "Master".
    pub fn group_count(&self) -> usize {
        self.groups.len()
    }
}

impl Default for MixGroupManager {
    /// Same as [`MixGroupManager::new`].
    fn default() -> Self {
        Self::new()
    }
}
// Unit tests for group creation, parent-chain volume, and Master protection.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_default_has_master() {
        let mgr = MixGroupManager::new();
        assert_eq!(mgr.group_count(), 1);
        assert!((mgr.effective_volume("Master") - 1.0).abs() < 1e-6);
    }

    #[test]
    fn test_add_group() {
        let mut mgr = MixGroupManager::new();
        assert!(mgr.add_group("SFX", Some("Master")));
        assert_eq!(mgr.group_count(), 2);
    }

    // Effective volume multiplies up the parent chain.
    #[test]
    fn test_effective_volume_chain() {
        let mut mgr = MixGroupManager::new();
        mgr.set_volume("Master", 0.8);
        mgr.add_group("SFX", Some("Master"));
        mgr.set_volume("SFX", 0.5);
        assert!((mgr.effective_volume("SFX") - 0.4).abs() < 1e-6); // 0.5 * 0.8
    }

    #[test]
    fn test_cant_remove_master() {
        let mut mgr = MixGroupManager::new();
        assert!(!mgr.remove_group("Master"));
    }

    #[test]
    fn test_add_duplicate_fails() {
        let mut mgr = MixGroupManager::new();
        mgr.add_group("SFX", Some("Master"));
        assert!(!mgr.add_group("SFX", Some("Master")));
    }
}

View File

@@ -0,0 +1,62 @@
/// Trait for audio effects.
pub trait AudioEffect {
    /// Process one sample and return the transformed sample.
    fn process(&mut self, sample: f32) -> f32;
    /// Human-readable effect name.
    fn name(&self) -> &str;
}

/// Chain of audio effects processed in order.
pub struct EffectChain {
    effects: Vec<Box<dyn AudioEffect>>,
    /// When true, `process` returns the input unchanged.
    pub bypass: bool,
}

impl EffectChain {
    /// Create an empty, non-bypassed chain.
    pub fn new() -> Self {
        EffectChain { effects: Vec::new(), bypass: false }
    }

    /// Append an effect to the end of the chain.
    pub fn add(&mut self, effect: Box<dyn AudioEffect>) {
        self.effects.push(effect);
    }

    /// Remove the effect at `index`; out-of-range indices are ignored
    /// (avoids the panic `Vec::remove` would otherwise raise).
    pub fn remove(&mut self, index: usize) {
        if index < self.effects.len() {
            self.effects.remove(index);
        }
    }

    /// Number of effects in the chain.
    pub fn len(&self) -> usize {
        self.effects.len()
    }

    /// True when the chain holds no effects.
    pub fn is_empty(&self) -> bool {
        self.effects.is_empty()
    }

    /// Run `sample` through every effect in order; a bypassed chain returns
    /// the input unchanged.
    pub fn process(&mut self, sample: f32) -> f32 {
        if self.bypass {
            return sample;
        }
        self.effects.iter_mut().fold(sample, |s, e| e.process(s))
    }
}

impl Default for EffectChain {
    /// Empty chain; satisfies `clippy::new_without_default`.
    fn default() -> Self {
        Self::new()
    }
}

/// Simple gain effect: multiplies every sample by a constant factor.
/// Used primarily by the tests below.
pub struct GainEffect {
    /// Linear gain multiplier.
    pub gain: f32,
}

impl AudioEffect for GainEffect {
    fn process(&mut self, sample: f32) -> f32 {
        sample * self.gain
    }
    fn name(&self) -> &str {
        "Gain"
    }
}
// Unit tests: pass-through for empty chains, single effect, ordering of
// chained effects, and bypass.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_empty_chain() { let mut c = EffectChain::new(); assert!((c.process(1.0) - 1.0).abs() < 1e-6); }

    #[test]
    fn test_single_effect() {
        let mut c = EffectChain::new();
        c.add(Box::new(GainEffect { gain: 0.5 }));
        assert!((c.process(1.0) - 0.5).abs() < 1e-6);
    }

    // Two 0.5 gains in series: 1.0 -> 0.25.
    #[test]
    fn test_chain_order() {
        let mut c = EffectChain::new();
        c.add(Box::new(GainEffect { gain: 0.5 }));
        c.add(Box::new(GainEffect { gain: 0.5 }));
        assert!((c.process(1.0) - 0.25).abs() < 1e-6);
    }

    // Bypass skips even a fully-muting effect.
    #[test]
    fn test_bypass() {
        let mut c = EffectChain::new();
        c.add(Box::new(GainEffect { gain: 0.0 }));
        c.bypass = true;
        assert!((c.process(1.0) - 1.0).abs() < 1e-6);
    }
}

View File

@@ -0,0 +1,58 @@
/// Fade curve types.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum FadeCurve {
    /// Identity: output tracks t directly.
    Linear,
    /// t^2 — slow start, fast finish.
    Exponential,
    /// sqrt(t) — fast start, slow finish.
    Logarithmic,
    /// Smoothstep 3t^2 - 2t^3 — eases both ends.
    SCurve,
}

/// Apply fade curve to a normalized parameter t, clamped to 0.0..=1.0.
/// Every curve maps 0.0 -> 0.0 and 1.0 -> 1.0.
pub fn apply_fade(t: f32, curve: FadeCurve) -> f32 {
    let x = t.clamp(0.0, 1.0);
    match curve {
        FadeCurve::Linear => x,
        FadeCurve::Exponential => x.powi(2),
        FadeCurve::Logarithmic => x.sqrt(),
        FadeCurve::SCurve => (3.0 - 2.0 * x) * x.powi(2),
    }
}
// Unit tests for each curve's midpoint behavior and shared endpoints.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_linear() {
        assert!((apply_fade(0.5, FadeCurve::Linear) - 0.5).abs() < 1e-6);
    }

    #[test]
    fn test_exponential() {
        assert!((apply_fade(0.5, FadeCurve::Exponential) - 0.25).abs() < 1e-6);
        assert!(apply_fade(0.5, FadeCurve::Exponential) < 0.5); // slower start
    }

    #[test]
    fn test_logarithmic() {
        assert!(apply_fade(0.5, FadeCurve::Logarithmic) > 0.5); // faster start
    }

    #[test]
    fn test_scurve() {
        assert!((apply_fade(0.0, FadeCurve::SCurve) - 0.0).abs() < 1e-6);
        assert!((apply_fade(1.0, FadeCurve::SCurve) - 1.0).abs() < 1e-6);
        assert!((apply_fade(0.5, FadeCurve::SCurve) - 0.5).abs() < 1e-6); // midpoint same
    }

    // All curves are pinned to 0 at t=0 and 1 at t=1.
    #[test]
    fn test_endpoints() {
        for curve in [FadeCurve::Linear, FadeCurve::Exponential, FadeCurve::Logarithmic, FadeCurve::SCurve] {
            assert!((apply_fade(0.0, curve) - 0.0).abs() < 1e-6);
            assert!((apply_fade(1.0, curve) - 1.0).abs() < 1e-6);
        }
    }
}

View File

@@ -0,0 +1,126 @@
use std::f32::consts::PI;
/// Head radius in meters (average human).
const HEAD_RADIUS: f32 = 0.0875;
/// Speed of sound in m/s.
const SPEED_OF_SOUND: f32 = 343.0;

/// HRTF filter result for a sound at a given azimuth angle.
#[derive(Debug, Clone, Copy)]
pub struct HrtfResult {
    /// ITD: delay for the left ear, in samples.
    pub left_delay_samples: f32,
    /// ITD: delay for the right ear, in samples.
    pub right_delay_samples: f32,
    /// ILD: gain for the left ear (0.0-1.0).
    pub left_gain: f32,
    /// ILD: gain for the right ear (0.0-1.0).
    pub right_gain: f32,
}

/// Calculate HRTF parameters from azimuth angle.
/// `azimuth`: radians; 0 = front, PI/2 = right, -PI/2 = left, PI = behind.
pub fn calculate_hrtf(azimuth: f32, sample_rate: u32) -> HrtfResult {
    // Interaural time difference via the Woodworth formula:
    //   dt = (r / c) * (|azimuth| + sin|azimuth|)
    let angle = azimuth.abs();
    let itd_seconds = (HEAD_RADIUS / SPEED_OF_SOUND) * (angle + angle.sin());
    let delay_samples = itd_seconds * sample_rate as f32;

    // Interaural level difference: simplified frequency-independent
    // head-shadow term. 1.0 directly in front, 0.5 at either side,
    // 0.0 directly behind.
    let shadow = 0.5 * (1.0 + azimuth.cos());
    let shadowed_gain = (0.3 + 0.7 * shadow).min(1.0);

    // The far ear (opposite the source) is both delayed and attenuated.
    if azimuth >= 0.0 {
        // Source on the right: left ear is the far ear.
        HrtfResult {
            left_delay_samples: delay_samples,
            right_delay_samples: 0.0,
            left_gain: shadowed_gain,
            right_gain: 1.0,
        }
    } else {
        // Source on the left: right ear is the far ear.
        HrtfResult {
            left_delay_samples: 0.0,
            right_delay_samples: delay_samples,
            left_gain: 1.0,
            right_gain: shadowed_gain,
        }
    }
}
/// Calculate the azimuth angle from the listener to a sound position.
///
/// Returns radians in (-PI, PI]: 0 = straight ahead, positive = toward the
/// listener's right. Returns 0.0 (front) when the sound is effectively at
/// the listener's position, where direction is undefined.
pub fn azimuth_from_positions(
    listener_pos: [f32; 3],
    listener_forward: [f32; 3],
    listener_right: [f32; 3],
    sound_pos: [f32; 3],
) -> f32 {
    // Offset vector from listener to sound.
    let offset = [
        sound_pos[0] - listener_pos[0],
        sound_pos[1] - listener_pos[1],
        sound_pos[2] - listener_pos[2],
    ];
    let len = offset.iter().map(|c| c * c).sum::<f32>().sqrt();
    if len < 1e-6 {
        return 0.0;
    }
    // Normalized dot product of the offset with a listener axis.
    let dot = |axis: &[f32; 3]| {
        (offset[0] * axis[0] + offset[1] * axis[1] + offset[2] * axis[2]) / len
    };
    // Right axis yields sin(azimuth); forward axis yields cos(azimuth).
    dot(&listener_right).atan2(dot(&listener_forward))
}
// Unit tests: symmetry at the front, delay/gain asymmetry to each side, and
// azimuth extraction from listener-relative positions.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_hrtf_front() {
        let r = calculate_hrtf(0.0, 44100);
        assert!((r.left_delay_samples - 0.0).abs() < 1.0);
        assert!((r.right_delay_samples - 0.0).abs() < 1.0);
        assert!((r.left_gain - r.right_gain).abs() < 0.01); // symmetric
    }

    // Source to the right: left ear is delayed and attenuated.
    #[test]
    fn test_hrtf_right() {
        let r = calculate_hrtf(PI / 2.0, 44100);
        assert!(r.left_delay_samples > r.right_delay_samples);
        assert!(r.left_gain < r.right_gain);
    }

    // Mirror case of test_hrtf_right.
    #[test]
    fn test_hrtf_left() {
        let r = calculate_hrtf(-PI / 2.0, 44100);
        assert!(r.right_delay_samples > r.left_delay_samples);
        assert!(r.right_gain < r.left_gain);
    }

    #[test]
    fn test_azimuth_front() {
        let az = azimuth_from_positions(
            [0.0; 3],
            [0.0, 0.0, -1.0],
            [1.0, 0.0, 0.0],
            [0.0, 0.0, -5.0],
        );
        assert!(az.abs() < 0.1); // roughly front
    }

    #[test]
    fn test_azimuth_right() {
        let az = azimuth_from_positions(
            [0.0; 3],
            [0.0, 0.0, -1.0],
            [1.0, 0.0, 0.0],
            [5.0, 0.0, 0.0],
        );
        assert!((az - PI / 2.0).abs() < 0.1);
    }
}

View File

@@ -0,0 +1,32 @@
// --- Core data, decoding, and mixing ---
pub mod audio_clip;
pub mod wav;
pub mod mixing;
// Platform backend (Windows only; other platforms run a silent fallback).
#[cfg(target_os = "windows")]
pub mod wasapi;
pub mod audio_system;
// --- Spatialization ---
pub mod spatial;
pub mod hrtf;
// --- Mix groups and ECS component ---
pub mod mix_group;
pub mod audio_source;
// --- Effects ---
pub mod reverb;
pub mod occlusion;
// --- Public re-exports (the crate's main surface) ---
pub use audio_clip::AudioClip;
pub use audio_source::AudioSource;
pub use wav::{parse_wav, generate_wav_bytes};
pub use mixing::{PlayingSound, mix_sounds};
pub use audio_system::AudioSystem;
pub use spatial::{Listener, SpatialParams, distance_attenuation, stereo_pan, compute_spatial_gains};
pub use mix_group::{MixGroup, MixerState};
pub use reverb::{Reverb, Echo, DelayLine};
pub use occlusion::{OcclusionResult, LowPassFilter, calculate_occlusion};
// Later additions keep each `mod` adjacent to its re-export.
pub mod fade_curves;
pub use fade_curves::{FadeCurve, apply_fade};
pub mod dynamic_groups;
pub use dynamic_groups::MixGroupManager;
pub mod audio_bus;
pub use audio_bus::AudioBus;
pub mod async_loader;
pub use async_loader::AsyncAudioLoader;
pub mod effect_chain;
pub use effect_chain::{AudioEffect, EffectChain};

View File

@@ -0,0 +1,214 @@
/// Mixer group identifiers.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum MixGroup {
    /// Top-level volume multiplied into every other group.
    Master = 0,
    /// Background music.
    Bgm = 1,
    /// Sound effects.
    Sfx = 2,
    /// Dialogue / voice.
    Voice = 3,
}

/// Per-group volume state with optional fade.
#[derive(Debug, Clone)]
pub struct GroupState {
    /// Current linear volume.
    pub volume: f32,
    /// Volume the active fade is heading toward.
    pub fade_target: f32,
    /// Fade rate in volume units per second; 0.0 = no fade active.
    pub fade_speed: f32,
}

impl GroupState {
    /// Full volume, no fade in progress.
    pub fn new() -> Self {
        Self { volume: 1.0, fade_target: 1.0, fade_speed: 0.0 }
    }

    /// Advance any active fade by `dt` seconds, moving `volume` toward
    /// `fade_target` without overshooting; the fade is cleared on arrival.
    pub fn tick(&mut self, dt: f32) {
        if self.fade_speed == 0.0 {
            return; // no fade in progress
        }
        let step = self.fade_speed * dt;
        self.volume = if self.volume < self.fade_target {
            (self.volume + step).min(self.fade_target)
        } else {
            (self.volume - step).max(self.fade_target)
        };
        if (self.volume - self.fade_target).abs() < f32::EPSILON {
            self.volume = self.fade_target;
            self.fade_speed = 0.0;
        }
    }
}

impl Default for GroupState {
    fn default() -> Self {
        Self::new()
    }
}

/// Global mixer state holding one `GroupState` per `MixGroup`.
pub struct MixerState {
    /// Indexed by `MixGroup as usize`.
    pub groups: [GroupState; 4],
}

impl MixerState {
    /// Every group starts at full volume with no fade.
    pub fn new() -> Self {
        Self { groups: std::array::from_fn(|_| GroupState::new()) }
    }

    /// Array index for `group` (the enum's discriminant).
    fn idx(group: MixGroup) -> usize {
        group as usize
    }

    /// Immediately set volume for `group` (clamped to [0, 1]), cancelling
    /// any active fade.
    pub fn set_volume(&mut self, group: MixGroup, volume: f32) {
        let clamped = volume.clamp(0.0, 1.0);
        let state = &mut self.groups[Self::idx(group)];
        state.volume = clamped;
        state.fade_target = clamped;
        state.fade_speed = 0.0;
    }

    /// Begin a fade from the current volume to `target` (clamped) over
    /// `duration` seconds; a non-positive duration applies it instantly.
    pub fn fade(&mut self, group: MixGroup, target: f32, duration: f32) {
        let goal = target.clamp(0.0, 1.0);
        let state = &mut self.groups[Self::idx(group)];
        if duration <= 0.0 {
            state.volume = goal;
            state.fade_target = goal;
            state.fade_speed = 0.0;
        } else {
            state.fade_speed = (goal - state.volume).abs() / duration;
            state.fade_target = goal;
        }
    }

    /// Advance every group's fade by `dt` seconds.
    pub fn tick(&mut self, dt: f32) {
        for state in self.groups.iter_mut() {
            state.tick(dt);
        }
    }

    /// Current volume of `group`.
    pub fn volume(&self, group: MixGroup) -> f32 {
        self.groups[Self::idx(group)].volume
    }

    /// Effective volume: group volume multiplied by master volume.
    /// For `Master`, returns its own volume only.
    pub fn effective_volume(&self, group: MixGroup) -> f32 {
        let master = self.volume(MixGroup::Master);
        if group == MixGroup::Master {
            master
        } else {
            self.volume(group) * master
        }
    }
}

impl Default for MixerState {
    fn default() -> Self {
        Self::new()
    }
}
// Unit tests for fade stepping, overshoot clamping, fade cancellation,
// master/group effective volume, and fading in both directions.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn group_state_tick_fade() {
        let mut g = GroupState::new();
        g.volume = 1.0;
        g.fade_target = 0.0;
        g.fade_speed = 2.0;
        g.tick(0.25); // 0.25 * 2.0 = 0.5 moved
        assert!((g.volume - 0.5).abs() < 1e-5, "expected ~0.5, got {}", g.volume);
        g.tick(0.25); // another 0.5 → reaches 0.0
        assert!((g.volume - 0.0).abs() < 1e-5, "expected 0.0, got {}", g.volume);
        assert_eq!(g.fade_speed, 0.0, "fade_speed should be 0 after reaching target");
    }

    #[test]
    fn group_state_tick_no_overshoot() {
        let mut g = GroupState::new();
        g.volume = 1.0;
        g.fade_target = 0.0;
        g.fade_speed = 100.0; // very fast
        g.tick(1.0);
        assert_eq!(g.volume, 0.0, "should not overshoot below 0");
        assert_eq!(g.fade_speed, 0.0);
    }

    #[test]
    fn mixer_set_volume() {
        let mut m = MixerState::new();
        // Start a fade, then override with set_volume
        m.fade(MixGroup::Sfx, 0.0, 5.0);
        m.set_volume(MixGroup::Sfx, 0.3);
        assert!((m.volume(MixGroup::Sfx) - 0.3).abs() < 1e-5);
        assert_eq!(m.groups[MixGroup::Sfx as usize].fade_speed, 0.0, "fade cancelled");
    }

    #[test]
    fn mixer_fade() {
        let mut m = MixerState::new();
        m.fade(MixGroup::Sfx, 0.0, 1.0); // 1→0 over 1s, speed=1.0
        m.tick(0.5);
        let v = m.volume(MixGroup::Sfx);
        assert!((v - 0.5).abs() < 1e-5, "at 0.5s expected ~0.5, got {}", v);
        m.tick(0.5);
        let v = m.volume(MixGroup::Sfx);
        assert!((v - 0.0).abs() < 1e-5, "at 1.0s expected 0.0, got {}", v);
    }

    #[test]
    fn effective_volume() {
        let mut m = MixerState::new();
        m.set_volume(MixGroup::Master, 0.5);
        m.set_volume(MixGroup::Sfx, 0.8);
        let ev = m.effective_volume(MixGroup::Sfx);
        assert!((ev - 0.4).abs() < 1e-5, "expected 0.4, got {}", ev);
    }

    // Master at zero silences every non-master group.
    #[test]
    fn master_zero_mutes_all() {
        let mut m = MixerState::new();
        m.set_volume(MixGroup::Master, 0.0);
        for group in [MixGroup::Bgm, MixGroup::Sfx, MixGroup::Voice] {
            assert_eq!(m.effective_volume(group), 0.0);
        }
    }

    #[test]
    fn fade_up() {
        let mut m = MixerState::new();
        m.set_volume(MixGroup::Bgm, 0.0);
        m.fade(MixGroup::Bgm, 1.0, 2.0); // 0→1 over 2s, speed=0.5
        m.tick(1.0);
        let v = m.volume(MixGroup::Bgm);
        assert!((v - 0.5).abs() < 1e-5, "at 1s expected ~0.5, got {}", v);
        m.tick(1.0);
        let v = m.volume(MixGroup::Bgm);
        assert!((v - 1.0).abs() < 1e-5, "at 2s expected 1.0, got {}", v);
    }
}

View File

@@ -0,0 +1,373 @@
use crate::AudioClip;
use crate::spatial::{Listener, SpatialParams, compute_spatial_gains};
use crate::mix_group::{MixGroup, MixerState};
/// Represents a sound currently being played back.
pub struct PlayingSound {
    /// Index into the clips slice passed to `mix_sounds`.
    pub clip_index: usize,
    /// Current playback position in frames (not samples).
    pub position: usize,
    /// Linear volume multiplier [0.0 = silent, 1.0 = full].
    pub volume: f32,
    /// Whether the sound loops when it reaches the end.
    pub looping: bool,
    /// Optional 3D spatial parameters. None = 2D (no spatialization).
    pub spatial: Option<SpatialParams>,
    /// Mix group this sound belongs to. Both constructors default this to
    /// `MixGroup::Sfx`.
    pub group: MixGroup,
}
impl PlayingSound {
    /// New 2D (non-spatialized) sound in the `Sfx` group, starting at frame 0.
    pub fn new(clip_index: usize, volume: f32, looping: bool) -> Self {
        Self {
            clip_index,
            position: 0,
            volume,
            looping,
            spatial: None,
            group: MixGroup::Sfx,
        }
    }

    /// New 3D sound: identical to `new` but with spatial parameters attached.
    pub fn new_3d(clip_index: usize, volume: f32, looping: bool, spatial: SpatialParams) -> Self {
        Self {
            spatial: Some(spatial),
            ..Self::new(clip_index, volume, looping)
        }
    }
}
/// Mix all active sounds in `playing` into `output`.
///
/// # Parameters
/// - `output`: interleaved output buffer (len = frames * device_channels).
/// - `playing`: mutable list of active sounds; finished non-looping sounds are removed.
/// - `clips`: the audio clip assets; `PlayingSound::clip_index` indexes this slice.
/// - `device_sample_rate`: output device sample rate.
/// - `device_channels`: output device channel count (1 or 2).
/// - `frames`: number of output frames to fill.
/// - `listener`: the listener for 3D spatialization.
/// - `mixer`: the global mixer state providing per-group and master volumes.
///
/// NOTE(review): `clips[sound.clip_index]` is unchecked — a stale or bad
/// index panics. Confirm producers can never hold indices past a reload.
pub fn mix_sounds(
    output: &mut Vec<f32>,
    playing: &mut Vec<PlayingSound>,
    clips: &[AudioClip],
    device_sample_rate: u32,
    device_channels: u16,
    frames: usize,
    listener: &Listener,
    mixer: &MixerState,
) {
    // Ensure output buffer is sized correctly and zeroed.
    let output_len = frames * device_channels as usize;
    output.clear();
    output.resize(output_len, 0.0);
    // Indices of sounds that ended this block; removed after the loop.
    let mut finished = Vec::new();
    for (sound_idx, sound) in playing.iter_mut().enumerate() {
        let clip = &clips[sound.clip_index];
        // Apply group and master volume multiplier.
        let group_vol = mixer.effective_volume(sound.group);
        let base_volume = sound.volume * group_vol;
        // Compute per-channel effective volumes based on spatial params.
        let (vol_left, vol_right) = if let Some(ref spatial) = sound.spatial {
            let (atten, left_gain, right_gain) = compute_spatial_gains(listener, spatial);
            (base_volume * atten * left_gain, base_volume * atten * right_gain)
        } else {
            (base_volume, base_volume)
        };
        // Compute integer rate ratio for naive resampling (nearest-frame,
        // no interpolation). For same-rate clips ratio == 1, so we read
        // every frame.
        let rate_ratio = if device_sample_rate > 0 {
            clip.sample_rate as f64 / device_sample_rate as f64
        } else {
            1.0
        };
        for frame_out in 0..frames {
            // Map the output frame index to a clip frame index.
            let clip_frame = sound.position + (frame_out as f64 * rate_ratio) as usize;
            if clip_frame >= clip.frame_count() {
                // Sound exhausted within this render block.
                // NOTE(review): a looping sound leaves the rest of this block
                // silent and restarts from frame 0 only on the NEXT call —
                // confirm the one-block gap at the loop point is acceptable.
                if sound.looping {
                    // We'll reset after the loop; for now just stop filling.
                }
                break;
            }
            // Fetch sample(s) from clip frame.
            let out_base = frame_out * device_channels as usize;
            if device_channels == 2 {
                let (left, right) = if clip.channels == 1 {
                    // Mono -> stereo: apply per-channel volumes
                    let raw = clip.samples[clip_frame];
                    (raw * vol_left, raw * vol_right)
                } else {
                    // Stereo clip
                    let l = clip.samples[clip_frame * 2] * vol_left;
                    let r = clip.samples[clip_frame * 2 + 1] * vol_right;
                    (l, r)
                };
                output[out_base] += left;
                output[out_base + 1] += right;
            } else {
                // Mono output: average left and right volumes
                let vol_mono = (vol_left + vol_right) * 0.5;
                let s = if clip.channels == 1 {
                    clip.samples[clip_frame] * vol_mono
                } else {
                    // Mix stereo clip to mono
                    (clip.samples[clip_frame * 2] + clip.samples[clip_frame * 2 + 1])
                        * 0.5
                        * vol_mono
                };
                output[out_base] += s;
            }
        }
        // Advance position by the number of clip frames consumed this block.
        // NOTE(review): advances the full block even when the clip ended
        // mid-block; harmless for non-looping sounds (removed below) and
        // looping resets to 0, but verify if sub-block accuracy matters.
        let frames_consumed = (frames as f64 * rate_ratio) as usize;
        sound.position += frames_consumed;
        if sound.position >= clip.frame_count() {
            if sound.looping {
                sound.position = 0;
            } else {
                finished.push(sound_idx);
            }
        }
    }
    // Remove finished sounds in reverse order to preserve indices.
    for &idx in finished.iter().rev() {
        playing.remove(idx);
    }
    // Clamp output to [-1.0, 1.0].
    for sample in output.iter_mut() {
        *sample = sample.clamp(-1.0, 1.0);
    }
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use crate::AudioClip;
    use crate::spatial::{Listener, SpatialParams};
    use crate::mix_group::{MixGroup, MixerState};
    use voltex_math::Vec3;

    /// Build a single-channel clip whose every sample holds `value`.
    fn make_mono_clip(value: f32, frames: usize, sample_rate: u32) -> AudioClip {
        AudioClip::new(vec![value; frames], sample_rate, 1)
    }

    /// Build an interleaved two-channel clip with constant `left`/`right` samples.
    fn make_stereo_clip(left: f32, right: f32, frames: usize, sample_rate: u32) -> AudioClip {
        let mut samples = Vec::with_capacity(frames * 2);
        for _ in 0..frames {
            samples.push(left);
            samples.push(right);
        }
        AudioClip::new(samples, sample_rate, 2)
    }

    #[test]
    fn single_sound_volume() {
        // Per-sound volume scales the clip samples.
        let clips = vec![make_mono_clip(1.0, 100, 44100)];
        let mut playing = vec![PlayingSound::new(0, 0.5, false)];
        let mut output = Vec::new();
        mix_sounds(&mut output, &mut playing, &clips, 44100, 1, 10, &Listener::default(), &MixerState::new());
        // All output frames should be 0.5 (1.0 * 0.5 volume)
        assert_eq!(output.len(), 10);
        for &s in &output {
            assert!((s - 0.5).abs() < 1e-6, "expected 0.5, got {}", s);
        }
    }

    #[test]
    fn two_sounds_sum() {
        // Two clips each at 0.3; sum is 0.6 (below clamp threshold)
        let clips = vec![
            make_mono_clip(0.3, 100, 44100),
            make_mono_clip(0.3, 100, 44100),
        ];
        let mut playing = vec![
            PlayingSound::new(0, 1.0, false),
            PlayingSound::new(1, 1.0, false),
        ];
        let mut output = Vec::new();
        mix_sounds(&mut output, &mut playing, &clips, 44100, 1, 10, &Listener::default(), &MixerState::new());
        assert_eq!(output.len(), 10);
        for &s in &output {
            assert!((s - 0.6).abs() < 1e-5, "expected 0.6, got {}", s);
        }
    }

    #[test]
    fn clipping() {
        // Two clips at 1.0 each; sum would be 2.0 but must be clamped to 1.0
        let clips = vec![
            make_mono_clip(1.0, 100, 44100),
            make_mono_clip(1.0, 100, 44100),
        ];
        let mut playing = vec![
            PlayingSound::new(0, 1.0, false),
            PlayingSound::new(1, 1.0, false),
        ];
        let mut output = Vec::new();
        mix_sounds(&mut output, &mut playing, &clips, 44100, 1, 10, &Listener::default(), &MixerState::new());
        for &s in &output {
            assert!(s <= 1.0, "output {} exceeds 1.0 (clamp failed)", s);
            assert!(s >= -1.0, "output {} below -1.0 (clamp failed)", s);
        }
    }

    #[test]
    fn non_looping_removal() {
        // Clip is only 5 frames; request 20 frames; sound should be removed after
        let clips = vec![make_mono_clip(0.5, 5, 44100)];
        let mut playing = vec![PlayingSound::new(0, 1.0, false)];
        let mut output = Vec::new();
        mix_sounds(&mut output, &mut playing, &clips, 44100, 1, 20, &Listener::default(), &MixerState::new());
        // Sound should have been removed
        assert!(playing.is_empty(), "non-looping sound was not removed");
    }

    #[test]
    fn looping_continues() {
        // Clip is 5 frames; request 20 frames; looping sound should remain
        let clips = vec![make_mono_clip(0.5, 5, 44100)];
        let mut playing = vec![PlayingSound::new(0, 1.0, true)];
        let mut output = Vec::new();
        mix_sounds(&mut output, &mut playing, &clips, 44100, 1, 20, &Listener::default(), &MixerState::new());
        // Sound should still be in the list
        assert_eq!(playing.len(), 1, "looping sound was incorrectly removed");
        // Position should have been reset to 0
        assert_eq!(playing[0].position, 0);
    }

    #[test]
    fn mono_to_stereo() {
        // Mono clip mixed to stereo output: both channels should have same value
        let clips = vec![make_mono_clip(0.7, 100, 44100)];
        let mut playing = vec![PlayingSound::new(0, 1.0, false)];
        let mut output = Vec::new();
        mix_sounds(&mut output, &mut playing, &clips, 44100, 2, 10, &Listener::default(), &MixerState::new());
        // output length = 10 frames * 2 channels = 20
        assert_eq!(output.len(), 20);
        for i in 0..10 {
            let l = output[i * 2];
            let r = output[i * 2 + 1];
            assert!((l - 0.7).abs() < 1e-6, "left={}", l);
            assert!((r - 0.7).abs() < 1e-6, "right={}", r);
        }
    }

    // -----------------------------------------------------------------------
    // Spatial audio tests
    // -----------------------------------------------------------------------

    #[test]
    fn spatial_2d_unchanged() {
        // spatial=None should produce the same output as before (no spatialization)
        let clips = vec![make_mono_clip(1.0, 100, 44100)];
        let mut playing = vec![PlayingSound::new(0, 0.5, false)];
        let mut output = Vec::new();
        mix_sounds(&mut output, &mut playing, &clips, 44100, 1, 10, &Listener::default(), &MixerState::new());
        assert_eq!(output.len(), 10);
        for &s in &output {
            assert!((s - 0.5).abs() < 1e-6, "expected 0.5, got {}", s);
        }
    }

    #[test]
    fn spatial_far_away_silent() {
        // Emitter beyond max_distance → attenuation = 0.0 → ~0 output
        let clips = vec![make_mono_clip(1.0, 100, 44100)];
        let spatial = SpatialParams::new(Vec3::new(200.0, 0.0, 0.0), 1.0, 100.0);
        let mut playing = vec![PlayingSound::new_3d(0, 1.0, false, spatial)];
        let mut output = Vec::new();
        mix_sounds(&mut output, &mut playing, &clips, 44100, 2, 10, &Listener::default(), &MixerState::new());
        assert_eq!(output.len(), 20);
        for &s in &output {
            assert!(s.abs() < 1e-6, "expected ~0 for far-away emitter, got {}", s);
        }
    }

    #[test]
    fn spatial_right_panning() {
        // Emitter on +X → right channel should be louder than left channel (stereo output)
        let clips = vec![make_mono_clip(1.0, 100, 44100)];
        // Emitter close enough to be audible (within min_distance → atten=1)
        let spatial = SpatialParams::new(Vec3::new(0.5, 0.0, 0.0), 1.0, 100.0);
        let mut playing = vec![PlayingSound::new_3d(0, 1.0, false, spatial)];
        let mut output = Vec::new();
        mix_sounds(&mut output, &mut playing, &clips, 44100, 2, 10, &Listener::default(), &MixerState::new());
        assert_eq!(output.len(), 20);
        // Check first frame: left=output[0], right=output[1]
        let left = output[0];
        let right = output[1];
        assert!(right > left, "expected right > left for +X emitter, got left={}, right={}", left, right);
    }

    #[test]
    fn group_volume_applied() {
        // Sfx group at 0.5: output should be halved compared to default (1.0)
        let clips = vec![make_mono_clip(1.0, 100, 44100)];
        let mut playing = vec![PlayingSound::new(0, 1.0, false)];
        let mut output = Vec::new();
        let mut mixer = MixerState::new();
        mixer.set_volume(MixGroup::Sfx, 0.5);
        mix_sounds(&mut output, &mut playing, &clips, 44100, 1, 10, &Listener::default(), &mixer);
        assert_eq!(output.len(), 10);
        for &s in &output {
            assert!((s - 0.5).abs() < 1e-6, "expected 0.5 (halved by Sfx group vol), got {}", s);
        }
    }

    #[test]
    fn master_zero_mutes_output() {
        // Master volume at 0: all output must be silence
        let clips = vec![make_mono_clip(1.0, 100, 44100)];
        let mut playing = vec![PlayingSound::new(0, 1.0, false)];
        let mut output = Vec::new();
        let mut mixer = MixerState::new();
        mixer.set_volume(MixGroup::Master, 0.0);
        mix_sounds(&mut output, &mut playing, &clips, 44100, 1, 10, &Listener::default(), &mixer);
        assert_eq!(output.len(), 10);
        for &s in &output {
            assert!(s.abs() < 1e-6, "expected silence with master=0, got {}", s);
        }
    }
}

View File

@@ -0,0 +1,96 @@
/// Audio occlusion parameters.
#[derive(Debug, Clone, Copy)]
pub struct OcclusionResult {
    pub volume_multiplier: f32, // 0.0-1.0, reduced by occlusion
    pub lowpass_cutoff: f32,    // Hz, lower = more muffled
}

/// Simple ray-based occlusion check.
/// `occlusion_factor`: 0.0 = no occlusion (direct line of sight), 1.0 = fully occluded.
pub fn calculate_occlusion(occlusion_factor: f32) -> OcclusionResult {
    // Tolerate out-of-range inputs by clamping into [0, 1].
    let factor = occlusion_factor.clamp(0.0, 1.0);
    // Volume falls linearly to 30% when fully occluded (max 70% reduction);
    // the low-pass cutoff falls linearly from 20 kHz down to 4 kHz.
    let volume_multiplier = 1.0 - 0.7 * factor;
    let lowpass_cutoff = (1.0 - 0.8 * factor) * 20000.0;
    OcclusionResult { volume_multiplier, lowpass_cutoff }
}
/// Simple low-pass filter (one-pole IIR).
///
/// Implements `y[n] = y[n-1] + alpha * (x[n] - y[n-1])`, where `alpha` is
/// derived from the cutoff frequency with the standard RC smoothing formula.
pub struct LowPassFilter {
    prev_output: f32, // y[n-1], the single sample of filter state
    alpha: f32,       // smoothing coefficient
}

impl LowPassFilter {
    /// Smoothing coefficient for `cutoff_hz` at `sample_rate`:
    /// `alpha = dt / (RC + dt)` with `RC = 1 / (2*pi*fc)`.
    ///
    /// Factored out so `new` and `set_cutoff` cannot drift apart
    /// (previously the same formula was duplicated in both).
    fn alpha_for(cutoff_hz: f32, sample_rate: u32) -> f32 {
        let rc = 1.0 / (2.0 * std::f32::consts::PI * cutoff_hz);
        let dt = 1.0 / sample_rate as f32;
        dt / (rc + dt)
    }

    /// Create a filter with the given cutoff frequency and zeroed state.
    pub fn new(cutoff_hz: f32, sample_rate: u32) -> Self {
        LowPassFilter {
            prev_output: 0.0,
            alpha: Self::alpha_for(cutoff_hz, sample_rate),
        }
    }

    /// Retune the cutoff without resetting the filter state.
    pub fn set_cutoff(&mut self, cutoff_hz: f32, sample_rate: u32) {
        self.alpha = Self::alpha_for(cutoff_hz, sample_rate);
    }

    /// Feed one input sample; returns the filtered output.
    pub fn process(&mut self, input: f32) -> f32 {
        self.prev_output += self.alpha * (input - self.prev_output);
        self.prev_output
    }

    /// Clear the filter state (e.g. before reusing on an unrelated stream).
    pub fn reset(&mut self) {
        self.prev_output = 0.0;
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_no_occlusion() {
        // factor 0.0 → full volume and the full 20 kHz cutoff.
        let r = calculate_occlusion(0.0);
        assert!((r.volume_multiplier - 1.0).abs() < 1e-6);
        assert!((r.lowpass_cutoff - 20000.0).abs() < 1.0);
    }

    #[test]
    fn test_full_occlusion() {
        // factor 1.0 → 30% volume (max 70% reduction) and a 4 kHz cutoff.
        let r = calculate_occlusion(1.0);
        assert!((r.volume_multiplier - 0.3).abs() < 1e-6);
        assert!((r.lowpass_cutoff - 4000.0).abs() < 1.0);
    }

    #[test]
    fn test_partial_occlusion() {
        // Mid factor lands strictly between the two extremes.
        let r = calculate_occlusion(0.5);
        assert!(r.volume_multiplier > 0.3 && r.volume_multiplier < 1.0);
        assert!(r.lowpass_cutoff > 4000.0 && r.lowpass_cutoff < 20000.0);
    }

    #[test]
    fn test_lowpass_attenuates_high_freq() {
        let mut lpf = LowPassFilter::new(100.0, 44100); // very low cutoff
        // High frequency signal (alternating +1/-1) should be attenuated
        let mut max_output = 0.0_f32;
        for i in 0..100 {
            let input = if i % 2 == 0 { 1.0 } else { -1.0 };
            let output = lpf.process(input);
            max_output = max_output.max(output.abs());
        }
        assert!(max_output < 0.5, "high freq should be attenuated, got {}", max_output);
    }

    #[test]
    fn test_lowpass_passes_dc() {
        let mut lpf = LowPassFilter::new(5000.0, 44100);
        // DC signal (constant 1.0) should pass through
        for _ in 0..1000 {
            lpf.process(1.0);
        }
        let output = lpf.process(1.0);
        assert!((output - 1.0).abs() < 0.01, "DC should pass, got {}", output);
    }
}

View File

@@ -0,0 +1,294 @@
//! OGG container parser.
//!
//! Parses OGG bitstream pages and extracts Vorbis packets.
//! Reference: <https://www.xiph.org/ogg/doc/framing.html>
/// An OGG page header.
#[derive(Debug, Clone)]
pub struct OggPage {
    /// Header type flags (0x01 = continuation, 0x02 = BOS, 0x04 = EOS).
    pub header_type: u8,
    /// Granule position (PCM sample position).
    pub granule_position: u64,
    /// Bitstream serial number.
    pub serial: u32,
    /// Page sequence number.
    pub page_sequence: u32,
    /// Number of segments in this page.
    pub segment_count: u8,
    /// The segment table (each entry is a segment length, 0..255).
    pub segment_table: Vec<u8>,
    /// Raw packet data of this page (concatenated segments).
    pub data: Vec<u8>,
}

/// Parse all OGG pages from raw bytes.
///
/// Returns an error on a bad capture pattern, an unsupported stream version,
/// a truncated segment table/data region, or when no complete page is found.
/// A trailing fragment shorter than 4 bytes is tolerated (truncated stream).
///
/// Fix: the capture pattern is now validated before the 27-byte header-length
/// check. Previously, corrupt data shorter than 27 bytes fell out of the loop
/// and was misreported as "No OGG pages found" instead of an invalid capture
/// pattern (which is what this file's own `invalid_capture_pattern` test,
/// feeding 10 bytes, expects).
pub fn parse_ogg_pages(data: &[u8]) -> Result<Vec<OggPage>, String> {
    // Needed on edition 2018; already in the prelude on 2021+.
    use std::convert::TryInto;
    let mut pages = Vec::new();
    let mut offset = 0;
    while offset < data.len() {
        let remaining = &data[offset..];
        // Validate the capture pattern first so corrupt data is reported as
        // such even when fewer than 27 header bytes remain.
        if remaining.len() >= 4 && &remaining[0..4] != b"OggS" {
            return Err(format!("Invalid OGG capture pattern at offset {}", offset));
        }
        // A page header is always 27 bytes before the segment table begins.
        if remaining.len() < 27 {
            break;
        }
        let version = remaining[4];
        if version != 0 {
            return Err(format!("Unsupported OGG version: {}", version));
        }
        let header_type = remaining[5];
        let granule_position = u64::from_le_bytes(remaining[6..14].try_into().unwrap());
        let serial = u32::from_le_bytes(remaining[14..18].try_into().unwrap());
        let page_sequence = u32::from_le_bytes(remaining[18..22].try_into().unwrap());
        // remaining[22..26] holds the page CRC; verification intentionally skipped.
        let segment_count = remaining[26] as usize;
        let table_start = offset + 27;
        let data_start = table_start + segment_count;
        if data_start > data.len() {
            return Err("OGG page segment table extends beyond data".to_string());
        }
        let segment_table = data[table_start..data_start].to_vec();
        let total_data_size: usize = segment_table.iter().map(|&s| usize::from(s)).sum();
        let data_end = data_start + total_data_size;
        if data_end > data.len() {
            return Err("OGG page data extends beyond file".to_string());
        }
        pages.push(OggPage {
            header_type,
            granule_position,
            serial,
            page_sequence,
            segment_count: segment_count as u8,
            segment_table,
            data: data[data_start..data_end].to_vec(),
        });
        offset = data_end;
    }
    if pages.is_empty() {
        return Err("No OGG pages found".to_string());
    }
    Ok(pages)
}
/// Extract Vorbis packets from parsed OGG pages.
///
/// A lacing value (segment length) below 255 terminates the current packet —
/// including a value of 0, which encodes a valid zero-length packet. A lacing
/// value of exactly 255 means the packet continues into the next segment, or,
/// when it is the last segment of a page, into the next page.
///
/// Fixes: (1) zero-length packets were silently dropped by the previous
/// `!current_packet.is_empty()` guard; (2) a page whose segment table
/// disagrees with its data length now yields an error instead of panicking
/// on out-of-bounds slicing.
pub fn extract_packets(pages: &[OggPage]) -> Result<Vec<Vec<u8>>, String> {
    let mut packets: Vec<Vec<u8>> = Vec::new();
    let mut current_packet: Vec<u8> = Vec::new();
    // True once at least one lacing value of the current packet has been
    // consumed but no terminating (< 255) lacing value has been seen yet.
    let mut packet_open = false;
    for page in pages {
        let mut data_offset = 0;
        for &seg_len in &page.segment_table {
            let seg_end = data_offset + seg_len as usize;
            let seg_data = page
                .data
                .get(data_offset..seg_end)
                .ok_or_else(|| "OGG segment table inconsistent with page data".to_string())?;
            current_packet.extend_from_slice(seg_data);
            data_offset = seg_end;
            packet_open = true;
            if seg_len < 255 {
                // Terminating segment: emit the packet (possibly empty).
                packets.push(std::mem::take(&mut current_packet));
                packet_open = false;
            }
        }
    }
    // A stream truncated mid-packet (ending on 255-byte segments with no
    // terminator) still yields its partial data, matching the previous
    // lenient behavior.
    if packet_open {
        packets.push(current_packet);
    }
    Ok(packets)
}
/// Convenience function: parse OGG container and extract all Vorbis packets.
pub fn parse_ogg(data: &[u8]) -> Result<Vec<Vec<u8>>, String> {
    // Parse the page structure first, then flatten pages into packets.
    parse_ogg_pages(data).and_then(|pages| extract_packets(&pages))
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a minimal OGG page from raw packet data.
    ///
    /// Each packet is laced as N full 255-byte segments plus one terminating
    /// segment < 255 (possibly 0). The CRC field is left as dummy zeros,
    /// which the parser tolerates since it skips CRC verification.
    fn build_ogg_page(
        header_type: u8,
        granule: u64,
        serial: u32,
        page_seq: u32,
        packets_data: &[&[u8]],
    ) -> Vec<u8> {
        // Build segment table and concatenated data
        let mut segment_table = Vec::new();
        let mut page_data = Vec::new();
        for (i, packet) in packets_data.iter().enumerate() {
            let len = packet.len();
            // Write full 255-byte segments
            let full_segments = len / 255;
            let remainder = len % 255;
            for _ in 0..full_segments {
                segment_table.push(255u8);
            }
            // Terminating segment (< 255), even if 0 to signal end of packet
            segment_table.push(remainder as u8);
            page_data.extend_from_slice(packet);
        }
        let segment_count = segment_table.len();
        let mut out = Vec::new();
        // Capture pattern
        out.extend_from_slice(b"OggS");
        // Version
        out.push(0);
        // Header type
        out.push(header_type);
        // Granule position
        out.extend_from_slice(&granule.to_le_bytes());
        // Serial
        out.extend_from_slice(&serial.to_le_bytes());
        // Page sequence
        out.extend_from_slice(&page_seq.to_le_bytes());
        // CRC (dummy zeros)
        out.extend_from_slice(&[0u8; 4]);
        // Segment count
        out.push(segment_count as u8);
        // Segment table
        out.extend_from_slice(&segment_table);
        // Data
        out.extend_from_slice(&page_data);
        out
    }

    #[test]
    fn parse_single_page() {
        let packet = b"hello vorbis";
        let page_bytes = build_ogg_page(0x02, 0, 1, 0, &[packet.as_slice()]);
        let pages = parse_ogg_pages(&page_bytes).expect("parse failed");
        assert_eq!(pages.len(), 1);
        assert_eq!(pages[0].header_type, 0x02);
        assert_eq!(pages[0].serial, 1);
        assert_eq!(pages[0].page_sequence, 0);
        assert_eq!(pages[0].data, packet);
    }

    #[test]
    fn parse_multiple_pages() {
        // Two back-to-back pages of the same logical stream (serial 1).
        let p1 = build_ogg_page(0x02, 0, 1, 0, &[b"first"]);
        let p2 = build_ogg_page(0x00, 100, 1, 1, &[b"second"]);
        let mut data = p1;
        data.extend_from_slice(&p2);
        let pages = parse_ogg_pages(&data).expect("parse failed");
        assert_eq!(pages.len(), 2);
        assert_eq!(pages[0].page_sequence, 0);
        assert_eq!(pages[1].page_sequence, 1);
        assert_eq!(pages[1].granule_position, 100);
    }

    #[test]
    fn extract_single_packet() {
        let page_bytes = build_ogg_page(0x02, 0, 1, 0, &[b"packet_one"]);
        let packets = parse_ogg(&page_bytes).expect("parse_ogg failed");
        assert_eq!(packets.len(), 1);
        assert_eq!(packets[0], b"packet_one");
    }

    #[test]
    fn extract_multiple_packets_single_page() {
        let page_bytes = build_ogg_page(0x02, 0, 1, 0, &[b"pkt1", b"pkt2", b"pkt3"]);
        let packets = parse_ogg(&page_bytes).expect("parse_ogg failed");
        assert_eq!(packets.len(), 3);
        assert_eq!(packets[0], b"pkt1");
        assert_eq!(packets[1], b"pkt2");
        assert_eq!(packets[2], b"pkt3");
    }

    #[test]
    fn extract_large_packet_spanning_segments() {
        // Create a packet larger than 255 bytes
        let large_packet: Vec<u8> = (0..600).map(|i| (i % 256) as u8).collect();
        let page_bytes = build_ogg_page(0x02, 0, 1, 0, &[&large_packet]);
        let packets = parse_ogg(&page_bytes).expect("parse_ogg failed");
        assert_eq!(packets.len(), 1);
        assert_eq!(packets[0], large_packet);
    }

    #[test]
    fn invalid_capture_pattern() {
        // 10 bytes that do not start with "OggS".
        let data = b"NotOGGdata";
        let result = parse_ogg_pages(data);
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("capture pattern"));
    }

    #[test]
    fn empty_data() {
        let result = parse_ogg_pages(&[]);
        assert!(result.is_err());
    }

    #[test]
    fn page_header_fields() {
        let page_bytes = build_ogg_page(0x04, 12345, 42, 7, &[b"data"]);
        let pages = parse_ogg_pages(&page_bytes).expect("parse failed");
        assert_eq!(pages[0].header_type, 0x04); // EOS
        assert_eq!(pages[0].granule_position, 12345);
        assert_eq!(pages[0].serial, 42);
        assert_eq!(pages[0].page_sequence, 7);
    }
}

View File

@@ -0,0 +1,189 @@
/// Simple delay line for echo effect.
///
/// A fixed-size circular buffer: each `process` call returns the sample
/// written `delay_samples` calls ago and stores the new input (plus optional
/// feedback of the delayed sample).
pub struct DelayLine {
    pub(crate) buffer: Vec<f32>,
    pub(crate) write_pos: usize,
    delay_samples: usize,
}

impl DelayLine {
    /// Create a delay line of `delay_samples` samples (buffer is at least one slot).
    pub fn new(delay_samples: usize) -> Self {
        let capacity = delay_samples.max(1);
        DelayLine {
            buffer: vec![0.0; capacity],
            write_pos: 0,
            delay_samples,
        }
    }

    /// Push `input` and return the sample delayed by `delay_samples`.
    /// `feedback` scales the delayed sample mixed back into the buffer.
    pub fn process(&mut self, input: f32, feedback: f32) -> f32 {
        let len = self.buffer.len();
        // The read position trails the write position by the delay length.
        let read_pos = (self.write_pos + len - self.delay_samples) % len;
        let delayed = self.buffer[read_pos];
        self.buffer[self.write_pos] = input + delayed * feedback;
        self.write_pos = (self.write_pos + 1) % len;
        delayed
    }
}
/// Simple Schroeder reverb using 4 parallel comb filters + 2 allpass filters.
pub struct Reverb {
    // Parallel comb stages whose averaged output forms the reverb tail.
    comb_filters: Vec<CombFilter>,
    // Serial allpass stages applied after the combs for diffusion.
    allpass_filters: Vec<AllpassFilter>,
    // Mix levels used by `process`: output = input * dry + reverb * wet.
    pub wet: f32, // 0.0-1.0
    pub dry: f32, // 0.0-1.0
}
/// Feedback comb filter stage: a delay line whose delayed output is fed back
/// scaled by `feedback`, producing a decaying train of repeats.
struct CombFilter {
    delay: DelayLine,
    feedback: f32,
}

impl CombFilter {
    fn new(delay_samples: usize, feedback: f32) -> Self {
        Self {
            delay: DelayLine::new(delay_samples),
            feedback,
        }
    }

    /// Feed one sample through the comb; returns the delayed output.
    fn process(&mut self, input: f32) -> f32 {
        let fb = self.feedback;
        self.delay.process(input, fb)
    }
}
/// Allpass diffusion stage (Schroeder): passes all frequencies at equal
/// magnitude while smearing phase, increasing echo density without coloring
/// the comb output.
struct AllpassFilter {
    delay: DelayLine,
    gain: f32,
}

impl AllpassFilter {
    fn new(delay_samples: usize, gain: f32) -> Self {
        AllpassFilter { delay: DelayLine::new(delay_samples), gain }
    }

    /// Schroeder allpass:
    ///   v[n] = x[n] + g * v[n-M]   (stored in the delay line)
    ///   y[n] = v[n-M] - g * v[n]
    ///
    /// Fix: the previous implementation computed its output into an unused
    /// `_output`, then read back the buffer slot it had just written
    /// (write_pos - 1), reducing the whole stage to `(1 - g) * input` with
    /// no delay contribution at all.
    fn process(&mut self, input: f32) -> f32 {
        // DelayLine::process returns v[n-M] and stores v[n] = input + gain * v[n-M].
        let delayed = self.delay.process(input, self.gain);
        delayed - self.gain * (input + self.gain * delayed)
    }
}
impl Reverb {
    /// Build a Schroeder reverb tuned for `sample_rate`, with delay lengths
    /// chosen to avoid coinciding resonances, and a default 30/70 wet/dry mix.
    pub fn new(sample_rate: u32) -> Self {
        let sr = sample_rate as f32;
        let comb_filters = [
            (0.0297, 0.805),
            (0.0371, 0.827),
            (0.0411, 0.783),
            (0.0437, 0.764),
        ]
        .iter()
        .map(|&(seconds, feedback)| CombFilter::new((seconds * sr) as usize, feedback))
        .collect();
        let allpass_filters = [(0.005, 0.7), (0.0017, 0.7)]
            .iter()
            .map(|&(seconds, gain)| AllpassFilter::new((seconds * sr) as usize, gain))
            .collect();
        Reverb {
            comb_filters,
            allpass_filters,
            wet: 0.3,
            dry: 0.7,
        }
    }

    /// Process one sample: average the parallel comb outputs, run the result
    /// through the serial allpass chain, then apply the dry/wet mix.
    pub fn process(&mut self, input: f32) -> f32 {
        let comb_count = self.comb_filters.len() as f32;
        let comb_avg = self
            .comb_filters
            .iter_mut()
            .map(|comb| comb.process(input))
            .sum::<f32>()
            / comb_count;
        let diffused = self
            .allpass_filters
            .iter_mut()
            .fold(comb_avg, |signal, allpass| allpass.process(signal));
        input * self.dry + diffused * self.wet
    }
}
/// Simple echo (single delay + feedback).
pub struct Echo {
    delay: DelayLine,
    // Fraction of the delayed signal fed back into the delay line,
    // controlling how many audible repeats occur.
    pub feedback: f32,
    // Mix levels used by `process`: output = input * dry + delayed * wet.
    pub wet: f32,
    pub dry: f32,
}
impl Echo {
    /// Create an echo with a delay of `delay_ms` milliseconds at `sample_rate`,
    /// defaulting to feedback 0.5 and a 30/70 wet/dry mix.
    pub fn new(delay_ms: f32, sample_rate: u32) -> Self {
        let delay_samples = (delay_ms * sample_rate as f32 / 1000.0) as usize;
        Self {
            delay: DelayLine::new(delay_samples),
            feedback: 0.5,
            wet: 0.3,
            dry: 0.7,
        }
    }

    /// Feed one sample; returns the dry/wet mix of the input and its echo.
    pub fn process(&mut self, input: f32) -> f32 {
        let delayed = self.delay.process(input, self.feedback);
        delayed * self.wet + input * self.dry
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_delay_line_basic() {
        // An impulse should emerge exactly 2 samples later with no feedback.
        let mut dl = DelayLine::new(2);
        assert!((dl.process(1.0, 0.0) - 0.0).abs() < 1e-6); // no output yet
        assert!((dl.process(0.0, 0.0) - 0.0).abs() < 1e-6);
        assert!((dl.process(0.0, 0.0) - 1.0).abs() < 1e-6); // input appears after 2-sample delay
    }

    #[test]
    fn test_delay_line_feedback() {
        // With feedback 0.5, each round trip halves the impulse amplitude.
        let mut dl = DelayLine::new(2);
        dl.process(1.0, 0.5);
        dl.process(0.0, 0.5);
        let out = dl.process(0.0, 0.5); // delayed 1.0
        assert!((out - 1.0).abs() < 1e-6);
        dl.process(0.0, 0.5);
        let out2 = dl.process(0.0, 0.5); // feedback: 0.5
        assert!((out2 - 0.5).abs() < 1e-6);
    }

    #[test]
    fn test_reverb_preserves_signal() {
        let mut rev = Reverb::new(44100);
        // Process silence -> should be silence
        let out = rev.process(0.0);
        assert!((out - 0.0).abs() < 1e-6);
    }

    #[test]
    fn test_reverb_produces_output() {
        let mut rev = Reverb::new(44100);
        // Send an impulse, collect some output
        let mut energy = 0.0;
        rev.process(1.0);
        for _ in 0..4410 {
            let s = rev.process(0.0);
            energy += s.abs();
        }
        assert!(energy > 0.01, "reverb should produce decaying output after impulse");
    }

    #[test]
    fn test_echo_basic() {
        let mut echo = Echo::new(100.0, 44100); // 100ms delay
        let delay_samples = (100.0 * 44100.0 / 1000.0) as usize;
        // Send impulse
        echo.process(1.0);
        // Process until delay
        for _ in 1..delay_samples {
            echo.process(0.0);
        }
        let out = echo.process(0.0);
        // Should have echo of input (wet * delayed)
        assert!(out.abs() > 0.01, "echo should produce delayed output");
    }

    #[test]
    fn test_echo_wet_dry() {
        // With wet muted, the output equals the dry input exactly.
        let mut echo = Echo::new(10.0, 44100);
        echo.wet = 0.0;
        echo.dry = 1.0;
        let out = echo.process(0.5);
        assert!((out - 0.5).abs() < 1e-6); // dry only
    }
}

View File

@@ -0,0 +1,162 @@
use voltex_math::Vec3;
use std::f32::consts::PI;
/// Represents the listener in 3D space.
#[derive(Debug, Clone)]
pub struct Listener {
    pub position: Vec3,
    // Facing direction. Only `right` is read by `stereo_pan` for panning;
    // `forward` is carried for completeness.
    pub forward: Vec3,
    pub right: Vec3,
}

impl Default for Listener {
    /// Listener at the origin, facing -Z with +X as its right axis.
    fn default() -> Self {
        Self {
            position: Vec3::ZERO,
            forward: Vec3::new(0.0, 0.0, -1.0),
            right: Vec3::X,
        }
    }
}
/// Parameters for a 3D audio emitter.
#[derive(Debug, Clone)]
pub struct SpatialParams {
    pub position: Vec3,
    // Attenuation radii: full volume at or inside `min_distance`,
    // silent at or beyond `max_distance` (see `distance_attenuation`).
    pub min_distance: f32,
    pub max_distance: f32,
}
impl SpatialParams {
    /// Create with explicit parameters.
    pub fn new(position: Vec3, min_distance: f32, max_distance: f32) -> Self {
        Self {
            position,
            min_distance,
            max_distance,
        }
    }

    /// Create at a position with default distances (1.0 min, 100.0 max).
    pub fn at(position: Vec3) -> Self {
        Self::new(position, 1.0, 100.0)
    }
}
/// Inverse-distance attenuation model.
/// Returns 1.0 when distance <= min_dist, 0.0 when distance >= max_dist,
/// otherwise min_dist / distance clamped to [0, 1].
pub fn distance_attenuation(distance: f32, min_dist: f32, max_dist: f32) -> f32 {
    match distance {
        // Near-field check comes first, preserving the original precedence
        // should min_dist ever be >= max_dist.
        d if d <= min_dist => 1.0,
        d if d >= max_dist => 0.0,
        // In between: simple inverse-distance falloff.
        d => (min_dist / d).clamp(0.0, 1.0),
    }
}
/// Equal-power stereo panning based on listener orientation and emitter position.
/// Returns (left_gain, right_gain).
/// If the emitter is at the same position as the listener, returns (1.0, 1.0).
pub fn stereo_pan(listener: &Listener, emitter_pos: Vec3) -> (f32, f32) {
    const EPSILON: f32 = 1e-6;
    let offset = emitter_pos - listener.position;
    let distance = offset.length();
    if distance < EPSILON {
        // Emitter coincides with the listener: no usable direction, play centered.
        return (1.0, 1.0);
    }
    // Normalized direction towards the emitter.
    let direction = offset * (1.0 / distance);
    // Projection onto the listener's right axis: -1 = hard left, +1 = hard right.
    let pan = direction.dot(listener.right).clamp(-1.0, 1.0);
    // Equal-power law: map pan [-1, 1] to angle [0, PI/2];
    // cos drives the left gain, sin the right.
    let angle = pan * (PI / 4.0) + (PI / 4.0);
    (angle.cos(), angle.sin())
}
/// Convenience function combining distance attenuation and stereo panning.
/// Returns (attenuation, left_gain, right_gain).
pub fn compute_spatial_gains(listener: &Listener, spatial: &SpatialParams) -> (f32, f32, f32) {
    let distance = (spatial.position - listener.position).length();
    let attenuation = distance_attenuation(distance, spatial.min_distance, spatial.max_distance);
    let (left_gain, right_gain) = stereo_pan(listener, spatial.position);
    (attenuation, left_gain, right_gain)
}
#[cfg(test)]
mod tests {
    use super::*;
    use voltex_math::Vec3;

    #[test]
    fn attenuation_at_min() {
        // distance <= min_distance should return 1.0
        assert_eq!(distance_attenuation(0.5, 1.0, 10.0), 1.0);
        assert_eq!(distance_attenuation(1.0, 1.0, 10.0), 1.0);
    }

    #[test]
    fn attenuation_at_max() {
        // distance >= max_distance should return 0.0
        assert_eq!(distance_attenuation(10.0, 1.0, 10.0), 0.0);
        assert_eq!(distance_attenuation(50.0, 1.0, 10.0), 0.0);
    }

    #[test]
    fn attenuation_between() {
        // inverse: min_dist / distance
        let result = distance_attenuation(5.0, 1.0, 10.0);
        let expected = 1.0_f32 / 5.0_f32;
        assert!((result - expected).abs() < 1e-6, "expected {expected}, got {result}");
    }

    #[test]
    fn pan_right() {
        // Emitter to the right (+X) should give right > left
        let listener = Listener::default();
        let emitter = Vec3::new(10.0, 0.0, 0.0);
        let (left, right) = stereo_pan(&listener, emitter);
        assert!(right > left, "expected right > left for +X emitter, got left={left}, right={right}");
    }

    #[test]
    fn pan_left() {
        // Emitter to the left (-X) should give left > right
        let listener = Listener::default();
        let emitter = Vec3::new(-10.0, 0.0, 0.0);
        let (left, right) = stereo_pan(&listener, emitter);
        assert!(left > right, "expected left > right for -X emitter, got left={left}, right={right}");
    }

    #[test]
    fn pan_front() {
        // Emitter directly in front (-Z) should give roughly equal gains
        let listener = Listener::default();
        let emitter = Vec3::new(0.0, 0.0, -10.0);
        let (left, right) = stereo_pan(&listener, emitter);
        assert!((left - right).abs() < 0.01, "expected roughly equal for front emitter, got left={left}, right={right}");
    }

    #[test]
    fn pan_same_position() {
        // Same position as listener should return (1.0, 1.0)
        let listener = Listener::default();
        let emitter = Vec3::ZERO;
        let (left, right) = stereo_pan(&listener, emitter);
        assert_eq!((left, right), (1.0, 1.0));
    }

    #[test]
    fn compute_spatial_gains_combines_both() {
        let listener = Listener::default();
        // Emitter at (5, 0, 0) with min=1, max=10
        let spatial = SpatialParams::new(Vec3::new(5.0, 0.0, 0.0), 1.0, 10.0);
        let (attenuation, left, right) = compute_spatial_gains(&listener, &spatial);
        // Check attenuation: distance=5, min=1, max=10 → 1/5 = 0.2
        let expected_atten = 1.0_f32 / 5.0_f32;
        assert!((attenuation - expected_atten).abs() < 1e-6,
            "attenuation mismatch: expected {expected_atten}, got {attenuation}");
        // Emitter is to the right, so right > left
        assert!(right > left, "expected right > left, got left={left}, right={right}");
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,521 @@
#![allow(non_snake_case, non_camel_case_types, dead_code)]
//! WASAPI FFI bindings for Windows audio output.
//!
//! This module is only compiled on Windows (`#[cfg(target_os = "windows")]`).
use std::ffi::c_void;
// ---------------------------------------------------------------------------
// Basic Windows types
// ---------------------------------------------------------------------------
// Aliases mirroring the Windows SDK primitive typedefs used by these bindings.
pub type HRESULT = i32;
pub type ULONG = u32;
pub type DWORD = u32;
pub type WORD = u16;
pub type BOOL = i32;
pub type UINT = u32;
pub type UINT32 = u32;
pub type BYTE = u8;
// Per the Windows SDK, REFERENCE_TIME is expressed in 100-nanosecond units.
pub type REFERENCE_TIME = i64;
pub type HANDLE = *mut c_void;
pub type LPVOID = *mut c_void;
pub type LPCWSTR = *const u16;
// COM success code.
pub const S_OK: HRESULT = 0;
// IAudioClient::Initialize share mode and stream flags.
pub const AUDCLNT_SHAREMODE_SHARED: u32 = 0;
pub const AUDCLNT_STREAMFLAGS_RATEADJUST: u32 = 0x00100000;
// CoCreateInstance class context / CoInitializeEx threading model.
pub const CLSCTX_ALL: DWORD = 0x17;
pub const COINIT_APARTMENTTHREADED: DWORD = 0x2;
// Device enumeration constants: active endpoints only; render data-flow
// (EDataFlow::eRender) and console role (ERole::eConsole) are both 0 in
// their respective SDK enums.
pub const DEVICE_STATE_ACTIVE: DWORD = 0x1;
pub const eRender: u32 = 0;
pub const eConsole: u32 = 0;
// WAVEFORMATEX wFormatTag values.
pub const WAVE_FORMAT_PCM: WORD = 1;
pub const WAVE_FORMAT_IEEE_FLOAT: WORD = 3;
pub const WAVE_FORMAT_EXTENSIBLE: WORD = 0xFFFE;
// ---------------------------------------------------------------------------
// GUID
// ---------------------------------------------------------------------------
// Matches the 16-byte Win32 GUID layout (4 + 2 + 2 + 8 bytes, repr(C)).
#[repr(C)]
#[derive(Clone, Copy)]
pub struct GUID {
    pub Data1: u32,
    pub Data2: u16,
    pub Data3: u16,
    pub Data4: [u8; 8],
}
// Well-known class/interface IDs needed to create the device enumerator and
// activate the audio client / render client. Values are spelled out in the
// doc comment of each constant.
/// CLSID_MMDeviceEnumerator: {BCDE0395-E52F-467C-8E3D-C4579291692E}
pub const CLSID_MMDeviceEnumerator: GUID = GUID {
    Data1: 0xBCDE0395,
    Data2: 0xE52F,
    Data3: 0x467C,
    Data4: [0x8E, 0x3D, 0xC4, 0x57, 0x92, 0x91, 0x69, 0x2E],
};
/// IID_IMMDeviceEnumerator: {A95664D2-9614-4F35-A746-DE8DB63617E6}
pub const IID_IMMDeviceEnumerator: GUID = GUID {
    Data1: 0xA95664D2,
    Data2: 0x9614,
    Data3: 0x4F35,
    Data4: [0xA7, 0x46, 0xDE, 0x8D, 0xB6, 0x36, 0x17, 0xE6],
};
/// IID_IAudioClient: {1CB9AD4C-DBFA-4c32-B178-C2F568A703B2}
pub const IID_IAudioClient: GUID = GUID {
    Data1: 0x1CB9AD4C,
    Data2: 0xDBFA,
    Data3: 0x4C32,
    Data4: [0xB1, 0x78, 0xC2, 0xF5, 0x68, 0xA7, 0x03, 0xB2],
};
/// IID_IAudioRenderClient: {F294ACFC-3146-4483-A7BF-ADDCA7C260E2}
pub const IID_IAudioRenderClient: GUID = GUID {
    Data1: 0xF294ACFC,
    Data2: 0x3146,
    Data3: 0x4483,
    Data4: [0xA7, 0xBF, 0xAD, 0xDC, 0xA7, 0xC2, 0x60, 0xE2],
};
// ---------------------------------------------------------------------------
// WAVEFORMATEX
// ---------------------------------------------------------------------------
// Mirrors the Windows SDK WAVEFORMATEX layout; passed by pointer to
// IAudioClient::Initialize / IsFormatSupported.
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct WAVEFORMATEX {
    pub wFormatTag: WORD, // one of the WAVE_FORMAT_* tags above
    pub nChannels: WORD,
    pub nSamplesPerSec: DWORD,
    pub nAvgBytesPerSec: DWORD,
    pub nBlockAlign: WORD,
    pub wBitsPerSample: WORD,
    pub cbSize: WORD, // bytes of extra format data following this struct
}
// ---------------------------------------------------------------------------
// COM vtable structs
// ---------------------------------------------------------------------------
/// IUnknown vtable (base for all COM interfaces)
// NOTE: as with all vtable structs below, field order is the COM vtable slot
// order; these are read through raw interface pointers, so reordering a field
// would dispatch to the wrong method at runtime.
#[repr(C)]
pub struct IUnknownVtbl {
    pub QueryInterface: unsafe extern "system" fn(
        this: *mut c_void,
        riid: *const GUID,
        ppvObject: *mut *mut c_void,
    ) -> HRESULT,
    pub AddRef: unsafe extern "system" fn(this: *mut c_void) -> ULONG,
    pub Release: unsafe extern "system" fn(this: *mut c_void) -> ULONG,
}
/// IMMDeviceEnumerator vtable
// Method order must match the SDK's mmdeviceapi.h declaration — verify
// against the Windows SDK before adding or rearranging members.
#[repr(C)]
pub struct IMMDeviceEnumeratorVtbl {
    pub base: IUnknownVtbl,
    pub EnumAudioEndpoints: unsafe extern "system" fn(
        this: *mut c_void,
        dataFlow: u32,
        dwStateMask: DWORD,
        ppDevices: *mut *mut c_void,
    ) -> HRESULT,
    pub GetDefaultAudioEndpoint: unsafe extern "system" fn(
        this: *mut c_void,
        dataFlow: u32,
        role: u32,
        ppEndpoint: *mut *mut c_void,
    ) -> HRESULT,
    pub GetDevice: unsafe extern "system" fn(
        this: *mut c_void,
        pwstrId: LPCWSTR,
        ppDevice: *mut *mut c_void,
    ) -> HRESULT,
    pub RegisterEndpointNotificationCallback: unsafe extern "system" fn(
        this: *mut c_void,
        pClient: *mut c_void,
    ) -> HRESULT,
    pub UnregisterEndpointNotificationCallback: unsafe extern "system" fn(
        this: *mut c_void,
        pClient: *mut c_void,
    ) -> HRESULT,
}
/// IMMDevice vtable
// Method order must match the SDK's mmdeviceapi.h declaration.
#[repr(C)]
pub struct IMMDeviceVtbl {
    pub base: IUnknownVtbl,
    // Activate with IID_IAudioClient yields the IAudioClient interface.
    pub Activate: unsafe extern "system" fn(
        this: *mut c_void,
        iid: *const GUID,
        dwClsCtx: DWORD,
        pActivationParams: *mut c_void,
        ppInterface: *mut *mut c_void,
    ) -> HRESULT,
    pub OpenPropertyStore: unsafe extern "system" fn(
        this: *mut c_void,
        stgmAccess: DWORD,
        ppProperties: *mut *mut c_void,
    ) -> HRESULT,
    pub GetId: unsafe extern "system" fn(
        this: *mut c_void,
        ppstrId: *mut LPCWSTR,
    ) -> HRESULT,
    pub GetState: unsafe extern "system" fn(
        this: *mut c_void,
        pdwState: *mut DWORD,
    ) -> HRESULT,
}
/// IAudioClient vtable
///
/// Mirrors the method order of `IAudioClient` (audioclient.h). The methods
/// used in this file are Initialize, GetBufferSize, GetCurrentPadding,
/// GetMixFormat, Start, Stop and GetService; the rest exist to keep the
/// vtable slots aligned.
#[repr(C)]
pub struct IAudioClientVtbl {
    pub base: IUnknownVtbl,
    /// Opens the stream; durations are REFERENCE_TIME (100-ns units).
    pub Initialize: unsafe extern "system" fn(
        this: *mut c_void,
        ShareMode: u32,
        StreamFlags: DWORD,
        hnsBufferDuration: REFERENCE_TIME,
        hnsPeriodicity: REFERENCE_TIME,
        pFormat: *const WAVEFORMATEX,
        AudioSessionGuid: *const GUID,
    ) -> HRESULT,
    /// Total endpoint buffer size, in frames.
    pub GetBufferSize: unsafe extern "system" fn(
        this: *mut c_void,
        pNumBufferFrames: *mut UINT32,
    ) -> HRESULT,
    pub GetStreamLatency: unsafe extern "system" fn(
        this: *mut c_void,
        phnsLatency: *mut REFERENCE_TIME,
    ) -> HRESULT,
    /// Frames currently queued and not yet played — used to size writes.
    pub GetCurrentPadding: unsafe extern "system" fn(
        this: *mut c_void,
        pNumPaddingFrames: *mut UINT32,
    ) -> HRESULT,
    pub IsFormatSupported: unsafe extern "system" fn(
        this: *mut c_void,
        ShareMode: u32,
        pFormat: *const WAVEFORMATEX,
        ppClosestMatch: *mut *mut WAVEFORMATEX,
    ) -> HRESULT,
    /// Returns the device's shared-mode format; the out pointer is COM-owned
    /// and must be freed with CoTaskMemFree.
    pub GetMixFormat: unsafe extern "system" fn(
        this: *mut c_void,
        ppDeviceFormat: *mut *mut WAVEFORMATEX,
    ) -> HRESULT,
    pub GetDevicePeriod: unsafe extern "system" fn(
        this: *mut c_void,
        phnsDefaultDevicePeriod: *mut REFERENCE_TIME,
        phnsMinimumDevicePeriod: *mut REFERENCE_TIME,
    ) -> HRESULT,
    pub Start: unsafe extern "system" fn(this: *mut c_void) -> HRESULT,
    pub Stop: unsafe extern "system" fn(this: *mut c_void) -> HRESULT,
    pub Reset: unsafe extern "system" fn(this: *mut c_void) -> HRESULT,
    pub SetEventHandle: unsafe extern "system" fn(
        this: *mut c_void,
        eventHandle: HANDLE,
    ) -> HRESULT,
    /// Fetches a companion service interface — used for IAudioRenderClient.
    pub GetService: unsafe extern "system" fn(
        this: *mut c_void,
        riid: *const GUID,
        ppv: *mut *mut c_void,
    ) -> HRESULT,
}
/// IAudioRenderClient vtable
///
/// Mirrors `IAudioRenderClient` (audioclient.h): acquire a write pointer
/// with GetBuffer, commit with ReleaseBuffer. Slot order must not change.
#[repr(C)]
pub struct IAudioRenderClientVtbl {
    pub base: IUnknownVtbl,
    /// Exposes `NumFramesRequested` frames of the endpoint buffer for writing.
    pub GetBuffer: unsafe extern "system" fn(
        this: *mut c_void,
        NumFramesRequested: UINT32,
        ppData: *mut *mut BYTE,
    ) -> HRESULT,
    /// Commits the frames written after a matching GetBuffer call.
    pub ReleaseBuffer: unsafe extern "system" fn(
        this: *mut c_void,
        NumFramesWritten: UINT32,
        dwFlags: DWORD,
    ) -> HRESULT,
}
// ---------------------------------------------------------------------------
// extern "system" functions
// ---------------------------------------------------------------------------
/// COM runtime entry points, linked from ole32.dll.
#[link(name = "ole32")]
extern "system" {
    /// Initializes COM on the calling thread; every success must be balanced
    /// by a `CoUninitialize` (done in `WasapiDevice`'s error paths and Drop).
    pub fn CoInitializeEx(pvReserved: LPVOID, dwCoInit: DWORD) -> HRESULT;
    pub fn CoUninitialize();
    /// Creates a COM object of class `rclsid` and returns interface `riid`.
    pub fn CoCreateInstance(
        rclsid: *const GUID,
        pUnkOuter: *mut c_void,
        dwClsContext: DWORD,
        riid: *const GUID,
        ppv: *mut *mut c_void,
    ) -> HRESULT,
    /// Frees COM-allocated memory (e.g. the WAVEFORMATEX from GetMixFormat).
    pub fn CoTaskMemFree(pv: LPVOID);
}
// ---------------------------------------------------------------------------
// WasapiDevice
// ---------------------------------------------------------------------------
/// Shared-mode WASAPI render device, wrapping raw COM interface pointers.
pub struct WasapiDevice {
    /// IAudioClient* — owns the stream; stopped and released in `Drop`.
    client: *mut c_void,
    /// IAudioRenderClient* — used to obtain/commit the render buffer.
    render_client: *mut c_void,
    /// Total endpoint buffer size in frames (from GetBufferSize).
    buffer_size: u32,
    pub sample_rate: u32,
    pub channels: u16,
    /// Bits per sample of the device mix format. Currently informational —
    /// `write_samples` only distinguishes float vs 16-bit int via `is_float`.
    bits_per_sample: u16,
    /// True when the mix format is IEEE float (samples are written as f32).
    is_float: bool,
}
// SAFETY(review): the raw COM pointers are only touched through this struct's
// methods. Note that COM is initialized with COINIT_APARTMENTTHREADED on the
// creating thread; whether these WASAPI interfaces may safely be used (and
// CoUninitialize called) from a different thread should be confirmed against
// the COM/WASAPI threading documentation.
unsafe impl Send for WasapiDevice {}
impl WasapiDevice {
    /// Initialize WASAPI: COM, default endpoint, IAudioClient, mix format,
    /// Initialize shared mode (50 ms buffer), GetBufferSize, GetService, Start.
    ///
    /// Every COM object acquired along the way is released on each error
    /// path, and `CoUninitialize` rebalances the initial `CoInitializeEx`.
    pub fn new() -> Result<Self, String> {
        unsafe {
            // 1. CoInitializeEx
            // A negative HRESULT is failure; S_OK and S_FALSE (already
            // initialized) are both >= 0 and accepted.
            let hr = CoInitializeEx(std::ptr::null_mut(), COINIT_APARTMENTTHREADED);
            if hr < 0 {
                return Err(format!("CoInitializeEx failed: 0x{:08X}", hr as u32));
            }
            // 2. CoCreateInstance -> IMMDeviceEnumerator
            let mut enumerator: *mut c_void = std::ptr::null_mut();
            let hr = CoCreateInstance(
                &CLSID_MMDeviceEnumerator,
                std::ptr::null_mut(),
                CLSCTX_ALL,
                &IID_IMMDeviceEnumerator,
                &mut enumerator,
            );
            if hr < 0 || enumerator.is_null() {
                CoUninitialize();
                return Err(format!("CoCreateInstance(MMDeviceEnumerator) failed: 0x{:08X}", hr as u32));
            }
            // 3. GetDefaultAudioEndpoint -> IMMDevice
            // The enumerator is released as soon as the endpoint is obtained.
            let mut device: *mut c_void = std::ptr::null_mut();
            {
                let vtbl = *(enumerator as *mut *const IMMDeviceEnumeratorVtbl);
                let hr = ((*vtbl).GetDefaultAudioEndpoint)(enumerator, eRender, eConsole, &mut device);
                ((*vtbl).base.Release)(enumerator);
                if hr < 0 || device.is_null() {
                    CoUninitialize();
                    return Err(format!("GetDefaultAudioEndpoint failed: 0x{:08X}", hr as u32));
                }
            }
            // 4. IMMDevice::Activate -> IAudioClient
            // The device is likewise released once the client exists.
            let mut client: *mut c_void = std::ptr::null_mut();
            {
                let vtbl = *(device as *mut *const IMMDeviceVtbl);
                let hr = ((*vtbl).Activate)(
                    device,
                    &IID_IAudioClient,
                    CLSCTX_ALL,
                    std::ptr::null_mut(),
                    &mut client,
                );
                ((*vtbl).base.Release)(device);
                if hr < 0 || client.is_null() {
                    CoUninitialize();
                    return Err(format!("IMMDevice::Activate(IAudioClient) failed: 0x{:08X}", hr as u32));
                }
            }
            // 5. GetMixFormat
            // The returned WAVEFORMATEX is COM-allocated; it is copied by
            // value below and freed with CoTaskMemFree after Initialize.
            let mut mix_format_ptr: *mut WAVEFORMATEX = std::ptr::null_mut();
            {
                let vtbl = *(client as *mut *const IAudioClientVtbl);
                let hr = ((*vtbl).GetMixFormat)(client, &mut mix_format_ptr);
                if hr < 0 || mix_format_ptr.is_null() {
                    ((*vtbl).base.Release)(client);
                    CoUninitialize();
                    return Err(format!("GetMixFormat failed: 0x{:08X}", hr as u32));
                }
            }
            let mix_format = *mix_format_ptr;
            let sample_rate = mix_format.nSamplesPerSec;
            let channels = mix_format.nChannels;
            let bits_per_sample = mix_format.wBitsPerSample;
            // Determine float vs int
            // NOTE(review): WAVE_FORMAT_EXTENSIBLE with 32-bit samples is
            // assumed to be IEEE float here. A 32-bit *integer* extensible
            // format would be misdetected — confirm against the SubFormat
            // GUID if that combination can occur on target hardware.
            let is_float = match mix_format.wFormatTag {
                WAVE_FORMAT_IEEE_FLOAT => true,
                WAVE_FORMAT_EXTENSIBLE => bits_per_sample == 32,
                _ => false,
            };
            // 6. IAudioClient::Initialize (shared mode, 50 ms = 500_000 REFERENCE_TIME)
            const BUFFER_DURATION: REFERENCE_TIME = 500_000; // 50 ms in 100-ns units
            let hr = {
                let vtbl = *(client as *mut *const IAudioClientVtbl);
                ((*vtbl).Initialize)(
                    client,
                    AUDCLNT_SHAREMODE_SHARED,
                    0,
                    BUFFER_DURATION,
                    0,
                    mix_format_ptr,
                    std::ptr::null(),
                )
            };
            // The mix format is no longer needed regardless of outcome.
            CoTaskMemFree(mix_format_ptr as LPVOID);
            if hr < 0 {
                let vtbl = *(client as *mut *const IAudioClientVtbl);
                ((*vtbl).base.Release)(client);
                CoUninitialize();
                return Err(format!("IAudioClient::Initialize failed: 0x{:08X}", hr as u32));
            }
            // 7. GetBufferSize
            let mut buffer_size: UINT32 = 0;
            {
                let vtbl = *(client as *mut *const IAudioClientVtbl);
                let hr = ((*vtbl).GetBufferSize)(client, &mut buffer_size);
                if hr < 0 {
                    ((*vtbl).base.Release)(client);
                    CoUninitialize();
                    return Err(format!("GetBufferSize failed: 0x{:08X}", hr as u32));
                }
            }
            // 8. GetService -> IAudioRenderClient
            let mut render_client: *mut c_void = std::ptr::null_mut();
            {
                let vtbl = *(client as *mut *const IAudioClientVtbl);
                let hr = ((*vtbl).GetService)(client, &IID_IAudioRenderClient, &mut render_client);
                if hr < 0 || render_client.is_null() {
                    ((*vtbl).base.Release)(client);
                    CoUninitialize();
                    return Err(format!("GetService(IAudioRenderClient) failed: 0x{:08X}", hr as u32));
                }
            }
            // 9. Start
            {
                let vtbl = *(client as *mut *const IAudioClientVtbl);
                let hr = ((*vtbl).Start)(client);
                if hr < 0 {
                    let rc_vtbl = *(render_client as *mut *const IAudioRenderClientVtbl);
                    ((*rc_vtbl).base.Release)(render_client);
                    ((*vtbl).base.Release)(client);
                    CoUninitialize();
                    return Err(format!("IAudioClient::Start failed: 0x{:08X}", hr as u32));
                }
            }
            Ok(WasapiDevice {
                client,
                render_client,
                buffer_size,
                sample_rate,
                channels,
                bits_per_sample,
                is_float,
            })
        }
    }
    /// Write f32 samples to the audio device.
    /// Returns the number of frames actually written.
    ///
    /// Writes at most the currently free portion of the endpoint buffer
    /// (buffer_size - padding); callers should loop / retry with the
    /// remainder. A return of Ok(0) means the buffer is full or the input
    /// held less than one full frame.
    pub fn write_samples(&self, samples: &[f32]) -> Result<usize, String> {
        unsafe {
            // GetCurrentPadding: frames already queued and not yet played.
            let mut padding: UINT32 = 0;
            {
                let vtbl = *(self.client as *mut *const IAudioClientVtbl);
                let hr = ((*vtbl).GetCurrentPadding)(self.client, &mut padding);
                if hr < 0 {
                    return Err(format!("GetCurrentPadding failed: 0x{:08X}", hr as u32));
                }
            }
            let available_frames = if self.buffer_size > padding {
                self.buffer_size - padding
            } else {
                return Ok(0);
            };
            let samples_per_frame = self.channels as usize;
            let input_frames = samples.len() / samples_per_frame;
            let frames_to_write = available_frames.min(input_frames as u32);
            if frames_to_write == 0 {
                return Ok(0);
            }
            // GetBuffer: borrow a writable region of the endpoint buffer.
            let mut data_ptr: *mut BYTE = std::ptr::null_mut();
            {
                let vtbl = *(self.render_client as *mut *const IAudioRenderClientVtbl);
                let hr = ((*vtbl).GetBuffer)(self.render_client, frames_to_write, &mut data_ptr);
                if hr < 0 || data_ptr.is_null() {
                    return Err(format!("GetBuffer failed: 0x{:08X}", hr as u32));
                }
            }
            // Write samples
            let total_samples = frames_to_write as usize * samples_per_frame;
            if self.is_float {
                // Write f32 directly; any shortfall is zero-filled (silence).
                let dst = std::slice::from_raw_parts_mut(data_ptr as *mut f32, total_samples);
                let src_len = total_samples.min(samples.len());
                dst[..src_len].copy_from_slice(&samples[..src_len]);
                if src_len < total_samples {
                    for s in &mut dst[src_len..] {
                        *s = 0.0;
                    }
                }
            } else {
                // Convert f32 to i16
                // NOTE(review): this path assumes a 16-bit integer mix
                // format; self.bits_per_sample is not consulted, so a
                // 24/32-bit int device format would be written incorrectly —
                // confirm which non-float formats shared mode can report.
                let dst = std::slice::from_raw_parts_mut(data_ptr as *mut i16, total_samples);
                for i in 0..total_samples {
                    let val = if i < samples.len() { samples[i] } else { 0.0 };
                    dst[i] = (val.clamp(-1.0, 1.0) * 32767.0) as i16;
                }
            }
            // ReleaseBuffer: commit exactly the frames requested above.
            {
                let vtbl = *(self.render_client as *mut *const IAudioRenderClientVtbl);
                let hr = ((*vtbl).ReleaseBuffer)(self.render_client, frames_to_write, 0);
                if hr < 0 {
                    return Err(format!("ReleaseBuffer failed: 0x{:08X}", hr as u32));
                }
            }
            Ok(frames_to_write as usize)
        }
    }
    /// Returns the allocated buffer size in frames.
    pub fn buffer_frames(&self) -> u32 {
        self.buffer_size
    }
}
/// Tears down in reverse order of construction: stop the stream, release the
/// render client, release the audio client, then uninitialize COM.
///
/// NOTE(review): `CoUninitialize` is expected to run on the thread that
/// called `CoInitializeEx`; since the device is `Send`, dropping it on a
/// different thread may unbalance COM init on both threads — confirm.
impl Drop for WasapiDevice {
    fn drop(&mut self) {
        unsafe {
            // Stop the audio stream
            let client_vtbl = *(self.client as *mut *const IAudioClientVtbl);
            ((*client_vtbl).Stop)(self.client);
            // Release IAudioRenderClient
            let rc_vtbl = *(self.render_client as *mut *const IAudioRenderClientVtbl);
            ((*rc_vtbl).base.Release)(self.render_client);
            // Release IAudioClient
            ((*client_vtbl).base.Release)(self.client);
            // Uninitialize COM
            CoUninitialize();
        }
    }
}

View File

@@ -0,0 +1,478 @@
use crate::AudioClip;
// ---------------------------------------------------------------------------
// Helper readers
// ---------------------------------------------------------------------------
/// Read a little-endian u16 at `offset`, erroring when fewer than two bytes
/// remain.
fn read_u16_le(data: &[u8], offset: usize) -> Result<u16, String> {
    let bytes: [u8; 2] = data
        .get(offset..offset + 2)
        .and_then(|s| s.try_into().ok())
        .ok_or_else(|| format!("read_u16_le: offset {} out of bounds (len={})", offset, data.len()))?;
    Ok(u16::from_le_bytes(bytes))
}
/// Read a little-endian u32 at `offset`, erroring when fewer than four bytes
/// remain.
fn read_u32_le(data: &[u8], offset: usize) -> Result<u32, String> {
    match data.get(offset..offset + 4) {
        Some(s) => Ok(u32::from_le_bytes([s[0], s[1], s[2], s[3]])),
        None => Err(format!("read_u32_le: offset {} out of bounds (len={})", offset, data.len())),
    }
}
/// Read a little-endian i16 at `offset`, erroring when fewer than two bytes
/// remain.
fn read_i16_le(data: &[u8], offset: usize) -> Result<i16, String> {
    if data.len() >= offset + 2 {
        Ok(i16::from_le_bytes([data[offset], data[offset + 1]]))
    } else {
        Err(format!("read_i16_le: offset {} out of bounds (len={})", offset, data.len()))
    }
}
/// Search for a four-byte chunk ID starting after `start` and return its
/// (data_offset, data_size) pair, where data_offset is the first byte of the
/// chunk's payload.
///
/// Walks the RIFF chunk list: each chunk is an 8-byte header (4-byte ID +
/// u32 LE size) followed by `size` payload bytes, padded to an even length.
/// The size field is parsed once per chunk (the previous version parsed it
/// twice), and all position arithmetic is checked so a hostile size field
/// cannot overflow `usize` — overflow simply ends the search with `None`.
fn find_chunk(data: &[u8], id: &[u8; 4], start: usize) -> Option<(usize, u32)> {
    let mut pos = start;
    loop {
        // Need a full 8-byte header at `pos`.
        let header_end = pos.checked_add(8)?;
        if header_end > data.len() {
            return None;
        }
        let size = u32::from_le_bytes([
            data[pos + 4],
            data[pos + 5],
            data[pos + 6],
            data[pos + 7],
        ]);
        if &data[pos..pos + 4] == id {
            return Some((header_end, size));
        }
        // Skip payload, padded to even (RIFF chunks are word-aligned).
        let payload = size as usize;
        let padded = payload.checked_add(payload & 1)?;
        pos = header_end.checked_add(padded)?;
    }
}
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
/// Read a 24-bit signed integer (little-endian) from 3 bytes and return as i32.
fn read_i24_le(data: &[u8], offset: usize) -> Result<i32, String> {
    let b = data
        .get(offset..offset + 3)
        .ok_or_else(|| format!("read_i24_le: offset {} out of bounds (len={})", offset, data.len()))?;
    let raw = (b[0] as i32) | ((b[1] as i32) << 8) | ((b[2] as i32) << 16);
    // Shift the 24-bit value into the top of the i32, then arithmetic-shift
    // back down — this sign-extends bit 23 across the upper byte.
    Ok((raw << 8) >> 8)
}
/// Read a 32-bit float (little-endian).
fn read_f32_le(data: &[u8], offset: usize) -> Result<f32, String> {
    match data.get(offset..offset + 4) {
        Some(s) => {
            let arr: [u8; 4] = [s[0], s[1], s[2], s[3]];
            Ok(f32::from_le_bytes(arr))
        }
        None => Err(format!("read_f32_le: offset {} out of bounds (len={})", offset, data.len())),
    }
}
/// Parse a WAV file from raw bytes into an [`AudioClip`].
///
/// Supported formats:
/// - PCM 16-bit (format_tag=1, bits_per_sample=16)
/// - PCM 24-bit (format_tag=1, bits_per_sample=24)
/// - IEEE float 32-bit (format_tag=3, bits_per_sample=32)
pub fn parse_wav(data: &[u8]) -> Result<AudioClip, String> {
    // Minimum viable WAV: RIFF(4) + size(4) + WAVE(4) = 12 bytes
    if data.len() < 12 {
        return Err("WAV data too short".to_string());
    }
    // RIFF container identification.
    if &data[0..4] != b"RIFF" {
        return Err("Missing RIFF header".to_string());
    }
    if &data[8..12] != b"WAVE" {
        return Err("Missing WAVE format identifier".to_string());
    }
    // --- fmt chunk (chunk list begins at byte 12) ---
    let (fmt_offset, fmt_size) =
        find_chunk(data, b"fmt ", 12).ok_or("Missing fmt chunk")?;
    if fmt_size < 16 {
        return Err(format!("fmt chunk too small: {}", fmt_size));
    }
    let format_tag = read_u16_le(data, fmt_offset)?;
    let channels = read_u16_le(data, fmt_offset + 2)?;
    if !matches!(channels, 1 | 2) {
        return Err(format!("Unsupported channel count: {}", channels));
    }
    let sample_rate = read_u32_le(data, fmt_offset + 4)?;
    // Byte rate (+8) and block align (+12) are derivable; skip them.
    let bits_per_sample = read_u16_le(data, fmt_offset + 14)?;
    // Validate the (format_tag, bits_per_sample) combination up front so the
    // decode loop below only ever sees a supported pairing.
    let bytes_per_sample: usize = match (format_tag, bits_per_sample) {
        (1, 16) => 2, // PCM 16-bit
        (1, 24) => 3, // PCM 24-bit
        (3, 32) => 4, // IEEE float 32-bit
        (1, bps) => return Err(format!("Unsupported PCM bits per sample: {}", bps)),
        (3, bps) => return Err(format!("Unsupported float bits per sample: {} (only 32-bit supported)", bps)),
        (tag, _) => return Err(format!("Unsupported WAV format tag: {} (only PCM=1 and IEEE_FLOAT=3 are supported)", tag)),
    };
    // --- data chunk ---
    let (data_offset, data_size) =
        find_chunk(data, b"data", 12).ok_or("Missing data chunk")?;
    let data_end = data_offset + data_size as usize;
    if data_end > data.len() {
        return Err("data chunk extends beyond end of file".to_string());
    }
    let payload = &data[data_offset..data_end];
    let mut samples: Vec<f32> = Vec::with_capacity(payload.len() / bytes_per_sample);
    // Decode fixed-width frames; a trailing partial sample is ignored, as in
    // the integer division the sample count is derived from.
    match bits_per_sample {
        16 => samples.extend(
            payload
                .chunks_exact(2)
                .map(|c| i16::from_le_bytes([c[0], c[1]]) as f32 / 32768.0),
        ),
        24 => samples.extend(payload.chunks_exact(3).map(|c| {
            // Assemble 24 bits, sign-extend via shift, scale by 2^23.
            let raw = (c[0] as i32) | ((c[1] as i32) << 8) | ((c[2] as i32) << 16);
            (((raw << 8) >> 8) as f32) / 8388608.0
        })),
        // Only (3, 32) can reach here after the validation above.
        _ => samples.extend(
            payload
                .chunks_exact(4)
                .map(|c| f32::from_le_bytes([c[0], c[1], c[2], c[3]])),
        ),
    }
    Ok(AudioClip::new(samples, sample_rate, channels))
}
/// Generate a minimal PCM 16-bit mono WAV file from f32 samples.
/// Used for round-trip testing.
pub fn generate_wav_bytes(samples_f32: &[f32], sample_rate: u32) -> Vec<u8> {
    const CHANNELS: u16 = 1;
    const BITS: u16 = 16;
    let data_size = (samples_f32.len() * 2) as u32; // 2 bytes per i16 sample
    let byte_rate = sample_rate * CHANNELS as u32 * BITS as u32 / 8;
    let block_align: u16 = CHANNELS * BITS / 8;
    // RIFF size counts everything after the 8-byte RIFF header:
    // "WAVE" + fmt chunk (8 + 16) + data chunk header (8) + payload.
    let riff_size = 4 + 8 + 16 + 8 + data_size;
    let mut out: Vec<u8> = Vec::with_capacity(12 + 8 + 16 + 8 + data_size as usize);
    // RIFF header
    out.extend_from_slice(b"RIFF");
    out.extend_from_slice(&riff_size.to_le_bytes());
    out.extend_from_slice(b"WAVE");
    // fmt chunk: 16-byte PCM descriptor.
    out.extend_from_slice(b"fmt ");
    out.extend_from_slice(&16u32.to_le_bytes());
    out.extend_from_slice(&1u16.to_le_bytes()); // wFormatTag = PCM
    out.extend_from_slice(&CHANNELS.to_le_bytes());
    out.extend_from_slice(&sample_rate.to_le_bytes());
    out.extend_from_slice(&byte_rate.to_le_bytes());
    out.extend_from_slice(&block_align.to_le_bytes());
    out.extend_from_slice(&BITS.to_le_bytes());
    // data chunk: clamp to [-1, 1] and quantize to i16.
    out.extend_from_slice(b"data");
    out.extend_from_slice(&data_size.to_le_bytes());
    out.extend(
        samples_f32
            .iter()
            .flat_map(|s| ((s.clamp(-1.0, 1.0) * 32767.0) as i16).to_le_bytes()),
    );
    out
}
/// Generate a minimal PCM 24-bit mono WAV file from f32 samples.
/// Used for round-trip testing.
pub fn generate_wav_bytes_24bit(samples_f32: &[f32], sample_rate: u32) -> Vec<u8> {
    const CHANNELS: u16 = 1;
    const BITS: u16 = 24;
    let data_size = (samples_f32.len() * 3) as u32;
    let byte_rate = sample_rate * CHANNELS as u32 * BITS as u32 / 8;
    let block_align: u16 = CHANNELS * BITS / 8;
    // "WAVE" + fmt chunk (8 + 16) + data chunk header (8) + payload.
    let riff_size = 4 + 8 + 16 + 8 + data_size;
    let mut out: Vec<u8> = Vec::with_capacity(12 + 8 + 16 + 8 + data_size as usize);
    // RIFF header
    out.extend_from_slice(b"RIFF");
    out.extend_from_slice(&riff_size.to_le_bytes());
    out.extend_from_slice(b"WAVE");
    // fmt chunk
    out.extend_from_slice(b"fmt ");
    out.extend_from_slice(&16u32.to_le_bytes());
    out.extend_from_slice(&1u16.to_le_bytes()); // wFormatTag = PCM
    out.extend_from_slice(&CHANNELS.to_le_bytes());
    out.extend_from_slice(&sample_rate.to_le_bytes());
    out.extend_from_slice(&byte_rate.to_le_bytes());
    out.extend_from_slice(&block_align.to_le_bytes());
    out.extend_from_slice(&BITS.to_le_bytes());
    // data chunk: quantize to 24-bit and emit the low three LE bytes of the i32.
    out.extend_from_slice(b"data");
    out.extend_from_slice(&data_size.to_le_bytes());
    for &s in samples_f32 {
        let quantized = (s.clamp(-1.0, 1.0) * 8388607.0) as i32;
        out.extend_from_slice(&quantized.to_le_bytes()[..3]);
    }
    out
}
/// Generate a minimal IEEE float 32-bit mono WAV file from f32 samples.
/// Used for round-trip testing.
pub fn generate_wav_bytes_f32(samples_f32: &[f32], sample_rate: u32) -> Vec<u8> {
    const CHANNELS: u16 = 1;
    const BITS: u16 = 32;
    let data_size = (samples_f32.len() * 4) as u32;
    let byte_rate = sample_rate * CHANNELS as u32 * BITS as u32 / 8;
    let block_align: u16 = CHANNELS * BITS / 8;
    // "WAVE" + fmt chunk (8 + 16) + data chunk header (8) + payload.
    let riff_size = 4 + 8 + 16 + 8 + data_size;
    let mut out: Vec<u8> = Vec::with_capacity(12 + 8 + 16 + 8 + data_size as usize);
    // RIFF header
    out.extend_from_slice(b"RIFF");
    out.extend_from_slice(&riff_size.to_le_bytes());
    out.extend_from_slice(b"WAVE");
    // fmt chunk
    out.extend_from_slice(b"fmt ");
    out.extend_from_slice(&16u32.to_le_bytes());
    out.extend_from_slice(&3u16.to_le_bytes()); // wFormatTag = IEEE_FLOAT
    out.extend_from_slice(&CHANNELS.to_le_bytes());
    out.extend_from_slice(&sample_rate.to_le_bytes());
    out.extend_from_slice(&byte_rate.to_le_bytes());
    out.extend_from_slice(&block_align.to_le_bytes());
    out.extend_from_slice(&BITS.to_le_bytes());
    // data chunk: raw f32 little-endian, no clamping or quantization.
    out.extend_from_slice(b"data");
    out.extend_from_slice(&data_size.to_le_bytes());
    out.extend(samples_f32.iter().flat_map(|s| s.to_le_bytes()));
    out
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
// Round-trip and rejection tests: each generator above is fed back through
// parse_wav, and quantization error bounds scale with bit depth.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn parse_valid_wav() {
        // Generate a 440 Hz sine wave at 44100 Hz, 0.1 s
        let sample_rate = 44100u32;
        let num_samples = 4410usize;
        let samples: Vec<f32> = (0..num_samples)
            .map(|i| (2.0 * std::f32::consts::PI * 440.0 * i as f32 / sample_rate as f32).sin())
            .collect();
        let wav_bytes = generate_wav_bytes(&samples, sample_rate);
        let clip = parse_wav(&wav_bytes).expect("parse_wav failed");
        assert_eq!(clip.sample_rate, sample_rate);
        assert_eq!(clip.channels, 1);
        assert_eq!(clip.frame_count(), num_samples);
    }
    #[test]
    fn sample_conversion_accuracy() {
        // A single-sample WAV: value = 16384 (half of i16 max positive)
        // Expected f32: 16384 / 32768 = 0.5
        let sample_rate = 44100u32;
        let samples_f32 = vec![0.5f32];
        let wav_bytes = generate_wav_bytes(&samples_f32, sample_rate);
        let clip = parse_wav(&wav_bytes).expect("parse_wav failed");
        assert_eq!(clip.samples.len(), 1);
        // 0.5 -> i16(16383) -> f32(16383/32768) ≈ 0.49997
        assert!((clip.samples[0] - 0.5f32).abs() < 0.001, "got {}", clip.samples[0]);
    }
    #[test]
    fn invalid_riff() {
        let bad_data = b"BADH\x00\x00\x00\x00WAVE";
        let result = parse_wav(bad_data);
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("RIFF"));
    }
    #[test]
    fn too_short() {
        let short_data = b"RIF";
        let result = parse_wav(short_data);
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("too short"));
    }
    #[test]
    fn roundtrip() {
        let original: Vec<f32> = vec![0.0, 0.25, 0.5, -0.25, -0.5, 1.0, -1.0];
        let wav_bytes = generate_wav_bytes(&original, 44100);
        let clip = parse_wav(&wav_bytes).expect("roundtrip parse failed");
        assert_eq!(clip.samples.len(), original.len());
        for (orig, decoded) in original.iter().zip(clip.samples.iter()) {
            // i16 quantization error < 0.001
            assert!(
                (orig - decoded).abs() < 0.001,
                "orig={} decoded={}",
                orig,
                decoded
            );
        }
    }
    // -----------------------------------------------------------------------
    // 24-bit PCM tests
    // -----------------------------------------------------------------------
    #[test]
    fn parse_24bit_wav() {
        let sample_rate = 44100u32;
        let num_samples = 4410usize;
        let samples: Vec<f32> = (0..num_samples)
            .map(|i| (2.0 * std::f32::consts::PI * 440.0 * i as f32 / sample_rate as f32).sin())
            .collect();
        let wav_bytes = generate_wav_bytes_24bit(&samples, sample_rate);
        let clip = parse_wav(&wav_bytes).expect("parse_wav 24-bit failed");
        assert_eq!(clip.sample_rate, sample_rate);
        assert_eq!(clip.channels, 1);
        assert_eq!(clip.frame_count(), num_samples);
    }
    #[test]
    fn roundtrip_24bit() {
        let original: Vec<f32> = vec![0.0, 0.25, 0.5, -0.25, -0.5, 1.0, -1.0];
        let wav_bytes = generate_wav_bytes_24bit(&original, 44100);
        let clip = parse_wav(&wav_bytes).expect("roundtrip 24-bit parse failed");
        assert_eq!(clip.samples.len(), original.len());
        for (orig, decoded) in original.iter().zip(clip.samples.iter()) {
            // 24-bit quantization error should be < 0.0001
            assert!(
                (orig - decoded).abs() < 0.0001,
                "24-bit: orig={} decoded={}",
                orig,
                decoded
            );
        }
    }
    #[test]
    fn accuracy_24bit() {
        // 24-bit should be more accurate than 16-bit
        let samples = vec![0.5f32];
        let wav_bytes = generate_wav_bytes_24bit(&samples, 44100);
        let clip = parse_wav(&wav_bytes).expect("parse failed");
        // 0.5 * 8388607 = 4194303 -> 4194303 / 8388608 ≈ 0.49999988
        assert!((clip.samples[0] - 0.5).abs() < 0.0001, "24-bit got {}", clip.samples[0]);
    }
    // -----------------------------------------------------------------------
    // 32-bit float tests
    // -----------------------------------------------------------------------
    #[test]
    fn parse_32bit_float_wav() {
        let sample_rate = 44100u32;
        let num_samples = 4410usize;
        let samples: Vec<f32> = (0..num_samples)
            .map(|i| (2.0 * std::f32::consts::PI * 440.0 * i as f32 / sample_rate as f32).sin())
            .collect();
        let wav_bytes = generate_wav_bytes_f32(&samples, sample_rate);
        let clip = parse_wav(&wav_bytes).expect("parse_wav float32 failed");
        assert_eq!(clip.sample_rate, sample_rate);
        assert_eq!(clip.channels, 1);
        assert_eq!(clip.frame_count(), num_samples);
    }
    #[test]
    fn roundtrip_32bit_float() {
        let original: Vec<f32> = vec![0.0, 0.25, 0.5, -0.25, -0.5, 1.0, -1.0];
        let wav_bytes = generate_wav_bytes_f32(&original, 44100);
        let clip = parse_wav(&wav_bytes).expect("roundtrip float32 parse failed");
        assert_eq!(clip.samples.len(), original.len());
        for (orig, decoded) in original.iter().zip(clip.samples.iter()) {
            // 32-bit float should be exact
            assert_eq!(*orig, *decoded, "float32: orig={} decoded={}", orig, decoded);
        }
    }
    #[test]
    fn accuracy_32bit_float() {
        // 32-bit float should preserve exact values
        let samples = vec![0.123456789f32, -0.987654321f32];
        let wav_bytes = generate_wav_bytes_f32(&samples, 44100);
        let clip = parse_wav(&wav_bytes).expect("parse failed");
        assert_eq!(clip.samples[0], 0.123456789f32);
        assert_eq!(clip.samples[1], -0.987654321f32);
    }
    #[test]
    fn reject_unsupported_format_tag() {
        // Create a WAV with format_tag=2 (ADPCM), which we don't support
        let mut wav = generate_wav_bytes(&[0.0], 44100);
        // format_tag is at byte 20-21 (RIFF(4)+size(4)+WAVE(4)+fmt(4)+chunk_size(4))
        wav[20] = 2;
        wav[21] = 0;
        let result = parse_wav(&wav);
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("Unsupported WAV format tag"));
    }
}

View File

@@ -0,0 +1,263 @@
/// Binary scene format (.vscn binary).
///
/// Format:
/// Header: "VSCN" (4 bytes) + version u32 LE + entity_count u32 LE
/// Per entity:
/// parent_index i32 LE (-1 = no parent)
/// component_count u32 LE
/// Per component:
/// name_len u16 LE + name bytes
/// data_len u32 LE + data bytes
use std::collections::HashMap;
use crate::entity::Entity;
use crate::world::World;
use crate::transform::Transform;
use crate::hierarchy::{add_child, Parent};
use crate::component_registry::ComponentRegistry;
const MAGIC: &[u8; 4] = b"VSCN";
const VERSION: u32 = 1;
/// Serialize all entities with a Transform to the binary scene format.
///
/// Entity order in the output is the transform-query order; parent links are
/// encoded as indices into that same order (-1 when the entity has no Parent
/// component or its parent is not part of this snapshot). For each entity,
/// only components whose registry `serialize` function returns `Some` are
/// written, in registry registration order.
pub fn serialize_scene_binary(world: &World, registry: &ComponentRegistry) -> Vec<u8> {
    // Snapshot the entity list first so indices are stable while encoding
    // parent references below.
    let entities_with_transform: Vec<(Entity, Transform)> = world
        .query::<Transform>()
        .map(|(e, t)| (e, *t))
        .collect();
    let entity_to_index: HashMap<Entity, usize> = entities_with_transform
        .iter()
        .enumerate()
        .map(|(i, (e, _))| (*e, i))
        .collect();
    let entity_count = entities_with_transform.len() as u32;
    let mut buf = Vec::new();
    // Header: magic + version + entity count (see format doc at file top).
    buf.extend_from_slice(MAGIC);
    buf.extend_from_slice(&VERSION.to_le_bytes());
    buf.extend_from_slice(&entity_count.to_le_bytes());
    // Entities
    for (entity, _) in &entities_with_transform {
        // Parent index; -1 when unresolvable within this snapshot.
        let parent_idx: i32 = if let Some(parent_comp) = world.get::<Parent>(*entity) {
            entity_to_index.get(&parent_comp.0)
                .map(|&i| i as i32)
                .unwrap_or(-1)
        } else {
            -1
        };
        buf.extend_from_slice(&parent_idx.to_le_bytes());
        // Collect serializable components
        let mut comp_data: Vec<(&str, Vec<u8>)> = Vec::new();
        for entry in registry.entries() {
            if let Some(data) = (entry.serialize)(world, *entity) {
                comp_data.push((&entry.name, data));
            }
        }
        let comp_count = comp_data.len() as u32;
        buf.extend_from_slice(&comp_count.to_le_bytes());
        // Per component: name_len u16 + name bytes + data_len u32 + data bytes.
        for (name, data) in &comp_data {
            let name_bytes = name.as_bytes();
            buf.extend_from_slice(&(name_bytes.len() as u16).to_le_bytes());
            buf.extend_from_slice(name_bytes);
            buf.extend_from_slice(&(data.len() as u32).to_le_bytes());
            buf.extend_from_slice(data);
        }
    }
    buf
}
/// Deserialize entities from binary scene data.
///
/// Verifies the VSCN magic and version, spawns one entity per serialized
/// record, restores each component via `registry` (unknown component names
/// are skipped), and finally re-links parent/child relationships by index.
/// Returns the spawned entities in file order.
///
/// Note: entities spawned before a parse error is detected are not despawned
/// on the error path — callers treating errors as fatal for `world` should
/// discard it.
pub fn deserialize_scene_binary(
    world: &mut World,
    data: &[u8],
    registry: &ComponentRegistry,
) -> Result<Vec<Entity>, String> {
    if data.len() < 12 {
        return Err("Binary scene data too short".into());
    }
    // Verify magic
    if &data[0..4] != MAGIC {
        return Err("Invalid magic bytes — expected VSCN".into());
    }
    let version = u32::from_le_bytes([data[4], data[5], data[6], data[7]]);
    if version != 1 {
        return Err(format!("Unsupported binary scene version: {}", version));
    }
    let entity_count = u32::from_le_bytes([data[8], data[9], data[10], data[11]]) as usize;
    // Each entity record needs at least 8 bytes (parent index + component
    // count), so cap the preallocation by that structural bound — a hostile
    // header claiming up to u32::MAX entities must not force a huge
    // allocation before parsing fails on truncated data.
    let capacity = entity_count.min((data.len() - 12) / 8);
    let mut pos = 12;
    let mut created: Vec<Entity> = Vec::with_capacity(capacity);
    let mut parent_indices: Vec<i32> = Vec::with_capacity(capacity);
    for _ in 0..entity_count {
        // Parent index
        if pos + 4 > data.len() {
            return Err("Unexpected end of data reading parent index".into());
        }
        let parent_idx = i32::from_le_bytes([data[pos], data[pos + 1], data[pos + 2], data[pos + 3]]);
        pos += 4;
        // Component count
        if pos + 4 > data.len() {
            return Err("Unexpected end of data reading component count".into());
        }
        let comp_count = u32::from_le_bytes([data[pos], data[pos + 1], data[pos + 2], data[pos + 3]]) as usize;
        pos += 4;
        let entity = world.spawn();
        for _ in 0..comp_count {
            // Name length
            if pos + 2 > data.len() {
                return Err("Unexpected end of data reading name length".into());
            }
            let name_len = u16::from_le_bytes([data[pos], data[pos + 1]]) as usize;
            pos += 2;
            // Name
            if pos + name_len > data.len() {
                return Err("Unexpected end of data reading component name".into());
            }
            let name = std::str::from_utf8(&data[pos..pos + name_len])
                .map_err(|_| "Invalid UTF-8 in component name".to_string())?;
            pos += name_len;
            // Data length
            if pos + 4 > data.len() {
                return Err("Unexpected end of data reading data length".into());
            }
            let data_len = u32::from_le_bytes([data[pos], data[pos + 1], data[pos + 2], data[pos + 3]]) as usize;
            pos += 4;
            // Data
            if pos + data_len > data.len() {
                return Err("Unexpected end of data reading component data".into());
            }
            let comp_data = &data[pos..pos + data_len];
            pos += data_len;
            // Deserialize via registry; unknown names are silently skipped so
            // newer files with extra components still load.
            if let Some(entry) = registry.find(name) {
                (entry.deserialize)(world, entity, comp_data)?;
            }
        }
        created.push(entity);
        parent_indices.push(parent_idx);
    }
    // Apply parent relationships after all entities exist, so forward
    // references (parent serialized after child) resolve correctly.
    for (child_idx, &parent_idx) in parent_indices.iter().enumerate() {
        if parent_idx >= 0 {
            let pi = parent_idx as usize;
            if pi < created.len() {
                let child_entity = created[child_idx];
                let parent_entity = created[pi];
                add_child(world, parent_entity, child_entity);
            }
        }
    }
    Ok(created)
}
// Round-trip tests over the VSCN binary format: component restore, parent
// re-linking, header validation, and multi-entity ordering.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::scene::Tag;
    use voltex_math::Vec3;
    #[test]
    fn test_binary_roundtrip() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Transform::from_position(Vec3::new(5.0, 0.0, -1.0)));
        world.add(e, Tag("enemy".into()));
        let data = serialize_scene_binary(&world, &registry);
        assert_eq!(&data[0..4], b"VSCN");
        let mut world2 = World::new();
        let entities = deserialize_scene_binary(&mut world2, &data, &registry).unwrap();
        assert_eq!(entities.len(), 1);
        let t = world2.get::<Transform>(entities[0]).unwrap();
        assert!((t.position.x - 5.0).abs() < 1e-6);
        assert!((t.position.z - (-1.0)).abs() < 1e-6);
        let tag = world2.get::<Tag>(entities[0]).unwrap();
        assert_eq!(tag.0, "enemy");
    }
    #[test]
    fn test_binary_with_hierarchy() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        let a = world.spawn();
        let b = world.spawn();
        world.add(a, Transform::new());
        world.add(b, Transform::new());
        add_child(&mut world, a, b);
        let data = serialize_scene_binary(&world, &registry);
        let mut world2 = World::new();
        let entities = deserialize_scene_binary(&mut world2, &data, &registry).unwrap();
        assert_eq!(entities.len(), 2);
        // The child must come back with a Parent pointing at the restored parent.
        assert!(world2.get::<Parent>(entities[1]).is_some());
        let p = world2.get::<Parent>(entities[1]).unwrap();
        assert_eq!(p.0, entities[0]);
    }
    #[test]
    fn test_binary_invalid_magic() {
        let data = vec![0u8; 20];
        let mut world = World::new();
        let registry = ComponentRegistry::new();
        assert!(deserialize_scene_binary(&mut world, &data, &registry).is_err());
    }
    #[test]
    fn test_binary_too_short() {
        let data = vec![0u8; 5];
        let mut world = World::new();
        let registry = ComponentRegistry::new();
        assert!(deserialize_scene_binary(&mut world, &data, &registry).is_err());
    }
    #[test]
    fn test_binary_multiple_entities() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        for i in 0..3 {
            let e = world.spawn();
            world.add(e, Transform::from_position(Vec3::new(i as f32, 0.0, 0.0)));
            world.add(e, Tag(format!("e{}", i)));
        }
        let data = serialize_scene_binary(&world, &registry);
        let mut world2 = World::new();
        let entities = deserialize_scene_binary(&mut world2, &data, &registry).unwrap();
        assert_eq!(entities.len(), 3);
        for (i, &e) in entities.iter().enumerate() {
            let t = world2.get::<Transform>(e).unwrap();
            assert!((t.position.x - i as f32).abs() < 1e-6);
            let tag = world2.get::<Tag>(e).unwrap();
            assert_eq!(tag.0, format!("e{}", i));
        }
    }
}

View File

@@ -0,0 +1,198 @@
/// Registration-based component serialization for scene formats.
/// Each registered component type has a name, a serialize function,
/// and a deserialize function.
use crate::entity::Entity;
use crate::world::World;
/// Serializes one component of `entity` into bytes; `None` when the entity
/// does not have the component.
pub type SerializeFn = fn(&World, Entity) -> Option<Vec<u8>>;
/// Reconstructs a component from bytes and attaches it to `entity`.
pub type DeserializeFn = fn(&mut World, Entity, &[u8]) -> Result<(), String>;
/// A registered component type: its wire-format name plus the function pair
/// used by the scene serializers.
pub struct ComponentEntry {
    pub name: String,
    pub serialize: SerializeFn,
    pub deserialize: DeserializeFn,
}
/// Ordered collection of [`ComponentEntry`] values; lookup is by name.
pub struct ComponentRegistry {
    entries: Vec<ComponentEntry>,
}
impl ComponentRegistry {
pub fn new() -> Self {
Self { entries: Vec::new() }
}
pub fn register(&mut self, name: &str, ser: SerializeFn, deser: DeserializeFn) {
self.entries.push(ComponentEntry {
name: name.to_string(),
serialize: ser,
deserialize: deser,
});
}
pub fn find(&self, name: &str) -> Option<&ComponentEntry> {
self.entries.iter().find(|e| e.name == name)
}
pub fn entries(&self) -> &[ComponentEntry] {
&self.entries
}
/// Register the default built-in component types: transform and tag.
pub fn register_defaults(&mut self) {
self.register("transform", serialize_transform, deserialize_transform);
self.register("tag", serialize_tag, deserialize_tag);
}
}
impl Default for ComponentRegistry {
fn default() -> Self {
Self::new()
}
}
// ── Transform: 9 f32s in little-endian (pos.xyz, rot.xyz, scale.xyz) ─

/// Encode an entity's `Transform` as 36 bytes: nine little-endian f32s.
/// Returns `None` when the entity has no `Transform`.
fn serialize_transform(world: &World, entity: Entity) -> Option<Vec<u8>> {
    let t = world.get::<crate::Transform>(entity)?;
    let fields = [
        t.position.x, t.position.y, t.position.z,
        t.rotation.x, t.rotation.y, t.rotation.z,
        t.scale.x, t.scale.y, t.scale.z,
    ];
    let mut out = Vec::with_capacity(fields.len() * 4);
    for value in fields {
        out.extend_from_slice(&value.to_le_bytes());
    }
    Some(out)
}

/// Decode 36 bytes (as written by `serialize_transform`) into a `Transform`
/// on `entity`. Trailing bytes beyond 36 are ignored.
fn deserialize_transform(world: &mut World, entity: Entity, data: &[u8]) -> Result<(), String> {
    if data.len() < 36 {
        return Err("Transform data too short".into());
    }
    // Read one little-endian f32 at a byte offset.
    let read = |off: usize| {
        f32::from_le_bytes([data[off], data[off + 1], data[off + 2], data[off + 3]])
    };
    let transform = crate::Transform {
        position: voltex_math::Vec3::new(read(0), read(4), read(8)),
        rotation: voltex_math::Vec3::new(read(12), read(16), read(20)),
        scale: voltex_math::Vec3::new(read(24), read(28), read(32)),
    };
    world.add(entity, transform);
    Ok(())
}

// ── Tag: UTF-8 string bytes ─────────────────────────────────────────

/// Encode an entity's `Tag` as its raw UTF-8 bytes; `None` when absent.
fn serialize_tag(world: &World, entity: Entity) -> Option<Vec<u8>> {
    world
        .get::<crate::scene::Tag>(entity)
        .map(|tag| tag.0.clone().into_bytes())
}

/// Decode UTF-8 bytes into a `Tag` component on `entity`.
fn deserialize_tag(world: &mut World, entity: Entity, data: &[u8]) -> Result<(), String> {
    match std::str::from_utf8(data) {
        Ok(text) => {
            world.add(entity, crate::scene::Tag(text.to_string()));
            Ok(())
        }
        Err(_) => Err("Invalid UTF-8 in tag".to_string()),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{World, Transform};
    use voltex_math::Vec3;

    // Defaults must register exactly "transform" and "tag", findable by name.
    #[test]
    fn test_register_and_find() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        assert!(registry.find("transform").is_some());
        assert!(registry.find("tag").is_some());
        assert!(registry.find("nonexistent").is_none());
    }

    #[test]
    fn test_entries_count() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        assert_eq!(registry.entries().len(), 2);
    }

    // A serialized Transform is a fixed 36-byte payload (9 f32 fields).
    #[test]
    fn test_serialize_transform() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Transform::from_position(Vec3::new(1.0, 2.0, 3.0)));
        let entry = registry.find("transform").unwrap();
        let data = (entry.serialize)(&world, e);
        assert!(data.is_some());
        assert_eq!(data.unwrap().len(), 36); // 9 f32s * 4 bytes
    }

    // Serializing an entity without the component yields None, not an error.
    #[test]
    fn test_serialize_missing_component() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        let e = world.spawn();
        // no Transform added
        let entry = registry.find("transform").unwrap();
        assert!((entry.serialize)(&world, e).is_none());
    }

    // Serialize in one world, deserialize into another, compare field-by-field.
    #[test]
    fn test_roundtrip_transform() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Transform {
            position: Vec3::new(1.0, 2.0, 3.0),
            rotation: Vec3::new(0.1, 0.2, 0.3),
            scale: Vec3::new(4.0, 5.0, 6.0),
        });
        let entry = registry.find("transform").unwrap();
        let data = (entry.serialize)(&world, e).unwrap();
        let mut world2 = World::new();
        let e2 = world2.spawn();
        (entry.deserialize)(&mut world2, e2, &data).unwrap();
        let t = world2.get::<Transform>(e2).unwrap();
        assert!((t.position.x - 1.0).abs() < 1e-6);
        assert!((t.position.y - 2.0).abs() < 1e-6);
        assert!((t.position.z - 3.0).abs() < 1e-6);
        assert!((t.rotation.x - 0.1).abs() < 1e-6);
        assert!((t.scale.x - 4.0).abs() < 1e-6);
    }

    // Tags round-trip as raw UTF-8 bytes.
    #[test]
    fn test_roundtrip_tag() {
        let mut registry = ComponentRegistry::new();
        registry.register_defaults();
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, crate::scene::Tag("hello world".to_string()));
        let entry = registry.find("tag").unwrap();
        let data = (entry.serialize)(&world, e).unwrap();
        let mut world2 = World::new();
        let e2 = world2.spawn();
        (entry.deserialize)(&mut world2, e2, &data).unwrap();
        let tag = world2.get::<crate::scene::Tag>(e2).unwrap();
        assert_eq!(tag.0, "hello world");
    }

    // Truncated payloads (< 36 bytes) must be rejected, not panicked on.
    #[test]
    fn test_deserialize_transform_too_short() {
        let mut world = World::new();
        let e = world.spawn();
        let result = deserialize_transform(&mut world, e, &[0u8; 10]);
        assert!(result.is_err());
    }
}

View File

@@ -0,0 +1,476 @@
/// Mini JSON writer and parser for scene serialization.
/// No external dependencies — self-contained within voltex_ecs.
/// A parsed JSON value.
#[derive(Debug, Clone, PartialEq)]
pub enum JsonVal {
    /// JSON `null`.
    Null,
    /// JSON `true` / `false`.
    Bool(bool),
    /// Any JSON number, stored uniformly as f64.
    Number(f64),
    /// A JSON string with escapes already decoded.
    Str(String),
    /// A JSON array.
    Array(Vec<JsonVal>),
    // Stored as ordered pairs rather than a map: preserves insertion order
    // and tolerates duplicate keys (`get` returns the first match).
    Object(Vec<(String, JsonVal)>),
}
// ── Writer helpers ──────────────────────────────────────────────────

/// The JSON `null` literal.
pub fn json_write_null() -> String {
    "null".to_string()
}

/// Format an f32 for JSON output.
/// Integral values inside i64 range print without a fractional part ("1", not "1.0").
pub fn json_write_f32(v: f32) -> String {
    let is_integral = v.fract() == 0.0 && v.abs() < 1e15;
    if is_integral {
        (v as i64).to_string()
    } else {
        v.to_string()
    }
}

/// Quote and escape a string for JSON output.
/// NOTE(review): control characters other than \n, \r, \t are emitted raw,
/// which strict JSON parsers may reject — confirm inputs never contain them
/// (scene component data is hex-encoded before reaching this function).
pub fn json_write_string(s: &str) -> String {
    // Map the five escapable characters to their JSON escape sequences.
    fn escape(c: char) -> Option<&'static str> {
        Some(match c {
            '"' => "\\\"",
            '\\' => "\\\\",
            '\n' => "\\n",
            '\r' => "\\r",
            '\t' => "\\t",
            _ => return None,
        })
    }
    let mut out = String::with_capacity(s.len() + 2);
    out.push('"');
    for ch in s.chars() {
        match escape(ch) {
            Some(seq) => out.push_str(seq),
            None => out.push(ch),
        }
    }
    out.push('"');
    out
}

/// Write a JSON array from pre-formatted element strings.
pub fn json_write_array(elements: &[&str]) -> String {
    format!("[{}]", elements.join(","))
}

/// Write a JSON object from (key, pre-formatted-value) pairs.
/// Keys are escaped; values are inserted verbatim.
pub fn json_write_object(pairs: &[(&str, &str)]) -> String {
    let body: Vec<String> = pairs
        .iter()
        .map(|(key, val)| format!("{}:{}", json_write_string(key), val))
        .collect();
    format!("{{{}}}", body.join(","))
}
// ── Accessors on JsonVal ────────────────────────────────────────────
impl JsonVal {
    /// Object field lookup: the value of the first pair whose key matches.
    /// Returns `None` for non-objects or missing keys.
    pub fn get(&self, key: &str) -> Option<&JsonVal> {
        if let JsonVal::Object(pairs) = self {
            for (k, v) in pairs {
                if k == key {
                    return Some(v);
                }
            }
        }
        None
    }
    /// The numeric payload, if this is a `Number`.
    pub fn as_f64(&self) -> Option<f64> {
        if let JsonVal::Number(n) = self { Some(*n) } else { None }
    }
    /// Borrow the string payload, if this is a `Str`.
    pub fn as_str(&self) -> Option<&str> {
        if let JsonVal::Str(s) = self { Some(s.as_str()) } else { None }
    }
    /// Borrow the element list, if this is an `Array`.
    pub fn as_array(&self) -> Option<&Vec<JsonVal>> {
        if let JsonVal::Array(a) = self { Some(a) } else { None }
    }
    /// Borrow the key/value pairs, if this is an `Object`.
    pub fn as_object(&self) -> Option<&Vec<(String, JsonVal)>> {
        if let JsonVal::Object(o) = self { Some(o) } else { None }
    }
    /// The boolean payload, if this is a `Bool`.
    pub fn as_bool(&self) -> Option<bool> {
        if let JsonVal::Bool(b) = self { Some(*b) } else { None }
    }
}
// ── Parser (recursive descent) ──────────────────────────────────────

/// Parse a complete JSON document.
/// Errors if the input is empty, malformed, or has trailing non-whitespace.
pub fn json_parse(input: &str) -> Result<JsonVal, String> {
    let bytes = input.as_bytes();
    let start = skip_ws(bytes, 0);
    let (val, after) = parse_value(bytes, start)?;
    let end = skip_ws(bytes, after);
    if end == bytes.len() {
        Ok(val)
    } else {
        Err(format!("Unexpected trailing content at position {}", end))
    }
}
/// Return the index of the first non-whitespace byte at or after `start`
/// (or `b.len()` if only whitespace remains).
fn skip_ws(b: &[u8], start: usize) -> usize {
    let mut i = start;
    while let Some(&c) = b.get(i) {
        if c == b' ' || c == b'\t' || c == b'\n' || c == b'\r' {
            i += 1;
        } else {
            break;
        }
    }
    i
}
/// Dispatch on the first byte to the matching value parser.
/// Anything not starting a string/object/array/bool/null is tried as a number.
fn parse_value(b: &[u8], pos: usize) -> Result<(JsonVal, usize), String> {
    match b.get(pos) {
        None => Err("Unexpected end of input".into()),
        Some(&b'"') => parse_string(b, pos),
        Some(&b'{') => parse_object(b, pos),
        Some(&b'[') => parse_array(b, pos),
        Some(&b't') | Some(&b'f') => parse_bool(b, pos),
        Some(&b'n') => parse_null(b, pos),
        Some(_) => parse_number(b, pos),
    }
}
/// Parse a JSON string literal and wrap the decoded text in `JsonVal::Str`.
fn parse_string(b: &[u8], pos: usize) -> Result<(JsonVal, usize), String> {
    read_string(b, pos).map(|(s, end)| (JsonVal::Str(s), end))
}
/// Read a quoted JSON string starting at `b[pos]` (which must be `"`).
/// Returns the decoded string and the index just past the closing quote.
///
/// Bytes are accumulated raw and decoded as UTF-8 once at the end. The
/// previous implementation pushed each byte via `b[i] as char`, which
/// reinterprets UTF-8 continuation bytes as Latin-1 codepoints and corrupts
/// any multi-byte character (e.g. "héllo" became "hÃ©llo").
///
/// Unknown escapes (including `\uXXXX`) are passed through verbatim as
/// backslash + char, matching the writer's limited escape set.
fn read_string(b: &[u8], pos: usize) -> Result<(String, usize), String> {
    if pos >= b.len() || b[pos] != b'"' {
        return Err(format!("Expected '\"' at position {}", pos));
    }
    let mut i = pos + 1;
    let mut raw: Vec<u8> = Vec::new();
    while i < b.len() {
        match b[i] {
            b'"' => {
                // Input normally comes from a &str, so this cannot fail in
                // practice; guard anyway for raw-byte callers.
                let s = String::from_utf8(raw)
                    .map_err(|_| "Invalid UTF-8 in string".to_string())?;
                return Ok((s, i + 1));
            }
            b'\\' => {
                i += 1;
                if i >= b.len() {
                    return Err("Unexpected end in string escape".into());
                }
                match b[i] {
                    b'"' => raw.push(b'"'),
                    b'\\' => raw.push(b'\\'),
                    b'/' => raw.push(b'/'),
                    b'n' => raw.push(b'\n'),
                    b'r' => raw.push(b'\r'),
                    b't' => raw.push(b'\t'),
                    other => {
                        // Unknown escape: keep it verbatim.
                        raw.push(b'\\');
                        raw.push(other);
                    }
                }
                i += 1;
            }
            byte => {
                // Plain byte — may be part of a multi-byte UTF-8 sequence;
                // copied as-is and validated at the end.
                raw.push(byte);
                i += 1;
            }
        }
    }
    Err("Unterminated string".into())
}
/// Scan a JSON number (optional minus, digits, optional fraction, optional
/// exponent) and parse the matched slice as f64. Errors if no characters
/// were consumed or the slice is not a valid f64.
fn parse_number(b: &[u8], pos: usize) -> Result<(JsonVal, usize), String> {
    // Advance past a (possibly empty) run of ASCII digits.
    fn eat_digits(b: &[u8], mut j: usize) -> usize {
        while j < b.len() && b[j].is_ascii_digit() {
            j += 1;
        }
        j
    }
    let mut end = pos;
    if b.get(end) == Some(&b'-') {
        end += 1; // optional leading minus
    }
    end = eat_digits(b, end);
    if b.get(end) == Some(&b'.') {
        end = eat_digits(b, end + 1); // fractional part
    }
    if matches!(b.get(end), Some(&b'e') | Some(&b'E')) {
        end += 1;
        if matches!(b.get(end), Some(&b'+') | Some(&b'-')) {
            end += 1;
        }
        end = eat_digits(b, end); // exponent digits
    }
    if end == pos {
        return Err(format!("Expected number at position {}", pos));
    }
    // Slice bounds fall on ASCII bytes, so this cannot fail.
    let text = std::str::from_utf8(&b[pos..end]).unwrap();
    let value: f64 = text
        .parse()
        .map_err(|e| format!("Invalid number '{}': {}", text, e))?;
    Ok((JsonVal::Number(value), end))
}
/// Parse the `true` / `false` literals.
fn parse_bool(b: &[u8], pos: usize) -> Result<(JsonVal, usize), String> {
    let rest = &b[pos..];
    if rest.starts_with(b"true") {
        Ok((JsonVal::Bool(true), pos + 4))
    } else if rest.starts_with(b"false") {
        Ok((JsonVal::Bool(false), pos + 5))
    } else {
        Err(format!("Expected bool at position {}", pos))
    }
}

/// Parse the `null` literal.
fn parse_null(b: &[u8], pos: usize) -> Result<(JsonVal, usize), String> {
    if b[pos..].starts_with(b"null") {
        Ok((JsonVal::Null, pos + 4))
    } else {
        Err(format!("Expected null at position {}", pos))
    }
}
/// Parse a JSON array starting at the `[` at `pos`.
/// Precondition (guaranteed by `parse_value`): `b[pos] == b'['`.
/// Returns the array and the index just past the closing `]`.
fn parse_array(b: &[u8], pos: usize) -> Result<(JsonVal, usize), String> {
    let mut i = pos + 1; // skip '['
    let mut arr = Vec::new();
    i = skip_ws(b, i);
    // Empty-array fast path: "[]" (possibly with interior whitespace).
    if i < b.len() && b[i] == b']' {
        return Ok((JsonVal::Array(arr), i + 1));
    }
    // Invariant at loop top: `i` is at the start of the next element
    // (possibly preceded by whitespace).
    loop {
        i = skip_ws(b, i);
        let (val, next) = parse_value(b, i)?;
        arr.push(val);
        i = skip_ws(b, next);
        if i >= b.len() {
            return Err("Unterminated array".into());
        }
        if b[i] == b']' {
            return Ok((JsonVal::Array(arr), i + 1));
        }
        // Anything other than ',' or ']' after an element is malformed;
        // note this also rejects trailing commas ("[1,]" fails in parse_value).
        if b[i] != b',' {
            return Err(format!("Expected ',' or ']' at position {}", i));
        }
        i += 1; // skip ','
    }
}
/// Parse a JSON object starting at the `{` at `pos`.
/// Precondition (guaranteed by `parse_value`): `b[pos] == b'{'`.
/// Duplicate keys are kept as-is (the pair list preserves order).
fn parse_object(b: &[u8], pos: usize) -> Result<(JsonVal, usize), String> {
    let mut i = pos + 1; // skip '{'
    let mut pairs = Vec::new();
    i = skip_ws(b, i);
    // Empty-object fast path: "{}".
    if i < b.len() && b[i] == b'}' {
        return Ok((JsonVal::Object(pairs), i + 1));
    }
    // Invariant at loop top: `i` is at the start of the next `"key"`.
    loop {
        i = skip_ws(b, i);
        let (key, next) = read_string(b, i)?;
        i = skip_ws(b, next);
        if i >= b.len() || b[i] != b':' {
            return Err(format!("Expected ':' at position {}", i));
        }
        i = skip_ws(b, i + 1); // skip ':' and any whitespace before the value
        let (val, next) = parse_value(b, i)?;
        pairs.push((key, val));
        i = skip_ws(b, next);
        if i >= b.len() {
            return Err("Unterminated object".into());
        }
        if b[i] == b'}' {
            return Ok((JsonVal::Object(pairs), i + 1));
        }
        if b[i] != b',' {
            return Err(format!("Expected ',' or '}}' at position {}", i));
        }
        i += 1; // skip ','
    }
}
// ── JsonVal -> String serialization ─────────────────────────────────
impl JsonVal {
    /// Serialize this JsonVal to a compact JSON string (no whitespace).
    pub fn to_json_string(&self) -> String {
        match self {
            JsonVal::Null => "null".to_string(),
            JsonVal::Bool(true) => "true".to_string(),
            JsonVal::Bool(false) => "false".to_string(),
            JsonVal::Number(n) => {
                // Same rule as json_write_f32: integral values in i64 range
                // print without a fractional part.
                if n.fract() == 0.0 && n.abs() < 1e15 {
                    format!("{}", *n as i64)
                } else {
                    format!("{}", n)
                }
            }
            JsonVal::Str(s) => json_write_string(s),
            JsonVal::Array(items) => {
                let body: Vec<String> =
                    items.iter().map(JsonVal::to_json_string).collect();
                format!("[{}]", body.join(","))
            }
            JsonVal::Object(pairs) => {
                let body: Vec<String> = pairs
                    .iter()
                    .map(|(k, v)| {
                        format!("{}:{}", json_write_string(k), v.to_json_string())
                    })
                    .collect();
                format!("{{{}}}", body.join(","))
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // ── Writer tests ────────────────────────────────────────────────
    #[test]
    fn test_write_null() {
        assert_eq!(json_write_null(), "null");
    }
    // Integral floats print without ".0"; others print their decimals.
    #[test]
    fn test_write_number() {
        assert_eq!(json_write_f32(3.14), "3.14");
        assert_eq!(json_write_f32(1.0), "1");
    }
    #[test]
    fn test_write_string() {
        assert_eq!(json_write_string("hello"), "\"hello\"");
        assert_eq!(json_write_string("a\"b"), "\"a\\\"b\"");
    }
    #[test]
    fn test_write_array() {
        assert_eq!(json_write_array(&["1", "2", "3"]), "[1,2,3]");
    }
    #[test]
    fn test_write_object() {
        let pairs = vec![("name", "\"test\""), ("value", "42")];
        let result = json_write_object(&pairs);
        assert_eq!(result, r#"{"name":"test","value":42}"#);
    }

    // ── Parser tests ────────────────────────────────────────────────
    #[test]
    fn test_parse_number() {
        match json_parse("42.5").unwrap() {
            JsonVal::Number(n) => assert!((n - 42.5).abs() < 1e-10),
            _ => panic!("expected number"),
        }
    }
    #[test]
    fn test_parse_negative_number() {
        match json_parse("-3.14").unwrap() {
            JsonVal::Number(n) => assert!((n - (-3.14)).abs() < 1e-10),
            _ => panic!("expected number"),
        }
    }
    #[test]
    fn test_parse_string() {
        assert_eq!(json_parse("\"hello\"").unwrap(), JsonVal::Str("hello".into()));
    }
    // Escaped quote and backslash must decode to single characters.
    #[test]
    fn test_parse_string_with_escapes() {
        assert_eq!(
            json_parse(r#""a\"b\\c""#).unwrap(),
            JsonVal::Str("a\"b\\c".into())
        );
    }
    #[test]
    fn test_parse_array() {
        match json_parse("[1,2,3]").unwrap() {
            JsonVal::Array(a) => assert_eq!(a.len(), 3),
            _ => panic!("expected array"),
        }
    }
    #[test]
    fn test_parse_empty_array() {
        match json_parse("[]").unwrap() {
            JsonVal::Array(a) => assert_eq!(a.len(), 0),
            _ => panic!("expected array"),
        }
    }
    #[test]
    fn test_parse_object() {
        let val = json_parse(r#"{"x":1,"y":2}"#).unwrap();
        assert!(matches!(val, JsonVal::Object(_)));
        assert_eq!(val.get("x").unwrap().as_f64().unwrap(), 1.0);
        assert_eq!(val.get("y").unwrap().as_f64().unwrap(), 2.0);
    }
    #[test]
    fn test_parse_null() {
        assert_eq!(json_parse("null").unwrap(), JsonVal::Null);
    }
    #[test]
    fn test_parse_bool() {
        assert_eq!(json_parse("true").unwrap(), JsonVal::Bool(true));
        assert_eq!(json_parse("false").unwrap(), JsonVal::Bool(false));
    }
    // Arrays nested in objects and objects nested in objects.
    #[test]
    fn test_parse_nested() {
        let val = json_parse(r#"{"a":[1,2],"b":{"c":3}}"#).unwrap();
        assert!(matches!(val, JsonVal::Object(_)));
        let arr = val.get("a").unwrap().as_array().unwrap();
        assert_eq!(arr.len(), 2);
        let inner = val.get("b").unwrap();
        assert_eq!(inner.get("c").unwrap().as_f64().unwrap(), 3.0);
    }
    // Whitespace is legal around every token.
    #[test]
    fn test_parse_whitespace() {
        let val = json_parse(" { \"a\" : 1 , \"b\" : [ 2 , 3 ] } ").unwrap();
        assert!(matches!(val, JsonVal::Object(_)));
    }
    // Writer and parser must be exact inverses for values they both support.
    #[test]
    fn test_json_val_to_json_string_roundtrip() {
        let val = JsonVal::Object(vec![
            ("name".into(), JsonVal::Str("test".into())),
            ("count".into(), JsonVal::Number(42.0)),
            ("items".into(), JsonVal::Array(vec![
                JsonVal::Number(1.0),
                JsonVal::Null,
                JsonVal::Bool(true),
            ])),
        ]);
        let s = val.to_json_string();
        let parsed = json_parse(&s).unwrap();
        assert_eq!(parsed, val);
    }
}

View File

@@ -5,6 +5,10 @@ pub mod transform;
pub mod hierarchy;
pub mod world_transform;
pub mod scene;
pub mod scheduler;
pub mod json;
pub mod component_registry;
pub mod binary_scene;
pub use entity::{Entity, EntityAllocator};
pub use sparse_set::SparseSet;
@@ -12,4 +16,7 @@ pub use world::World;
pub use transform::Transform;
pub use hierarchy::{Parent, Children, add_child, remove_child, despawn_recursive, roots};
pub use world_transform::{WorldTransform, propagate_transforms};
pub use scene::{Tag, serialize_scene, deserialize_scene};
pub use scene::{Tag, serialize_scene, deserialize_scene, serialize_scene_json, deserialize_scene_json};
pub use scheduler::{Scheduler, System};
pub use component_registry::ComponentRegistry;
pub use binary_scene::{serialize_scene_binary, deserialize_scene_binary};

View File

@@ -4,6 +4,8 @@ use crate::entity::Entity;
use crate::world::World;
use crate::transform::Transform;
use crate::hierarchy::{add_child, Parent};
use crate::component_registry::ComponentRegistry;
use crate::json::{self, JsonVal};
/// String tag for entity identification.
#[derive(Debug, Clone)]
@@ -152,10 +154,160 @@ pub fn deserialize_scene(world: &mut World, source: &str) -> Vec<Entity> {
created
}
// ── Hex encoding helpers ────────────────────────────────────────────

/// Encode bytes as lowercase hex, two characters per byte.
///
/// Uses a nibble lookup table instead of `format!("{:02x}")` per byte,
/// which allocated a temporary `String` for every byte; output is identical.
fn bytes_to_hex(data: &[u8]) -> String {
    const HEX: &[u8; 16] = b"0123456789abcdef";
    let mut s = String::with_capacity(data.len() * 2);
    for &b in data {
        s.push(HEX[(b >> 4) as usize] as char);
        s.push(HEX[(b & 0x0f) as usize] as char);
    }
    s
}
/// Decode a lowercase/uppercase hex string into bytes.
/// Errors on odd length or any non-hex character.
fn hex_to_bytes(hex: &str) -> Result<Vec<u8>, String> {
    if hex.len() % 2 != 0 {
        return Err("Hex string has odd length".into());
    }
    let raw = hex.as_bytes();
    let mut bytes = Vec::with_capacity(raw.len() / 2);
    // Each exact pair of characters is one output byte: high nibble, low nibble.
    for pair in raw.chunks_exact(2) {
        let hi = hex_digit(pair[0])?;
        let lo = hex_digit(pair[1])?;
        bytes.push((hi << 4) | lo);
    }
    Ok(bytes)
}

/// Convert one ASCII hex character to its 0..=15 value.
fn hex_digit(c: u8) -> Result<u8, String> {
    if c.is_ascii_digit() {
        Ok(c - b'0')
    } else if (b'a'..=b'f').contains(&c) {
        Ok(c - b'a' + 10)
    } else if (b'A'..=b'F').contains(&c) {
        Ok(c - b'A' + 10)
    } else {
        Err(format!("Invalid hex digit: {}", c as char))
    }
}
// ── JSON scene serialization ────────────────────────────────────────

/// Serialize all entities with a Transform to JSON format using the component registry.
/// Format: {"version":1,"entities":[{"parent":null_or_idx,"components":{"name":"hex",...}}]}
pub fn serialize_scene_json(world: &World, registry: &ComponentRegistry) -> String {
    // Only entities carrying a Transform are considered part of the scene.
    let entities_with_transform: Vec<(Entity, Transform)> = world
        .query::<Transform>()
        .map(|(e, t)| (e, *t))
        .collect();
    // Entity -> index within the serialized array; parent links are encoded
    // as these indices so they survive entity-id remapping on load.
    let entity_to_index: HashMap<Entity, usize> = entities_with_transform
        .iter()
        .enumerate()
        .map(|(i, (e, _))| (*e, i))
        .collect();
    // Build entity JSON values
    let mut entity_vals = Vec::new();
    for (entity, _) in &entities_with_transform {
        // Parent index; a parent that is not itself serialized (no Transform)
        // degrades to null, i.e. the child becomes a root on load.
        let parent_val = if let Some(parent_comp) = world.get::<Parent>(*entity) {
            if let Some(&idx) = entity_to_index.get(&parent_comp.0) {
                JsonVal::Number(idx as f64)
            } else {
                JsonVal::Null
            }
        } else {
            JsonVal::Null
        };
        // Components: every registry entry that produces bytes for this
        // entity, hex-encoded so arbitrary bytes fit in a JSON string.
        let mut comp_pairs = Vec::new();
        for entry in registry.entries() {
            if let Some(data) = (entry.serialize)(world, *entity) {
                comp_pairs.push((entry.name.clone(), JsonVal::Str(bytes_to_hex(&data))));
            }
        }
        entity_vals.push(JsonVal::Object(vec![
            ("parent".into(), parent_val),
            ("components".into(), JsonVal::Object(comp_pairs)),
        ]));
    }
    let root = JsonVal::Object(vec![
        ("version".into(), JsonVal::Number(1.0)),
        ("entities".into(), JsonVal::Array(entity_vals)),
    ]);
    root.to_json_string()
}
/// Deserialize entities from a JSON scene string.
/// Returns the created entities in the same order as the serialized array;
/// component deserialization errors and hex-decoding errors abort the load
/// (entities already spawned in `world` are NOT rolled back).
pub fn deserialize_scene_json(
    world: &mut World,
    json_str: &str,
    registry: &ComponentRegistry,
) -> Result<Vec<Entity>, String> {
    let root = json::json_parse(json_str)?;
    let version = root.get("version")
        .and_then(|v| v.as_f64())
        .ok_or("Missing or invalid 'version'")?;
    // NOTE(review): `as u32` truncates, so e.g. version 1.9 passes this
    // check as 1 — confirm whether fractional versions should be rejected.
    if version as u32 != 1 {
        return Err(format!("Unsupported version: {}", version));
    }
    let entities_arr = root.get("entities")
        .and_then(|v| v.as_array())
        .ok_or("Missing or invalid 'entities'")?;
    // First pass: create entities and deserialize components
    let mut created: Vec<Entity> = Vec::with_capacity(entities_arr.len());
    let mut parent_indices: Vec<Option<usize>> = Vec::with_capacity(entities_arr.len());
    for entity_val in entities_arr {
        let entity = world.spawn();
        // Parse parent index; null or any non-number means "root".
        let parent_idx = match entity_val.get("parent") {
            Some(JsonVal::Number(n)) => Some(*n as usize),
            _ => None,
        };
        parent_indices.push(parent_idx);
        // Deserialize components; names not present in the registry are
        // silently skipped so scenes stay loadable across registry changes.
        if let Some(comps) = entity_val.get("components").and_then(|v| v.as_object()) {
            for (name, hex_val) in comps {
                if let Some(hex_str) = hex_val.as_str() {
                    let data = hex_to_bytes(hex_str)?;
                    if let Some(entry) = registry.find(name) {
                        (entry.deserialize)(world, entity, &data)?;
                    }
                }
            }
        }
        created.push(entity);
    }
    // Second pass: apply parent relationships (done after all entities exist
    // so forward references to later array entries also resolve).
    for (child_idx, parent_idx_opt) in parent_indices.iter().enumerate() {
        if let Some(parent_idx) = parent_idx_opt {
            // Out-of-range parent indices are ignored rather than erroring.
            if *parent_idx < created.len() {
                let child_entity = created[child_idx];
                let parent_entity = created[*parent_idx];
                add_child(world, parent_entity, child_entity);
            }
        }
    }
    Ok(created)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::hierarchy::{add_child, roots, Parent};
use crate::component_registry::ComponentRegistry;
use voltex_math::Vec3;
#[test]
@@ -270,4 +422,75 @@ entity 2
let scene_roots = roots(&world);
assert_eq!(scene_roots.len(), 2, "should have exactly 2 root entities");
}
// ── JSON scene tests ────────────────────────────────────────────
// Hex helpers must be exact inverses for arbitrary byte values.
#[test]
fn test_hex_roundtrip() {
    let data = vec![0u8, 1, 15, 16, 255];
    let hex = bytes_to_hex(&data);
    assert_eq!(hex, "00010f10ff");
    let back = hex_to_bytes(&hex).unwrap();
    assert_eq!(back, data);
}
// Full serialize -> parse -> deserialize round trip for one entity
// carrying both default components (Transform + Tag).
#[test]
fn test_json_roundtrip() {
    let mut registry = ComponentRegistry::new();
    registry.register_defaults();
    let mut world = World::new();
    let e = world.spawn();
    world.add(e, Transform::from_position(Vec3::new(1.0, 2.0, 3.0)));
    world.add(e, Tag("player".into()));
    let json = serialize_scene_json(&world, &registry);
    assert!(json.contains("\"version\":1"));
    let mut world2 = World::new();
    let entities = deserialize_scene_json(&mut world2, &json, &registry).unwrap();
    assert_eq!(entities.len(), 1);
    let t = world2.get::<Transform>(entities[0]).unwrap();
    assert!((t.position.x - 1.0).abs() < 1e-4);
    assert!((t.position.y - 2.0).abs() < 1e-4);
    assert!((t.position.z - 3.0).abs() < 1e-4);
    let tag = world2.get::<Tag>(entities[0]).unwrap();
    assert_eq!(tag.0, "player");
}
// Parent links are encoded as array indices and re-resolved to the new
// entity ids on load.
#[test]
fn test_json_with_parent() {
    let mut registry = ComponentRegistry::new();
    registry.register_defaults();
    let mut world = World::new();
    let parent = world.spawn();
    let child = world.spawn();
    world.add(parent, Transform::new());
    world.add(child, Transform::new());
    add_child(&mut world, parent, child);
    let json = serialize_scene_json(&world, &registry);
    let mut world2 = World::new();
    let entities = deserialize_scene_json(&mut world2, &json, &registry).unwrap();
    assert_eq!(entities.len(), 2);
    assert!(world2.get::<Parent>(entities[1]).is_some());
    let parent_comp = world2.get::<Parent>(entities[1]).unwrap();
    assert_eq!(parent_comp.0, entities[0]);
}
#[test]
fn test_json_multiple_entities() {
    let mut registry = ComponentRegistry::new();
    registry.register_defaults();
    let mut world = World::new();
    for i in 0..5 {
        let e = world.spawn();
        world.add(e, Transform::from_position(Vec3::new(i as f32, 0.0, 0.0)));
        world.add(e, Tag(format!("entity_{}", i)));
    }
    let json = serialize_scene_json(&world, &registry);
    let mut world2 = World::new();
    let entities = deserialize_scene_json(&mut world2, &json, &registry).unwrap();
    assert_eq!(entities.len(), 5);
}
}

View File

@@ -0,0 +1,121 @@
use crate::World;
/// A system that can be run on the world.
pub trait System {
    /// Execute one step of this system against `world`.
    fn run(&mut self, world: &mut World);
}
/// Blanket impl: any FnMut(&mut World) is a System, so plain closures can
/// be registered with the scheduler without a wrapper type.
impl<F: FnMut(&mut World)> System for F {
    fn run(&mut self, world: &mut World) {
        (self)(world);
    }
}
/// Runs registered systems in order.
pub struct Scheduler {
    systems: Vec<Box<dyn System>>,
}

impl Scheduler {
    /// Create a scheduler with no systems.
    pub fn new() -> Self {
        Scheduler { systems: Vec::new() }
    }

    /// Add a system. Systems run in the order they are added.
    /// Returns `&mut self` so registrations can be chained.
    pub fn add<S: System + 'static>(&mut self, system: S) -> &mut Self {
        self.systems.push(Box::new(system));
        self
    }

    /// Run all systems in registration order.
    pub fn run_all(&mut self, world: &mut World) {
        self.systems.iter_mut().for_each(|system| system.run(world));
    }

    /// Number of registered systems.
    pub fn len(&self) -> usize {
        self.systems.len()
    }

    /// True when no systems are registered.
    pub fn is_empty(&self) -> bool {
        self.systems.is_empty()
    }
}

impl Default for Scheduler {
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::World;

    // Simple component used to observe system side effects.
    #[derive(Debug, PartialEq)]
    struct Counter(u32);

    // Non-commutative operations (add, then multiply) prove systems run in
    // registration order.
    #[test]
    fn test_scheduler_runs_in_order() {
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Counter(0));
        let mut scheduler = Scheduler::new();
        scheduler.add(|world: &mut World| {
            let e = world.query::<Counter>().next().unwrap().0;
            let c = world.get_mut::<Counter>(e).unwrap();
            c.0 += 1; // 0 -> 1
        });
        scheduler.add(|world: &mut World| {
            let e = world.query::<Counter>().next().unwrap().0;
            let c = world.get_mut::<Counter>(e).unwrap();
            c.0 *= 10; // 1 -> 10
        });
        scheduler.run_all(&mut world);
        let c = world.get::<Counter>(e).unwrap();
        assert_eq!(c.0, 10); // proves order: add first, then multiply
    }

    #[test]
    fn test_scheduler_empty() {
        let mut world = World::new();
        let mut scheduler = Scheduler::new();
        scheduler.run_all(&mut world); // should not panic
    }

    // Systems are FnMut and remain runnable across multiple frames.
    #[test]
    fn test_scheduler_multiple_runs() {
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Counter(0));
        let mut scheduler = Scheduler::new();
        scheduler.add(|world: &mut World| {
            let e = world.query::<Counter>().next().unwrap().0;
            let c = world.get_mut::<Counter>(e).unwrap();
            c.0 += 1;
        });
        scheduler.run_all(&mut world);
        scheduler.run_all(&mut world);
        scheduler.run_all(&mut world);
        assert_eq!(world.get::<Counter>(e).unwrap().0, 3);
    }

    // `add` returns &mut Self for chaining.
    #[test]
    fn test_scheduler_add_chaining() {
        let mut scheduler = Scheduler::new();
        scheduler
            .add(|_: &mut World| {})
            .add(|_: &mut World| {});
        assert_eq!(scheduler.len(), 2);
    }
}

View File

@@ -5,6 +5,8 @@ pub struct SparseSet<T> {
sparse: Vec<Option<usize>>,
dense_entities: Vec<Entity>,
dense_data: Vec<T>,
ticks: Vec<u64>,
current_tick: u64,
}
impl<T> SparseSet<T> {
@@ -13,6 +15,8 @@ impl<T> SparseSet<T> {
sparse: Vec::new(),
dense_entities: Vec::new(),
dense_data: Vec::new(),
ticks: Vec::new(),
current_tick: 1,
}
}
@@ -27,11 +31,13 @@ impl<T> SparseSet<T> {
// Overwrite existing
self.dense_data[dense_idx] = value;
self.dense_entities[dense_idx] = entity;
self.ticks[dense_idx] = self.current_tick;
} else {
let dense_idx = self.dense_data.len();
self.sparse[id] = Some(dense_idx);
self.dense_entities.push(entity);
self.dense_data.push(value);
self.ticks.push(self.current_tick);
}
}
@@ -49,12 +55,14 @@ impl<T> SparseSet<T> {
if dense_idx == last_idx {
self.dense_entities.pop();
self.ticks.pop();
Some(self.dense_data.pop().unwrap())
} else {
// Swap with last
let swapped_entity = self.dense_entities[last_idx];
self.sparse[swapped_entity.id as usize] = Some(dense_idx);
self.dense_entities.swap_remove(dense_idx);
self.ticks.swap_remove(dense_idx);
Some(self.dense_data.swap_remove(dense_idx))
}
}
@@ -74,6 +82,7 @@ impl<T> SparseSet<T> {
if self.dense_entities[dense_idx] != entity {
return None;
}
self.ticks[dense_idx] = self.current_tick;
Some(&mut self.dense_data[dense_idx])
}
@@ -114,6 +123,29 @@ impl<T> SparseSet<T> {
pub fn data_mut(&mut self) -> &mut [T] {
&mut self.dense_data
}
/// Check if an entity's component was changed this tick.
///
/// Also verifies the dense slot still holds this exact entity, mirroring
/// `get`/`get_mut` (which reject a mismatched `dense_entities[idx]`).
/// Without that check, a recycled entity id (same index, newer generation)
/// would report the stale entity's change state.
pub fn is_changed(&self, entity: Entity) -> bool {
    match self.sparse.get(entity.id as usize).and_then(|slot| *slot) {
        Some(dense_idx) => {
            self.dense_entities[dense_idx] == entity
                && self.ticks[dense_idx] == self.current_tick
        }
        None => false,
    }
}
/// Advance the tick counter (call at end of frame).
pub fn increment_tick(&mut self) {
    self.current_tick += 1;
}
/// Return entities changed this tick with their data.
pub fn iter_changed(&self) -> impl Iterator<Item = (Entity, &T)> + '_ {
    // ticks[i] is kept in lockstep with dense_entities[i] / dense_data[i]
    // by insert/remove, so a three-way zip is safe.
    self.dense_entities.iter()
        .zip(self.dense_data.iter())
        .zip(self.ticks.iter())
        .filter(move |((_, _), &tick)| tick == self.current_tick)
        .map(|((entity, data), _)| (*entity, data))
}
}
impl<T> Default for SparseSet<T> {
@@ -127,6 +159,7 @@ pub trait ComponentStorage: Any {
fn as_any_mut(&mut self) -> &mut dyn Any;
fn remove_entity(&mut self, entity: Entity);
fn storage_len(&self) -> usize;
fn increment_tick(&mut self);
}
impl<T: 'static> ComponentStorage for SparseSet<T> {
@@ -142,6 +175,9 @@ impl<T: 'static> ComponentStorage for SparseSet<T> {
fn storage_len(&self) -> usize {
self.dense_data.len()
}
fn increment_tick(&mut self) {
self.current_tick += 1;
}
}
#[cfg(test)]
@@ -235,6 +271,74 @@ mod tests {
assert!(!set.contains(e));
}
#[test]
fn test_insert_is_changed() {
let mut set = SparseSet::<u32>::new();
let e = make_entity(0, 0);
set.insert(e, 42);
assert!(set.is_changed(e));
}
#[test]
fn test_get_mut_marks_changed() {
let mut set = SparseSet::<u32>::new();
let e = make_entity(0, 0);
set.insert(e, 42);
set.increment_tick();
assert!(!set.is_changed(e));
let _ = set.get_mut(e);
assert!(set.is_changed(e));
}
#[test]
fn test_get_not_changed() {
let mut set = SparseSet::<u32>::new();
let e = make_entity(0, 0);
set.insert(e, 42);
set.increment_tick();
let _ = set.get(e);
assert!(!set.is_changed(e));
}
#[test]
fn test_clear_resets_changed() {
let mut set = SparseSet::<u32>::new();
let e = make_entity(0, 0);
set.insert(e, 42);
assert!(set.is_changed(e));
set.increment_tick();
assert!(!set.is_changed(e));
}
#[test]
fn test_iter_changed() {
let mut set = SparseSet::<u32>::new();
let e1 = make_entity(0, 0);
let e2 = make_entity(1, 0);
let e3 = make_entity(2, 0);
set.insert(e1, 10);
set.insert(e2, 20);
set.insert(e3, 30);
set.increment_tick();
let _ = set.get_mut(e2);
let changed: Vec<_> = set.iter_changed().collect();
assert_eq!(changed.len(), 1);
assert_eq!(changed[0].0.id, 1);
}
#[test]
fn test_remove_preserves_ticks() {
let mut set = SparseSet::<u32>::new();
let e1 = make_entity(0, 0);
let e2 = make_entity(1, 0);
set.insert(e1, 10);
set.insert(e2, 20);
set.increment_tick();
let _ = set.get_mut(e2);
set.remove(e1);
assert!(set.is_changed(e2));
}
#[test]
fn test_swap_remove_correctness() {
let mut set: SparseSet<i32> = SparseSet::new();

View File

@@ -114,6 +114,184 @@ impl World {
}
result
}
/// Query all entities that have components A, B, and C.
/// Returns an empty Vec if any of the three storages does not exist.
pub fn query3<A: 'static, B: 'static, C: 'static>(&self) -> Vec<(Entity, &A, &B, &C)> {
    let a_storage = match self.storage::<A>() {
        Some(s) => s,
        None => return Vec::new(),
    };
    let b_storage = match self.storage::<B>() {
        Some(s) => s,
        None => return Vec::new(),
    };
    let c_storage = match self.storage::<C>() {
        Some(s) => s,
        None => return Vec::new(),
    };
    // Find the smallest storage to iterate: the intersection can be no
    // larger than it, so iterating it minimizes per-entity probes.
    let a_len = a_storage.len();
    let b_len = b_storage.len();
    let c_len = c_storage.len();
    let mut result = Vec::new();
    if a_len <= b_len && a_len <= c_len {
        for (entity, a) in a_storage.iter() {
            if let (Some(b), Some(c)) = (b_storage.get(entity), c_storage.get(entity)) {
                result.push((entity, a, b, c));
            }
        }
    } else if b_len <= a_len && b_len <= c_len {
        for (entity, b) in b_storage.iter() {
            if let (Some(a), Some(c)) = (a_storage.get(entity), c_storage.get(entity)) {
                result.push((entity, a, b, c));
            }
        }
    } else {
        for (entity, c) in c_storage.iter() {
            if let (Some(a), Some(b)) = (a_storage.get(entity), b_storage.get(entity)) {
                result.push((entity, a, b, c));
            }
        }
    }
    result
}
/// Query all entities that have components A, B, C, and D.
/// Returns an empty Vec if any of the four storages does not exist.
pub fn query4<A: 'static, B: 'static, C: 'static, D: 'static>(
    &self,
) -> Vec<(Entity, &A, &B, &C, &D)> {
    let a_storage = match self.storage::<A>() {
        Some(s) => s,
        None => return Vec::new(),
    };
    let b_storage = match self.storage::<B>() {
        Some(s) => s,
        None => return Vec::new(),
    };
    let c_storage = match self.storage::<C>() {
        Some(s) => s,
        None => return Vec::new(),
    };
    let d_storage = match self.storage::<D>() {
        Some(s) => s,
        None => return Vec::new(),
    };
    // Find the smallest storage to iterate (same strategy as query3):
    // drive the scan from it and probe the other three per entity.
    let a_len = a_storage.len();
    let b_len = b_storage.len();
    let c_len = c_storage.len();
    let d_len = d_storage.len();
    let mut result = Vec::new();
    if a_len <= b_len && a_len <= c_len && a_len <= d_len {
        for (entity, a) in a_storage.iter() {
            if let (Some(b), Some(c), Some(d)) = (
                b_storage.get(entity),
                c_storage.get(entity),
                d_storage.get(entity),
            ) {
                result.push((entity, a, b, c, d));
            }
        }
    } else if b_len <= a_len && b_len <= c_len && b_len <= d_len {
        for (entity, b) in b_storage.iter() {
            if let (Some(a), Some(c), Some(d)) = (
                a_storage.get(entity),
                c_storage.get(entity),
                d_storage.get(entity),
            ) {
                result.push((entity, a, b, c, d));
            }
        }
    } else if c_len <= a_len && c_len <= b_len && c_len <= d_len {
        for (entity, c) in c_storage.iter() {
            if let (Some(a), Some(b), Some(d)) = (
                a_storage.get(entity),
                b_storage.get(entity),
                d_storage.get(entity),
            ) {
                result.push((entity, a, b, c, d));
            }
        }
    } else {
        for (entity, d) in d_storage.iter() {
            if let (Some(a), Some(b), Some(c)) = (
                a_storage.get(entity),
                b_storage.get(entity),
                c_storage.get(entity),
            ) {
                result.push((entity, a, b, c, d));
            }
        }
    }
    result
}
/// Query entities whose component T was changed this tick.
pub fn query_changed<T: 'static>(&self) -> Vec<(Entity, &T)> {
    self.storages
        .get(&TypeId::of::<T>())
        .map(|storage| {
            storage
                .as_any()
                .downcast_ref::<SparseSet<T>>()
                .unwrap()
                .iter_changed()
                .collect()
        })
        .unwrap_or_default()
}
/// Advance tick on all component storages (call at end of frame).
pub fn clear_changed(&mut self) {
    self.storages
        .values_mut()
        .for_each(|storage| storage.increment_tick());
}
/// Returns true if `entity` currently has a component of type T.
pub fn has_component<T: 'static>(&self, entity: Entity) -> bool {
    match self.storage::<T>() {
        Some(storage) => storage.contains(entity),
        None => false,
    }
}
/// Query entities that have component T AND also have component W.
pub fn query_with<T: 'static, W: 'static>(&self) -> Vec<(Entity, &T)> {
    match self.storage::<T>() {
        Some(storage) => storage
            .iter()
            .filter(|(entity, _)| self.has_component::<W>(*entity))
            .collect(),
        None => Vec::new(),
    }
}
/// Query entities that have component T but NOT component W.
pub fn query_without<T: 'static, W: 'static>(&self) -> Vec<(Entity, &T)> {
    match self.storage::<T>() {
        Some(storage) => storage
            .iter()
            .filter(|(entity, _)| !self.has_component::<W>(*entity))
            .collect(),
        None => Vec::new(),
    }
}
/// Query entities with components A and B, that also have component W.
pub fn query2_with<A: 'static, B: 'static, W: 'static>(&self) -> Vec<(Entity, &A, &B)> {
    let mut out = Vec::new();
    for (entity, a, b) in self.query2::<A, B>() {
        if self.has_component::<W>(entity) {
            out.push((entity, a, b));
        }
    }
    out
}
/// Query entities with components A and B, that do NOT have component W.
pub fn query2_without<A: 'static, B: 'static, W: 'static>(&self) -> Vec<(Entity, &A, &B)> {
    let mut out = Vec::new();
    for (entity, a, b) in self.query2::<A, B>() {
        if !self.has_component::<W>(entity) {
            out.push((entity, a, b));
        }
    }
    out
}
}
impl Default for World {
@@ -222,6 +400,180 @@ mod tests {
assert!(!entities.contains(&e2));
}
// query3 must return only entities that own all three component types.
#[test]
fn test_query3() {
    #[derive(Debug, PartialEq)]
    struct Health(i32);
    let mut world = World::new();
    let e0 = world.spawn();
    world.add(e0, Position { x: 1.0, y: 0.0 });
    world.add(e0, Velocity { dx: 1.0, dy: 0.0 });
    world.add(e0, Health(100));
    let e1 = world.spawn();
    world.add(e1, Position { x: 2.0, y: 0.0 });
    world.add(e1, Velocity { dx: 2.0, dy: 0.0 });
    // e1 has no Health
    let results = world.query3::<Position, Velocity, Health>();
    assert_eq!(results.len(), 1);
    assert_eq!(results[0].0, e0);
}
// query4 must exclude entities that are missing any one of the four types.
#[test]
fn test_query4() {
    #[derive(Debug, PartialEq)]
    struct Health(i32);
    #[derive(Debug, PartialEq)]
    struct Tag(u8);
    let mut world = World::new();
    let e0 = world.spawn();
    world.add(e0, Position { x: 1.0, y: 0.0 });
    world.add(e0, Velocity { dx: 1.0, dy: 0.0 });
    world.add(e0, Health(100));
    world.add(e0, Tag(1));
    let e1 = world.spawn();
    world.add(e1, Position { x: 2.0, y: 0.0 });
    world.add(e1, Velocity { dx: 2.0, dy: 0.0 });
    world.add(e1, Health(50));
    // e1 has no Tag
    let e2 = world.spawn();
    world.add(e2, Position { x: 3.0, y: 0.0 });
    world.add(e2, Velocity { dx: 3.0, dy: 0.0 });
    world.add(e2, Tag(2));
    // e2 has no Health
    let results = world.query4::<Position, Velocity, Health, Tag>();
    assert_eq!(results.len(), 1);
    assert_eq!(results[0].0, e0);
}
// has_component answers per-entity, per-type membership.
#[test]
fn test_has_component() {
    let mut world = World::new();
    let e = world.spawn();
    world.add(e, Position { x: 1.0, y: 2.0 });
    assert!(world.has_component::<Position>(e));
    assert!(!world.has_component::<Velocity>(e));
}
// query_with filters T-entities to those that ALSO carry the witness type W.
#[test]
fn test_query_with() {
    let mut world = World::new();
    let e0 = world.spawn();
    let e1 = world.spawn();
    let e2 = world.spawn();
    world.add(e0, Position { x: 1.0, y: 0.0 });
    world.add(e0, Velocity { dx: 1.0, dy: 0.0 });
    world.add(e1, Position { x: 2.0, y: 0.0 });
    // e1 has Position but no Velocity
    world.add(e2, Position { x: 3.0, y: 0.0 });
    world.add(e2, Velocity { dx: 3.0, dy: 0.0 });
    let results = world.query_with::<Position, Velocity>();
    assert_eq!(results.len(), 2);
    let entities: Vec<Entity> = results.iter().map(|(e, _)| *e).collect();
    assert!(entities.contains(&e0));
    assert!(entities.contains(&e2));
    assert!(!entities.contains(&e1));
}
// query_without keeps only T-entities that LACK the witness type W.
#[test]
fn test_query_without() {
    let mut world = World::new();
    let e0 = world.spawn();
    let e1 = world.spawn();
    let e2 = world.spawn();
    world.add(e0, Position { x: 1.0, y: 0.0 });
    world.add(e0, Velocity { dx: 1.0, dy: 0.0 });
    world.add(e1, Position { x: 2.0, y: 0.0 });
    // e1 has Position but no Velocity — should be included
    world.add(e2, Position { x: 3.0, y: 0.0 });
    world.add(e2, Velocity { dx: 3.0, dy: 0.0 });
    let results = world.query_without::<Position, Velocity>();
    assert_eq!(results.len(), 1);
    assert_eq!(results[0].0, e1);
}
// query2_with: (A, B) pair query further filtered by witness type W.
#[test]
fn test_query2_with() {
    #[derive(Debug, PartialEq)]
    struct Health(i32);
    let mut world = World::new();
    let e0 = world.spawn();
    world.add(e0, Position { x: 1.0, y: 0.0 });
    world.add(e0, Velocity { dx: 1.0, dy: 0.0 });
    world.add(e0, Health(100));
    let e1 = world.spawn();
    world.add(e1, Position { x: 2.0, y: 0.0 });
    world.add(e1, Velocity { dx: 2.0, dy: 0.0 });
    // e1 has no Health
    let results = world.query2_with::<Position, Velocity, Health>();
    assert_eq!(results.len(), 1);
    assert_eq!(results[0].0, e0);
}
// query2_without: (A, B) pair query restricted to entities lacking W.
#[test]
fn test_query2_without() {
    #[derive(Debug, PartialEq)]
    struct Health(i32);
    let mut world = World::new();
    let e0 = world.spawn();
    world.add(e0, Position { x: 1.0, y: 0.0 });
    world.add(e0, Velocity { dx: 1.0, dy: 0.0 });
    world.add(e0, Health(100));
    let e1 = world.spawn();
    world.add(e1, Position { x: 2.0, y: 0.0 });
    world.add(e1, Velocity { dx: 2.0, dy: 0.0 });
    // e1 has no Health
    let results = world.query2_without::<Position, Velocity, Health>();
    assert_eq!(results.len(), 1);
    assert_eq!(results[0].0, e1);
}
// After clear_changed, only components mutated via get_mut are reported.
#[test]
fn test_query_changed() {
    let mut world = World::new();
    let e1 = world.spawn();
    let e2 = world.spawn();
    world.add(e1, 10u32);
    world.add(e2, 20u32);
    world.clear_changed();
    if let Some(v) = world.get_mut::<u32>(e1) {
        *v = 100;
    }
    let changed = world.query_changed::<u32>();
    assert_eq!(changed.len(), 1);
    assert_eq!(*changed[0].1, 100);
}
// clear_changed must advance the tick of EVERY storage, not just one type.
#[test]
fn test_clear_changed_all_storages() {
    let mut world = World::new();
    let e = world.spawn();
    world.add(e, 42u32);
    world.add(e, 3.14f32);
    let changed_u32 = world.query_changed::<u32>();
    assert_eq!(changed_u32.len(), 1);
    world.clear_changed();
    let changed_u32 = world.query_changed::<u32>();
    assert_eq!(changed_u32.len(), 0);
    let changed_f32 = world.query_changed::<f32>();
    assert_eq!(changed_f32.len(), 0);
}
#[test]
fn test_entity_count() {
let mut world = World::new();

View File

@@ -0,0 +1,11 @@
[package]
name = "voltex_editor"
version = "0.1.0"
edition = "2021"
[dependencies]
bytemuck = { workspace = true }
voltex_math = { workspace = true }
voltex_ecs = { workspace = true }
voltex_renderer = { workspace = true }
wgpu = { workspace = true }

View File

@@ -0,0 +1,297 @@
use std::path::PathBuf;
use crate::ui_context::UiContext;
use crate::dock::Rect;
use crate::layout::LayoutState;
/// Background highlight for the currently selected file row (RGBA).
const COLOR_SELECTED: [u8; 4] = [0x44, 0x66, 0x88, 0xFF];
/// Color used for all text glyphs in this panel (RGBA).
const COLOR_TEXT: [u8; 4] = [0xEE, 0xEE, 0xEE, 0xFF];
/// Inner padding, in pixels, around panel content and between rows.
const PADDING: f32 = 4.0;
/// One entry of the currently displayed directory listing.
pub struct DirEntry {
    /// File or directory name (lossy UTF-8 conversion of the OS name).
    pub name: String,
    /// True when the entry is a directory.
    pub is_dir: bool,
    /// Size in bytes from file metadata; 0 when metadata was unavailable.
    pub size: u64,
}
/// File-system browser state rooted at a fixed project directory.
pub struct AssetBrowser {
    /// Canonicalized root; navigation is never allowed above this path.
    pub root: PathBuf,
    /// Directory currently being displayed (kept within `root`).
    pub current: PathBuf,
    /// Listing of `current` — directories first, then files, each sorted by name.
    pub entries: Vec<DirEntry>,
    /// Name of the currently selected non-directory entry, if any.
    pub selected_file: Option<String>,
}
/// Format a byte count as a short human-readable string.
///
/// Uses binary units (1024-based): `B` below 1 KiB, then `KB`, `MB`, and
/// `GB`, each with one decimal place.
pub fn format_size(bytes: u64) -> String {
    const KB: u64 = 1024;
    const MB: u64 = KB * 1024;
    const GB: u64 = MB * 1024;
    if bytes < KB {
        format!("{} B", bytes)
    } else if bytes < MB {
        format!("{:.1} KB", bytes as f64 / KB as f64)
    } else if bytes < GB {
        format!("{:.1} MB", bytes as f64 / MB as f64)
    } else {
        // Without this tier, anything >= 1 GiB rendered as thousands of MB.
        format!("{:.1} GB", bytes as f64 / GB as f64)
    }
}
impl AssetBrowser {
    /// Create a browser rooted at `root` (canonicalized when possible) and
    /// scan the initial directory listing.
    pub fn new(root: PathBuf) -> Self {
        let root = std::fs::canonicalize(&root).unwrap_or(root);
        let current = root.clone();
        let mut browser = AssetBrowser {
            root,
            current,
            entries: Vec::new(),
            selected_file: None,
        };
        browser.refresh();
        browser
    }
    /// Re-scan `current`, clearing any selection.
    /// Directories sort before files; within each group names are alphabetical.
    pub fn refresh(&mut self) {
        self.entries.clear();
        self.selected_file = None;
        if let Ok(read_dir) = std::fs::read_dir(&self.current) {
            for entry in read_dir.flatten() {
                let meta = entry.metadata().ok();
                let is_dir = meta.as_ref().map_or(false, |m| m.is_dir());
                let size = meta.as_ref().map_or(0, |m| m.len());
                let name = entry.file_name().to_string_lossy().to_string();
                self.entries.push(DirEntry { name, is_dir, size });
            }
        }
        self.entries.sort_by(|a, b| {
            b.is_dir.cmp(&a.is_dir).then(a.name.cmp(&b.name))
        });
    }
    /// Enter `dir_name` if it resolves to a directory inside `root`.
    ///
    /// The target is canonicalized BEFORE the root check: a purely lexical
    /// `starts_with` test is defeated by `..` components (`root/sub/..`
    /// lexically starts with `root` even though it escapes `sub`), and by
    /// symlinks pointing outside the root. Canonicalization resolves both,
    /// making the guard effective (`navigate_to("..")` at the root resolves
    /// to the root's parent, which fails `starts_with` and is rejected).
    pub fn navigate_to(&mut self, dir_name: &str) {
        let target = self.current.join(dir_name);
        if let Ok(canonical) = std::fs::canonicalize(&target) {
            if canonical.starts_with(&self.root) && canonical.is_dir() {
                self.current = canonical;
                self.refresh();
            }
        }
    }
    /// Move to the parent directory, refusing to rise above `root`.
    pub fn go_up(&mut self) {
        if self.current != self.root {
            if let Some(parent) = self.current.parent() {
                let parent = parent.to_path_buf();
                if parent.starts_with(&self.root) || parent == self.root {
                    self.current = parent;
                    self.refresh();
                }
            }
        }
    }
    /// `current` rendered relative to `root` for display (empty at the root).
    pub fn relative_path(&self) -> String {
        self.current
            .strip_prefix(&self.root)
            .unwrap_or(&self.current)
            .to_string_lossy()
            .to_string()
    }
}
/// Draw the asset-browser panel into `rect` and handle one frame of input.
///
/// Immediate-mode layout: a path header, an optional "[..]" up button, one
/// row per directory entry, and a file-info footer for the selected file.
/// Clicking a directory row navigates into it; clicking a file row selects it.
pub fn asset_browser_panel(
    ui: &mut UiContext,
    browser: &mut AssetBrowser,
    rect: &Rect,
) {
    ui.layout = LayoutState::new(rect.x + PADDING, rect.y + PADDING);
    let path_text = format!("Path: /{}", browser.relative_path());
    ui.text(&path_text);
    // Go up button
    if browser.current != browser.root {
        if ui.button("[..]") {
            browser.go_up();
            // Early return: go_up rebuilt `entries`, so skip drawing the
            // now-stale listing for the remainder of this frame.
            return;
        }
    }
    let gw = ui.font.glyph_width as f32;
    let gh = ui.font.glyph_height as f32;
    let line_h = gh + PADDING;
    // Collect click action to avoid borrow conflict
    let mut clicked_dir: Option<String> = None;
    let entries_snapshot: Vec<(String, bool)> = browser.entries.iter()
        .map(|e| (e.name.clone(), e.is_dir))
        .collect();
    for (name, is_dir) in &entries_snapshot {
        let y = ui.layout.cursor_y;
        let x = rect.x + PADDING;
        // Highlight selected file
        if !is_dir {
            if browser.selected_file.as_deref() == Some(name.as_str()) {
                ui.draw_list.add_rect(rect.x, y, rect.w, line_h, COLOR_SELECTED);
            }
        }
        // Click
        if ui.mouse_clicked && ui.mouse_in_rect(rect.x, y, rect.w, line_h) {
            if *is_dir {
                // Navigation is deferred until after the loop so the
                // snapshot stays consistent while rows are still drawn.
                clicked_dir = Some(name.clone());
            } else {
                browser.selected_file = Some(name.clone());
            }
        }
        // Label
        let label = if *is_dir {
            format!("[D] {}", name)
        } else {
            format!("    {}", name)
        };
        let text_y = y + (line_h - gh) * 0.5;
        let mut cx = x;
        // Glyphs are emitted inline rather than via ui.text to keep the
        // cursor under manual control for per-row layout.
        for ch in label.chars() {
            let (u0, v0, u1, v1) = ui.font.glyph_uv(ch);
            ui.draw_list.add_rect_uv(cx, text_y, gw, gh, u0, v0, u1, v1, COLOR_TEXT);
            cx += gw;
        }
        ui.layout.cursor_y += line_h;
    }
    if let Some(dir) = clicked_dir {
        browser.navigate_to(&dir);
    }
    // File info
    if let Some(ref file_name) = browser.selected_file.clone() {
        ui.text("-- File Info --");
        ui.text(&format!("Name: {}", file_name));
        if let Some(entry) = browser.entries.iter().find(|e| e.name == *file_name) {
            ui.text(&format!("Size: {}", format_size(entry.size)));
            if let Some(dot_pos) = file_name.rfind('.') {
                let ext = &file_name[dot_pos..];
                ui.text(&format!("Type: {}", ext));
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs;
    // Each test uses a uniquely named temp dir (keyed by `name`) so the
    // tests stay independent when run in parallel.
    fn make_temp_dir(name: &str) -> PathBuf {
        let dir = std::env::temp_dir().join(format!("voltex_ab_test_{}", name));
        let _ = fs::remove_dir_all(&dir);
        fs::create_dir_all(&dir).unwrap();
        dir
    }
    #[test]
    fn test_new_scans_entries() {
        let dir = make_temp_dir("scan");
        fs::write(dir.join("file1.txt"), "hello").unwrap();
        fs::write(dir.join("file2.png"), "data").unwrap();
        fs::create_dir_all(dir.join("subdir")).unwrap();
        let browser = AssetBrowser::new(dir.clone());
        assert_eq!(browser.entries.len(), 3);
        let _ = fs::remove_dir_all(&dir);
    }
    #[test]
    fn test_entries_sorted_dirs_first() {
        let dir = make_temp_dir("sort");
        fs::write(dir.join("zebra.txt"), "z").unwrap();
        fs::write(dir.join("alpha.txt"), "a").unwrap();
        fs::create_dir_all(dir.join("middle_dir")).unwrap();
        let browser = AssetBrowser::new(dir.clone());
        // Dir should come first
        assert!(browser.entries[0].is_dir);
        assert_eq!(browser.entries[0].name, "middle_dir");
        // Then files alphabetically
        assert_eq!(browser.entries[1].name, "alpha.txt");
        assert_eq!(browser.entries[2].name, "zebra.txt");
        let _ = fs::remove_dir_all(&dir);
    }
    #[test]
    fn test_navigate_to() {
        let dir = make_temp_dir("nav");
        fs::create_dir_all(dir.join("sub")).unwrap();
        fs::write(dir.join("sub").join("inner.txt"), "x").unwrap();
        let mut browser = AssetBrowser::new(dir.clone());
        browser.navigate_to("sub");
        assert!(browser.current.ends_with("sub"));
        assert_eq!(browser.entries.len(), 1);
        assert_eq!(browser.entries[0].name, "inner.txt");
        let _ = fs::remove_dir_all(&dir);
    }
    #[test]
    fn test_go_up() {
        let dir = make_temp_dir("goup");
        fs::create_dir_all(dir.join("child")).unwrap();
        let mut browser = AssetBrowser::new(dir.clone());
        browser.navigate_to("child");
        assert!(browser.current.ends_with("child"));
        browser.go_up();
        // Compare against the canonical path since new() canonicalizes root.
        assert_eq!(browser.current, std::fs::canonicalize(&dir).unwrap());
        let _ = fs::remove_dir_all(&dir);
    }
    #[test]
    fn test_go_up_at_root() {
        let dir = make_temp_dir("goup_root");
        let browser_root = std::fs::canonicalize(&dir).unwrap();
        let mut browser = AssetBrowser::new(dir.clone());
        browser.go_up(); // should be no-op
        assert_eq!(browser.current, browser_root);
        let _ = fs::remove_dir_all(&dir);
    }
    #[test]
    fn test_root_guard() {
        let dir = make_temp_dir("guard");
        let mut browser = AssetBrowser::new(dir.clone());
        browser.navigate_to(".."); // should be rejected
        assert_eq!(browser.current, std::fs::canonicalize(&dir).unwrap());
        let _ = fs::remove_dir_all(&dir);
    }
    #[test]
    fn test_format_size() {
        assert_eq!(format_size(0), "0 B");
        assert_eq!(format_size(500), "500 B");
        assert_eq!(format_size(1024), "1.0 KB");
        assert_eq!(format_size(1536), "1.5 KB");
        assert_eq!(format_size(1048576), "1.0 MB");
    }
    #[test]
    fn test_panel_draws_commands() {
        let dir = make_temp_dir("panel");
        fs::write(dir.join("test.txt"), "hello").unwrap();
        let mut browser = AssetBrowser::new(dir.clone());
        let mut ui = UiContext::new(800.0, 600.0);
        let rect = Rect { x: 0.0, y: 0.0, w: 300.0, h: 400.0 };
        ui.begin_frame(0.0, 0.0, false);
        asset_browser_panel(&mut ui, &mut browser, &rect);
        ui.end_frame();
        assert!(ui.draw_list.commands.len() > 0);
        let _ = fs::remove_dir_all(&dir);
    }
}

View File

@@ -0,0 +1,558 @@
use crate::ui_context::UiContext;
/// Height in pixels of the tab bar drawn at the top of every leaf.
const TAB_BAR_HEIGHT: f32 = 20.0;
/// Split ratios are clamped to [MIN_RATIO, MAX_RATIO] so neither child
/// can be dragged to zero size.
const MIN_RATIO: f32 = 0.1;
const MAX_RATIO: f32 = 0.9;
/// Half-width in pixels of the clickable grab region around a split boundary.
const RESIZE_HANDLE_HALF: f32 = 3.0;
/// Glyph width used for tab hit-testing — must agree with the UI font's
/// glyph width so clicks line up with drawn tabs.
const GLYPH_W: f32 = 8.0;
/// Extra horizontal padding added to each tab's width.
const TAB_PADDING: f32 = 8.0;
/// An axis-aligned rectangle in pixel coordinates.
#[derive(Clone, Copy, Debug)]
pub struct Rect {
    pub x: f32,
    pub y: f32,
    pub w: f32,
    pub h: f32,
}
impl Rect {
    /// True when the point (px, py) lies inside this rectangle.
    /// Left/top edges are inclusive; right/bottom edges are exclusive.
    pub fn contains(&self, px: f32, py: f32) -> bool {
        let inside_x = px >= self.x && px < self.x + self.w;
        let inside_y = py >= self.y && py < self.y + self.h;
        inside_x && inside_y
    }
}
/// Direction along which a dock split divides its area.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Axis {
    /// Children placed side by side; the split ratio applies to width.
    Horizontal,
    /// Children stacked vertically; the split ratio applies to height.
    Vertical,
}
/// A node in the dock layout tree.
pub enum DockNode {
    /// A panel area holding one or more tabs; `active` indexes into `tabs`.
    Leaf { tabs: Vec<u32>, active: usize },
    /// Two children divided along `axis`; `ratio` is the first child's share.
    Split { axis: Axis, ratio: f32, children: [Box<DockNode>; 2] },
}
impl DockNode {
    /// Build a leaf node with the first tab active.
    pub fn leaf(tabs: Vec<u32>) -> Self {
        DockNode::Leaf { tabs, active: 0 }
    }
    /// Build a split node; `ratio` is clamped so neither child collapses.
    pub fn split(axis: Axis, ratio: f32, a: DockNode, b: DockNode) -> Self {
        let ratio = ratio.clamp(MIN_RATIO, MAX_RATIO);
        let children = [Box::new(a), Box::new(b)];
        DockNode::Split { axis, ratio, children }
    }
}
/// Resolved geometry for one leaf, produced by `DockTree::layout`.
pub struct LeafLayout {
    /// Stable index of this leaf in depth-first traversal order.
    pub leaf_index: usize,
    /// Panel ids shown as tabs in this leaf.
    pub tabs: Vec<u32>,
    /// Active tab index, clamped to the tab count.
    pub active: usize,
    /// Screen area of the tab bar strip.
    pub tab_bar_rect: Rect,
    /// Screen area below the tab bar where the active panel draws.
    pub content_rect: Rect,
}
/// Cached geometry of one split, used for hit-testing resize handles.
struct SplitLayout {
    /// Full area occupied by the split (both children).
    rect: Rect,
    axis: Axis,
    /// Pixel coordinate of the divider (x for Horizontal, y for Vertical).
    boundary: f32,
    /// Child indices (0/1) from the root down to this split node.
    path: Vec<usize>,
}
/// State of an in-progress divider drag.
struct ResizeState {
    /// Path from the root to the split being resized.
    path: Vec<usize>,
    axis: Axis,
    /// Start coordinate of the split's rect along `axis`.
    origin: f32,
    /// Extent of the split's rect along `axis`, used to turn the mouse
    /// position back into a ratio.
    size: f32,
}
/// Result of hit-testing a fresh click in `DockTree::update`.
enum UpdateAction {
    None,
    /// Begin dragging a split divider.
    StartResize(ResizeState),
    /// Switch the active tab of the leaf at `leaf_index`.
    SetActiveTab { leaf_index: usize, new_active: usize },
}
/// A dockable panel layout: a binary split tree whose leaves hold tabbed panels.
pub struct DockTree {
    root: DockNode,
    /// Display name per panel id (index = panel id).
    names: Vec<&'static str>,
    /// Leaf geometry from the most recent `layout()` call.
    cached_leaves: Vec<LeafLayout>,
    /// Split geometry from the most recent `layout()` call.
    cached_splits: Vec<SplitLayout>,
    /// Some while the user is dragging a divider.
    resizing: Option<ResizeState>,
    /// Previous frame's mouse-down state, for click-edge detection.
    prev_mouse_down: bool,
}
/// Depth-first layout pass: assign screen rects to every leaf and split.
///
/// `path` records the child indices taken from the root to the current node
/// (pushed before recursing, popped after) so splits can later be addressed
/// for resizing. `leaf_counter` numbers leaves in traversal order.
fn layout_recursive(
    node: &DockNode,
    rect: Rect,
    path: &mut Vec<usize>,
    leaf_counter: &mut usize,
    leaves: &mut Vec<LeafLayout>,
    splits: &mut Vec<SplitLayout>,
) {
    match node {
        DockNode::Leaf { tabs, active } => {
            assert!(!tabs.is_empty(), "DockNode::Leaf must have at least one tab");
            let idx = *leaf_counter;
            *leaf_counter += 1;
            leaves.push(LeafLayout {
                leaf_index: idx,
                tabs: tabs.clone(),
                // Clamp a stale `active` so it always indexes into `tabs`.
                active: (*active).min(tabs.len().saturating_sub(1)),
                tab_bar_rect: Rect { x: rect.x, y: rect.y, w: rect.w, h: TAB_BAR_HEIGHT },
                content_rect: Rect {
                    x: rect.x,
                    y: rect.y + TAB_BAR_HEIGHT,
                    w: rect.w,
                    // Guard against rects shorter than the tab bar itself.
                    h: (rect.h - TAB_BAR_HEIGHT).max(0.0),
                },
            });
        }
        DockNode::Split { axis, ratio, children } => {
            // `boundary` is the divider's pixel coordinate along the split axis.
            let (r1, r2, boundary) = match axis {
                Axis::Horizontal => {
                    let w1 = rect.w * ratio;
                    let b = rect.x + w1;
                    (
                        Rect { x: rect.x, y: rect.y, w: w1, h: rect.h },
                        Rect { x: b, y: rect.y, w: rect.w - w1, h: rect.h },
                        b,
                    )
                }
                Axis::Vertical => {
                    let h1 = rect.h * ratio;
                    let b = rect.y + h1;
                    (
                        Rect { x: rect.x, y: rect.y, w: rect.w, h: h1 },
                        Rect { x: rect.x, y: b, w: rect.w, h: rect.h - h1 },
                        b,
                    )
                }
            };
            splits.push(SplitLayout { rect, axis: *axis, boundary, path: path.clone() });
            path.push(0);
            layout_recursive(&children[0], r1, path, leaf_counter, leaves, splits);
            path.pop();
            path.push(1);
            layout_recursive(&children[1], r2, path, leaf_counter, leaves, splits);
            path.pop();
        }
    }
}
impl DockTree {
    /// Create a dock tree from a root node and a panel-id → display-name table.
    pub fn new(root: DockNode, names: Vec<&'static str>) -> Self {
        DockTree {
            root,
            names,
            cached_leaves: Vec::new(),
            cached_splits: Vec::new(),
            resizing: None,
            prev_mouse_down: false,
        }
    }
    /// Process one frame of mouse input against the geometry cached by the
    /// most recent `layout()` call.
    ///
    /// While a divider drag is active, all input is consumed updating that
    /// split's ratio; otherwise a fresh click (down-edge) may start a resize
    /// or switch a leaf's active tab.
    pub fn update(&mut self, mouse_x: f32, mouse_y: f32, mouse_down: bool) {
        let just_clicked = mouse_down && !self.prev_mouse_down;
        self.prev_mouse_down = mouse_down;
        // Active resize in progress
        if let Some(ref state) = self.resizing {
            if mouse_down {
                // Convert the mouse position back into a ratio of the
                // split's extent along its axis.
                let new_ratio = match state.axis {
                    Axis::Horizontal => (mouse_x - state.origin) / state.size,
                    Axis::Vertical => (mouse_y - state.origin) / state.size,
                };
                let clamped = new_ratio.clamp(MIN_RATIO, MAX_RATIO);
                // Clone the path so the mutable borrow of `self.root` below
                // does not overlap the borrow of `self.resizing`.
                let path = state.path.clone();
                Self::set_ratio_at_path(&mut self.root, &path, clamped);
            } else {
                self.resizing = None;
            }
            return;
        }
        if !just_clicked { return; }
        let action = self.find_click_action(mouse_x, mouse_y);
        match action {
            UpdateAction::StartResize(state) => { self.resizing = Some(state); }
            UpdateAction::SetActiveTab { leaf_index, new_active } => {
                Self::set_active_nth(&mut self.root, leaf_index, new_active, &mut 0);
            }
            UpdateAction::None => {}
        }
    }
    /// Hit-test a fresh click: resize handles take priority over tab clicks.
    fn find_click_action(&self, mx: f32, my: f32) -> UpdateAction {
        // Check resize handles first (priority over tab clicks)
        for split in &self.cached_splits {
            // A hit is within RESIZE_HANDLE_HALF pixels of the divider and
            // inside the split's extent on the other axis.
            let hit = match split.axis {
                Axis::Horizontal => {
                    mx >= split.boundary - RESIZE_HANDLE_HALF
                        && mx <= split.boundary + RESIZE_HANDLE_HALF
                        && my >= split.rect.y
                        && my < split.rect.y + split.rect.h
                }
                Axis::Vertical => {
                    my >= split.boundary - RESIZE_HANDLE_HALF
                        && my <= split.boundary + RESIZE_HANDLE_HALF
                        && mx >= split.rect.x
                        && mx < split.rect.x + split.rect.w
                }
            };
            if hit {
                let (origin, size) = match split.axis {
                    Axis::Horizontal => (split.rect.x, split.rect.w),
                    Axis::Vertical => (split.rect.y, split.rect.h),
                };
                return UpdateAction::StartResize(ResizeState {
                    path: split.path.clone(),
                    axis: split.axis,
                    origin,
                    size,
                });
            }
        }
        // Check tab bar clicks
        for leaf in &self.cached_leaves {
            // Single-tab leaves have nothing to switch to.
            if leaf.tabs.len() <= 1 { continue; }
            if !leaf.tab_bar_rect.contains(mx, my) { continue; }
            // Walk tab x-extents left to right; widths must mirror draw_chrome.
            let mut tx = leaf.tab_bar_rect.x;
            for (i, &panel_id) in leaf.tabs.iter().enumerate() {
                let name_len = self.names.get(panel_id as usize).map(|n| n.len()).unwrap_or(1);
                let tab_w = name_len as f32 * GLYPH_W + TAB_PADDING;
                if mx >= tx && mx < tx + tab_w {
                    return UpdateAction::SetActiveTab { leaf_index: leaf.leaf_index, new_active: i };
                }
                tx += tab_w;
            }
        }
        UpdateAction::None
    }
    /// Set the ratio of the split addressed by `path` (child indices from root).
    fn set_ratio_at_path(node: &mut DockNode, path: &[usize], ratio: f32) {
        if path.is_empty() {
            if let DockNode::Split { ratio: r, .. } = node { *r = ratio; }
            return;
        }
        if let DockNode::Split { children, .. } = node {
            Self::set_ratio_at_path(&mut children[path[0]], &path[1..], ratio);
        }
    }
    /// Set the active tab of the `target`-th leaf in depth-first order.
    /// `count` threads the running leaf index through the recursion.
    fn set_active_nth(node: &mut DockNode, target: usize, new_active: usize, count: &mut usize) {
        match node {
            DockNode::Leaf { active, .. } => {
                if *count == target { *active = new_active; }
                *count += 1;
            }
            DockNode::Split { children, .. } => {
                Self::set_active_nth(&mut children[0], target, new_active, count);
                Self::set_active_nth(&mut children[1], target, new_active, count);
            }
        }
    }
    /// Recompute all leaf/split geometry for `rect` and return, per leaf,
    /// the active panel id and its content rect.
    pub fn layout(&mut self, rect: Rect) -> Vec<(u32, Rect)> {
        self.cached_leaves.clear();
        self.cached_splits.clear();
        let mut path = Vec::new();
        let mut counter = 0;
        layout_recursive(
            &self.root,
            rect,
            &mut path,
            &mut counter,
            &mut self.cached_leaves,
            &mut self.cached_splits,
        );
        self.cached_leaves
            .iter()
            .map(|l| {
                let active = l.active.min(l.tabs.len().saturating_sub(1));
                (l.tabs[active], l.content_rect)
            })
            .collect()
    }
    /// Draw tab bars and split dividers using the cached layout.
    pub fn draw_chrome(&self, ui: &mut UiContext) {
        let glyph_w = ui.font.glyph_width as f32;
        let glyph_h = ui.font.glyph_height as f32;
        const COLOR_TAB_ACTIVE: [u8; 4] = [60, 60, 60, 255];
        const COLOR_TAB_INACTIVE: [u8; 4] = [40, 40, 40, 255];
        const COLOR_TEXT: [u8; 4] = [0xEE, 0xEE, 0xEE, 0xFF];
        const COLOR_SPLIT: [u8; 4] = [30, 30, 30, 255];
        const COLOR_SPLIT_ACTIVE: [u8; 4] = [100, 100, 200, 255];
        const COLOR_SEPARATOR: [u8; 4] = [50, 50, 50, 255];
        // Draw tab bars for each leaf
        for leaf in &self.cached_leaves {
            let bar = &leaf.tab_bar_rect;
            let active = leaf.active.min(leaf.tabs.len().saturating_sub(1));
            let mut tx = bar.x;
            for (i, &panel_id) in leaf.tabs.iter().enumerate() {
                let name = self.names.get(panel_id as usize).copied().unwrap_or("?");
                let tab_w = name.len() as f32 * glyph_w + TAB_PADDING;
                let bg = if i == active { COLOR_TAB_ACTIVE } else { COLOR_TAB_INACTIVE };
                ui.draw_list.add_rect(tx, bar.y, tab_w, bar.h, bg);
                // Inline text rendering (avoids borrow conflict with ui.font + ui.draw_list)
                let text_x = tx + TAB_PADDING * 0.5;
                let text_y = bar.y + (bar.h - glyph_h) * 0.5;
                let mut cx = text_x;
                for ch in name.chars() {
                    let (u0, v0, u1, v1) = ui.font.glyph_uv(ch);
                    ui.draw_list.add_rect_uv(cx, text_y, glyph_w, glyph_h, u0, v0, u1, v1, COLOR_TEXT);
                    cx += glyph_w;
                }
                tx += tab_w;
            }
            // Tab bar bottom separator
            ui.draw_list.add_rect(bar.x, bar.y + bar.h - 1.0, bar.w, 1.0, COLOR_SEPARATOR);
        }
        // Draw split lines
        for split in &self.cached_splits {
            // Highlight the divider currently being dragged.
            let color = if self.resizing.as_ref().map(|r| &r.path) == Some(&split.path) {
                COLOR_SPLIT_ACTIVE
            } else {
                COLOR_SPLIT
            };
            match split.axis {
                Axis::Horizontal => {
                    ui.draw_list.add_rect(split.boundary - 0.5, split.rect.y, 1.0, split.rect.h, color);
                }
                Axis::Vertical => {
                    ui.draw_list.add_rect(split.rect.x, split.boundary - 0.5, split.rect.w, 1.0, color);
                }
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::ui_context::UiContext;
    #[test]
    fn test_rect_contains() {
        let r = Rect { x: 10.0, y: 20.0, w: 100.0, h: 50.0 };
        assert!(r.contains(50.0, 40.0));
        assert!(!r.contains(5.0, 40.0));
        assert!(!r.contains(50.0, 80.0));
    }
    // A single leaf's content rect is the full area minus the tab bar strip.
    #[test]
    fn test_layout_single_leaf() {
        let mut dock = DockTree::new(DockNode::leaf(vec![0]), vec!["Panel0"]);
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        assert_eq!(areas.len(), 1);
        assert_eq!(areas[0].0, 0);
        let r = &areas[0].1;
        assert!((r.y - 20.0).abs() < 1e-3);
        assert!((r.h - 280.0).abs() < 1e-3);
    }
    #[test]
    fn test_layout_horizontal_split() {
        let mut dock = DockTree::new(
            DockNode::split(Axis::Horizontal, 0.25, DockNode::leaf(vec![0]), DockNode::leaf(vec![1])),
            vec!["Left", "Right"],
        );
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        assert_eq!(areas.len(), 2);
        let left = areas.iter().find(|(id, _)| *id == 0).unwrap();
        assert!((left.1.w - 100.0).abs() < 1e-3);
        let right = areas.iter().find(|(id, _)| *id == 1).unwrap();
        assert!((right.1.w - 300.0).abs() < 1e-3);
    }
    // Vertical split: top child gets ratio * height minus its tab bar.
    #[test]
    fn test_layout_vertical_split() {
        let mut dock = DockTree::new(
            DockNode::split(Axis::Vertical, 0.5, DockNode::leaf(vec![0]), DockNode::leaf(vec![1])),
            vec!["Top", "Bottom"],
        );
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        let top = areas.iter().find(|(id, _)| *id == 0).unwrap();
        assert!((top.1.h - 130.0).abs() < 1e-3);
    }
    #[test]
    fn test_layout_nested_split() {
        let mut dock = DockTree::new(
            DockNode::split(
                Axis::Horizontal,
                0.25,
                DockNode::leaf(vec![0]),
                DockNode::split(Axis::Vertical, 0.5, DockNode::leaf(vec![1]), DockNode::leaf(vec![2])),
            ),
            vec!["A", "B", "C"],
        );
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        assert_eq!(areas.len(), 3);
    }
    // layout() reports only the ACTIVE tab's panel id per leaf.
    #[test]
    fn test_layout_active_tab_only() {
        let mut dock = DockTree::new(
            DockNode::Leaf { tabs: vec![0, 1, 2], active: 1 },
            vec!["A", "B", "C"],
        );
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        assert_eq!(areas[0].0, 1);
    }
    #[test]
    fn test_active_clamped_if_out_of_bounds() {
        let mut dock = DockTree::new(
            DockNode::Leaf { tabs: vec![0], active: 5 },
            vec!["A"],
        );
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        assert_eq!(areas[0].0, 0);
    }
    #[test]
    #[should_panic]
    fn test_empty_tabs_panics() {
        let mut dock = DockTree::new(
            DockNode::Leaf { tabs: vec![], active: 0 },
            vec![],
        );
        dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
    }
    #[test]
    fn test_tab_click_switches_active() {
        let mut dock = DockTree::new(
            DockNode::Leaf { tabs: vec![0, 1, 2], active: 0 },
            vec!["A", "B", "C"],
        );
        dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        // Tab "A": w = 1*8+8 = 16, tab "B" starts at x=16. Click at x=20 (inside "B").
        dock.update(20.0, 10.0, true);
        dock.update(20.0, 10.0, false);
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        assert_eq!(areas[0].0, 1);
    }
    #[test]
    fn test_tab_click_no_change_on_single_tab() {
        let mut dock = DockTree::new(DockNode::leaf(vec![0]), vec!["Only"]);
        dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        dock.update(10.0, 10.0, true);
        dock.update(10.0, 10.0, false);
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        assert_eq!(areas[0].0, 0);
    }
    // Press on the divider, drag while held, release: ratio follows the mouse.
    #[test]
    fn test_resize_horizontal_drag() {
        let mut dock = DockTree::new(
            DockNode::split(Axis::Horizontal, 0.5, DockNode::leaf(vec![0]), DockNode::leaf(vec![1])),
            vec!["L", "R"],
        );
        dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        dock.update(200.0, 150.0, true); // click on boundary at x=200
        dock.update(120.0, 150.0, true); // drag to x=120
        dock.update(120.0, 150.0, false); // release
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        let left = areas.iter().find(|(id, _)| *id == 0).unwrap();
        assert!((left.1.w - 120.0).abs() < 5.0, "left w={}", left.1.w);
    }
    // Dragging past the edge clamps to MIN_RATIO (0.1 * 400 = 40 px).
    #[test]
    fn test_resize_clamps_ratio() {
        let mut dock = DockTree::new(
            DockNode::split(Axis::Horizontal, 0.5, DockNode::leaf(vec![0]), DockNode::leaf(vec![1])),
            vec!["L", "R"],
        );
        dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        dock.update(200.0, 150.0, true);
        dock.update(5.0, 150.0, true);
        dock.update(5.0, 150.0, false);
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        let left = areas.iter().find(|(id, _)| *id == 0).unwrap();
        assert!((left.1.w - 40.0).abs() < 1e-3, "left w={}", left.1.w);
    }
    // A click on a divider inside a tab bar must start a resize, not a tab switch.
    #[test]
    fn test_resize_priority_over_tab_click() {
        let mut dock = DockTree::new(
            DockNode::split(Axis::Horizontal, 0.5,
                DockNode::Leaf { tabs: vec![0, 1], active: 0 },
                DockNode::leaf(vec![2]),
            ),
            vec!["A", "B", "C"],
        );
        dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        dock.update(200.0, 10.0, true); // click at boundary within tab bar
        dock.update(180.0, 10.0, true); // drag
        dock.update(180.0, 10.0, false); // release
        let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        let left = areas.iter().find(|(id, _)| *id == 0 || *id == 1).unwrap();
        assert!((left.1.w - 180.0).abs() < 5.0, "resize should have priority, w={}", left.1.w);
    }
    #[test]
    fn test_draw_chrome_produces_draw_commands() {
        let mut dock = DockTree::new(
            DockNode::split(Axis::Horizontal, 0.5,
                DockNode::Leaf { tabs: vec![0, 1], active: 0 },
                DockNode::leaf(vec![2]),
            ),
            vec!["A", "B", "C"],
        );
        let mut ui = UiContext::new(800.0, 600.0);
        ui.begin_frame(0.0, 0.0, false);
        dock.layout(Rect { x: 0.0, y: 0.0, w: 800.0, h: 600.0 });
        dock.draw_chrome(&mut ui);
        assert!(ui.draw_list.commands.len() >= 5);
    }
    #[test]
    fn test_draw_chrome_active_tab_color() {
        let mut dock = DockTree::new(
            DockNode::Leaf { tabs: vec![0, 1], active: 1 },
            vec!["AA", "BB"],
        );
        let mut ui = UiContext::new(800.0, 600.0);
        ui.begin_frame(0.0, 0.0, false);
        dock.layout(Rect { x: 0.0, y: 0.0, w: 400.0, h: 300.0 });
        dock.draw_chrome(&mut ui);
        // First tab bg (inactive "AA"): color [40, 40, 40, 255]
        let first_bg = &ui.draw_list.vertices[0];
        assert_eq!(first_bg.color, [40, 40, 40, 255]);
    }
    // Smoke test: layout → update → draw over several frames stays stable.
    #[test]
    fn test_full_frame_cycle() {
        let mut dock = DockTree::new(
            DockNode::split(Axis::Horizontal, 0.3,
                DockNode::Leaf { tabs: vec![0, 1], active: 0 },
                DockNode::split(Axis::Vertical, 0.6, DockNode::leaf(vec![2]), DockNode::leaf(vec![3])),
            ),
            vec!["Hierarchy", "Inspector", "Viewport", "Console"],
        );
        let mut ui = UiContext::new(1280.0, 720.0);
        for _ in 0..3 {
            ui.begin_frame(100.0, 100.0, false);
            let areas = dock.layout(Rect { x: 0.0, y: 0.0, w: 1280.0, h: 720.0 });
            dock.update(100.0, 100.0, false);
            dock.draw_chrome(&mut ui);
            assert_eq!(areas.len(), 3);
            for (_, r) in &areas {
                assert!(r.w > 0.0);
                assert!(r.h > 0.0);
            }
            ui.end_frame();
        }
    }
}

View File

@@ -0,0 +1,154 @@
use bytemuck::{Pod, Zeroable};
use crate::font::FontAtlas;
/// One UI vertex as uploaded to the GPU: position, texture UV, RGBA color.
#[repr(C)]
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
pub struct DrawVertex {
    /// Screen position in pixels.
    pub position: [f32; 2],
    /// Texture coordinates ((0,0) for solid-color quads).
    pub uv: [f32; 2],
    /// RGBA color, 8 bits per channel.
    pub color: [u8; 4],
}
/// A scissor rectangle for content clipping, in pixel coordinates.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ScissorRect {
    /// Left edge in pixels.
    pub x: u32,
    /// Top edge in pixels.
    pub y: u32,
    /// Width in pixels.
    pub w: u32,
    /// Height in pixels.
    pub h: u32,
}
/// One indexed draw call: a range into the shared index buffer.
pub struct DrawCommand {
    /// Offset into `DrawList::indices` where this command starts.
    pub index_offset: u32,
    /// Number of indices to draw.
    pub index_count: u32,
    /// Optional scissor rect for clipping. None means no clipping.
    pub scissor: Option<ScissorRect>,
}
/// Accumulates UI geometry for one frame: vertices, u16 indices, and the
/// draw commands referencing them, plus a stack of active scissor rects.
pub struct DrawList {
    pub vertices: Vec<DrawVertex>,
    pub indices: Vec<u16>,
    pub commands: Vec<DrawCommand>,
    /// Nested clipping stack; the top entry is stamped onto new commands.
    scissor_stack: Vec<ScissorRect>,
}
impl DrawList {
    /// Create an empty draw list.
    pub fn new() -> Self {
        DrawList {
            vertices: Vec::new(),
            indices: Vec::new(),
            commands: Vec::new(),
            scissor_stack: Vec::new(),
        }
    }
    /// Reset all buffers for a new frame (capacity is retained).
    pub fn clear(&mut self) {
        self.vertices.clear();
        self.indices.clear();
        self.commands.clear();
        self.scissor_stack.clear();
    }
    /// Push a scissor rect onto the stack. All subsequent draw commands will
    /// be clipped to this rectangle until `pop_scissor` is called.
    pub fn push_scissor(&mut self, x: u32, y: u32, w: u32, h: u32) {
        self.scissor_stack.push(ScissorRect { x, y, w, h });
    }
    /// Pop the current scissor rect from the stack.
    pub fn pop_scissor(&mut self) {
        self.scissor_stack.pop();
    }
    /// Returns the current scissor rect (top of stack), or None.
    fn current_scissor(&self) -> Option<ScissorRect> {
        self.scissor_stack.last().copied()
    }
    /// Add a solid-color rectangle. UV is (0,0) for solid color rendering.
    pub fn add_rect(&mut self, x: f32, y: f32, w: f32, h: f32, color: [u8; 4]) {
        self.add_rect_uv(x, y, w, h, 0.0, 0.0, 0.0, 0.0, color);
    }
    /// Add a textured quad with explicit UV coordinates.
    ///
    /// Emits 4 vertices, 6 indices (two triangles), and one `DrawCommand`
    /// stamped with the current scissor rect.
    pub fn add_rect_uv(
        &mut self,
        x: f32, y: f32, w: f32, h: f32,
        u0: f32, v0: f32, u1: f32, v1: f32,
        color: [u8; 4],
    ) {
        let index_offset = self.indices.len() as u32;
        // Indices are u16: beyond 65_536 vertices (16_384 quads) per frame
        // the cast below would silently wrap and corrupt the mesh.
        // Catch that invariant violation in debug builds.
        debug_assert!(
            self.vertices.len() + 4 <= u16::MAX as usize + 1,
            "DrawList vertex count exceeds u16 index range"
        );
        let base_vertex = self.vertices.len() as u16;
        // 4 vertices: top-left, top-right, bottom-right, bottom-left
        self.vertices.push(DrawVertex { position: [x, y ], uv: [u0, v0], color });
        self.vertices.push(DrawVertex { position: [x + w, y ], uv: [u1, v0], color });
        self.vertices.push(DrawVertex { position: [x + w, y + h], uv: [u1, v1], color });
        self.vertices.push(DrawVertex { position: [x, y + h], uv: [u0, v1], color });
        // 2 triangles = 6 indices
        self.indices.push(base_vertex);
        self.indices.push(base_vertex + 1);
        self.indices.push(base_vertex + 2);
        self.indices.push(base_vertex);
        self.indices.push(base_vertex + 2);
        self.indices.push(base_vertex + 3);
        self.commands.push(DrawCommand {
            index_offset,
            index_count: 6,
            scissor: self.current_scissor(),
        });
    }
    /// Add text at the given position. One quad per character, using glyph UVs from the atlas.
    pub fn add_text(&mut self, font: &FontAtlas, text: &str, x: f32, y: f32, color: [u8; 4]) {
        let gw = font.glyph_width as f32;
        let gh = font.glyph_height as f32;
        let mut cx = x;
        for ch in text.chars() {
            let (u0, v0, u1, v1) = font.glyph_uv(ch);
            self.add_rect_uv(cx, y, gw, gh, u0, v0, u1, v1, color);
            cx += gw;
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::font::FontAtlas;
    // One quad = 4 vertices, 6 indices, one command covering them.
    #[test]
    fn test_add_rect_vertex_index_count() {
        let mut dl = DrawList::new();
        dl.add_rect(0.0, 0.0, 100.0, 50.0, [255, 0, 0, 255]);
        assert_eq!(dl.vertices.len(), 4);
        assert_eq!(dl.indices.len(), 6);
        assert_eq!(dl.commands.len(), 1);
        assert_eq!(dl.commands[0].index_count, 6);
        assert_eq!(dl.commands[0].index_offset, 0);
    }
    #[test]
    fn test_add_text_char_count() {
        let font = FontAtlas::generate();
        let mut dl = DrawList::new();
        let text = "Hello";
        dl.add_text(&font, text, 0.0, 0.0, [255, 255, 255, 255]);
        // 5 chars => 5 quads => 5*4=20 vertices, 5*6=30 indices
        assert_eq!(dl.vertices.len(), 5 * 4);
        assert_eq!(dl.indices.len(), 5 * 6);
        assert_eq!(dl.commands.len(), 5);
    }
    // clear() must empty every buffer so a frame starts from scratch.
    #[test]
    fn test_clear() {
        let mut dl = DrawList::new();
        dl.add_rect(0.0, 0.0, 50.0, 50.0, [0, 0, 0, 255]);
        dl.clear();
        assert!(dl.vertices.is_empty());
        assert!(dl.indices.is_empty());
        assert!(dl.commands.is_empty());
    }
}

View File

@@ -0,0 +1,313 @@
/// Bitmap font atlas for ASCII 32-126.
/// 8x12 pixel glyphs arranged in 16 columns x 6 rows = 128x72 texture.
pub struct FontAtlas {
    pub width: u32,
    pub height: u32,
    pub glyph_width: u32,
    pub glyph_height: u32,
    pub pixels: Vec<u8>, // R8 grayscale
}
/// Classic 8x12 PC BIOS bitmap font data for ASCII 32-126 (95 characters).
/// Each character is 12 bytes (one byte per row, MSB = leftmost pixel).
#[rustfmt::skip]
const FONT_DATA: [u8; 95 * 12] = [
    // 32: Space
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    // 33: !
    0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18, 0x00, 0x00, 0x00,
    // 34: "
    0x00, 0x6C, 0x6C, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    // 35: #
    0x00, 0x6C, 0x6C, 0xFE, 0x6C, 0xFE, 0x6C, 0x6C, 0x00, 0x00, 0x00, 0x00,
    // 36: $
    0x00, 0x18, 0x3E, 0x60, 0x3C, 0x06, 0x7C, 0x18, 0x00, 0x00, 0x00, 0x00,
    // 37: %
    0x00, 0x00, 0x62, 0x64, 0x08, 0x10, 0x26, 0x46, 0x00, 0x00, 0x00, 0x00,
    // 38: &
    0x00, 0x30, 0x48, 0x48, 0x30, 0x4A, 0x44, 0x3A, 0x00, 0x00, 0x00, 0x00,
    // 39: '
    0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    // 40: (
    0x00, 0x0C, 0x18, 0x30, 0x30, 0x30, 0x18, 0x0C, 0x00, 0x00, 0x00, 0x00,
    // 41: )
    0x00, 0x30, 0x18, 0x0C, 0x0C, 0x0C, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
    // 42: *
    0x00, 0x00, 0x66, 0x3C, 0xFF, 0x3C, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00,
    // 43: +
    0x00, 0x00, 0x18, 0x18, 0x7E, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00,
    // 44: ,
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
    // 45: -
    0x00, 0x00, 0x00, 0x00, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    // 46: .
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
    // 47: /
    0x00, 0x02, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x40, 0x00, 0x00, 0x00, 0x00,
    // 48: 0
    0x00, 0x3C, 0x66, 0x6E, 0x7E, 0x76, 0x66, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 49: 1
    0x00, 0x18, 0x38, 0x18, 0x18, 0x18, 0x18, 0x7E, 0x00, 0x00, 0x00, 0x00,
    // 50: 2
    0x00, 0x3C, 0x66, 0x06, 0x0C, 0x18, 0x30, 0x7E, 0x00, 0x00, 0x00, 0x00,
    // 51: 3
    0x00, 0x3C, 0x66, 0x06, 0x1C, 0x06, 0x66, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 52: 4
    0x00, 0x0C, 0x1C, 0x2C, 0x4C, 0x7E, 0x0C, 0x0C, 0x00, 0x00, 0x00, 0x00,
    // 53: 5
    0x00, 0x7E, 0x60, 0x7C, 0x06, 0x06, 0x66, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 54: 6
    0x00, 0x1C, 0x30, 0x60, 0x7C, 0x66, 0x66, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 55: 7
    0x00, 0x7E, 0x06, 0x0C, 0x18, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00,
    // 56: 8
    0x00, 0x3C, 0x66, 0x66, 0x3C, 0x66, 0x66, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 57: 9
    0x00, 0x3C, 0x66, 0x66, 0x3E, 0x06, 0x0C, 0x38, 0x00, 0x00, 0x00, 0x00,
    // 58: :
    0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00,
    // 59: ;
    0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
    // 60: <
    0x00, 0x06, 0x0C, 0x18, 0x30, 0x18, 0x0C, 0x06, 0x00, 0x00, 0x00, 0x00,
    // 61: =
    0x00, 0x00, 0x00, 0x7E, 0x00, 0x7E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    // 62: >
    0x00, 0x60, 0x30, 0x18, 0x0C, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
    // 63: ?
    0x00, 0x3C, 0x66, 0x06, 0x0C, 0x18, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00,
    // 64: @
    0x00, 0x3C, 0x66, 0x6E, 0x6A, 0x6E, 0x60, 0x3E, 0x00, 0x00, 0x00, 0x00,
    // 65: A
    0x00, 0x18, 0x3C, 0x66, 0x66, 0x7E, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
    // 66: B
    0x00, 0x7C, 0x66, 0x66, 0x7C, 0x66, 0x66, 0x7C, 0x00, 0x00, 0x00, 0x00,
    // 67: C
    0x00, 0x3C, 0x66, 0x60, 0x60, 0x60, 0x66, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 68: D
    0x00, 0x78, 0x6C, 0x66, 0x66, 0x66, 0x6C, 0x78, 0x00, 0x00, 0x00, 0x00,
    // 69: E
    0x00, 0x7E, 0x60, 0x60, 0x7C, 0x60, 0x60, 0x7E, 0x00, 0x00, 0x00, 0x00,
    // 70: F
    0x00, 0x7E, 0x60, 0x60, 0x7C, 0x60, 0x60, 0x60, 0x00, 0x00, 0x00, 0x00,
    // 71: G
    0x00, 0x3C, 0x66, 0x60, 0x6E, 0x66, 0x66, 0x3E, 0x00, 0x00, 0x00, 0x00,
    // 72: H
    0x00, 0x66, 0x66, 0x66, 0x7E, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
    // 73: I
    0x00, 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x7E, 0x00, 0x00, 0x00, 0x00,
    // 74: J
    0x00, 0x06, 0x06, 0x06, 0x06, 0x06, 0x66, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 75: K
    0x00, 0x66, 0x6C, 0x78, 0x70, 0x78, 0x6C, 0x66, 0x00, 0x00, 0x00, 0x00,
    // 76: L
    0x00, 0x60, 0x60, 0x60, 0x60, 0x60, 0x60, 0x7E, 0x00, 0x00, 0x00, 0x00,
    // 77: M
    0x00, 0xC6, 0xEE, 0xFE, 0xD6, 0xC6, 0xC6, 0xC6, 0x00, 0x00, 0x00, 0x00,
    // 78: N
    0x00, 0x66, 0x76, 0x7E, 0x7E, 0x6E, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
    // 79: O
    0x00, 0x3C, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 80: P
    0x00, 0x7C, 0x66, 0x66, 0x7C, 0x60, 0x60, 0x60, 0x00, 0x00, 0x00, 0x00,
    // 81: Q
    0x00, 0x3C, 0x66, 0x66, 0x66, 0x6A, 0x6C, 0x36, 0x00, 0x00, 0x00, 0x00,
    // 82: R
    0x00, 0x7C, 0x66, 0x66, 0x7C, 0x6C, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
    // 83: S
    0x00, 0x3C, 0x66, 0x60, 0x3C, 0x06, 0x66, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 84: T
    0x00, 0x7E, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
    // 85: U
    0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 86: V
    0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3C, 0x18, 0x00, 0x00, 0x00, 0x00,
    // 87: W
    0x00, 0xC6, 0xC6, 0xC6, 0xD6, 0xFE, 0xEE, 0xC6, 0x00, 0x00, 0x00, 0x00,
    // 88: X
    0x00, 0x66, 0x66, 0x3C, 0x18, 0x3C, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
    // 89: Y
    0x00, 0x66, 0x66, 0x66, 0x3C, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
    // 90: Z
    0x00, 0x7E, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x7E, 0x00, 0x00, 0x00, 0x00,
    // 91: [
    0x00, 0x3C, 0x30, 0x30, 0x30, 0x30, 0x30, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 92: backslash
    0x00, 0x40, 0x60, 0x30, 0x18, 0x0C, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
    // 93: ]
    0x00, 0x3C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 94: ^
    0x00, 0x18, 0x3C, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    // 95: _
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0x00, 0x00, 0x00,
    // 96: `
    0x00, 0x30, 0x18, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    // 97: a
    0x00, 0x00, 0x00, 0x3C, 0x06, 0x3E, 0x66, 0x3E, 0x00, 0x00, 0x00, 0x00,
    // 98: b
    0x00, 0x60, 0x60, 0x7C, 0x66, 0x66, 0x66, 0x7C, 0x00, 0x00, 0x00, 0x00,
    // 99: c
    0x00, 0x00, 0x00, 0x3C, 0x66, 0x60, 0x66, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 100: d
    0x00, 0x06, 0x06, 0x3E, 0x66, 0x66, 0x66, 0x3E, 0x00, 0x00, 0x00, 0x00,
    // 101: e
    0x00, 0x00, 0x00, 0x3C, 0x66, 0x7E, 0x60, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 102: f
    0x00, 0x1C, 0x30, 0x30, 0x7C, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00,
    // 103: g
    0x00, 0x00, 0x00, 0x3E, 0x66, 0x66, 0x3E, 0x06, 0x3C, 0x00, 0x00, 0x00,
    // 104: h
    0x00, 0x60, 0x60, 0x7C, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
    // 105: i
    0x00, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 106: j
    0x00, 0x0C, 0x00, 0x1C, 0x0C, 0x0C, 0x0C, 0x0C, 0x78, 0x00, 0x00, 0x00,
    // 107: k
    0x00, 0x60, 0x60, 0x66, 0x6C, 0x78, 0x6C, 0x66, 0x00, 0x00, 0x00, 0x00,
    // 108: l
    0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 109: m
    0x00, 0x00, 0x00, 0xEC, 0xFE, 0xD6, 0xC6, 0xC6, 0x00, 0x00, 0x00, 0x00,
    // 110: n
    0x00, 0x00, 0x00, 0x7C, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
    // 111: o
    0x00, 0x00, 0x00, 0x3C, 0x66, 0x66, 0x66, 0x3C, 0x00, 0x00, 0x00, 0x00,
    // 112: p
    0x00, 0x00, 0x00, 0x7C, 0x66, 0x66, 0x7C, 0x60, 0x60, 0x00, 0x00, 0x00,
    // 113: q
    0x00, 0x00, 0x00, 0x3E, 0x66, 0x66, 0x3E, 0x06, 0x06, 0x00, 0x00, 0x00,
    // 114: r
    0x00, 0x00, 0x00, 0x7C, 0x66, 0x60, 0x60, 0x60, 0x00, 0x00, 0x00, 0x00,
    // 115: s
    0x00, 0x00, 0x00, 0x3E, 0x60, 0x3C, 0x06, 0x7C, 0x00, 0x00, 0x00, 0x00,
    // 116: t
    0x00, 0x30, 0x30, 0x7C, 0x30, 0x30, 0x30, 0x1C, 0x00, 0x00, 0x00, 0x00,
    // 117: u
    0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3E, 0x00, 0x00, 0x00, 0x00,
    // 118: v
    0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x3C, 0x18, 0x00, 0x00, 0x00, 0x00,
    // 119: w
    0x00, 0x00, 0x00, 0xC6, 0xC6, 0xD6, 0xFE, 0x6C, 0x00, 0x00, 0x00, 0x00,
    // 120: x
    0x00, 0x00, 0x00, 0x66, 0x3C, 0x18, 0x3C, 0x66, 0x00, 0x00, 0x00, 0x00,
    // 121: y
    0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x3E, 0x06, 0x3C, 0x00, 0x00, 0x00,
    // 122: z
    0x00, 0x00, 0x00, 0x7E, 0x0C, 0x18, 0x30, 0x7E, 0x00, 0x00, 0x00, 0x00,
    // 123: {
    0x00, 0x0E, 0x18, 0x18, 0x70, 0x18, 0x18, 0x0E, 0x00, 0x00, 0x00, 0x00,
    // 124: |
    0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
    // 125: }
    0x00, 0x70, 0x18, 0x18, 0x0E, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
    // 126: ~
    0x00, 0x32, 0x4C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
];
impl FontAtlas {
    /// Generate a bitmap font atlas with real 8x12 PC BIOS-style glyphs.
    ///
    /// Unpacks the 1-bit-per-pixel `FONT_DATA` table into an R8 grayscale
    /// image; the 95 glyphs fill a 16-column x 6-row grid (128x72 pixels).
    pub fn generate() -> Self {
        let glyph_width: u32 = 8;
        let glyph_height: u32 = 12;
        let cols: u32 = 16;
        let rows: u32 = 6;
        let width = cols * glyph_width; // 128
        let height = rows * glyph_height; // 72
        let mut pixels = vec![0u8; (width * height) as usize];
        // Each glyph occupies 12 consecutive bytes; walk the table in
        // 12-byte chunks so the glyph index falls out of the enumeration.
        for (index, glyph_rows) in FONT_DATA.chunks_exact(12).enumerate() {
            let index = index as u32;
            let base_x = (index % cols) * glyph_width;
            let base_y = (index / cols) * glyph_height;
            for (py, &row_byte) in glyph_rows.iter().enumerate() {
                for px in 0..8u32 {
                    // MSB of each row byte is the leftmost pixel.
                    if (row_byte >> (7 - px)) & 1 != 0 {
                        let y = base_y + py as u32;
                        pixels[(y * width + base_x + px) as usize] = 255;
                    }
                }
            }
        }
        // Ensure pixel (0,0) is white (255) so solid-color rects can sample
        // UV (0,0) and get full brightness for tinting.
        pixels[0] = 255;
        FontAtlas {
            width,
            height,
            glyph_width,
            glyph_height,
            pixels,
        }
    }
    /// Returns (u0, v0, u1, v1) UV coordinates for a given character.
    /// Characters outside ASCII 32-126 map to the space glyph.
    pub fn glyph_uv(&self, ch: char) -> (f32, f32, f32, f32) {
        let code = ch as u32;
        let index = if (32..=126).contains(&code) { code - 32 } else { 0 };
        let cols = self.width / self.glyph_width;
        let gx = (index % cols) * self.glyph_width;
        let gy = (index / cols) * self.glyph_height;
        let u0 = gx as f32 / self.width as f32;
        let v0 = gy as f32 / self.height as f32;
        let du = self.glyph_width as f32 / self.width as f32;
        let dv = self.glyph_height as f32 / self.height as f32;
        (u0, v0, u0 + du, v0 + dv)
    }
}
// Unit tests for FontAtlas generation and UV lookup.
#[cfg(test)]
mod tests {
    use super::*;
    // Atlas dimensions are fixed: 16 cols x 6 rows of 8x12 glyphs.
    #[test]
    fn test_generate_size() {
        let atlas = FontAtlas::generate();
        assert_eq!(atlas.width, 128);
        assert_eq!(atlas.height, 72);
        assert_eq!(atlas.pixels.len(), (128 * 72) as usize);
    }
    // Space is the first glyph, so its UV rect starts at (0,0).
    #[test]
    fn test_glyph_uv_space() {
        let atlas = FontAtlas::generate();
        let (u0, v0, u1, v1) = atlas.glyph_uv(' ');
        assert!((u0 - 0.0).abs() < 1e-6);
        assert!((v0 - 0.0).abs() < 1e-6);
        assert!((u1 - 8.0 / 128.0).abs() < 1e-6);
        assert!((v1 - 12.0 / 72.0).abs() < 1e-6);
    }
    #[test]
    fn test_glyph_uv_a() {
        let atlas = FontAtlas::generate();
        // 'A' = ASCII 65, index = 65 - 32 = 33
        // col = 33 % 16 = 1, row = 33 / 16 = 2
        let (u0, v0, u1, v1) = atlas.glyph_uv('A');
        let expected_u0 = 1.0 * 8.0 / 128.0;
        let expected_v0 = 2.0 * 12.0 / 72.0;
        let expected_u1 = expected_u0 + 8.0 / 128.0;
        let expected_v1 = expected_v0 + 12.0 / 72.0;
        assert!((u0 - expected_u0).abs() < 1e-6, "u0 mismatch: {} vs {}", u0, expected_u0);
        assert!((v0 - expected_v0).abs() < 1e-6, "v0 mismatch: {} vs {}", v0, expected_v0);
        assert!((u1 - expected_u1).abs() < 1e-6);
        assert!((v1 - expected_v1).abs() < 1e-6);
    }
}

View File

@@ -0,0 +1,197 @@
use std::collections::HashMap;
pub const ATLAS_SIZE: u32 = 1024;
/// Placement and metrics of one rasterized glyph inside the atlas.
#[derive(Clone, Debug)]
pub struct GlyphInfo {
    /// Atlas UV rect: u0, v0, u1, v1. All zero for glyphs with no atlas
    /// area (zero-size glyphs, or glyphs rejected because the atlas is full).
    pub uv: [f32; 4],
    /// Bitmap width in pixels.
    pub width: f32,
    /// Bitmap height in pixels.
    pub height: f32,
    /// Horizontal pen advance in pixels.
    pub advance: f32,
    /// Left side bearing in pixels.
    pub bearing_x: f32,
    /// Baseline-to-top bearing in pixels.
    pub bearing_y: f32,
}
/// Shelf-packed glyph atlas: glyphs are appended left-to-right on the
/// current row and wrap to a new row (with a 1 px gap) when the row fills.
pub struct GlyphCache {
    /// R8 alpha atlas, row-major.
    pub atlas_data: Vec<u8>,
    pub atlas_width: u32,
    pub atlas_height: u32,
    glyphs: HashMap<char, GlyphInfo>,
    // Shelf-packing cursor: next free x/y and the tallest glyph in the row.
    cursor_x: u32,
    cursor_y: u32,
    row_height: u32,
    /// Set when `atlas_data` changed and the GPU copy needs re-upload.
    pub dirty: bool,
}
impl GlyphCache {
    /// Create an empty cache backed by a zeroed `width` x `height` atlas.
    pub fn new(width: u32, height: u32) -> Self {
        GlyphCache {
            atlas_data: vec![0u8; (width * height) as usize],
            atlas_width: width,
            atlas_height: height,
            glyphs: HashMap::new(),
            cursor_x: 0,
            cursor_y: 0,
            row_height: 0,
            dirty: false,
        }
    }
    /// Look up an already-inserted glyph.
    pub fn get(&self, ch: char) -> Option<&GlyphInfo> {
        self.glyphs.get(&ch)
    }
    /// Insert a rasterized glyph bitmap into the atlas.
    /// Returns reference to the cached GlyphInfo.
    pub fn insert(
        &mut self,
        ch: char,
        bitmap: &[u8],
        bmp_w: u32,
        bmp_h: u32,
        advance: f32,
        bearing_x: f32,
        bearing_y: f32,
    ) -> &GlyphInfo {
        // Handle zero-size glyphs (e.g., space): no atlas area, metrics only.
        if bmp_w == 0 || bmp_h == 0 {
            self.glyphs.insert(ch, GlyphInfo {
                uv: [0.0, 0.0, 0.0, 0.0],
                width: 0.0,
                height: 0.0,
                advance,
                bearing_x,
                bearing_y,
            });
            return self.glyphs.get(&ch).unwrap();
        }
        // Check if we need to wrap to next row
        if self.cursor_x + bmp_w > self.atlas_width {
            self.cursor_y += self.row_height + 1; // +1 pixel gap
            self.cursor_x = 0;
            self.row_height = 0;
        }
        // Reject glyphs that cannot be placed: wider than the atlas itself
        // (the per-row blit below would otherwise wrap writes into the next
        // atlas row), or overflowing vertically. Insert with zero UV so the
        // glyph simply doesn't render instead of corrupting data or crashing.
        if bmp_w > self.atlas_width || self.cursor_y + bmp_h > self.atlas_height {
            self.glyphs.insert(ch, GlyphInfo {
                uv: [0.0, 0.0, 0.0, 0.0],
                width: bmp_w as f32,
                height: bmp_h as f32,
                advance,
                bearing_x,
                bearing_y,
            });
            return self.glyphs.get(&ch).unwrap();
        }
        // Copy bitmap into atlas, row by row.
        for row in 0..bmp_h {
            let src_start = (row * bmp_w) as usize;
            let src_end = src_start + bmp_w as usize;
            let dst_y = self.cursor_y + row;
            let dst_x = self.cursor_x;
            let dst_start = (dst_y * self.atlas_width + dst_x) as usize;
            if src_end <= bitmap.len() && dst_start + bmp_w as usize <= self.atlas_data.len() {
                self.atlas_data[dst_start..dst_start + bmp_w as usize]
                    .copy_from_slice(&bitmap[src_start..src_end]);
            }
        }
        // Calculate UV coordinates of the placed rect.
        let u0 = self.cursor_x as f32 / self.atlas_width as f32;
        let v0 = self.cursor_y as f32 / self.atlas_height as f32;
        let u1 = (self.cursor_x + bmp_w) as f32 / self.atlas_width as f32;
        let v1 = (self.cursor_y + bmp_h) as f32 / self.atlas_height as f32;
        let info = GlyphInfo {
            uv: [u0, v0, u1, v1],
            width: bmp_w as f32,
            height: bmp_h as f32,
            advance,
            bearing_x,
            bearing_y,
        };
        // Advance cursor
        self.cursor_x += bmp_w + 1; // +1 pixel gap
        if bmp_h > self.row_height {
            self.row_height = bmp_h;
        }
        self.dirty = true;
        self.glyphs.insert(ch, info);
        self.glyphs.get(&ch).unwrap()
    }
    /// Acknowledge the atlas upload; clears the dirty flag.
    pub fn clear_dirty(&mut self) {
        self.dirty = false;
    }
}
// Unit tests for GlyphCache shelf packing, UV bookkeeping, and edge cases.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_insert_and_get() {
        let mut cache = GlyphCache::new(256, 256);
        let bitmap = vec![255u8; 10 * 12]; // 10x12 glyph
        cache.insert('A', &bitmap, 10, 12, 11.0, 0.5, 10.0);
        let info = cache.get('A');
        assert!(info.is_some());
        let info = info.unwrap();
        assert!((info.width - 10.0).abs() < 0.1);
        assert!((info.advance - 11.0).abs() < 0.1);
        assert!(info.uv[0] >= 0.0 && info.uv[2] <= 1.0);
    }
    // get() must not mark the atlas dirty — only insert() does.
    #[test]
    fn test_cache_hit() {
        let mut cache = GlyphCache::new(256, 256);
        let bitmap = vec![255u8; 8 * 8];
        cache.insert('B', &bitmap, 8, 8, 9.0, 0.0, 8.0);
        cache.clear_dirty();
        // Second access should be cache hit (no dirty)
        let _info = cache.get('B').unwrap();
        assert!(!cache.dirty);
    }
    #[test]
    fn test_row_wrap() {
        let mut cache = GlyphCache::new(64, 64);
        let bitmap = vec![255u8; 20 * 10];
        // Insert 4 glyphs of width 20 in a 64-wide atlas
        // 3 fit in first row (20+1 + 20+1 + 20 = 62), 4th wraps
        cache.insert('A', &bitmap, 20, 10, 21.0, 0.0, 10.0);
        cache.insert('B', &bitmap, 20, 10, 21.0, 0.0, 10.0);
        cache.insert('C', &bitmap, 20, 10, 21.0, 0.0, 10.0);
        cache.insert('D', &bitmap, 20, 10, 21.0, 0.0, 10.0);
        let d = cache.get('D').unwrap();
        // D should be on a different row (v0 > 0)
        assert!(d.uv[1] > 0.0, "D should be on second row, v0={}", d.uv[1]);
    }
    // UV rect must stay inside [0,1] with u1 > u0 and v1 > v0.
    #[test]
    fn test_uv_range() {
        let mut cache = GlyphCache::new(256, 256);
        let bitmap = vec![128u8; 15 * 20];
        cache.insert('X', &bitmap, 15, 20, 16.0, 1.0, 18.0);
        let info = cache.get('X').unwrap();
        assert!(info.uv[0] >= 0.0 && info.uv[0] < 1.0);
        assert!(info.uv[1] >= 0.0 && info.uv[1] < 1.0);
        assert!(info.uv[2] > info.uv[0] && info.uv[2] <= 1.0);
        assert!(info.uv[3] > info.uv[1] && info.uv[3] <= 1.0);
    }
    // Space-like glyphs keep their advance but occupy no atlas area.
    #[test]
    fn test_zero_size_glyph() {
        let mut cache = GlyphCache::new(256, 256);
        cache.insert(' ', &[], 0, 0, 5.0, 0.0, 0.0);
        let info = cache.get(' ').unwrap();
        assert!((info.advance - 5.0).abs() < 0.1);
        assert!((info.width - 0.0).abs() < 0.1);
    }
}

View File

@@ -0,0 +1,307 @@
use crate::ui_context::UiContext;
use crate::dock::Rect;
use crate::layout::LayoutState;
use voltex_ecs::world::World;
use voltex_ecs::entity::Entity;
use voltex_ecs::scene::Tag;
use voltex_ecs::hierarchy::{roots, Children, Parent};
use voltex_ecs::transform::Transform;
// Background color of the selected hierarchy row.
const COLOR_SELECTED: [u8; 4] = [0x44, 0x66, 0x88, 0xFF];
// Text color for panel labels.
const COLOR_TEXT: [u8; 4] = [0xEE, 0xEE, 0xEE, 0xFF];
// Height of one hierarchy row, in pixels.
const LINE_HEIGHT: f32 = 16.0;
// Horizontal indent per hierarchy depth level, in pixels.
const INDENT: f32 = 16.0;
// Inner padding between a panel rect edge and its content, in pixels.
const PADDING: f32 = 4.0;
/// Count total nodes in the subtree rooted at `entity`, including `entity`
/// itself. Entities without a `Children` component count as leaves.
pub fn count_entity_nodes(world: &World, entity: Entity) -> usize {
    let descendants: usize = world
        .get::<Children>(entity)
        .map(|children| {
            children
                .0
                .iter()
                .map(|&child| count_entity_nodes(world, child))
                .sum()
        })
        .unwrap_or(0);
    1 + descendants
}
/// Draw a single entity row, then recurse into children.
///
/// * `depth` controls label indentation; `base_x`/`row_w` define the
///   full-width band used for the selection highlight and click hit-test.
/// * Clicking anywhere in the row sets `selected` to this entity.
fn draw_entity_node(
    ui: &mut UiContext,
    world: &World,
    entity: Entity,
    depth: usize,
    selected: &mut Option<Entity>,
    base_x: f32,
    row_w: f32,
) {
    let y = ui.layout.cursor_y;
    let x = base_x + PADDING + depth as f32 * INDENT;
    // Highlight selected
    if *selected == Some(entity) {
        ui.draw_list.add_rect(base_x, y, row_w, LINE_HEIGHT, COLOR_SELECTED);
    }
    // Click detection
    if ui.mouse_clicked && ui.mouse_in_rect(base_x, y, row_w, LINE_HEIGHT) {
        *selected = Some(entity);
    }
    // Build label: "> " marks nodes that have children; fall back to the
    // raw entity id when no Tag component is present.
    let has_children = world.get::<Children>(entity).map_or(false, |c| !c.0.is_empty());
    let prefix = if has_children { "> " } else { " " };
    let name = if let Some(tag) = world.get::<Tag>(entity) {
        format!("{}{}", prefix, tag.0)
    } else {
        format!("{}Entity({})", prefix, entity.id)
    };
    // Draw text (inline glyph rendering), vertically centered in the row.
    let gw = ui.font.glyph_width as f32;
    let gh = ui.font.glyph_height as f32;
    let text_y = y + (LINE_HEIGHT - gh) * 0.5;
    let mut cx = x;
    for ch in name.chars() {
        let (u0, v0, u1, v1) = ui.font.glyph_uv(ch);
        ui.draw_list.add_rect_uv(cx, text_y, gw, gh, u0, v0, u1, v1, COLOR_TEXT);
        cx += gw;
    }
    ui.layout.cursor_y += LINE_HEIGHT;
    // Recurse children. The child list is cloned so the component borrow
    // does not have to live across the recursive calls.
    if let Some(children) = world.get::<Children>(entity) {
        let child_list: Vec<Entity> = children.0.clone();
        for child in child_list {
            draw_entity_node(ui, world, child, depth + 1, selected, base_x, row_w);
        }
    }
}
/// Hierarchy panel: displays the entity tree and handles row selection.
pub fn hierarchy_panel(
    ui: &mut UiContext,
    world: &World,
    selected: &mut Option<Entity>,
    rect: &Rect,
) {
    // Reset the layout cursor to the panel's top-left, inside the padding.
    ui.layout = LayoutState::new(rect.x + PADDING, rect.y + PADDING);
    // Draw each root; descendants are handled recursively by the node drawer.
    let root_entities = roots(world);
    for entity in root_entities {
        draw_entity_node(ui, world, entity, 0, selected, rect.x, rect.w);
    }
}
/// Inspector panel: edit Transform, Tag, Parent for selected entity.
/// `tag_buffer` is caller-owned staging buffer for Tag text input.
///
/// Borrow discipline: component values are copied out of `world` before the
/// UI widgets run, then written back in a separate mutable borrow, so no
/// `world` borrow is held across `ui` calls.
pub fn inspector_panel(
    ui: &mut UiContext,
    world: &mut World,
    selected: Option<Entity>,
    rect: &Rect,
    tag_buffer: &mut String,
) {
    ui.layout = LayoutState::new(rect.x + PADDING, rect.y + PADDING);
    let entity = match selected {
        Some(e) => e,
        None => {
            ui.text("No entity selected");
            return;
        }
    };
    // --- Transform ---
    if world.has_component::<Transform>(entity) {
        ui.text("-- Transform --");
        // Copy out values (immutable borrow ends with block)
        let (px, py, pz, rx, ry, rz, sx, sy, sz) = {
            let t = world.get::<Transform>(entity).unwrap();
            (t.position.x, t.position.y, t.position.z,
            t.rotation.x, t.rotation.y, t.rotation.z,
            t.scale.x, t.scale.y, t.scale.z)
        };
        // Sliders (no world borrow active). Rotation range is slightly wider
        // than +/-PI; scale floor avoids degenerate zero scale.
        let new_px = ui.slider("Pos X", px, -50.0, 50.0);
        let new_py = ui.slider("Pos Y", py, -50.0, 50.0);
        let new_pz = ui.slider("Pos Z", pz, -50.0, 50.0);
        let new_rx = ui.slider("Rot X", rx, -3.15, 3.15);
        let new_ry = ui.slider("Rot Y", ry, -3.15, 3.15);
        let new_rz = ui.slider("Rot Z", rz, -3.15, 3.15);
        let new_sx = ui.slider("Scl X", sx, 0.01, 10.0);
        let new_sy = ui.slider("Scl Y", sy, 0.01, 10.0);
        let new_sz = ui.slider("Scl Z", sz, 0.01, 10.0);
        // Write back (mutable borrow)
        if let Some(t) = world.get_mut::<Transform>(entity) {
            t.position.x = new_px;
            t.position.y = new_py;
            t.position.z = new_pz;
            t.rotation.x = new_rx;
            t.rotation.y = new_ry;
            t.rotation.z = new_rz;
            t.scale.x = new_sx;
            t.scale.y = new_sy;
            t.scale.z = new_sz;
        }
    }
    // --- Tag ---
    if world.has_component::<Tag>(entity) {
        ui.text("-- Tag --");
        // Sync buffer from world.
        // NOTE(review): this resyncs the buffer whenever it differs from the
        // stored tag; if text_input only commits on Enter/defocus, in-progress
        // edits would be clobbered every frame — confirm text_input semantics.
        if let Some(tag) = world.get::<Tag>(entity) {
            if tag_buffer.is_empty() || *tag_buffer != tag.0 {
                *tag_buffer = tag.0.clone();
            }
        }
        let input_x = rect.x + PADDING;
        let input_y = ui.layout.cursor_y;
        let input_w = (rect.w - PADDING * 2.0).max(50.0);
        // 8888 is the widget id for the tag input; on change, commit to Tag.
        if ui.text_input(8888, tag_buffer, input_x, input_y, input_w) {
            if let Some(tag) = world.get_mut::<Tag>(entity) {
                tag.0 = tag_buffer.clone();
            }
        }
        ui.layout.advance_line();
    }
    // --- Parent ---
    if let Some(parent) = world.get::<Parent>(entity) {
        ui.text("-- Parent --");
        let parent_text = format!("Parent: Entity({})", parent.0.id);
        ui.text(&parent_text);
    }
}
// Unit tests for the hierarchy and inspector panels, driven through a
// headless UiContext frame.
#[cfg(test)]
mod tests {
    use super::*;
    use voltex_ecs::transform::Transform;
    use voltex_math::Vec3;
    // Builds a 3-entity world: Root with two children.
    fn make_test_world() -> (World, Entity, Entity, Entity) {
        let mut world = World::new();
        let e1 = world.spawn();
        world.add(e1, Transform::from_position(Vec3::new(0.0, 0.0, 0.0)));
        world.add(e1, Tag("Root".to_string()));
        let e2 = world.spawn();
        world.add(e2, Transform::from_position(Vec3::new(1.0, 0.0, 0.0)));
        world.add(e2, Tag("Child1".to_string()));
        let e3 = world.spawn();
        world.add(e3, Transform::from_position(Vec3::new(2.0, 0.0, 0.0)));
        world.add(e3, Tag("Child2".to_string()));
        voltex_ecs::hierarchy::add_child(&mut world, e1, e2);
        voltex_ecs::hierarchy::add_child(&mut world, e1, e3);
        (world, e1, e2, e3)
    }
    #[test]
    fn test_count_nodes() {
        let (world, e1, _, _) = make_test_world();
        assert_eq!(count_entity_nodes(&world, e1), 3);
    }
    #[test]
    fn test_hierarchy_draws_commands() {
        let (world, _, _, _) = make_test_world();
        let mut ui = UiContext::new(800.0, 600.0);
        let mut selected: Option<Entity> = None;
        let rect = Rect { x: 0.0, y: 0.0, w: 200.0, h: 400.0 };
        ui.begin_frame(0.0, 0.0, false);
        hierarchy_panel(&mut ui, &world, &mut selected, &rect);
        ui.end_frame();
        assert!(ui.draw_list.commands.len() > 0);
    }
    // A click landing inside the first row must select the root entity.
    #[test]
    fn test_hierarchy_click_selects() {
        let (world, e1, _, _) = make_test_world();
        let mut ui = UiContext::new(800.0, 600.0);
        let mut selected: Option<Entity> = None;
        let rect = Rect { x: 0.0, y: 0.0, w: 200.0, h: 400.0 };
        // Click on first row (y = PADDING + small offset)
        ui.begin_frame(50.0, PADDING + 2.0, true);
        hierarchy_panel(&mut ui, &world, &mut selected, &rect);
        ui.end_frame();
        assert_eq!(selected, Some(e1));
    }
    #[test]
    fn test_hierarchy_empty_world() {
        let world = World::new();
        let mut ui = UiContext::new(800.0, 600.0);
        let mut selected: Option<Entity> = None;
        let rect = Rect { x: 0.0, y: 0.0, w: 200.0, h: 400.0 };
        ui.begin_frame(0.0, 0.0, false);
        hierarchy_panel(&mut ui, &world, &mut selected, &rect);
        ui.end_frame();
        assert!(selected.is_none());
    }
    #[test]
    fn test_inspector_no_selection() {
        let mut world = World::new();
        let mut ui = UiContext::new(800.0, 600.0);
        let rect = Rect { x: 0.0, y: 0.0, w: 300.0, h: 400.0 };
        ui.begin_frame(0.0, 0.0, false);
        let mut tag_buf = String::new();
        inspector_panel(&mut ui, &mut world, None, &rect, &mut tag_buf);
        ui.end_frame();
        // "No entity selected" text produces draw commands
        assert!(ui.draw_list.commands.len() > 0);
    }
    #[test]
    fn test_inspector_with_transform() {
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Transform::from_position(Vec3::new(1.0, 2.0, 3.0)));
        let mut ui = UiContext::new(800.0, 600.0);
        let rect = Rect { x: 0.0, y: 0.0, w: 300.0, h: 400.0 };
        ui.begin_frame(0.0, 0.0, false);
        let mut tag_buf = String::new();
        inspector_panel(&mut ui, &mut world, Some(e), &rect, &mut tag_buf);
        ui.end_frame();
        // Header + 9 sliders produce many draw commands
        assert!(ui.draw_list.commands.len() > 10);
    }
    #[test]
    fn test_inspector_with_tag() {
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Transform::new());
        world.add(e, Tag("TestTag".to_string()));
        let mut ui = UiContext::new(800.0, 600.0);
        let rect = Rect { x: 0.0, y: 0.0, w: 300.0, h: 400.0 };
        ui.begin_frame(0.0, 0.0, false);
        let mut tag_buf = String::new();
        inspector_panel(&mut ui, &mut world, Some(e), &rect, &mut tag_buf);
        ui.end_frame();
        // tag_buf should be synced from world
        assert_eq!(tag_buf, "TestTag");
    }
}

View File

@@ -0,0 +1,32 @@
/// Cursor-driven layout bookkeeping for the immediate-mode UI.
///
/// Widgets read `cursor_x`/`cursor_y` for their position and call the
/// `advance_*` methods to move the cursor past themselves.
pub struct LayoutState {
    pub cursor_x: f32,
    pub cursor_y: f32,
    /// Column that `advance_line` resets `cursor_x` back to.
    pub indent: f32,
    pub line_height: f32,
    pub padding: f32,
}
impl LayoutState {
    /// Start a new layout at (x, y); `x` also becomes the indent column.
    pub fn new(x: f32, y: f32) -> Self {
        Self {
            cursor_x: x,
            cursor_y: y,
            indent: x,
            line_height: 12.0,
            padding: 4.0,
        }
    }
    /// Move to the start of the next line (down by line height + padding,
    /// back to the indent column).
    pub fn advance_line(&mut self) {
        let step = self.line_height + self.padding;
        self.cursor_y += step;
        self.cursor_x = self.indent;
    }
    /// Move right past a widget of the given width, plus padding.
    pub fn advance_x(&mut self, width: f32) {
        self.cursor_x += width + self.padding;
    }
}

View File

@@ -0,0 +1,33 @@
pub mod font;
pub mod draw_list;
pub mod layout;
pub mod renderer;
pub mod ui_context;
pub mod widgets;
pub mod dock;
pub mod inspector;
pub mod orbit_camera;
pub use font::FontAtlas;
pub use draw_list::{DrawVertex, DrawCommand, DrawList};
pub use layout::LayoutState;
pub use renderer::UiRenderer;
pub use ui_context::UiContext;
pub use dock::{DockTree, DockNode, Axis, Rect, LeafLayout};
pub use inspector::{hierarchy_panel, inspector_panel};
pub use orbit_camera::OrbitCamera;
pub mod viewport_texture;
pub use viewport_texture::{ViewportTexture, VIEWPORT_COLOR_FORMAT, VIEWPORT_DEPTH_FORMAT};
pub mod viewport_renderer;
pub use viewport_renderer::ViewportRenderer;
pub mod asset_browser;
pub use asset_browser::{AssetBrowser, asset_browser_panel};
pub mod ttf_parser;
pub mod rasterizer;
pub mod glyph_cache;
pub mod ttf_font;
pub use ttf_font::TtfFont;

View File

@@ -0,0 +1,166 @@
use voltex_math::{Vec3, Mat4};
use std::f32::consts::PI;
// Pitch is clamped just short of +/-90 degrees so the view direction never
// becomes parallel to the world up vector used by look_at.
const PITCH_LIMIT: f32 = PI / 2.0 - 0.01;
// Zoom distance bounds (world units).
const MIN_DISTANCE: f32 = 0.5;
const MAX_DISTANCE: f32 = 50.0;
// Radians of orbit per pixel of mouse drag.
const ORBIT_SENSITIVITY: f32 = 0.005;
// Fractional distance change per unit of zoom delta.
const ZOOM_FACTOR: f32 = 0.1;
// Pan speed per pixel, additionally scaled by the current distance.
const PAN_SENSITIVITY: f32 = 0.01;
/// Orbit camera: spherical coordinates (yaw/pitch/distance) around a target.
pub struct OrbitCamera {
    /// Point the camera orbits around and looks at.
    pub target: Vec3,
    /// Eye distance from target; clamped to [MIN_DISTANCE, MAX_DISTANCE] by zoom().
    pub distance: f32,
    /// Rotation around the world Y axis, in radians.
    pub yaw: f32,
    /// Elevation angle in radians; clamped to +/-PITCH_LIMIT by orbit().
    pub pitch: f32,
    /// Vertical field of view, in radians.
    pub fov_y: f32,
    /// Near clip plane distance.
    pub near: f32,
    /// Far clip plane distance.
    pub far: f32,
}
impl OrbitCamera {
    /// Create a camera 5 units from the origin with a slight downward tilt.
    pub fn new() -> Self {
        OrbitCamera {
            target: Vec3::ZERO,
            distance: 5.0,
            yaw: 0.0,
            pitch: 0.3,
            fov_y: PI / 4.0,
            near: 0.1,
            far: 100.0,
        }
    }
    /// Eye position derived from the target plus spherical offsets
    /// (yaw around Y, pitch above the XZ plane, radius = distance).
    pub fn position(&self) -> Vec3 {
        let cp = self.pitch.cos();
        let sp = self.pitch.sin();
        let cy = self.yaw.cos();
        let sy = self.yaw.sin();
        Vec3::new(
            self.target.x + self.distance * cp * sy,
            self.target.y + self.distance * sp,
            self.target.z + self.distance * cp * cy,
        )
    }
    /// Rotate around the target by a mouse drag delta (pixels).
    /// Pitch is clamped to avoid flipping over the poles.
    pub fn orbit(&mut self, dx: f32, dy: f32) {
        self.yaw += dx * ORBIT_SENSITIVITY;
        self.pitch += dy * ORBIT_SENSITIVITY;
        self.pitch = self.pitch.clamp(-PITCH_LIMIT, PITCH_LIMIT);
    }
    /// Move the eye toward (positive delta) or away from the target,
    /// multiplicatively, clamped to the distance bounds.
    pub fn zoom(&mut self, delta: f32) {
        self.distance *= 1.0 - delta * ZOOM_FACTOR;
        self.distance = self.distance.clamp(MIN_DISTANCE, MAX_DISTANCE);
    }
    /// Slide the target in the camera's screen plane; speed scales with
    /// distance so panning feels constant at any zoom level.
    pub fn pan(&mut self, dx: f32, dy: f32) {
        let forward = (self.target - self.position()).normalize();
        let right = forward.cross(Vec3::Y);
        // Near the poles `forward` is almost parallel to Y and the cross
        // product degenerates; fall back to world X.
        let right = if right.length() < 1e-4 { Vec3::X } else { right.normalize() };
        let up = right.cross(forward).normalize();
        let offset_x = right * (-dx * PAN_SENSITIVITY * self.distance);
        let offset_y = up * (dy * PAN_SENSITIVITY * self.distance);
        self.target = self.target + offset_x + offset_y;
    }
    /// World-to-view matrix looking from the eye position at the target.
    pub fn view_matrix(&self) -> Mat4 {
        Mat4::look_at(self.position(), self.target, Vec3::Y)
    }
    /// Perspective projection for the given aspect ratio (width / height).
    pub fn projection_matrix(&self, aspect: f32) -> Mat4 {
        Mat4::perspective(self.fov_y, aspect, self.near, self.far)
    }
    /// Combined projection * view matrix.
    pub fn view_projection(&self, aspect: f32) -> Mat4 {
        self.projection_matrix(aspect).mul_mat4(&self.view_matrix())
    }
}
/// `Default` mirrors `new()` so the camera composes with
/// `..Default::default()` struct updates and derived defaults
/// (clippy: new_without_default).
impl Default for OrbitCamera {
    fn default() -> Self {
        Self::new()
    }
}
// Unit tests for OrbitCamera: spherical positioning, input clamping, and
// matrix composition.
#[cfg(test)]
mod tests {
    use super::*;
    // Default yaw=0 puts the eye on the +Z side, pitch=0.3 lifts it above target.
    #[test]
    fn test_default_position() {
        let cam = OrbitCamera::new();
        let pos = cam.position();
        assert!((pos.x).abs() < 1e-3);
        assert!(pos.y > 0.0);
        assert!(pos.z > 0.0);
    }
    #[test]
    fn test_orbit_changes_yaw_pitch() {
        let mut cam = OrbitCamera::new();
        let old_yaw = cam.yaw;
        let old_pitch = cam.pitch;
        cam.orbit(100.0, 50.0);
        assert!((cam.yaw - old_yaw - 100.0 * ORBIT_SENSITIVITY).abs() < 1e-6);
        assert!((cam.pitch - old_pitch - 50.0 * ORBIT_SENSITIVITY).abs() < 1e-6);
    }
    // Extreme drags must not push pitch past the pole limits.
    #[test]
    fn test_pitch_clamped() {
        let mut cam = OrbitCamera::new();
        cam.orbit(0.0, 100000.0);
        assert!(cam.pitch <= PITCH_LIMIT);
        cam.orbit(0.0, -200000.0);
        assert!(cam.pitch >= -PITCH_LIMIT);
    }
    #[test]
    fn test_zoom_changes_distance() {
        let mut cam = OrbitCamera::new();
        let d0 = cam.distance;
        cam.zoom(1.0);
        assert!(cam.distance < d0);
    }
    #[test]
    fn test_zoom_clamped() {
        let mut cam = OrbitCamera::new();
        cam.zoom(1000.0);
        assert!(cam.distance >= MIN_DISTANCE);
        cam.zoom(-10000.0);
        assert!(cam.distance <= MAX_DISTANCE);
    }
    // Panning shifts the target in the camera's screen plane.
    #[test]
    fn test_pan_moves_target() {
        let mut cam = OrbitCamera::new();
        let t0 = cam.target;
        cam.pan(10.0, 0.0);
        assert!((cam.target.x - t0.x).abs() > 1e-4 || (cam.target.z - t0.z).abs() > 1e-4);
    }
    #[test]
    fn test_view_matrix_not_zero() {
        let cam = OrbitCamera::new();
        let v = cam.view_matrix();
        let sum: f32 = v.cols.iter().flat_map(|c| c.iter()).map(|x| x.abs()).sum();
        assert!(sum > 1.0);
    }
    #[test]
    fn test_projection_matrix() {
        let cam = OrbitCamera::new();
        let p = cam.projection_matrix(16.0 / 9.0);
        assert!(p.cols[0][0] > 0.0);
    }
    // view_projection must equal projection * view, element-wise.
    #[test]
    fn test_view_projection() {
        let cam = OrbitCamera::new();
        let vp = cam.view_projection(1.0);
        let v = cam.view_matrix();
        let p = cam.projection_matrix(1.0);
        let expected = p.mul_mat4(&v);
        for i in 0..4 {
            for j in 0..4 {
                assert!((vp.cols[i][j] - expected.cols[i][j]).abs() < 1e-4,
                "mismatch at [{i}][{j}]: {} vs {}", vp.cols[i][j], expected.cols[i][j]);
            }
        }
    }
}

View File

@@ -0,0 +1,220 @@
use crate::ttf_parser::GlyphOutline;
/// Result of rasterizing a single glyph.
pub struct RasterResult {
    /// Bitmap width in pixels (0 for empty outlines such as space).
    pub width: u32,
    /// Bitmap height in pixels.
    pub height: u32,
    /// R8 alpha bitmap, row-major.
    pub bitmap: Vec<u8>,
    /// Left bearing in pixels.
    pub offset_x: f32,
    /// Distance from baseline to top of glyph bbox in pixels.
    pub offset_y: f32,
}
/// Recursively flatten a quadratic bezier into line segments.
///
/// Segments are appended to `edges` as (x0, y0, x1, y1). Subdivision stops
/// when the curve midpoint deviates from the chord midpoint by less than
/// 0.5 px. A recursion-depth cap bounds the work for pathological input
/// (e.g. NaN coordinates, for which the flatness test can never succeed
/// and the original unbounded recursion would overflow the stack).
pub fn flatten_quad(
    x0: f32, y0: f32,
    cx: f32, cy: f32,
    x1: f32, y1: f32,
    edges: &mut Vec<(f32, f32, f32, f32)>,
) {
    flatten_quad_rec(x0, y0, cx, cy, x1, y1, edges, 0);
}

/// Worker for `flatten_quad`; `depth` counts subdivision levels. The cap of
/// 16 allows up to 65536 segments per curve — far beyond any realistic
/// glyph curvature — while guaranteeing termination.
fn flatten_quad_rec(
    x0: f32, y0: f32,
    cx: f32, cy: f32,
    x1: f32, y1: f32,
    edges: &mut Vec<(f32, f32, f32, f32)>,
    depth: u32,
) {
    // Flatness test: squared distance between the curve point at t=0.5 and
    // the chord midpoint; below (0.5 px)^2 the chord approximates the curve.
    let mx = (x0 + 2.0 * cx + x1) / 4.0;
    let my = (y0 + 2.0 * cy + y1) / 4.0;
    let lx = (x0 + x1) / 2.0;
    let ly = (y0 + y1) / 2.0;
    let dx = mx - lx;
    let dy = my - ly;
    if dx * dx + dy * dy < 0.25 || depth >= 16 {
        edges.push((x0, y0, x1, y1));
    } else {
        // De Casteljau split at t = 0.5.
        let m01x = (x0 + cx) / 2.0;
        let m01y = (y0 + cy) / 2.0;
        let m12x = (cx + x1) / 2.0;
        let m12y = (cy + y1) / 2.0;
        let midx = (m01x + m12x) / 2.0;
        let midy = (m01y + m12y) / 2.0;
        flatten_quad_rec(x0, y0, m01x, m01y, midx, midy, edges, depth + 1);
        flatten_quad_rec(midx, midy, m12x, m12y, x1, y1, edges, depth + 1);
    }
}
/// Rasterize a glyph outline at the given scale into an alpha bitmap.
///
/// The outline is flattened into line segments, then filled with a
/// per-row scanline sweep using the non-zero winding rule. The output is
/// a hard coverage mask (255 inside, 0 outside — no antialiasing) with a
/// one-pixel padding border on every side.
///
/// Contours are expected to already contain implicit on-curve midpoints
/// between consecutive off-curve points (the TTF parser inserts them), so
/// every off-curve point here is a quadratic control point.
pub fn rasterize(outline: &GlyphOutline, scale: f32) -> RasterResult {
    // Handle empty outline (e.g., space)
    if outline.contours.is_empty() || outline.x_max <= outline.x_min {
        return RasterResult {
            width: 0,
            height: 0,
            bitmap: vec![],
            offset_x: 0.0,
            offset_y: 0.0,
        };
    }
    let x_min = outline.x_min as f32;
    let y_min = outline.y_min as f32;
    let x_max = outline.x_max as f32;
    let y_max = outline.y_max as f32;
    // +2: one pixel of padding on each side so boundary coverage isn't clipped.
    let w = ((x_max - x_min) * scale).ceil() as u32 + 2;
    let h = ((y_max - y_min) * scale).ceil() as u32 + 2;
    // Transform a font-space coordinate to bitmap pixel space.
    // Font space is Y-up; the bitmap is Y-down, hence the (y_max - py) flip.
    // +1.0 accounts for the padding border.
    let transform = |px: f32, py: f32| -> (f32, f32) {
        let bx = (px - x_min) * scale + 1.0;
        let by = (y_max - py) * scale + 1.0;
        (bx, by)
    };
    // Build edges from contours
    let mut edges: Vec<(f32, f32, f32, f32)> = Vec::new();
    for contour in &outline.contours {
        let n = contour.len();
        if n < 2 {
            continue;
        }
        let mut i = 0;
        while i < n {
            // Indices wrap so the contour closes back onto its first point.
            let p0 = &contour[i];
            let p1 = &contour[(i + 1) % n];
            if p0.on_curve && p1.on_curve {
                // Line segment
                let (px0, py0) = transform(p0.x, p0.y);
                let (px1, py1) = transform(p1.x, p1.y);
                edges.push((px0, py0, px1, py1));
                i += 1;
            } else if p0.on_curve && !p1.on_curve {
                // Quadratic bezier: p0 -> p1(control) -> p2(on_curve)
                let p2 = &contour[(i + 2) % n];
                let (px0, py0) = transform(p0.x, p0.y);
                let (cx, cy) = transform(p1.x, p1.y);
                let (px1, py1) = transform(p2.x, p2.y);
                flatten_quad(px0, py0, cx, cy, px1, py1, &mut edges);
                // Skip the control point; the next segment starts at p2.
                i += 2;
            } else {
                // Skip unexpected off-curve start
                i += 1;
            }
        }
    }
    // Scanline fill (non-zero winding)
    let mut bitmap = vec![0u8; (w * h) as usize];
    for row in 0..h {
        // Sample each row at its vertical center.
        let scan_y = row as f32 + 0.5;
        let mut intersections: Vec<(f32, i32)> = Vec::new();
        for &(x0, y0, x1, y1) in &edges {
            // Half-open test [min_y, max_y) so a shared vertex between two
            // edges is counted exactly once.
            if (y0 <= scan_y && y1 > scan_y) || (y1 <= scan_y && y0 > scan_y) {
                let t = (scan_y - y0) / (y1 - y0);
                let ix = x0 + t * (x1 - x0);
                // Winding direction: +1 for downward edges, -1 for upward.
                let dir = if y1 > y0 { 1 } else { -1 };
                intersections.push((ix, dir));
            }
        }
        intersections.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
        // Fill using non-zero winding rule
        let mut winding = 0i32;
        let mut fill_start = 0.0f32;
        for &(x, dir) in &intersections {
            let old_winding = winding;
            winding += dir;
            // Span starts where the winding number leaves zero...
            if old_winding == 0 && winding != 0 {
                fill_start = x;
            }
            // ...and ends where it returns to zero.
            if old_winding != 0 && winding == 0 {
                // Note: a negative x saturates to 0 in the `as u32` cast.
                let px_start = (fill_start.floor() as i32).max(0) as u32;
                let px_end = (x.ceil() as u32).min(w);
                for px in px_start..px_end {
                    bitmap[(row * w + px) as usize] = 255;
                }
            }
        }
    }
    // Placement offsets in pixel units at the same scale.
    let offset_x = x_min * scale;
    let offset_y = y_max * scale;
    RasterResult {
        width: w,
        height: h,
        bitmap,
        offset_x,
        offset_y,
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::ttf_parser::TtfParser;

    /// Probe common system font locations (Windows, Linux, macOS) and return
    /// the first font that parses. Previously only Windows paths were listed,
    /// so every test panicked on other platforms; tests now skip gracefully
    /// when no system font is available.
    fn load_test_font() -> Option<TtfParser> {
        let paths = [
            "C:/Windows/Fonts/arial.ttf",
            "C:/Windows/Fonts/consola.ttf",
            "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
            "/System/Library/Fonts/Supplemental/Arial.ttf",
        ];
        for p in &paths {
            if let Ok(data) = std::fs::read(p) {
                if let Ok(parser) = TtfParser::parse(data) {
                    return Some(parser);
                }
            }
        }
        None
    }

    /// Fetch a parser, or log and return None so the calling test skips.
    fn font_or_skip() -> Option<TtfParser> {
        let font = load_test_font();
        if font.is_none() {
            eprintln!("skipping test: no system TTF font found");
        }
        font
    }

    #[test]
    fn test_rasterize_produces_bitmap() {
        let parser = match font_or_skip() { Some(p) => p, None => return };
        let gid = parser.glyph_index(0x41); // 'A'
        let outline = parser.glyph_outline(gid).expect("no outline");
        let scale = 32.0 / parser.units_per_em as f32;
        let result = rasterize(&outline, scale);
        assert!(result.width > 0);
        assert!(result.height > 0);
        assert!(!result.bitmap.is_empty());
    }

    #[test]
    fn test_rasterize_has_filled_pixels() {
        let parser = match font_or_skip() { Some(p) => p, None => return };
        let gid = parser.glyph_index(0x41);
        let outline = parser.glyph_outline(gid).expect("no outline");
        let scale = 32.0 / parser.units_per_em as f32;
        let result = rasterize(&outline, scale);
        let filled = result.bitmap.iter().filter(|&&b| b > 0).count();
        assert!(filled > 0, "bitmap should have filled pixels");
    }

    #[test]
    fn test_rasterize_empty() {
        let parser = match font_or_skip() { Some(p) => p, None => return };
        let gid = parser.glyph_index(0x20); // space
        let outline = parser.glyph_outline(gid);
        if let Some(o) = outline {
            let scale = 32.0 / parser.units_per_em as f32;
            let result = rasterize(&o, scale);
            // Space should be empty or zero-sized
            assert!(
                result.width == 0 || result.bitmap.iter().all(|&b| b == 0)
            );
        }
    }

    #[test]
    fn test_flatten_quad_produces_edges() {
        // Pure-math test: no font needed.
        let mut edges = Vec::new();
        flatten_quad(0.0, 0.0, 5.0, 10.0, 10.0, 0.0, &mut edges);
        assert!(
            edges.len() >= 2,
            "bezier should flatten to multiple segments"
        );
    }
}

View File

@@ -0,0 +1,344 @@
use wgpu::util::DeviceExt;
use crate::draw_list::{DrawList, DrawVertex};
use crate::font::FontAtlas;
/// Vertex buffer layout for DrawVertex: position(Float32x2), uv(Float32x2), color(Unorm8x4).
///
/// Offsets are hard-coded (0/8/16) and must stay in sync with the field
/// layout of `DrawVertex`; the stride is taken from `size_of::<DrawVertex>()`.
const VERTEX_LAYOUT: wgpu::VertexBufferLayout<'static> = wgpu::VertexBufferLayout {
    array_stride: std::mem::size_of::<DrawVertex>() as wgpu::BufferAddress,
    step_mode: wgpu::VertexStepMode::Vertex,
    attributes: &[
        // position: vec2<f32> at byte offset 0
        wgpu::VertexAttribute {
            offset: 0,
            shader_location: 0,
            format: wgpu::VertexFormat::Float32x2,
        },
        // uv: vec2<f32> at byte offset 8
        wgpu::VertexAttribute {
            offset: 8,
            shader_location: 1,
            format: wgpu::VertexFormat::Float32x2,
        },
        // color: vec4<f32> in the shader, stored as [u8; 4] and expanded
        // to 0.0..1.0 floats by the Unorm8x4 format.
        wgpu::VertexAttribute {
            offset: 16,
            shader_location: 2,
            format: wgpu::VertexFormat::Unorm8x4,
        },
    ],
};
/// GPU-side UI renderer that turns a DrawList into a rendered frame.
pub struct UiRenderer {
    // Alpha-blended pipeline drawing DrawVertex triangles (no depth).
    pipeline: wgpu::RenderPipeline,
    // Retained but currently unused; presumably kept for rebuilding the
    // bind group later (e.g. font atlas replacement) — TODO confirm.
    #[allow(dead_code)]
    bind_group_layout: wgpu::BindGroupLayout,
    // Projection uniform + font atlas texture + sampler.
    font_bind_group: wgpu::BindGroup,
    // Holds the column-major orthographic projection matrix.
    uniform_buffer: wgpu::Buffer,
    // CPU-side copy of the projection, rewritten on resize.
    projection: [f32; 16],
    // Last screen size used to build `projection`; used to detect resizes.
    last_screen_w: f32,
    last_screen_h: f32,
}
impl UiRenderer {
    /// Create a new UI renderer.
    ///
    /// Builds the WGSL shader module, the bind group layout (projection
    /// uniform + font texture + sampler), an alpha-blended render pipeline
    /// with no depth buffer, the font atlas GPU texture (R8Unorm), and the
    /// projection uniform buffer (initialized for an 800x600 screen; updated
    /// lazily by `render` when the real size differs).
    ///
    /// `font` is used to build the font atlas GPU texture. Pixel (0,0) must be 255
    /// so solid-color rects can sample UV (0,0) for white.
    pub fn new(
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        surface_format: wgpu::TextureFormat,
        font: &FontAtlas,
    ) -> Self {
        // --- Shader ---
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("UI Shader"),
            source: wgpu::ShaderSource::Wgsl(include_str!("ui_shader.wgsl").into()),
        });
        // --- Bind group layout: uniform + texture + sampler ---
        let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("UI Bind Group Layout"),
            entries: &[
                // @binding(0): uniform buffer (projection mat4x4)
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Uniform,
                        has_dynamic_offset: false,
                        min_binding_size: None,
                    },
                    count: None,
                },
                // @binding(1): font atlas texture
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Float { filterable: true },
                    },
                    count: None,
                },
                // @binding(2): sampler
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                    count: None,
                },
            ],
        });
        // --- Pipeline layout ---
        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("UI Pipeline Layout"),
            bind_group_layouts: &[&bind_group_layout],
            immediate_size: 0,
        });
        // --- Render pipeline: alpha blend, no depth ---
        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("UI Render Pipeline"),
            layout: Some(&pipeline_layout),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: Some("vs_main"),
                buffers: &[VERTEX_LAYOUT],
                compilation_options: Default::default(),
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: Some("fs_main"),
                targets: &[Some(wgpu::ColorTargetState {
                    format: surface_format,
                    // Standard "over" blending: color weighted by source
                    // alpha, destination alpha retained proportionally.
                    blend: Some(wgpu::BlendState {
                        color: wgpu::BlendComponent {
                            src_factor: wgpu::BlendFactor::SrcAlpha,
                            dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                            operation: wgpu::BlendOperation::Add,
                        },
                        alpha: wgpu::BlendComponent {
                            src_factor: wgpu::BlendFactor::One,
                            dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
                            operation: wgpu::BlendOperation::Add,
                        },
                    }),
                    write_mask: wgpu::ColorWrites::ALL,
                })],
                compilation_options: Default::default(),
            }),
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleList,
                strip_index_format: None,
                front_face: wgpu::FrontFace::Ccw,
                // No culling: DrawList quads may have either winding.
                cull_mode: None,
                unclipped_depth: false,
                polygon_mode: wgpu::PolygonMode::Fill,
                conservative: false,
            },
            depth_stencil: None,
            multisample: wgpu::MultisampleState::default(),
            multiview_mask: None,
            cache: None,
        });
        // --- Font atlas GPU texture (R8Unorm) ---
        let atlas_texture = device.create_texture_with_data(
            queue,
            &wgpu::TextureDescriptor {
                label: Some("UI Font Atlas"),
                size: wgpu::Extent3d {
                    width: font.width,
                    height: font.height,
                    depth_or_array_layers: 1,
                },
                mip_level_count: 1,
                sample_count: 1,
                dimension: wgpu::TextureDimension::D2,
                format: wgpu::TextureFormat::R8Unorm,
                usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
                view_formats: &[],
            },
            wgpu::util::TextureDataOrder::LayerMajor,
            &font.pixels,
        );
        let atlas_view = atlas_texture.create_view(&wgpu::TextureViewDescriptor::default());
        // Nearest filtering keeps bitmap glyph edges crisp.
        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("UI Sampler"),
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Nearest,
            min_filter: wgpu::FilterMode::Nearest,
            ..Default::default()
        });
        // --- Uniform buffer (projection) ---
        let projection = ortho_projection(800.0, 600.0);
        let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("UI Uniform Buffer"),
            contents: bytemuck::cast_slice(&projection),
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
        });
        // --- Bind group ---
        let font_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("UI Font Bind Group"),
            layout: &bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: uniform_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::TextureView(&atlas_view),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: wgpu::BindingResource::Sampler(&sampler),
                },
            ],
        });
        UiRenderer {
            pipeline,
            // Field-init shorthand (was `bind_group_layout: bind_group_layout`,
            // flagged by clippy::redundant_field_names).
            bind_group_layout,
            font_bind_group,
            uniform_buffer,
            projection,
            last_screen_w: 800.0,
            last_screen_h: 600.0,
        }
    }
    /// Render the draw list onto the given target view.
    ///
    /// Uses `LoadOp::Load` so the UI overlays whatever was previously rendered.
    /// For a standalone UI demo, clear the surface before calling this.
    ///
    /// The projection uniform is rewritten only when the screen size changes
    /// by more than half a pixel. Vertex/index buffers are rebuilt every
    /// frame from the DrawList.
    // NOTE(review): per-frame create_buffer_init allocates fresh GPU buffers
    // each call; a reusable staging buffer may be worthwhile — TODO profile.
    pub fn render(
        &mut self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        encoder: &mut wgpu::CommandEncoder,
        target_view: &wgpu::TextureView,
        draw_list: &DrawList,
        screen_w: f32,
        screen_h: f32,
    ) {
        // Nothing to draw.
        if draw_list.vertices.is_empty() || draw_list.indices.is_empty() {
            return;
        }
        // Update projection if screen size changed
        if (screen_w - self.last_screen_w).abs() > 0.5
            || (screen_h - self.last_screen_h).abs() > 0.5
        {
            self.projection = ortho_projection(screen_w, screen_h);
            queue.write_buffer(
                &self.uniform_buffer,
                0,
                bytemuck::cast_slice(&self.projection),
            );
            self.last_screen_w = screen_w;
            self.last_screen_h = screen_h;
        }
        // Create vertex and index buffers from DrawList
        let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("UI Vertex Buffer"),
            contents: bytemuck::cast_slice(&draw_list.vertices),
            usage: wgpu::BufferUsages::VERTEX,
        });
        let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("UI Index Buffer"),
            contents: bytemuck::cast_slice(&draw_list.indices),
            usage: wgpu::BufferUsages::INDEX,
        });
        // Begin render pass (Load to overlay on existing content); the scope
        // ends the pass before the encoder is used further.
        {
            let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("UI Render Pass"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view: target_view,
                    resolve_target: None,
                    depth_slice: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Load,
                        store: wgpu::StoreOp::Store,
                    },
                })],
                depth_stencil_attachment: None,
                occlusion_query_set: None,
                timestamp_writes: None,
                multiview_mask: None,
            });
            pass.set_pipeline(&self.pipeline);
            pass.set_bind_group(0, &self.font_bind_group, &[]);
            pass.set_vertex_buffer(0, vertex_buffer.slice(..));
            pass.set_index_buffer(index_buffer.slice(..), wgpu::IndexFormat::Uint16);
            // Draw each command (with optional scissor clipping)
            for cmd in &draw_list.commands {
                if let Some(scissor) = &cmd.scissor {
                    pass.set_scissor_rect(scissor.x, scissor.y, scissor.w, scissor.h);
                } else {
                    // Reset to full-screen scissor for unclipped commands.
                    pass.set_scissor_rect(0, 0, screen_w as u32, screen_h as u32);
                }
                pass.draw_indexed(
                    cmd.index_offset..cmd.index_offset + cmd.index_count,
                    0,
                    0..1,
                );
            }
        }
    }
}
/// Orthographic projection: left=0, right=w, top=0, bottom=h (Y down), near=-1, far=1.
/// Returns a column-major 4x4 matrix as [f32; 16] suitable for a WGSL mat4x4,
/// mapping z in [near, far] to [0, 1] (wgpu depth convention).
fn ortho_projection(w: f32, h: f32) -> [f32; 16] {
    let (left, right) = (0.0_f32, w);
    let (top, bottom) = (0.0_f32, h);
    let (near, far) = (-1.0_f32, 1.0_f32);
    // Start from zeros and fill only the non-zero entries of the
    // column-major matrix: diagonal scales plus the translation column.
    let mut m = [0.0_f32; 16];
    m[0] = 2.0 / (right - left);
    m[5] = 2.0 / (top - bottom);
    m[10] = 1.0 / (far - near);
    m[12] = -(right + left) / (right - left);
    m[13] = -(top + bottom) / (top - bottom);
    m[14] = -near / (far - near);
    m[15] = 1.0;
    m
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_ortho_projection_corners() {
        let proj = ortho_projection(800.0, 600.0);
        // Push a screen-space point (z = 0, w = 1) through the
        // column-major matrix and return the resulting NDC x/y.
        let project = |px: f32, py: f32| -> (f32, f32) {
            (
                proj[0] * px + proj[4] * py + proj[12],
                proj[1] * px + proj[5] * py + proj[13],
            )
        };
        // Top-left (0,0) should map to NDC (-1, 1).
        let (x, y) = project(0.0, 0.0);
        assert!((x - (-1.0)).abs() < 1e-5, "top-left x: {}", x);
        assert!((y - 1.0).abs() < 1e-5, "top-left y: {}", y);
        // Bottom-right (800, 600) should map to NDC (1, -1).
        let (x2, y2) = project(800.0, 600.0);
        assert!((x2 - 1.0).abs() < 1e-5, "bot-right x: {}", x2);
        assert!((y2 - (-1.0)).abs() < 1e-5, "bot-right y: {}", y2);
    }
}

View File

@@ -0,0 +1,153 @@
use crate::ttf_parser::TtfParser;
use crate::rasterizer::rasterize;
use crate::glyph_cache::{GlyphCache, GlyphInfo, ATLAS_SIZE};
/// A TTF font sized for rendering: parser plus a glyph-atlas cache,
/// with vertical metrics pre-scaled to pixels.
pub struct TtfFont {
    // Parsed TTF tables (cmap/glyf/hmtx lookups).
    parser: TtfParser,
    // Rasterized-glyph atlas; filled lazily by `glyph`.
    cache: GlyphCache,
    // Requested size in pixels.
    pub font_size: f32,
    // Baseline-to-baseline distance in pixels (ascender - descender + line gap).
    pub line_height: f32,
    // Pixels above the baseline.
    pub ascender: f32,
    // Pixels below the baseline (negative in font convention).
    pub descender: f32,
    // Font-unit -> pixel conversion factor (font_size / units_per_em).
    scale: f32,
}
impl TtfFont {
    /// Parse `data` as a TTF file and prepare an empty glyph cache for
    /// rendering at `font_size` pixels.
    ///
    /// Returns an error string if the TTF tables cannot be parsed.
    pub fn new(data: &[u8], font_size: f32) -> Result<Self, String> {
        let parser = TtfParser::parse(data.to_vec())?;
        // Font units -> pixels for this size.
        let scale = font_size / parser.units_per_em as f32;
        let ascender = parser.ascender as f32 * scale;
        let descender = parser.descender as f32 * scale;
        let line_height = (parser.ascender - parser.descender + parser.line_gap) as f32 * scale;
        let cache = GlyphCache::new(ATLAS_SIZE, ATLAS_SIZE);
        Ok(TtfFont {
            parser,
            cache,
            font_size,
            line_height,
            ascender,
            descender,
            scale,
        })
    }
    /// Get glyph info for a character, rasterizing on cache miss.
    pub fn glyph(&mut self, ch: char) -> &GlyphInfo {
        // Double `get` is deliberate: returning the reference from a single
        // `if let Some(g) = self.cache.get(ch)` would hold the borrow of
        // `self.cache` across the mutation below (NLL limitation).
        if self.cache.get(ch).is_some() {
            return self.cache.get(ch).unwrap();
        }
        let glyph_id = self.parser.glyph_index(ch as u32);
        let metrics = self.parser.glyph_metrics(glyph_id);
        // Horizontal metrics converted from font units to pixels.
        let advance = metrics.advance_width as f32 * self.scale;
        let bearing_x = metrics.left_side_bearing as f32 * self.scale;
        let outline = self.parser.glyph_outline(glyph_id);
        match outline {
            Some(ref o) if !o.contours.is_empty() => {
                let result = rasterize(o, self.scale);
                // bearing_y = y_max * scale (distance above baseline)
                // When rendering: glyph_y = baseline_y - bearing_y
                // For our UI: text_y is the top of the line, baseline = text_y + ascender
                // So: glyph_y = text_y + ascender - bearing_y
                let bearing_y = o.y_max as f32 * self.scale;
                self.cache.insert(
                    ch,
                    &result.bitmap,
                    result.width,
                    result.height,
                    advance,
                    bearing_x,
                    bearing_y,
                );
            }
            _ => {
                // No outline (space, etc.) — insert empty glyph
                self.cache.insert(ch, &[], 0, 0, advance, 0.0, 0.0);
            }
        }
        // Must exist now: one of the insert branches above ran.
        self.cache.get(ch).unwrap()
    }
    /// Calculate total width of a text string.
    ///
    /// Takes `&mut self` because measuring may rasterize uncached glyphs.
    pub fn text_width(&mut self, text: &str) -> f32 {
        let mut width = 0.0;
        for ch in text.chars() {
            let info = self.glyph(ch);
            width += info.advance;
        }
        width
    }
    /// Raw R8 atlas pixels (row-major).
    pub fn atlas_data(&self) -> &[u8] {
        &self.cache.atlas_data
    }
    /// Atlas dimensions as (width, height) in pixels.
    pub fn atlas_size(&self) -> (u32, u32) {
        (self.cache.atlas_width, self.cache.atlas_height)
    }
    /// True when the atlas changed since the last `clear_dirty`
    /// (i.e. the GPU texture needs re-uploading).
    pub fn is_dirty(&self) -> bool {
        self.cache.dirty
    }
    /// Acknowledge the current atlas contents as uploaded.
    pub fn clear_dirty(&mut self) {
        self.cache.clear_dirty();
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Probe common system font locations (Windows, Linux, macOS).
    /// Previously only Windows paths were listed, so every test panicked
    /// on other platforms; tests now skip gracefully when no font exists.
    fn load_test_font() -> Option<TtfFont> {
        let paths = [
            "C:/Windows/Fonts/arial.ttf",
            "C:/Windows/Fonts/consola.ttf",
            "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
            "/System/Library/Fonts/Supplemental/Arial.ttf",
        ];
        for p in &paths {
            if let Ok(data) = std::fs::read(p) {
                if let Ok(font) = TtfFont::new(&data, 24.0) {
                    return Some(font);
                }
            }
        }
        None
    }

    /// Fetch a font, or log and return None so the calling test skips.
    fn font_or_skip() -> Option<TtfFont> {
        let font = load_test_font();
        if font.is_none() {
            eprintln!("skipping test: no system TTF font found");
        }
        font
    }

    #[test]
    fn test_new() {
        let font = match font_or_skip() { Some(f) => f, None => return };
        assert!(font.font_size == 24.0);
        assert!(font.ascender > 0.0);
        assert!(font.line_height > 0.0);
    }

    #[test]
    fn test_glyph_caches() {
        let mut font = match font_or_skip() { Some(f) => f, None => return };
        let _g1 = font.glyph('A');
        // First rasterization must mark the atlas dirty...
        assert!(font.is_dirty());
        font.clear_dirty();
        // ...and a cache hit must not.
        let _g2 = font.glyph('A');
        assert!(!font.is_dirty());
    }

    #[test]
    fn test_text_width() {
        let mut font = match font_or_skip() { Some(f) => f, None => return };
        let w1 = font.text_width("Hello");
        let w2 = font.text_width("Hello World");
        assert!(w1 > 0.0);
        assert!(w2 > w1);
    }

    #[test]
    fn test_space_glyph() {
        let mut font = match font_or_skip() { Some(f) => f, None => return };
        let info = font.glyph(' ');
        assert!(info.advance > 0.0); // space has advance but no bitmap
        assert!(info.width == 0.0);
    }
}

View File

@@ -0,0 +1,518 @@
use std::collections::HashMap;
// --- Byte helpers (big-endian, as TTF tables are stored) ---
/// Read a single byte at `off`. Panics if `off` is out of bounds.
fn read_u8(data: &[u8], off: usize) -> u8 {
    data[off]
}
/// Read a big-endian u16 at `off`. Panics if the 2 bytes are out of bounds.
fn read_u16(data: &[u8], off: usize) -> u16 {
    let hi = data[off] as u16;
    let lo = data[off + 1] as u16;
    (hi << 8) | lo
}
/// Read a big-endian i16 at `off`. Panics if the 2 bytes are out of bounds.
fn read_i16(data: &[u8], off: usize) -> i16 {
    // Assemble the unsigned value, then reinterpret as two's-complement.
    let raw = u16::from_be_bytes([data[off], data[off + 1]]);
    raw as i16
}
/// Read a big-endian u32 at `off`. Panics if the 4 bytes are out of bounds.
fn read_u32(data: &[u8], off: usize) -> u32 {
    data[off..off + 4]
        .iter()
        .fold(0u32, |acc, &b| (acc << 8) | b as u32)
}
// --- Data types ---
/// A single point of a glyph contour, in font units.
#[derive(Debug, Clone)]
pub struct OutlinePoint {
    pub x: f32,
    pub y: f32,
    /// True when the point lies on the curve; false means it is a
    /// quadratic bezier control point.
    pub on_curve: bool,
}
/// A simple (non-compound) glyph outline: closed contours plus the
/// bounding box from the glyf header, all in font units.
#[derive(Debug, Clone)]
pub struct GlyphOutline {
    /// Closed contours; consecutive off-curve points have had an implicit
    /// on-curve midpoint inserted between them.
    pub contours: Vec<Vec<OutlinePoint>>,
    pub x_min: i16,
    pub y_min: i16,
    pub x_max: i16,
    pub y_max: i16,
}
/// Horizontal metrics for one glyph (hmtx table), in font units.
#[derive(Debug, Clone, Copy)]
pub struct GlyphMetrics {
    pub advance_width: u16,
    pub left_side_bearing: i16,
}
// --- Flag bits for simple glyph parsing (glyf per-point flag bytes) ---
// Bit 0: point is on the curve (clear = quadratic control point).
const ON_CURVE: u8 = 0x01;
// Bit 1: x coordinate is a 1-byte delta (sign given by X_SAME_OR_POS).
const X_SHORT: u8 = 0x02;
// Bit 2: y coordinate is a 1-byte delta (sign given by Y_SAME_OR_POS).
const Y_SHORT: u8 = 0x04;
// Bit 3: the next byte is a repeat count for this flag byte.
const REPEAT_FLAG: u8 = 0x08;
// Bit 4: short x is positive / long x is omitted (delta 0).
const X_SAME_OR_POS: u8 = 0x10;
// Bit 5: short y is positive / long y is omitted (delta 0).
const Y_SAME_OR_POS: u8 = 0x20;
// --- TtfParser ---
/// Minimal TrueType parser: table directory plus the header values needed
/// for glyph lookup (cmap), outlines (loca/glyf) and metrics (hmtx).
pub struct TtfParser {
    // Entire font file; all table offsets index into this buffer.
    data: Vec<u8>,
    pub tables: HashMap<[u8; 4], (u32, u32)>, // tag -> (offset, length)
    // From head: design grid resolution (font units per em square).
    pub units_per_em: u16,
    // From maxp.
    pub num_glyphs: u16,
    // From hhea, in font units.
    pub ascender: i16,
    pub descender: i16,
    pub line_gap: i16,
    // From hhea: number of full entries in hmtx.
    pub num_h_metrics: u16,
    // From head (indexToLocFormat): 0 = short (u16*2) loca, 1 = long (u32).
    pub loca_format: i16,
}
impl TtfParser {
    /// Parse a TTF file from raw bytes.
    ///
    /// Reads the table directory and the head/hhea/maxp headers. Returns an
    /// error string when the buffer is too short or a required table is
    /// missing. Other tables are only located here; their contents are read
    /// lazily by the lookup methods (which index `data` unchecked, so a
    /// truncated/malformed font may panic there).
    pub fn parse(data: Vec<u8>) -> Result<Self, String> {
        if data.len() < 12 {
            return Err("File too short for offset table".into());
        }
        let _sf_version = read_u32(&data, 0);
        let num_tables = read_u16(&data, 4) as usize;
        // Each table record is 16 bytes, after the 12-byte offset table.
        if data.len() < 12 + num_tables * 16 {
            return Err("File too short for table records".into());
        }
        let mut tables = HashMap::new();
        for i in 0..num_tables {
            let rec_off = 12 + i * 16;
            let mut tag = [0u8; 4];
            tag.copy_from_slice(&data[rec_off..rec_off + 4]);
            let offset = read_u32(&data, rec_off + 8);
            let length = read_u32(&data, rec_off + 12);
            tables.insert(tag, (offset, length));
        }
        // Parse head table: unitsPerEm at +18, indexToLocFormat at +50.
        let &(head_off, _) = tables
            .get(b"head")
            .ok_or("Missing head table")?;
        let head_off = head_off as usize;
        let units_per_em = read_u16(&data, head_off + 18);
        let loca_format = read_i16(&data, head_off + 50);
        // Parse hhea table: ascender/descender/lineGap at +4/+6/+8,
        // numberOfHMetrics at +34.
        let &(hhea_off, _) = tables
            .get(b"hhea")
            .ok_or("Missing hhea table")?;
        let hhea_off = hhea_off as usize;
        let ascender = read_i16(&data, hhea_off + 4);
        let descender = read_i16(&data, hhea_off + 6);
        let line_gap = read_i16(&data, hhea_off + 8);
        let num_h_metrics = read_u16(&data, hhea_off + 34);
        // Parse maxp table: numGlyphs at +4.
        let &(maxp_off, _) = tables
            .get(b"maxp")
            .ok_or("Missing maxp table")?;
        let maxp_off = maxp_off as usize;
        let num_glyphs = read_u16(&data, maxp_off + 4);
        Ok(Self {
            data,
            tables,
            units_per_em,
            num_glyphs,
            ascender,
            descender,
            line_gap,
            num_h_metrics,
            loca_format,
        })
    }
    /// Look up the glyph index for a Unicode codepoint via cmap Format 4.
    ///
    /// Returns 0 (the .notdef glyph) when there is no cmap table, no
    /// Format 4 subtable, or the codepoint is unmapped.
    pub fn glyph_index(&self, codepoint: u32) -> u16 {
        let &(cmap_off, _) = match self.tables.get(b"cmap") {
            Some(v) => v,
            None => return 0,
        };
        let cmap_off = cmap_off as usize;
        let num_subtables = read_u16(&self.data, cmap_off + 2) as usize;
        // Find a Format 4 subtable (prefer platform 3 encoding 1, or platform 0)
        let mut fmt4_offset: Option<usize> = None;
        for i in 0..num_subtables {
            // Encoding records: 8 bytes each after the 4-byte cmap header.
            let rec = cmap_off + 4 + i * 8;
            let platform_id = read_u16(&self.data, rec);
            let encoding_id = read_u16(&self.data, rec + 2);
            let sub_offset = read_u32(&self.data, rec + 4) as usize;
            let abs_off = cmap_off + sub_offset;
            if abs_off + 2 > self.data.len() {
                continue;
            }
            let format = read_u16(&self.data, abs_off);
            if format == 4 {
                // Prefer Windows Unicode BMP (3,1)
                if platform_id == 3 && encoding_id == 1 {
                    fmt4_offset = Some(abs_off);
                    break;
                }
                // Accept platform 0 as fallback
                if platform_id == 0 && fmt4_offset.is_none() {
                    fmt4_offset = Some(abs_off);
                }
            }
        }
        let sub_off = match fmt4_offset {
            Some(o) => o,
            None => return 0,
        };
        // Parse Format 4: four parallel arrays of segCount u16s each
        // (endCode, startCode, idDelta, idRangeOffset).
        let seg_count_x2 = read_u16(&self.data, sub_off + 6) as usize;
        let seg_count = seg_count_x2 / 2;
        let end_code_base = sub_off + 14;
        let start_code_base = end_code_base + seg_count * 2 + 2; // +2 for reservedPad
        let id_delta_base = start_code_base + seg_count * 2;
        let id_range_offset_base = id_delta_base + seg_count * 2;
        for i in 0..seg_count {
            let end_code = read_u16(&self.data, end_code_base + i * 2) as u32;
            let start_code = read_u16(&self.data, start_code_base + i * 2) as u32;
            // Segments are sorted by endCode; take the first containing one.
            if end_code >= codepoint && start_code <= codepoint {
                let id_delta = read_i16(&self.data, id_delta_base + i * 2);
                let id_range_offset = read_u16(&self.data, id_range_offset_base + i * 2) as usize;
                if id_range_offset == 0 {
                    // Direct mapping: glyph = codepoint + delta (mod 65536
                    // via the u16 cast).
                    return (codepoint as i32 + id_delta as i32) as u16;
                } else {
                    // Indirect mapping: idRangeOffset is a byte offset from
                    // its own array slot into the glyphIdArray.
                    let offset_in_bytes =
                        id_range_offset + 2 * (codepoint - start_code) as usize;
                    let glyph_addr = id_range_offset_base + i * 2 + offset_in_bytes;
                    if glyph_addr + 1 < self.data.len() {
                        let glyph = read_u16(&self.data, glyph_addr);
                        if glyph != 0 {
                            return (glyph as i32 + id_delta as i32) as u16;
                        }
                    }
                    return 0;
                }
            }
        }
        0
    }
    /// Get the offset of a glyph in the glyf table using loca.
    ///
    /// Returns `(absolute_offset, length)`, or None for out-of-range ids
    /// and empty glyphs (equal consecutive loca entries).
    fn glyph_offset(&self, glyph_id: u16) -> Option<(usize, usize)> {
        let &(loca_off, _) = self.tables.get(b"loca")?;
        let &(glyf_off, _) = self.tables.get(b"glyf")?;
        let loca_off = loca_off as usize;
        let glyf_off = glyf_off as usize;
        if glyph_id >= self.num_glyphs {
            return None;
        }
        // loca stores num_glyphs + 1 entries; length is the delta to the
        // next entry.
        let (offset, next_offset) = if self.loca_format == 0 {
            // Short format: u16 * 2
            let o = read_u16(&self.data, loca_off + glyph_id as usize * 2) as usize * 2;
            let n = read_u16(&self.data, loca_off + (glyph_id as usize + 1) * 2) as usize * 2;
            (o, n)
        } else {
            // Long format: u32
            let o = read_u32(&self.data, loca_off + glyph_id as usize * 4) as usize;
            let n = read_u32(&self.data, loca_off + (glyph_id as usize + 1) * 4) as usize;
            (o, n)
        };
        if offset == next_offset {
            // Empty glyph (e.g., space)
            return None;
        }
        Some((glyf_off + offset, next_offset - offset))
    }
    /// Parse the outline of a simple glyph.
    ///
    /// Returns None for empty glyphs and for compound glyphs (negative
    /// contour count), which this parser does not support.
    pub fn glyph_outline(&self, glyph_id: u16) -> Option<GlyphOutline> {
        let (glyph_off, _glyph_len) = match self.glyph_offset(glyph_id) {
            Some(v) => v,
            None => return None, // empty glyph
        };
        let num_contours = read_i16(&self.data, glyph_off);
        if num_contours < 0 {
            // Compound glyph — not supported
            return None;
        }
        let num_contours = num_contours as usize;
        if num_contours == 0 {
            // Header only: bounding box at +2..+10, no point data.
            return Some(GlyphOutline {
                contours: Vec::new(),
                x_min: read_i16(&self.data, glyph_off + 2),
                y_min: read_i16(&self.data, glyph_off + 4),
                x_max: read_i16(&self.data, glyph_off + 6),
                y_max: read_i16(&self.data, glyph_off + 8),
            });
        }
        let x_min = read_i16(&self.data, glyph_off + 2);
        let y_min = read_i16(&self.data, glyph_off + 4);
        let x_max = read_i16(&self.data, glyph_off + 6);
        let y_max = read_i16(&self.data, glyph_off + 8);
        // endPtsOfContours: last point index of each contour, cumulative.
        let mut end_pts = Vec::with_capacity(num_contours);
        let mut off = glyph_off + 10;
        for _ in 0..num_contours {
            end_pts.push(read_u16(&self.data, off) as usize);
            off += 2;
        }
        let num_points = end_pts[num_contours - 1] + 1;
        // Skip instructions
        let instruction_length = read_u16(&self.data, off) as usize;
        off += 2 + instruction_length;
        // Parse flags: one byte per point, run-length compressed via
        // REPEAT_FLAG (next byte = extra repetitions of the same flag).
        let mut flags = Vec::with_capacity(num_points);
        while flags.len() < num_points {
            let flag = read_u8(&self.data, off);
            off += 1;
            flags.push(flag);
            if flag & REPEAT_FLAG != 0 {
                let repeat_count = read_u8(&self.data, off) as usize;
                off += 1;
                for _ in 0..repeat_count {
                    flags.push(flag);
                }
            }
        }
        // Parse x-coordinates (delta-encoded): 1-byte magnitude with sign
        // bit, omitted (delta 0), or signed 2-byte delta.
        let mut x_coords = Vec::with_capacity(num_points);
        let mut x: i32 = 0;
        for i in 0..num_points {
            let flag = flags[i];
            if flag & X_SHORT != 0 {
                let dx = read_u8(&self.data, off) as i32;
                off += 1;
                x += if flag & X_SAME_OR_POS != 0 { dx } else { -dx };
            } else if flag & X_SAME_OR_POS != 0 {
                // delta = 0
            } else {
                let dx = read_i16(&self.data, off) as i32;
                off += 2;
                x += dx;
            }
            x_coords.push(x);
        }
        // Parse y-coordinates (delta-encoded), same scheme as x.
        let mut y_coords = Vec::with_capacity(num_points);
        let mut y: i32 = 0;
        for i in 0..num_points {
            let flag = flags[i];
            if flag & Y_SHORT != 0 {
                let dy = read_u8(&self.data, off) as i32;
                off += 1;
                y += if flag & Y_SAME_OR_POS != 0 { dy } else { -dy };
            } else if flag & Y_SAME_OR_POS != 0 {
                // delta = 0
            } else {
                let dy = read_i16(&self.data, off) as i32;
                off += 2;
                y += dy;
            }
            y_coords.push(y);
        }
        // Build contours with implicit on-curve point insertion
        let mut contours = Vec::with_capacity(num_contours);
        let mut start = 0;
        for &end in &end_pts {
            let raw_points: Vec<(f32, f32, bool)> = (start..=end)
                .map(|i| {
                    (
                        x_coords[i] as f32,
                        y_coords[i] as f32,
                        flags[i] & ON_CURVE != 0,
                    )
                })
                .collect();
            let mut contour = Vec::new();
            let n = raw_points.len();
            if n == 0 {
                start = end + 1;
                contours.push(contour);
                continue;
            }
            for j in 0..n {
                let (cx, cy, c_on) = raw_points[j];
                // Wrap to the first point so the closing segment is checked too.
                let (nx, ny, n_on) = raw_points[(j + 1) % n];
                contour.push(OutlinePoint {
                    x: cx,
                    y: cy,
                    on_curve: c_on,
                });
                // If both current and next are off-curve, insert implicit midpoint
                if !c_on && !n_on {
                    contour.push(OutlinePoint {
                        x: (cx + nx) * 0.5,
                        y: (cy + ny) * 0.5,
                        on_curve: true,
                    });
                }
            }
            start = end + 1;
            contours.push(contour);
        }
        Some(GlyphOutline {
            contours,
            x_min,
            y_min,
            x_max,
            y_max,
        })
    }
    /// Get the horizontal metrics for a glyph.
    ///
    /// Returns zeroed metrics when the hmtx table is missing. Glyphs past
    /// `num_h_metrics` share the last advance width and read their left
    /// side bearing from the trailing lsb array.
    pub fn glyph_metrics(&self, glyph_id: u16) -> GlyphMetrics {
        let &(hmtx_off, _) = match self.tables.get(b"hmtx") {
            Some(v) => v,
            None => {
                return GlyphMetrics {
                    advance_width: 0,
                    left_side_bearing: 0,
                }
            }
        };
        let hmtx_off = hmtx_off as usize;
        if (glyph_id as u16) < self.num_h_metrics {
            // Full 4-byte entry: advanceWidth + lsb.
            let rec = hmtx_off + glyph_id as usize * 4;
            GlyphMetrics {
                advance_width: read_u16(&self.data, rec),
                left_side_bearing: read_i16(&self.data, rec + 2),
            }
        } else {
            // Use last advance_width, lsb from separate array
            let last_aw_off = hmtx_off + (self.num_h_metrics as usize - 1) * 4;
            let advance_width = read_u16(&self.data, last_aw_off);
            let lsb_array_off = hmtx_off + self.num_h_metrics as usize * 4;
            let idx = glyph_id as usize - self.num_h_metrics as usize;
            let left_side_bearing = read_i16(&self.data, lsb_array_off + idx * 2);
            GlyphMetrics {
                advance_width,
                left_side_bearing,
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Probe common system font locations (Windows, Linux, macOS) and
    /// return the first font that parses. Previously only Windows paths
    /// were listed, so every test panicked on other platforms; tests now
    /// skip gracefully when no system font is available.
    fn load_test_font() -> Option<TtfParser> {
        let paths = [
            "C:/Windows/Fonts/arial.ttf",
            "C:/Windows/Fonts/consola.ttf",
            "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
            "/System/Library/Fonts/Supplemental/Arial.ttf",
        ];
        for path in &paths {
            if let Ok(data) = std::fs::read(path) {
                if let Ok(parser) = TtfParser::parse(data) {
                    return Some(parser);
                }
            }
        }
        None
    }

    /// Fetch a parser, or log and return None so the calling test skips.
    fn font_or_skip() -> Option<TtfParser> {
        let font = load_test_font();
        if font.is_none() {
            eprintln!("skipping test: no system TTF font found");
        }
        font
    }

    #[test]
    fn test_parse_loads_tables() {
        let parser = match font_or_skip() { Some(p) => p, None => return };
        assert!(parser.tables.contains_key(b"head"));
        assert!(parser.tables.contains_key(b"cmap"));
        assert!(parser.tables.contains_key(b"glyf"));
    }

    #[test]
    fn test_head_values() {
        let parser = match font_or_skip() { Some(p) => p, None => return };
        assert!(parser.units_per_em > 0);
        assert!(parser.loca_format == 0 || parser.loca_format == 1);
    }

    #[test]
    fn test_hhea_values() {
        let parser = match font_or_skip() { Some(p) => p, None => return };
        assert!(parser.ascender > 0);
        assert!(parser.num_h_metrics > 0);
    }

    #[test]
    fn test_maxp_values() {
        let parser = match font_or_skip() { Some(p) => p, None => return };
        assert!(parser.num_glyphs > 0);
    }

    #[test]
    fn test_cmap_ascii() {
        let parser = match font_or_skip() { Some(p) => p, None => return };
        let glyph_a = parser.glyph_index(0x41); // 'A'
        assert!(glyph_a > 0, "glyph index for 'A' should be > 0");
    }

    #[test]
    fn test_cmap_space() {
        let parser = match font_or_skip() { Some(p) => p, None => return };
        let glyph = parser.glyph_index(0x20); // space
        assert!(glyph > 0);
    }

    #[test]
    fn test_cmap_unmapped() {
        let parser = match font_or_skip() { Some(p) => p, None => return };
        let glyph = parser.glyph_index(0xFFFD0); // unlikely codepoint
        assert_eq!(glyph, 0);
    }

    #[test]
    fn test_glyph_outline_has_contours() {
        let parser = match font_or_skip() { Some(p) => p, None => return };
        let gid = parser.glyph_index(0x41); // 'A'
        let outline = parser.glyph_outline(gid);
        assert!(outline.is_some());
        let outline = outline.unwrap();
        assert!(!outline.contours.is_empty(), "A should have contours");
    }

    #[test]
    fn test_glyph_metrics() {
        let parser = match font_or_skip() { Some(p) => p, None => return };
        let gid = parser.glyph_index(0x41);
        let metrics = parser.glyph_metrics(gid);
        assert!(metrics.advance_width > 0);
    }

    #[test]
    fn test_space_no_contours() {
        let parser = match font_or_skip() { Some(p) => p, None => return };
        let gid = parser.glyph_index(0x20);
        let outline = parser.glyph_outline(gid);
        // Space may have no outline or empty contours
        if let Some(o) = outline {
            assert!(o.contours.is_empty());
        }
    }
}

View File

@@ -0,0 +1,184 @@
use std::collections::HashMap;
use crate::draw_list::DrawList;
use crate::font::FontAtlas;
use crate::layout::LayoutState;
/// Key events the UI system understands.
///
/// Only the non-character keys that text-input widgets act on; printable
/// characters arrive separately via `UiContext::input_char`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Key {
    Left,
    Right,
    Backspace,
    Delete,
    Home,
    End,
}
/// Central immediate-mode UI state: per-frame input, hot/active widget
/// tracking, layout cursor, and the draw list that widgets append to.
pub struct UiContext {
    /// Widget id currently under the mouse this frame.
    pub hot: Option<u64>,
    /// Widget id currently being interacted with (e.g. held down).
    pub active: Option<u64>,
    pub draw_list: DrawList,
    pub layout: LayoutState,
    pub mouse_x: f32,
    pub mouse_y: f32,
    pub mouse_down: bool,
    /// True only on the frame the button transitioned up -> down.
    pub mouse_clicked: bool,
    /// True only on the frame the button transitioned down -> up.
    pub mouse_released: bool,
    /// Scroll delta for this frame (positive = scroll up); reset in end_frame.
    pub mouse_scroll: f32,
    pub screen_width: f32,
    pub screen_height: f32,
    /// Built-in bitmap font (fallback when `ttf_font` is None).
    pub font: FontAtlas,
    // Per-frame widget id source; reset each begin_frame.
    id_counter: u64,
    // Previous frame's button state, for click/release edge detection.
    prev_mouse_down: bool,
    // Text input state
    pub focused_id: Option<u32>,
    pub cursor_pos: usize,
    // Pending printable characters; drained by the text_input widget.
    input_chars: Vec<char>,
    // Pending non-character key events; drained by the text_input widget.
    input_keys: Vec<Key>,
    // Scroll panel state
    pub scroll_offsets: HashMap<u32, f32>,
    // Drag and drop state
    pub dragging: Option<(u32, u64)>,
    pub drag_start: (f32, f32),
    pub(crate) drag_started: bool,
    /// TTF font for high-quality text rendering (None = bitmap fallback).
    pub ttf_font: Option<crate::ttf_font::TtfFont>,
}
impl UiContext {
/// Create a new UiContext for the given screen dimensions.
///
/// Starts with no hot/active/focused widget, all input cleared, and the
/// built-in bitmap font; assign `ttf_font` afterwards for TTF rendering.
pub fn new(screen_w: f32, screen_h: f32) -> Self {
    UiContext {
        hot: None,
        active: None,
        draw_list: DrawList::new(),
        layout: LayoutState::new(0.0, 0.0),
        mouse_x: 0.0,
        mouse_y: 0.0,
        mouse_down: false,
        mouse_clicked: false,
        mouse_released: false,
        mouse_scroll: 0.0,
        screen_width: screen_w,
        screen_height: screen_h,
        font: FontAtlas::generate(),
        id_counter: 0,
        prev_mouse_down: false,
        focused_id: None,
        cursor_pos: 0,
        input_chars: Vec::new(),
        input_keys: Vec::new(),
        scroll_offsets: HashMap::new(),
        dragging: None,
        drag_start: (0.0, 0.0),
        drag_started: false,
        ttf_font: None,
    }
}
/// Begin a new frame: clear draw list, reset id counter, update mouse state.
///
/// Click/release edges are derived by comparing `mouse_down` against the
/// previous frame's button state.
pub fn begin_frame(&mut self, mx: f32, my: f32, mouse_down: bool) {
    self.draw_list.clear();
    self.id_counter = 0;
    self.hot = None;
    self.mouse_x = mx;
    self.mouse_y = my;
    // Edge-detect the button against last frame's state.
    let was_down = self.prev_mouse_down;
    self.mouse_clicked = mouse_down && !was_down;
    self.mouse_released = was_down && !mouse_down;
    self.mouse_down = mouse_down;
    self.prev_mouse_down = mouse_down;
    // Restart the layout cursor at the top-left corner.
    self.layout = LayoutState::new(0.0, 0.0);
}
/// Feed a character input event (printable ASCII) for text input widgets.
///
/// Anything outside printable ASCII (non-ASCII, control chars, DEL) is
/// silently dropped.
pub fn input_char(&mut self, ch: char) {
    // Printable ASCII = space (0x20) plus the graphic range 0x21..=0x7E.
    let printable = ch == ' ' || ch.is_ascii_graphic();
    if printable {
        self.input_chars.push(ch);
    }
}
/// Feed a key input event for text input widgets.
///
/// Queued until drained by the text_input widget or cleared in end_frame.
pub fn input_key(&mut self, key: Key) {
    self.input_keys.push(key);
}
/// Set mouse scroll delta for this frame (positive = scroll up).
pub fn set_scroll(&mut self, delta: f32) {
self.mouse_scroll = delta;
}
/// Drain all pending input chars (consumed by text_input widget).
pub(crate) fn drain_chars(&mut self) -> Vec<char> {
std::mem::take(&mut self.input_chars)
}
/// Drain all pending key events (consumed by text_input widget).
pub(crate) fn drain_keys(&mut self) -> Vec<Key> {
std::mem::take(&mut self.input_keys)
}
/// End the current frame.
pub fn end_frame(&mut self) {
self.mouse_scroll = 0.0;
// Clear any unconsumed input
self.input_chars.clear();
self.input_keys.clear();
}
/// Generate a new unique ID for this frame.
pub fn gen_id(&mut self) -> u64 {
self.id_counter += 1;
self.id_counter
}
/// Check if the mouse cursor is inside the given rectangle.
pub fn mouse_in_rect(&self, x: f32, y: f32, w: f32, h: f32) -> bool {
self.mouse_x >= x
&& self.mouse_x < x + w
&& self.mouse_y >= y
&& self.mouse_y < y + h
}
/// Draw text using TTF font if available, otherwise bitmap font fallback.
pub fn draw_text(&mut self, text: &str, x: f32, y: f32, color: [u8; 4]) {
if let Some(ref mut ttf) = self.ttf_font {
let ascender = ttf.ascender;
let mut cx = x;
for ch in text.chars() {
let info = ttf.glyph(ch).clone();
if info.width > 0.0 && info.height > 0.0 {
let gx = cx + info.bearing_x;
let gy = y + ascender - info.bearing_y;
let (u0, v0, u1, v1) = (info.uv[0], info.uv[1], info.uv[2], info.uv[3]);
self.draw_list.add_rect_uv(gx, gy, info.width, info.height, u0, v0, u1, v1, color);
}
cx += info.advance;
}
} else {
// Bitmap font fallback
let gw = self.font.glyph_width as f32;
let gh = self.font.glyph_height as f32;
let mut cx = x;
for ch in text.chars() {
let (u0, v0, u1, v1) = self.font.glyph_uv(ch);
self.draw_list.add_rect_uv(cx, y, gw, gh, u0, v0, u1, v1, color);
cx += gw;
}
}
}
/// Calculate text width using TTF or bitmap font.
pub fn ttf_text_width(&mut self, text: &str) -> f32 {
if let Some(ref mut ttf) = self.ttf_font {
ttf.text_width(text)
} else {
text.len() as f32 * self.font.glyph_width as f32
}
}
}

View File

@@ -0,0 +1,34 @@
// UI pass shader: renders the widget draw list (textured, vertex-colored
// quads) through an orthographic projection.
struct UiUniform {
    // Orthographic projection taking UI pixel coordinates to clip space.
    projection: mat4x4<f32>,
};
@group(0) @binding(0) var<uniform> ui_uniform: UiUniform;
// Font/UI atlas; only its red channel is sampled (single-channel coverage).
@group(0) @binding(1) var t_atlas: texture_2d<f32>;
@group(0) @binding(2) var s_atlas: sampler;
struct VertexInput {
    @location(0) position: vec2<f32>,
    @location(1) uv: vec2<f32>,
    @location(2) color: vec4<f32>,
};
struct VertexOutput {
    @builtin(position) clip_position: vec4<f32>,
    @location(0) uv: vec2<f32>,
    @location(1) color: vec4<f32>,
};
@vertex
fn vs_main(in: VertexInput) -> VertexOutput {
    var out: VertexOutput;
    // 2D position lifted to z = 0, w = 1 before projection.
    out.clip_position = ui_uniform.projection * vec4<f32>(in.position, 0.0, 1.0);
    out.uv = in.uv;
    out.color = in.color;
    return out;
}
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    // Atlas red channel is the glyph coverage; modulate both the RGB and
    // the alpha of the vertex color with it.
    let tex_alpha = textureSample(t_atlas, s_atlas, in.uv).r;
    return vec4<f32>(in.color.rgb * tex_alpha, in.color.a * tex_alpha);
}

View File

@@ -0,0 +1,182 @@
use bytemuck::{Pod, Zeroable};
/// CPU-side mirror of the WGSL `RectUniform` used by the viewport blit
/// shader (see `viewport_shader.wgsl`).
#[repr(C)]
#[derive(Copy, Clone, Pod, Zeroable)]
struct RectUniform {
    /// Destination rectangle in screen pixels: (x, y, width, height).
    rect: [f32; 4],
    /// Target surface size in pixels.
    screen: [f32; 2],
    /// Padding to a 16-byte multiple for uniform-buffer layout rules.
    _pad: [f32; 2],
}
/// Blits an off-screen viewport color texture into a pixel rectangle of the
/// final surface (see [`ViewportRenderer::render`]).
pub struct ViewportRenderer {
    pipeline: wgpu::RenderPipeline,
    /// Layout for (rect uniform, viewport texture, sampler); a bind group is
    /// created per `render` call since the source view is passed in each time.
    bind_group_layout: wgpu::BindGroupLayout,
    sampler: wgpu::Sampler,
    /// Holds a single `RectUniform`, rewritten on every `render` call.
    uniform_buffer: wgpu::Buffer,
}
impl ViewportRenderer {
    /// Build the blit pipeline: a fullscreen-quad style shader that samples
    /// the off-screen viewport texture and writes it to `surface_format`.
    pub fn new(device: &wgpu::Device, surface_format: wgpu::TextureFormat) -> Self {
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Viewport Blit Shader"),
            source: wgpu::ShaderSource::Wgsl(include_str!("viewport_shader.wgsl").into()),
        });
        // Bindings: 0 = rect uniform (vertex stage), 1 = viewport color
        // texture, 2 = filtering sampler (both fragment stage).
        let bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("Viewport Blit BGL"),
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Texture {
                            multisampled: false,
                            view_dimension: wgpu::TextureViewDimension::D2,
                            sample_type: wgpu::TextureSampleType::Float { filterable: true },
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 2,
                        visibility: wgpu::ShaderStages::FRAGMENT,
                        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
                        count: None,
                    },
                ],
            });
        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("Viewport Blit PL"),
            bind_group_layouts: &[&bind_group_layout],
            immediate_size: 0,
        });
        let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("Viewport Blit Pipeline"),
            layout: Some(&pipeline_layout),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: Some("vs_main"),
                // Vertex positions are generated from vertex_index in the
                // shader, so no vertex buffers are bound.
                buffers: &[],
                compilation_options: wgpu::PipelineCompilationOptions::default(),
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: Some("fs_main"),
                targets: &[Some(wgpu::ColorTargetState {
                    // Opaque copy: blending is disabled (REPLACE).
                    format: surface_format,
                    blend: Some(wgpu::BlendState::REPLACE),
                    write_mask: wgpu::ColorWrites::ALL,
                })],
                compilation_options: wgpu::PipelineCompilationOptions::default(),
            }),
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleList,
                ..Default::default()
            },
            depth_stencil: None,
            multisample: wgpu::MultisampleState::default(),
            multiview_mask: None,
            cache: None,
        });
        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("Viewport Sampler"),
            // Linear filtering so the blit looks acceptable if the rect size
            // differs from the viewport texture size.
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            ..Default::default()
        });
        let uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Viewport Rect Uniform"),
            size: std::mem::size_of::<RectUniform>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        ViewportRenderer {
            pipeline,
            bind_group_layout,
            sampler,
            uniform_buffer,
        }
    }

    /// Blit `viewport_color_view` into the rectangle
    /// (`rect_x`, `rect_y`, `rect_w`, `rect_h`) of `target_view`.
    ///
    /// The pass loads the existing target contents (LoadOp::Load), so it can
    /// be composited over previously rendered UI. A scissor rect confines the
    /// draw to the requested rectangle.
    pub fn render(
        &self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        encoder: &mut wgpu::CommandEncoder,
        target_view: &wgpu::TextureView,
        viewport_color_view: &wgpu::TextureView,
        screen_w: f32,
        screen_h: f32,
        rect_x: f32,
        rect_y: f32,
        rect_w: f32,
        rect_h: f32,
    ) {
        let uniform = RectUniform {
            rect: [rect_x, rect_y, rect_w, rect_h],
            screen: [screen_w, screen_h],
            _pad: [0.0; 2],
        };
        queue.write_buffer(&self.uniform_buffer, 0, bytemuck::cast_slice(&[uniform]));
        // Bind group is rebuilt every call because the source texture view is
        // supplied by the caller and may change between frames.
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Viewport Blit BG"),
            layout: &self.bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: self.uniform_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::TextureView(viewport_color_view),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: wgpu::BindingResource::Sampler(&self.sampler),
                },
            ],
        });
        let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: Some("Viewport Blit Pass"),
            color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                view: target_view,
                resolve_target: None,
                depth_slice: None,
                ops: wgpu::Operations {
                    // Keep whatever is already in the target outside the rect.
                    load: wgpu::LoadOp::Load,
                    store: wgpu::StoreOp::Store,
                },
            })],
            depth_stencil_attachment: None,
            occlusion_query_set: None,
            timestamp_writes: None,
            multiview_mask: None,
        });
        // NOTE(review): origin is clamped to >= 0 but width/height are not
        // clamped against the target extent — a rect extending past the
        // surface edge may trip wgpu's scissor validation. Confirm callers
        // always pass an on-screen rect.
        rpass.set_scissor_rect(
            rect_x.max(0.0) as u32,
            rect_y.max(0.0) as u32,
            rect_w.ceil().max(1.0) as u32,
            rect_h.ceil().max(1.0) as u32,
        );
        rpass.set_pipeline(&self.pipeline);
        rpass.set_bind_group(0, &bind_group, &[]);
        // 6 vertices = two triangles covering the destination rectangle.
        rpass.draw(0..6, 0..1);
    }
}

View File

@@ -0,0 +1,42 @@
// Viewport blit shader: draws a screen-space rectangle (given in pixels)
// textured with the off-screen viewport color attachment.
struct RectUniform {
    // Destination rectangle in pixels: (x, y, width, height).
    rect: vec4<f32>,
    // Target surface size in pixels.
    screen: vec2<f32>,
    // Pad the struct to a 16-byte multiple (uniform layout rules).
    _pad: vec2<f32>,
};
@group(0) @binding(0) var<uniform> u: RectUniform;
@group(0) @binding(1) var t_viewport: texture_2d<f32>;
@group(0) @binding(2) var s_viewport: sampler;
struct VertexOutput {
    @builtin(position) position: vec4<f32>,
    @location(0) uv: vec2<f32>,
};
@vertex
fn vs_main(@builtin(vertex_index) idx: u32) -> VertexOutput {
    // Two triangles covering the unit square; the corner position doubles
    // as the texture coordinate.
    var positions = array<vec2<f32>, 6>(
        vec2<f32>(0.0, 0.0),
        vec2<f32>(1.0, 0.0),
        vec2<f32>(1.0, 1.0),
        vec2<f32>(0.0, 0.0),
        vec2<f32>(1.0, 1.0),
        vec2<f32>(0.0, 1.0),
    );
    let p = positions[idx];
    // Unit square -> pixel-space rect.
    let px = u.rect.x + p.x * u.rect.z;
    let py = u.rect.y + p.y * u.rect.w;
    // Pixel-space -> NDC; y is flipped because pixel y grows downward.
    let ndc_x = (px / u.screen.x) * 2.0 - 1.0;
    let ndc_y = 1.0 - (py / u.screen.y) * 2.0;
    var out: VertexOutput;
    out.position = vec4<f32>(ndc_x, ndc_y, 0.0, 1.0);
    out.uv = p;
    return out;
}
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    // Straight copy of the viewport texture.
    return textureSample(t_viewport, s_viewport, in.uv);
}

View File

@@ -0,0 +1,69 @@
/// Color format of the off-screen viewport render target.
pub const VIEWPORT_COLOR_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8Unorm;
/// Depth format of the off-screen viewport render target.
pub const VIEWPORT_DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
/// Off-screen color + depth attachment pair that a scene is rendered into
/// before being blitted to the window surface.
pub struct ViewportTexture {
    pub color_texture: wgpu::Texture,
    /// View of `color_texture`; also sampled by the blit pass
    /// (TEXTURE_BINDING usage).
    pub color_view: wgpu::TextureView,
    pub depth_texture: wgpu::Texture,
    /// View of `depth_texture`; used only as a render attachment.
    pub depth_view: wgpu::TextureView,
    /// Current allocation size in pixels (always >= 1; see `new`).
    pub width: u32,
    pub height: u32,
}
impl ViewportTexture {
    /// Create one 2D attachment texture plus its default view with the
    /// settings shared by the color and depth targets (single mip, single
    /// sample). Extracted to avoid duplicating the descriptor.
    fn create_target(
        device: &wgpu::Device,
        label: &str,
        width: u32,
        height: u32,
        format: wgpu::TextureFormat,
        usage: wgpu::TextureUsages,
    ) -> (wgpu::Texture, wgpu::TextureView) {
        let texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some(label),
            size: wgpu::Extent3d {
                width,
                height,
                depth_or_array_layers: 1,
            },
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format,
            usage,
            view_formats: &[],
        });
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        (texture, view)
    }

    /// Allocate the color and depth attachments for an off-screen viewport.
    ///
    /// Dimensions are clamped to at least 1×1, since zero-sized textures are
    /// invalid.
    pub fn new(device: &wgpu::Device, width: u32, height: u32) -> Self {
        let w = width.max(1);
        let h = height.max(1);
        // Color is both rendered to and sampled later by the blit pass.
        let (color_texture, color_view) = Self::create_target(
            device,
            "Viewport Color",
            w,
            h,
            VIEWPORT_COLOR_FORMAT,
            wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
        );
        // Depth is only ever a render attachment.
        let (depth_texture, depth_view) = Self::create_target(
            device,
            "Viewport Depth",
            w,
            h,
            VIEWPORT_DEPTH_FORMAT,
            wgpu::TextureUsages::RENDER_ATTACHMENT,
        );
        ViewportTexture {
            color_texture,
            color_view,
            depth_texture,
            depth_view,
            width: w,
            height: h,
        }
    }

    /// Reallocate the attachments if the requested size (clamped to 1×1)
    /// differs from the current one. Returns `true` when textures were
    /// recreated, so callers know to rebuild bind groups referencing them.
    pub fn ensure_size(&mut self, device: &wgpu::Device, width: u32, height: u32) -> bool {
        let w = width.max(1);
        let h = height.max(1);
        if (w, h) == (self.width, self.height) {
            return false;
        }
        // Old textures drop here; wgpu frees them once the GPU is done.
        *self = Self::new(device, w, h);
        true
    }
}

View File

@@ -0,0 +1,745 @@
use crate::ui_context::{Key, UiContext};
// Color palette
// ── Widget color palette (RGBA, 8 bits per channel) ───────────────────
/// Window / title-bar background.
const COLOR_BG: [u8; 4] = [0x2B, 0x2B, 0x2B, 0xFF];
/// Button fill in its idle, hovered, and pressed states.
const COLOR_BUTTON: [u8; 4] = [0x44, 0x44, 0x55, 0xFF];
const COLOR_BUTTON_HOT: [u8; 4] = [0x55, 0x66, 0x88, 0xFF];
const COLOR_BUTTON_ACTIVE: [u8; 4] = [0x44, 0x88, 0xFF, 0xFF];
/// Default label/text color.
const COLOR_TEXT: [u8; 4] = [0xEE, 0xEE, 0xEE, 0xFF];
/// Panel body background.
const COLOR_PANEL: [u8; 4] = [0x33, 0x33, 0x33, 0xFF];
/// Slider track and handle.
const COLOR_SLIDER_BG: [u8; 4] = [0x44, 0x44, 0x44, 0xFF];
const COLOR_SLIDER_HANDLE: [u8; 4] = [0x88, 0x88, 0xFF, 0xFF];
/// Checkbox box and check mark.
const COLOR_CHECK_BG: [u8; 4] = [0x44, 0x44, 0x44, 0xFF];
const COLOR_CHECK_MARK: [u8; 4] = [0x88, 0xFF, 0x88, 0xFF];
/// Text-input background, border (unfocused/focused), and caret.
const COLOR_INPUT_BG: [u8; 4] = [0x22, 0x22, 0x22, 0xFF];
const COLOR_INPUT_BORDER: [u8; 4] = [0x66, 0x66, 0x66, 0xFF];
const COLOR_INPUT_FOCUSED: [u8; 4] = [0x44, 0x88, 0xFF, 0xFF];
const COLOR_CURSOR: [u8; 4] = [0xFF, 0xFF, 0xFF, 0xFF];
/// Scroll panel scrollbar track and thumb.
const COLOR_SCROLLBAR_BG: [u8; 4] = [0x33, 0x33, 0x33, 0xFF];
const COLOR_SCROLLBAR_THUMB: [u8; 4] = [0x66, 0x66, 0x77, 0xFF];
/// Mouse travel (pixels) before a press is treated as a drag.
const DRAG_THRESHOLD: f32 = 5.0;
impl UiContext {
    /// Draw text at the current cursor position and advance to the next line.
    pub fn text(&mut self, text: &str) {
        let x = self.layout.cursor_x;
        let y = self.layout.cursor_y;
        // We need to clone font info for the borrow checker
        let gw = self.font.glyph_width as f32;
        let gh = self.font.glyph_height as f32;
        // Draw each character
        // Fixed-pitch bitmap font: each glyph advances by its full width.
        let mut cx = x;
        for ch in text.chars() {
            let (u0, v0, u1, v1) = self.font.glyph_uv(ch);
            self.draw_list.add_rect_uv(cx, y, gw, gh, u0, v0, u1, v1, COLOR_TEXT);
            cx += gw;
        }
        self.layout.advance_line();
    }

    /// Draw a button with the given label. Returns true if clicked this frame.
    ///
    /// A "click" means the mouse was released over the button while the
    /// button was the active widget.
    pub fn button(&mut self, label: &str) -> bool {
        let id = self.gen_id();
        let gw = self.font.glyph_width as f32;
        let gh = self.font.glyph_height as f32;
        let padding = self.layout.padding;
        // Button sized to its label plus padding on every side.
        let text_w = label.len() as f32 * gw;
        let btn_w = text_w + padding * 2.0;
        let btn_h = gh + padding * 2.0;
        let x = self.layout.cursor_x;
        let y = self.layout.cursor_y;
        let hovered = self.mouse_in_rect(x, y, btn_w, btn_h);
        if hovered {
            self.hot = Some(id);
            // NOTE(review): `active` is claimed whenever the mouse is held
            // over the button, not only when the press began here — so a
            // press started elsewhere and released over this button also
            // counts as a click. Confirm this is intended.
            if self.mouse_down {
                self.active = Some(id);
            }
        }
        // Determine color
        let bg_color = if self.active == Some(id) && hovered {
            COLOR_BUTTON_ACTIVE
        } else if self.hot == Some(id) {
            COLOR_BUTTON_HOT
        } else {
            COLOR_BUTTON
        };
        // Draw background rect
        self.draw_list.add_rect(x, y, btn_w, btn_h, bg_color);
        // Draw text centered inside button
        let text_x = x + padding;
        let text_y = y + padding;
        let mut cx = text_x;
        for ch in label.chars() {
            let (u0, v0, u1, v1) = self.font.glyph_uv(ch);
            self.draw_list.add_rect_uv(cx, text_y, gw, gh, u0, v0, u1, v1, COLOR_TEXT);
            cx += gw;
        }
        self.layout.advance_line();
        // Return true if mouse was released over this button while it was active
        let clicked = hovered && self.mouse_released && self.active == Some(id);
        // Release always clears this widget's active claim, even off-button.
        if self.mouse_released {
            if self.active == Some(id) {
                self.active = None;
            }
        }
        clicked
    }

    /// Draw a horizontal slider. Returns the (possibly new) value after interaction.
    ///
    /// The returned value is always clamped to `[min, max]`, including the
    /// incoming `value` when no interaction occurred.
    pub fn slider(&mut self, label: &str, value: f32, min: f32, max: f32) -> f32 {
        let id = self.gen_id();
        let gw = self.font.glyph_width as f32;
        let gh = self.font.glyph_height as f32;
        let padding = self.layout.padding;
        let slider_w = 150.0_f32;
        let slider_h = gh + padding * 2.0;
        let handle_w = 10.0_f32;
        let x = self.layout.cursor_x;
        let y = self.layout.cursor_y;
        let hovered = self.mouse_in_rect(x, y, slider_w, slider_h);
        // Dragging begins on press; it continues even if the mouse leaves the
        // track, until release.
        if hovered && self.mouse_clicked {
            self.active = Some(id);
        }
        let mut new_value = value;
        if self.active == Some(id) {
            if self.mouse_down {
                // Map mouse_x to value
                // The handle center tracks the mouse, hence the half-handle
                // offset and reduced travel range.
                let t = ((self.mouse_x - x - handle_w / 2.0) / (slider_w - handle_w)).clamp(0.0, 1.0);
                new_value = min + t * (max - min);
            } else if self.mouse_released {
                self.active = None;
            }
        }
        // Clamp value
        new_value = new_value.clamp(min, max);
        // Draw background bar
        self.draw_list.add_rect(x, y, slider_w, slider_h, COLOR_SLIDER_BG);
        // Draw handle
        // Guard against a degenerate (min == max) range.
        let t = if (max - min).abs() < 1e-6 {
            0.0
        } else {
            (new_value - min) / (max - min)
        };
        let handle_x = x + t * (slider_w - handle_w);
        self.draw_list.add_rect(handle_x, y, handle_w, slider_h, COLOR_SLIDER_HANDLE);
        // Draw label to the right of the slider
        let label_x = x + slider_w + padding;
        let label_y = y + padding;
        let mut cx = label_x;
        for ch in label.chars() {
            let (u0, v0, u1, v1) = self.font.glyph_uv(ch);
            self.draw_list.add_rect_uv(cx, label_y, gw, gh, u0, v0, u1, v1, COLOR_TEXT);
            cx += gw;
        }
        self.layout.advance_line();
        new_value
    }

    /// Draw a checkbox. Returns the new checked state (toggled on click).
    ///
    /// Clicking anywhere on the box *or* the label toggles the state.
    pub fn checkbox(&mut self, label: &str, checked: bool) -> bool {
        let id = self.gen_id();
        let gw = self.font.glyph_width as f32;
        let gh = self.font.glyph_height as f32;
        let padding = self.layout.padding;
        let box_size = gh;
        let x = self.layout.cursor_x;
        let y = self.layout.cursor_y;
        // Hit area spans box + gap + label text.
        let hovered = self.mouse_in_rect(x, y, box_size + padding + label.len() as f32 * gw, gh + padding);
        if hovered {
            self.hot = Some(id);
        }
        let mut new_checked = checked;
        if hovered && self.mouse_clicked {
            new_checked = !checked;
        }
        // Draw checkbox background
        self.draw_list.add_rect(x, y, box_size, box_size, COLOR_CHECK_BG);
        // Draw check mark if checked
        if new_checked {
            // Inset mark by a fixed 3px border inside the box.
            let inner = 3.0;
            self.draw_list.add_rect(
                x + inner,
                y + inner,
                box_size - inner * 2.0,
                box_size - inner * 2.0,
                COLOR_CHECK_MARK,
            );
        }
        // Draw label
        let label_x = x + box_size + padding;
        let label_y = y;
        let mut cx = label_x;
        for ch in label.chars() {
            let (u0, v0, u1, v1) = self.font.glyph_uv(ch);
            self.draw_list.add_rect_uv(cx, label_y, gw, gh, u0, v0, u1, v1, COLOR_TEXT);
            cx += gw;
        }
        self.layout.advance_line();
        new_checked
    }

    /// Begin a panel: draw background and title, set cursor inside panel.
    pub fn begin_panel(&mut self, title: &str, x: f32, y: f32, w: f32, h: f32) {
        let gh = self.font.glyph_height as f32;
        let padding = self.layout.padding;
        // Draw panel background
        self.draw_list.add_rect(x, y, w, h, COLOR_PANEL);
        // Draw title bar (slightly darker background handled by same panel color here)
        let title_bar_h = gh + padding * 2.0;
        self.draw_list.add_rect(x, y, w, title_bar_h, COLOR_BG);
        // Draw title text
        let gw = self.font.glyph_width as f32;
        let mut cx = x + padding;
        let ty = y + padding;
        for ch in title.chars() {
            let (u0, v0, u1, v1) = self.font.glyph_uv(ch);
            self.draw_list.add_rect_uv(cx, ty, gw, gh, u0, v0, u1, v1, COLOR_TEXT);
            cx += gw;
        }
        // Set cursor to inside the panel (below title bar)
        self.layout = crate::layout::LayoutState::new(x + padding, y + title_bar_h + padding);
    }

    /// End a panel — currently a no-op; cursor remains where it was.
    pub fn end_panel(&mut self) {
        // Nothing for now; future could restore outer cursor state.
    }

    // ── Text Input Widget ─────────────────────────────────────────────
    /// Draw an editable single-line text input. Returns true if the buffer changed.
    ///
    /// `id` must be unique per text input. The widget renders a box at (x, y) with
    /// the given `width`. Height is determined by the font glyph height + padding.
    ///
    /// `cursor_pos` is used as a byte index into `buffer`; this is safe as
    /// long as the buffer only ever receives ASCII (which `input_char`
    /// guarantees for characters fed through this context).
    pub fn text_input(&mut self, id: u32, buffer: &mut String, x: f32, y: f32, width: f32) -> bool {
        let gw = self.font.glyph_width as f32;
        let gh = self.font.glyph_height as f32;
        let padding = self.layout.padding;
        let height = gh + padding * 2.0;
        let hovered = self.mouse_in_rect(x, y, width, height);
        // Click to focus / unfocus
        if self.mouse_clicked {
            if hovered {
                self.focused_id = Some(id);
                // Place cursor at end or at click position
                // Fixed-pitch font: column = horizontal offset / glyph width.
                let click_offset = ((self.mouse_x - x - padding) / gw).round() as usize;
                self.cursor_pos = click_offset.min(buffer.len());
            } else if self.focused_id == Some(id) {
                // Clicking elsewhere removes focus from this input only.
                self.focused_id = None;
            }
        }
        let mut changed = false;
        // Process input only if focused
        if self.focused_id == Some(id) {
            // Ensure cursor_pos is valid
            // (cursor state is shared across inputs, so it may be stale).
            if self.cursor_pos > buffer.len() {
                self.cursor_pos = buffer.len();
            }
            // Process character input
            let chars = self.drain_chars();
            for ch in chars {
                buffer.insert(self.cursor_pos, ch);
                self.cursor_pos += 1;
                changed = true;
            }
            // Process key input
            let keys = self.drain_keys();
            for key in keys {
                match key {
                    Key::Backspace => {
                        // Delete the character before the caret.
                        if self.cursor_pos > 0 {
                            buffer.remove(self.cursor_pos - 1);
                            self.cursor_pos -= 1;
                            changed = true;
                        }
                    }
                    Key::Delete => {
                        // Delete the character under/after the caret.
                        if self.cursor_pos < buffer.len() {
                            buffer.remove(self.cursor_pos);
                            changed = true;
                        }
                    }
                    Key::Left => {
                        if self.cursor_pos > 0 {
                            self.cursor_pos -= 1;
                        }
                    }
                    Key::Right => {
                        if self.cursor_pos < buffer.len() {
                            self.cursor_pos += 1;
                        }
                    }
                    Key::Home => {
                        self.cursor_pos = 0;
                    }
                    Key::End => {
                        self.cursor_pos = buffer.len();
                    }
                }
            }
        }
        // Draw border
        let border_color = if self.focused_id == Some(id) {
            COLOR_INPUT_FOCUSED
        } else {
            COLOR_INPUT_BORDER
        };
        self.draw_list.add_rect(x, y, width, height, border_color);
        // Draw inner background (1px border)
        self.draw_list.add_rect(x + 1.0, y + 1.0, width - 2.0, height - 2.0, COLOR_INPUT_BG);
        // Draw text
        let text_x = x + padding;
        let text_y = y + padding;
        let mut cx = text_x;
        for ch in buffer.chars() {
            let (u0, v0, u1, v1) = self.font.glyph_uv(ch);
            self.draw_list.add_rect_uv(cx, text_y, gw, gh, u0, v0, u1, v1, COLOR_TEXT);
            cx += gw;
        }
        // Draw cursor if focused
        if self.focused_id == Some(id) {
            // 1px-wide caret at the current column.
            let cursor_x = text_x + self.cursor_pos as f32 * gw;
            self.draw_list.add_rect(cursor_x, text_y, 1.0, gh, COLOR_CURSOR);
        }
        self.layout.advance_line();
        changed
    }

    // ── Scroll Panel ──────────────────────────────────────────────────
    /// Begin a scrollable panel. Content drawn between begin/end will be clipped
    /// to the panel bounds. `content_height` is the total height of the content
    /// inside the panel (used to compute scrollbar size).
    pub fn begin_scroll_panel(&mut self, id: u32, x: f32, y: f32, w: f32, h: f32, content_height: f32) {
        let scrollbar_w = 12.0_f32;
        // Content area excludes the scrollbar column on the right.
        let panel_inner_w = w - scrollbar_w;
        // Handle mouse wheel when hovering over the panel
        let hovered = self.mouse_in_rect(x, y, w, h);
        // Wheel-up (positive delta) scrolls content up, i.e. decreases offset;
        // 20px per wheel unit.
        let scroll_delta = if hovered && self.mouse_scroll.abs() > 0.0 {
            -self.mouse_scroll * 20.0
        } else {
            0.0
        };
        // Get or create scroll offset, apply delta and clamp
        let scroll = self.scroll_offsets.entry(id).or_insert(0.0);
        *scroll += scroll_delta;
        let max_scroll = (content_height - h).max(0.0);
        *scroll = scroll.clamp(0.0, max_scroll);
        let current_scroll = *scroll;
        // Draw panel background
        self.draw_list.add_rect(x, y, w, h, COLOR_PANEL);
        // Draw scrollbar track
        let sb_x = x + panel_inner_w;
        self.draw_list.add_rect(sb_x, y, scrollbar_w, h, COLOR_SCROLLBAR_BG);
        // Draw scrollbar thumb
        if content_height > h {
            // Thumb height is proportional to the visible fraction, with a
            // 16px minimum so it stays grabbable.
            let thumb_ratio = h / content_height;
            let thumb_h = (thumb_ratio * h).max(16.0);
            let scroll_ratio = if max_scroll > 0.0 { current_scroll / max_scroll } else { 0.0 };
            let thumb_y = y + scroll_ratio * (h - thumb_h);
            self.draw_list.add_rect(sb_x, thumb_y, scrollbar_w, thumb_h, COLOR_SCROLLBAR_THUMB);
        }
        // Push scissor rect for content clipping
        self.draw_list.push_scissor(x as u32, y as u32, panel_inner_w as u32, h as u32);
        // Set cursor inside panel, offset by scroll
        self.layout = crate::layout::LayoutState::new(x + self.layout.padding, y + self.layout.padding - current_scroll);
    }

    /// End a scrollable panel. Pops the scissor rect.
    pub fn end_scroll_panel(&mut self) {
        self.draw_list.pop_scissor();
    }

    // ── Drag and Drop ─────────────────────────────────────────────────
    /// Begin dragging an item. Call this when the user presses down on a draggable element.
    /// `id` identifies the source, `payload` is an arbitrary u64 value transferred on drop.
    ///
    /// The drag only *arms* here; it is not reported by `is_dragging` until
    /// the mouse travels past `DRAG_THRESHOLD` (tracked in `end_drag`).
    pub fn begin_drag(&mut self, id: u32, payload: u64) {
        if self.mouse_clicked {
            self.dragging = Some((id, payload));
            self.drag_start = (self.mouse_x, self.mouse_y);
            self.drag_started = false;
        }
    }

    /// Returns true if a drag operation is currently in progress (past the threshold).
    pub fn is_dragging(&self) -> bool {
        if let Some(_) = self.dragging {
            self.drag_started
        } else {
            false
        }
    }

    /// End the current drag operation. Returns `Some((source_id, payload))` if a drag
    /// was in progress and the mouse was released, otherwise `None`.
    ///
    /// This also advances the threshold check, so it must be called once per
    /// frame while a drag is armed (see the tests for the expected pattern).
    pub fn end_drag(&mut self) -> Option<(u32, u64)> {
        // Update drag started state based on threshold
        if let Some(_) = self.dragging {
            if !self.drag_started {
                let dx = self.mouse_x - self.drag_start.0;
                let dy = self.mouse_y - self.drag_start.1;
                if (dx * dx + dy * dy).sqrt() >= DRAG_THRESHOLD {
                    self.drag_started = true;
                }
            }
        }
        if self.mouse_released {
            // Only a drag that actually started yields a payload; a plain
            // click (below threshold) is discarded.
            let result = if self.drag_started { self.dragging } else { None };
            self.dragging = None;
            self.drag_started = false;
            result
        } else {
            None
        }
    }

    /// Declare a drop target region. If a drag is released over this target,
    /// returns the payload that was dropped. Otherwise returns `None`.
    ///
    /// Does not consume the drag state — callers still call `end_drag` to
    /// clear it (see the drop tests).
    pub fn drop_target(&mut self, _id: u32, x: f32, y: f32, w: f32, h: f32) -> Option<u64> {
        if self.mouse_released && self.drag_started {
            if self.mouse_in_rect(x, y, w, h) {
                if let Some((_src_id, payload)) = self.dragging {
                    return Some(payload);
                }
            }
        }
        None
    }
}
// Unit tests for the immediate-mode widgets. Each test drives the context
// frame-by-frame via `begin_frame`, mimicking a real event loop.
#[cfg(test)]
mod tests {
    use crate::ui_context::{Key, UiContext};

    #[test]
    fn test_button_returns_false_when_not_clicked() {
        let mut ctx = UiContext::new(800.0, 600.0);
        // Mouse is at (500, 500) — far from any button
        ctx.begin_frame(500.0, 500.0, false);
        let result = ctx.button("Click Me");
        assert!(!result, "button should return false when mouse is not over it");
    }

    #[test]
    fn test_button_returns_true_when_clicked() {
        let mut ctx = UiContext::new(800.0, 600.0);
        // Frame 1: mouse over button, pressed down
        // Button will be at layout cursor (0, 0) with some width/height
        // glyph_width=8, "OK"=2 chars, btn_w = 2*8 + 4*2 = 24, btn_h = 12 + 4*2 = 20
        ctx.begin_frame(10.0, 5.0, true);
        let _ = ctx.button("OK");
        // Frame 2: mouse still over button, released
        ctx.begin_frame(10.0, 5.0, false);
        let result = ctx.button("OK");
        assert!(result, "button should return true when clicked and released");
    }

    #[test]
    fn test_slider_returns_clamped_value() {
        let mut ctx = UiContext::new(800.0, 600.0);
        ctx.begin_frame(0.0, 0.0, false);
        // Value above max should be clamped
        let v = ctx.slider("test", 150.0, 0.0, 100.0);
        assert!((v - 100.0).abs() < 1e-6, "slider should clamp to max: got {}", v);
        ctx.begin_frame(0.0, 0.0, false);
        // Value below min should be clamped
        let v2 = ctx.slider("test", -10.0, 0.0, 100.0);
        assert!((v2 - 0.0).abs() < 1e-6, "slider should clamp to min: got {}", v2);
    }

    // ── Text Input Tests ──────────────────────────────────────────────
    #[test]
    fn test_text_input_basic_typing() {
        let mut ctx = UiContext::new(800.0, 600.0);
        let mut buf = String::new();
        // Click on the text input to focus it (at x=10, y=10, width=200)
        ctx.begin_frame(15.0, 15.0, true);
        ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
        // Now type some characters
        ctx.begin_frame(15.0, 15.0, false);
        ctx.input_char('H');
        ctx.input_char('i');
        let changed = ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
        assert!(changed);
        assert_eq!(buf, "Hi");
    }

    #[test]
    fn test_text_input_backspace() {
        let mut ctx = UiContext::new(800.0, 600.0);
        let mut buf = String::from("abc");
        // Focus — click far right so cursor goes to end (padding=4, gw=8, 3 chars → need x > 10+4+24=38)
        ctx.begin_frame(50.0, 15.0, true);
        ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
        // Backspace
        ctx.begin_frame(50.0, 15.0, false);
        ctx.input_key(Key::Backspace);
        let changed = ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
        assert!(changed);
        assert_eq!(buf, "ab");
    }

    #[test]
    fn test_text_input_cursor_movement() {
        let mut ctx = UiContext::new(800.0, 600.0);
        let mut buf = String::from("abc");
        // Focus — click far right so cursor goes to end
        ctx.begin_frame(50.0, 15.0, true);
        ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
        assert_eq!(ctx.cursor_pos, 3); // cursor at end of "abc"
        // Move cursor to beginning with Home
        ctx.begin_frame(15.0, 15.0, false);
        ctx.input_key(Key::Home);
        ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
        assert_eq!(ctx.cursor_pos, 0);
        // Type 'X' at beginning
        ctx.begin_frame(15.0, 15.0, false);
        ctx.input_char('X');
        let changed = ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
        assert!(changed);
        assert_eq!(buf, "Xabc");
        assert_eq!(ctx.cursor_pos, 1);
    }

    #[test]
    fn test_text_input_delete_key() {
        let mut ctx = UiContext::new(800.0, 600.0);
        let mut buf = String::from("abc");
        // Focus
        ctx.begin_frame(15.0, 15.0, true);
        ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
        // Move to Home, then Delete
        ctx.begin_frame(15.0, 15.0, false);
        ctx.input_key(Key::Home);
        ctx.input_key(Key::Delete);
        let changed = ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
        assert!(changed);
        assert_eq!(buf, "bc");
    }

    #[test]
    fn test_text_input_arrow_keys() {
        let mut ctx = UiContext::new(800.0, 600.0);
        let mut buf = String::from("hello");
        // Focus — click far right so cursor at end
        ctx.begin_frame(100.0, 15.0, true);
        ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
        // Left twice from end (pos 5→3)
        ctx.begin_frame(100.0, 15.0, false);
        ctx.input_key(Key::Left);
        ctx.input_key(Key::Left);
        ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
        assert_eq!(ctx.cursor_pos, 3);
        // Type 'X' at position 3
        ctx.begin_frame(100.0, 15.0, false);
        ctx.input_char('X');
        let changed = ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
        assert!(changed);
        assert_eq!(buf, "helXlo");
    }

    #[test]
    fn test_text_input_no_change_when_not_focused() {
        let mut ctx = UiContext::new(800.0, 600.0);
        let mut buf = String::from("test");
        // Don't click on the input — mouse at (500, 500) far away
        ctx.begin_frame(500.0, 500.0, true);
        ctx.input_char('X');
        let changed = ctx.text_input(1, &mut buf, 10.0, 10.0, 200.0);
        assert!(!changed);
        assert_eq!(buf, "test");
    }

    // ── Scroll Panel Tests ────────────────────────────────────────────
    #[test]
    fn test_scroll_offset_clamping() {
        let mut ctx = UiContext::new(800.0, 600.0);
        // Panel at (0,0), 200x100, content_height=300
        // Scroll down a lot
        ctx.begin_frame(100.0, 50.0, false);
        ctx.set_scroll(-100.0); // scroll down
        ctx.begin_scroll_panel(1, 0.0, 0.0, 200.0, 100.0, 300.0);
        ctx.end_scroll_panel();
        // max_scroll = 300 - 100 = 200; scroll should be clamped
        let scroll = ctx.scroll_offsets.get(&1).copied().unwrap_or(0.0);
        assert!(scroll >= 0.0 && scroll <= 200.0, "scroll={}", scroll);
    }

    #[test]
    fn test_scroll_offset_does_not_go_negative() {
        let mut ctx = UiContext::new(800.0, 600.0);
        // Scroll up when already at top
        ctx.begin_frame(100.0, 50.0, false);
        ctx.set_scroll(100.0); // scroll up
        ctx.begin_scroll_panel(1, 0.0, 0.0, 200.0, 100.0, 300.0);
        ctx.end_scroll_panel();
        let scroll = ctx.scroll_offsets.get(&1).copied().unwrap_or(0.0);
        assert!((scroll - 0.0).abs() < 1e-6, "scroll should be 0, got {}", scroll);
    }

    #[test]
    fn test_scroll_panel_content_clipping() {
        let mut ctx = UiContext::new(800.0, 600.0);
        ctx.begin_frame(100.0, 50.0, false);
        ctx.begin_scroll_panel(1, 10.0, 20.0, 200.0, 100.0, 300.0);
        // Draw some content inside
        ctx.text("Inside scroll");
        // Commands drawn inside should have a scissor rect
        let has_scissor = ctx.draw_list.commands.iter().any(|c| c.scissor.is_some());
        assert!(has_scissor, "commands inside scroll panel should have scissor rects");
        ctx.end_scroll_panel();
        // Commands drawn after end_scroll_panel should NOT have scissor
        let cmds_before = ctx.draw_list.commands.len();
        ctx.text("Outside scroll");
        let new_cmds = &ctx.draw_list.commands[cmds_before..];
        let has_scissor_after = new_cmds.iter().any(|c| c.scissor.is_some());
        assert!(!has_scissor_after, "commands after end_scroll_panel should not have scissor");
    }

    // ── Drag and Drop Tests ──────────────────────────────────────────
    #[test]
    fn test_drag_start_and_end() {
        let mut ctx = UiContext::new(800.0, 600.0);
        // Frame 1: mouse down — begin drag
        ctx.begin_frame(100.0, 100.0, true);
        ctx.begin_drag(1, 42);
        assert!(!ctx.is_dragging(), "should not be dragging yet (below threshold)");
        let _ = ctx.end_drag();
        // Frame 2: mouse moved past threshold, still down
        ctx.begin_frame(110.0, 100.0, true);
        let _ = ctx.end_drag();
        assert!(ctx.is_dragging(), "should be dragging after moving past threshold");
        // Frame 3: mouse released
        ctx.begin_frame(120.0, 100.0, false);
        let result = ctx.end_drag();
        assert!(result.is_some());
        let (src_id, payload) = result.unwrap();
        assert_eq!(src_id, 1);
        assert_eq!(payload, 42);
    }

    #[test]
    fn test_drop_on_target() {
        let mut ctx = UiContext::new(800.0, 600.0);
        // Frame 1: begin drag
        ctx.begin_frame(100.0, 100.0, true);
        ctx.begin_drag(1, 99);
        let _ = ctx.end_drag();
        // Frame 2: move past threshold
        ctx.begin_frame(110.0, 100.0, true);
        let _ = ctx.end_drag();
        // Frame 3: release over drop target at (200, 200, 50, 50)
        ctx.begin_frame(220.0, 220.0, false);
        let drop_result = ctx.drop_target(2, 200.0, 200.0, 50.0, 50.0);
        assert_eq!(drop_result, Some(99));
        let _ = ctx.end_drag();
    }

    #[test]
    fn test_drop_outside_target() {
        let mut ctx = UiContext::new(800.0, 600.0);
        // Frame 1: begin drag
        ctx.begin_frame(100.0, 100.0, true);
        ctx.begin_drag(1, 77);
        let _ = ctx.end_drag();
        // Frame 2: move past threshold
        ctx.begin_frame(110.0, 100.0, true);
        let _ = ctx.end_drag();
        // Frame 3: release far from drop target
        ctx.begin_frame(500.0, 500.0, false);
        let drop_result = ctx.drop_target(2, 200.0, 200.0, 50.0, 50.0);
        assert_eq!(drop_result, None);
    }
}

View File

@@ -0,0 +1,122 @@
// crates/voltex_math/src/aabb.rs
use crate::Vec3;
/// Axis-aligned bounding box described by its two extreme corners.
///
/// Invariant (assumed, not enforced): `min` is component-wise <= `max`.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct AABB {
    /// Corner with the smallest x/y/z.
    pub min: Vec3,
    /// Corner with the largest x/y/z.
    pub max: Vec3,
}
impl AABB {
    /// Build a box directly from its two corners.
    pub fn new(min: Vec3, max: Vec3) -> Self {
        AABB { min, max }
    }

    /// Build a box from a center point and per-axis half extents.
    pub fn from_center_half_extents(center: Vec3, half: Vec3) -> Self {
        AABB {
            min: center - half,
            max: center + half,
        }
    }

    /// Midpoint of the box.
    pub fn center(&self) -> Vec3 {
        (self.min + self.max) * 0.5
    }

    /// Per-axis half sizes of the box.
    pub fn half_extents(&self) -> Vec3 {
        (self.max - self.min) * 0.5
    }

    /// Inclusive containment test: points on a face count as inside.
    pub fn contains_point(&self, p: Vec3) -> bool {
        let on_x = self.min.x <= p.x && p.x <= self.max.x;
        let on_y = self.min.y <= p.y && p.y <= self.max.y;
        let on_z = self.min.z <= p.z && p.z <= self.max.z;
        on_x && on_y && on_z
    }

    /// Inclusive overlap test: boxes that merely touch count as intersecting.
    pub fn intersects(&self, other: &AABB) -> bool {
        let x = self.min.x <= other.max.x && self.max.x >= other.min.x;
        let y = self.min.y <= other.max.y && self.max.y >= other.min.y;
        let z = self.min.z <= other.max.z && self.max.z >= other.min.z;
        x && y && z
    }

    /// Smallest box enclosing both `self` and `other`.
    pub fn merged(&self, other: &AABB) -> AABB {
        let lo = Vec3::new(
            self.min.x.min(other.min.x),
            self.min.y.min(other.min.y),
            self.min.z.min(other.min.z),
        );
        let hi = Vec3::new(
            self.max.x.max(other.max.x),
            self.max.y.max(other.max.y),
            self.max.z.max(other.max.z),
        );
        AABB { min: lo, max: hi }
    }

    /// Total surface area: twice the sum of the three distinct face areas.
    pub fn surface_area(&self) -> f32 {
        let e = self.max - self.min;
        2.0 * (e.x * e.y + e.y * e.z + e.z * e.x)
    }
}
#[cfg(test)]
mod tests {
use super::*;
// Box symmetric about the origin: center ~= 0, half extents = |corner|.
#[test]
fn test_new_and_accessors() {
let a = AABB::new(Vec3::new(-1.0, -2.0, -3.0), Vec3::new(1.0, 2.0, 3.0));
let c = a.center();
assert!((c.x).abs() < 1e-6);
assert!((c.y).abs() < 1e-6);
assert!((c.z).abs() < 1e-6);
let h = a.half_extents();
assert!((h.x - 1.0).abs() < 1e-6);
assert!((h.y - 2.0).abs() < 1e-6);
assert!((h.z - 3.0).abs() < 1e-6);
}
// center +/- half must land exactly on the corners.
#[test]
fn test_from_center_half_extents() {
let a = AABB::from_center_half_extents(Vec3::new(5.0, 5.0, 5.0), Vec3::new(1.0, 1.0, 1.0));
assert_eq!(a.min, Vec3::new(4.0, 4.0, 4.0));
assert_eq!(a.max, Vec3::new(6.0, 6.0, 6.0));
}
// Boundary points (e.g. Vec3::ZERO, the min corner) count as contained.
#[test]
fn test_contains_point() {
let a = AABB::new(Vec3::ZERO, Vec3::new(2.0, 2.0, 2.0));
assert!(a.contains_point(Vec3::new(1.0, 1.0, 1.0)));
assert!(a.contains_point(Vec3::ZERO));
assert!(!a.contains_point(Vec3::new(3.0, 1.0, 1.0)));
}
// Covers overlap, disjoint, and face-touching (d shares the x=2 plane with a).
#[test]
fn test_intersects() {
let a = AABB::new(Vec3::ZERO, Vec3::new(2.0, 2.0, 2.0));
let b = AABB::new(Vec3::new(1.0, 1.0, 1.0), Vec3::new(3.0, 3.0, 3.0));
assert!(a.intersects(&b));
let c = AABB::new(Vec3::new(5.0, 5.0, 5.0), Vec3::new(6.0, 6.0, 6.0));
assert!(!a.intersects(&c));
let d = AABB::new(Vec3::new(2.0, 0.0, 0.0), Vec3::new(3.0, 2.0, 2.0));
assert!(a.intersects(&d));
}
// Merge of two disjoint boxes spans from the lowest min to the highest max.
#[test]
fn test_merged() {
let a = AABB::new(Vec3::ZERO, Vec3::ONE);
let b = AABB::new(Vec3::new(2.0, 2.0, 2.0), Vec3::new(3.0, 3.0, 3.0));
let m = a.merged(&b);
assert_eq!(m.min, Vec3::ZERO);
assert_eq!(m.max, Vec3::new(3.0, 3.0, 3.0));
}
// Cube of side 2: 6 faces * 4 area = 24.
#[test]
fn test_surface_area() {
let a = AABB::new(Vec3::ZERO, Vec3::new(2.0, 2.0, 2.0));
assert!((a.surface_area() - 24.0).abs() < 1e-6);
}
}

View File

@@ -2,8 +2,12 @@ pub mod vec2;
pub mod vec3;
pub mod vec4;
pub mod mat4;
pub mod aabb;
pub mod ray;
pub use vec2::Vec2;
pub use vec3::Vec3;
pub use vec4::Vec4;
pub use mat4::Mat4;
pub use aabb::AABB;
pub use ray::Ray;

View File

@@ -174,6 +174,64 @@ impl Mat4 {
)
}
/// Compute the inverse of this matrix. Returns `None` if the matrix is singular.
pub fn inverse(&self) -> Option<Self> {
let m = &self.cols;
// Flatten to row-major for cofactor expansion
// m[col][row] — so element (row, col) = m[col][row]
let e = |r: usize, c: usize| -> f32 { m[c][r] };
// Compute cofactors using 2x2 determinants
// s0..s5 are the 2x2 sub-determinants of the top two rows, c0..c5 of the
// bottom two rows (the classic Laplace-expansion scheme used by MESA/GLU).
let s0 = e(0,0) * e(1,1) - e(1,0) * e(0,1);
let s1 = e(0,0) * e(1,2) - e(1,0) * e(0,2);
let s2 = e(0,0) * e(1,3) - e(1,0) * e(0,3);
let s3 = e(0,1) * e(1,2) - e(1,1) * e(0,2);
let s4 = e(0,1) * e(1,3) - e(1,1) * e(0,3);
let s5 = e(0,2) * e(1,3) - e(1,2) * e(0,3);
let c5 = e(2,2) * e(3,3) - e(3,2) * e(2,3);
let c4 = e(2,1) * e(3,3) - e(3,1) * e(2,3);
let c3 = e(2,1) * e(3,2) - e(3,1) * e(2,2);
let c2 = e(2,0) * e(3,3) - e(3,0) * e(2,3);
let c1 = e(2,0) * e(3,2) - e(3,0) * e(2,2);
let c0 = e(2,0) * e(3,1) - e(3,0) * e(2,1);
// Full 4x4 determinant assembled from the paired 2x2 sub-determinants.
let det = s0 * c5 - s1 * c4 + s2 * c3 + s3 * c2 - s4 * c1 + s5 * c0;
// Singular below a fixed epsilon.
// NOTE(review): threshold is absolute, not scaled to matrix magnitude —
// confirm callers never pass matrices with very small/large entries.
if det.abs() < 1e-12 {
return None;
}
let inv_det = 1.0 / det;
// Adjugate matrix (transposed cofactor matrix), stored column-major
let inv = Self::from_cols(
[
( e(1,1) * c5 - e(1,2) * c4 + e(1,3) * c3) * inv_det,
(-e(0,1) * c5 + e(0,2) * c4 - e(0,3) * c3) * inv_det,
( e(3,1) * s5 - e(3,2) * s4 + e(3,3) * s3) * inv_det,
(-e(2,1) * s5 + e(2,2) * s4 - e(2,3) * s3) * inv_det,
],
[
(-e(1,0) * c5 + e(1,2) * c2 - e(1,3) * c1) * inv_det,
( e(0,0) * c5 - e(0,2) * c2 + e(0,3) * c1) * inv_det,
(-e(3,0) * s5 + e(3,2) * s2 - e(3,3) * s1) * inv_det,
( e(2,0) * s5 - e(2,2) * s2 + e(2,3) * s1) * inv_det,
],
[
( e(1,0) * c4 - e(1,1) * c2 + e(1,3) * c0) * inv_det,
(-e(0,0) * c4 + e(0,1) * c2 - e(0,3) * c0) * inv_det,
( e(3,0) * s4 - e(3,1) * s2 + e(3,3) * s0) * inv_det,
(-e(2,0) * s4 + e(2,1) * s2 - e(2,3) * s0) * inv_det,
],
[
(-e(1,0) * c3 + e(1,1) * c1 - e(1,2) * c0) * inv_det,
( e(0,0) * c3 - e(0,1) * c1 + e(0,2) * c0) * inv_det,
(-e(3,0) * s3 + e(3,1) * s1 - e(3,2) * s0) * inv_det,
( e(2,0) * s3 - e(2,1) * s1 + e(2,2) * s0) * inv_det,
],
);
Some(inv)
}
/// Return the transpose of this matrix.
pub fn transpose(&self) -> Self {
let c = &self.cols;

View File

@@ -0,0 +1,46 @@
use crate::Vec3;
/// A half-line with an origin and a direction of travel.
#[derive(Debug, Clone, Copy)]
pub struct Ray {
/// Starting point of the ray.
pub origin: Vec3,
/// Direction of travel; `Ray::new` normalizes it, but direct struct
/// construction can bypass that invariant.
pub direction: Vec3,
}
impl Ray {
pub fn new(origin: Vec3, direction: Vec3) -> Self {
Self {
origin,
direction: direction.normalize(),
}
}
pub fn at(&self, t: f32) -> Vec3 {
self.origin + self.direction * t
}
}
#[cfg(test)]
mod tests {
use super::*;
// Scalar approximate-equality helper for float comparisons.
fn approx(a: f32, b: f32) -> bool {
(a - b).abs() < 1e-5
}
// Component-wise approximate equality for Vec3.
fn approx_vec(a: Vec3, b: Vec3) -> bool {
approx(a.x, b.x) && approx(a.y, b.y) && approx(a.z, b.z)
}
// A non-unit input direction must come out unit length.
#[test]
fn test_new_normalizes_direction() {
let r = Ray::new(Vec3::ZERO, Vec3::new(3.0, 0.0, 0.0));
assert!(approx_vec(r.direction, Vec3::X));
}
// at(t) walks t units along the (unit) direction from the origin.
#[test]
fn test_at() {
let r = Ray::new(Vec3::new(1.0, 2.0, 3.0), Vec3::X);
let p = r.at(5.0);
assert!(approx_vec(p, Vec3::new(6.0, 2.0, 3.0)));
}
}

View File

@@ -0,0 +1,6 @@
[package]
name = "voltex_net"
version = "0.1.0"
edition = "2021"
[dependencies]

View File

@@ -0,0 +1,96 @@
use std::net::SocketAddr;
use crate::packet::Packet;
use crate::socket::NetSocket;
/// Events produced by the client during polling.
pub enum ClientEvent {
/// The server accepted our Connect; carries the id it assigned us.
Connected { client_id: u32 },
/// The session ended (server sent a Disconnect).
Disconnected,
/// Any other packet, forwarded to the application layer.
PacketReceived { packet: Packet },
}
/// A non-blocking UDP client.
pub struct NetClient {
// Bound UDP socket used for all traffic.
socket: NetSocket,
// Address all outgoing packets are sent to.
server_addr: SocketAddr,
// Assigned by the server via Accept; None until then and after disconnect.
client_id: Option<u32>,
// Display name carried in the Connect packet.
name: String,
}
impl NetClient {
    /// Create and bind a new client socket.
    ///
    /// - `local_addr`: local bind address (e.g. "127.0.0.1:0")
    /// - `server_addr`: the server's `SocketAddr`
    /// - `name`: client display name used in Connect packet
    pub fn new(local_addr: &str, server_addr: SocketAddr, name: &str) -> Result<Self, String> {
        let socket = NetSocket::bind(local_addr)?;
        Ok(NetClient {
            socket,
            server_addr,
            client_id: None,
            name: name.to_string(),
        })
    }

    /// Send a Connect packet (carrying this client's display name) to the server.
    pub fn connect(&self) -> Result<(), String> {
        let packet = Packet::Connect {
            client_name: self.name.clone(),
        };
        self.socket.send_to(&packet, self.server_addr)
    }

    /// Poll for incoming packets and return any resulting client events.
    ///
    /// Accept/Disconnect update the connection state; everything else is
    /// surfaced unchanged as `PacketReceived`.
    pub fn poll(&mut self) -> Vec<ClientEvent> {
        let mut events = Vec::new();
        while let Some((packet, _addr)) = self.socket.recv_from() {
            // Match by value so the catch-all arm can forward the packet
            // without cloning it (the previous `match &packet` forced a
            // clone of every forwarded payload).
            match packet {
                Packet::Accept { client_id } => {
                    self.client_id = Some(client_id);
                    events.push(ClientEvent::Connected { client_id });
                }
                Packet::Disconnect { .. } => {
                    self.client_id = None;
                    events.push(ClientEvent::Disconnected);
                }
                other => {
                    events.push(ClientEvent::PacketReceived { packet: other });
                }
            }
        }
        events
    }

    /// Send an arbitrary packet to the server.
    pub fn send(&self, packet: Packet) -> Result<(), String> {
        self.socket.send_to(&packet, self.server_addr)
    }

    /// Returns true if the client has received an Accept from the server.
    pub fn is_connected(&self) -> bool {
        self.client_id.is_some()
    }

    /// Returns the client id assigned by the server, or None if not yet connected.
    pub fn client_id(&self) -> Option<u32> {
        self.client_id
    }

    /// Send a Disconnect packet to the server and clear local state.
    /// No-op when not connected.
    pub fn disconnect(&mut self) -> Result<(), String> {
        if let Some(id) = self.client_id {
            let packet = Packet::Disconnect { client_id: id };
            self.socket.send_to(&packet, self.server_addr)?;
            self.client_id = None;
        }
        Ok(())
    }
}

View File

@@ -0,0 +1,140 @@
/// Simple XOR cipher with rotating key + sequence counter.
///
/// NOTE(review): an XOR keystream provides obfuscation only, not real
/// confidentiality or integrity — do not rely on this as secure crypto.
pub struct PacketCipher {
    key: Vec<u8>,
    send_counter: u64,
    recv_counter: u64,
}

impl PacketCipher {
    /// Create a cipher from a shared base key.
    ///
    /// # Panics
    /// Panics if `key` is empty.
    pub fn new(key: &[u8]) -> Self {
        assert!(!key.is_empty(), "encryption key must not be empty");
        Self {
            key: key.to_vec(),
            send_counter: 0,
            recv_counter: 0,
        }
    }

    /// Encrypt `plaintext`, producing `[seq(8 LE), xored payload...]`.
    /// Each call consumes one send sequence number.
    pub fn encrypt(&mut self, plaintext: &[u8]) -> Vec<u8> {
        let seq = self.send_counter;
        self.send_counter += 1;
        let keystream = self.derive_key(seq);
        let mut packet = Vec::with_capacity(8 + plaintext.len());
        packet.extend_from_slice(&seq.to_le_bytes());
        packet.extend(
            plaintext
                .iter()
                .zip(keystream.iter().cycle())
                .map(|(&p, &k)| p ^ k),
        );
        packet
    }

    /// Decrypt a packet produced by `encrypt`.
    ///
    /// Anti-replay: rejects any sequence number older than the newest one
    /// accepted so far, which also drops legitimate packets that arrive
    /// out of order.
    pub fn decrypt(&mut self, ciphertext: &[u8]) -> Result<Vec<u8>, String> {
        let Some(header) = ciphertext.get(0..8) else {
            return Err("packet too short".to_string());
        };
        let seq = u64::from_le_bytes(header.try_into().unwrap());
        if seq < self.recv_counter {
            return Err(format!("replay detected: got seq {}, expected >= {}", seq, self.recv_counter));
        }
        self.recv_counter = seq + 1;
        let keystream = self.derive_key(seq);
        let plaintext: Vec<u8> = ciphertext[8..]
            .iter()
            .zip(keystream.iter().cycle())
            .map(|(&c, &k)| c ^ k)
            .collect();
        Ok(plaintext)
    }

    /// Derive a per-packet keystream: each key byte shifted by the
    /// corresponding (cycled) little-endian counter byte.
    fn derive_key(&self, counter: u64) -> Vec<u8> {
        let ctr = counter.to_le_bytes();
        self.key
            .iter()
            .zip(ctr.iter().cycle())
            .map(|(&k, &c)| k.wrapping_add(c))
            .collect()
    }
}
/// Simple token-based authentication.
///
/// NOTE(review): the token is just (player_id, expires_at) XOR-ed with the
/// secret — not a real MAC, and forgeable by anyone who has seen one token
/// for a known id/expiry. Treat as a placeholder only.
pub struct AuthToken {
    /// Id of the player this token authenticates.
    pub player_id: u32,
    /// Opaque token bytes: player_id(4 LE) + expires_at(8 LE f64), XOR-ed with the secret.
    pub token: Vec<u8>,
    pub expires_at: f64, // timestamp
}

impl AuthToken {
    /// Generate a simple auth token binding `player_id` and `expires_at`
    /// under `secret`.
    ///
    /// # Panics
    /// Panics if `secret` is empty (explicit check, consistent with
    /// `PacketCipher::new`; previously this died with an opaque
    /// divide-by-zero in the `%` below).
    pub fn generate(player_id: u32, secret: &[u8], expires_at: f64) -> Self {
        assert!(!secret.is_empty(), "auth secret must not be empty");
        let mut token = Vec::new();
        token.extend_from_slice(&player_id.to_le_bytes());
        token.extend_from_slice(&expires_at.to_le_bytes());
        // Simple HMAC-like: XOR with secret
        for (i, byte) in token.iter_mut().enumerate() {
            *byte ^= secret[i % secret.len()];
        }
        AuthToken { player_id, token, expires_at }
    }

    /// Validate token against secret at `current_time`.
    /// Returns false when expired or when the token bytes don't match.
    pub fn validate(&self, secret: &[u8], current_time: f64) -> bool {
        if current_time > self.expires_at { return false; }
        // Re-derive and compare. NOTE(review): `==` on Vec<u8> is not a
        // constant-time comparison; acceptable for this toy scheme only.
        let expected = AuthToken::generate(self.player_id, secret, self.expires_at);
        self.token == expected.token
    }
}
#[cfg(test)]
mod tests {
use super::*;
// Two ciphers with the same key stay in counter lockstep, so a message
// encrypted by one decrypts on the other.
#[test]
fn test_encrypt_decrypt_roundtrip() {
let key = b"secret_key_1234";
let mut encryptor = PacketCipher::new(key);
let mut decryptor = PacketCipher::new(key);
let msg = b"hello world";
let encrypted = encryptor.encrypt(msg);
let decrypted = decryptor.decrypt(&encrypted).unwrap();
assert_eq!(&decrypted, msg);
}
// The payload after the 8-byte header must not equal the plaintext.
#[test]
fn test_encrypted_differs_from_plain() {
let mut cipher = PacketCipher::new(b"key");
let msg = b"test message";
let encrypted = cipher.encrypt(msg);
assert_ne!(&encrypted[8..], msg); // ciphertext differs
}
// Once seq=1 is accepted, the older seq=0 packet must be rejected.
#[test]
fn test_replay_rejected() {
let key = b"key";
let mut enc = PacketCipher::new(key);
let mut dec = PacketCipher::new(key);
let pkt1 = enc.encrypt(b"first");
let pkt2 = enc.encrypt(b"second");
let _ = dec.decrypt(&pkt2).unwrap(); // accept pkt2 (seq=1)
let result = dec.decrypt(&pkt1); // pkt1 has seq=0 < expected 2
assert!(result.is_err());
}
// Before expiry and with the right secret, validation succeeds.
#[test]
fn test_auth_token_valid() {
let secret = b"server_secret";
let token = AuthToken::generate(42, secret, 1000.0);
assert!(token.validate(secret, 999.0));
}
// A current_time past expires_at fails regardless of the secret.
#[test]
fn test_auth_token_expired() {
let secret = b"server_secret";
let token = AuthToken::generate(42, secret, 1000.0);
assert!(!token.validate(secret, 1001.0));
}
// A different secret produces different token bytes, so validation fails.
#[test]
fn test_auth_token_wrong_secret() {
let token = AuthToken::generate(42, b"correct", 1000.0);
assert!(!token.validate(b"wronggg", 999.0));
}
}

View File

@@ -0,0 +1,234 @@
use std::collections::VecDeque;
use crate::snapshot::{EntityState, Snapshot};
/// Buffers recent snapshots and interpolates between them for smooth rendering.
pub struct InterpolationBuffer {
// (server_time, snapshot) pairs in push order; interpolation assumes
// timestamps are non-decreasing — TODO confirm callers push monotonically.
snapshots: VecDeque<(f64, Snapshot)>,
/// Render delay behind the latest server time (seconds).
interp_delay: f64,
/// Maximum number of snapshots to keep in the buffer.
max_snapshots: usize,
}
impl InterpolationBuffer {
    /// Create a new interpolation buffer with the given delay in seconds.
    pub fn new(interp_delay: f64) -> Self {
        Self {
            snapshots: VecDeque::new(),
            interp_delay,
            max_snapshots: 32,
        }
    }

    /// Push a new snapshot with its server timestamp, evicting the oldest
    /// entries once the buffer exceeds its capacity.
    pub fn push(&mut self, server_time: f64, snapshot: Snapshot) {
        self.snapshots.push_back((server_time, snapshot));
        while self.snapshots.len() > self.max_snapshots {
            self.snapshots.pop_front();
        }
    }

    /// Interpolate to produce a snapshot for the given render_time.
    ///
    /// The render_time should be `current_server_time - interp_delay`.
    /// Returns None if there are fewer than 2 snapshots or render_time
    /// is before all buffered snapshots; returns the latest snapshot
    /// unchanged when render_time is past everything buffered.
    pub fn interpolate(&self, render_time: f64) -> Option<Snapshot> {
        if self.snapshots.len() < 2 {
            return None;
        }
        // Index of the first snapshot strictly newer than render_time.
        let first_after = self.snapshots.iter().position(|(t, _)| *t > render_time);
        match first_after {
            // render_time precedes every buffered snapshot.
            Some(0) => None,
            Some(i) => {
                let (t0, snap0) = &self.snapshots[i - 1];
                let (t1, snap1) = &self.snapshots[i];
                let span = t1 - t0;
                if span <= 0.0 {
                    // Degenerate bracket: fall back to the earlier snapshot.
                    Some(snap0.clone())
                } else {
                    let alpha = ((render_time - t0) / span).clamp(0.0, 1.0) as f32;
                    Some(lerp_snapshots(snap0, snap1, alpha))
                }
            }
            // render_time is beyond all snapshots — hold the latest.
            None => self.snapshots.back().map(|(_, s)| s.clone()),
        }
    }

    /// Get the interpolation delay.
    pub fn delay(&self) -> f64 {
        self.interp_delay
    }
}
// Scalar linear interpolation: a at t=0, b at t=1.
fn lerp(a: f32, b: f32, t: f32) -> f32 {
    let delta = b - a;
    a + delta * t
}
// Component-wise linear interpolation of a 3-component array.
fn lerp_f32x3(a: &[f32; 3], b: &[f32; 3], t: f32) -> [f32; 3] {
    let mut out = [0.0f32; 3];
    for i in 0..3 {
        out[i] = a[i] + (b[i] - a[i]) * t;
    }
    out
}
// Component-wise blend of one entity's state; the id is taken from `a`.
// NOTE(review): rotation is lerped per component with no angle wrapping —
// fine for small deltas, wrong across the +/-180° seam; confirm intent.
fn lerp_entity(a: &EntityState, b: &EntityState, t: f32) -> EntityState {
EntityState {
id: a.id,
position: lerp_f32x3(&a.position, &b.position, t),
rotation: lerp_f32x3(&a.rotation, &b.rotation, t),
velocity: lerp_f32x3(&a.velocity, &b.velocity, t),
}
}
/// Linearly interpolate between two snapshots.
/// Entities are matched by id. Entities only in one snapshot are included as-is.
fn lerp_snapshots(a: &Snapshot, b: &Snapshot, t: f32) -> Snapshot {
    use std::collections::{HashMap, HashSet};
    let by_id_b: HashMap<u32, &EntityState> = b.entities.iter().map(|e| (e.id, e)).collect();
    let ids_a: HashSet<u32> = a.entities.iter().map(|e| e.id).collect();
    let mut entities = Vec::new();
    // Entities from `a`: blend when `b` also has them, else carry over.
    for ea in &a.entities {
        match by_id_b.get(&ea.id) {
            Some(eb) => entities.push(lerp_entity(ea, eb, t)),
            None => entities.push(ea.clone()),
        }
    }
    // Entities that exist only in `b` are appended unchanged.
    for eb in &b.entities {
        if !ids_a.contains(&eb.id) {
            entities.push(eb.clone());
        }
    }
    // Blend the tick index as well (result truncates toward zero).
    let tick = (a.tick as f64 + (b.tick as f64 - a.tick as f64) * t as f64) as u32;
    Snapshot { tick, entities }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::snapshot::EntityState;
// Helper: single-entity snapshot with the entity at x on the x-axis.
fn make_snapshot(tick: u32, x: f32) -> Snapshot {
Snapshot {
tick,
entities: vec![EntityState {
id: 1,
position: [x, 0.0, 0.0],
rotation: [0.0, 0.0, 0.0],
velocity: [0.0, 0.0, 0.0],
}],
}
}
// render_time exactly at the first snapshot yields that snapshot's state.
#[test]
fn test_exact_match_at_snapshot_time() {
let mut buf = InterpolationBuffer::new(0.1);
buf.push(0.0, make_snapshot(0, 0.0));
buf.push(0.1, make_snapshot(1, 10.0));
let result = buf.interpolate(0.0).expect("should interpolate");
assert_eq!(result.entities[0].position[0], 0.0);
}
// Halfway between t=0 (x=0) and t=1 (x=10) gives x ~= 5.
#[test]
fn test_midpoint_interpolation() {
let mut buf = InterpolationBuffer::new(0.1);
buf.push(0.0, make_snapshot(0, 0.0));
buf.push(1.0, make_snapshot(10, 10.0));
let result = buf.interpolate(0.5).expect("should interpolate");
let x = result.entities[0].position[0];
assert!(
(x - 5.0).abs() < 0.001,
"Expected ~5.0 at midpoint, got {}",
x
);
}
// Non-midpoint alpha: a quarter of the way toward x=100 is ~25.
#[test]
fn test_interpolation_at_quarter() {
let mut buf = InterpolationBuffer::new(0.1);
buf.push(0.0, make_snapshot(0, 0.0));
buf.push(1.0, make_snapshot(10, 100.0));
let result = buf.interpolate(0.25).unwrap();
let x = result.entities[0].position[0];
assert!(
(x - 25.0).abs() < 0.01,
"Expected ~25.0 at 0.25, got {}",
x
);
}
// Past the newest snapshot the buffer holds (does not extrapolate).
#[test]
fn test_extrapolation_returns_latest() {
let mut buf = InterpolationBuffer::new(0.1);
buf.push(0.0, make_snapshot(0, 0.0));
buf.push(1.0, make_snapshot(10, 10.0));
// render_time beyond all snapshots
let result = buf.interpolate(2.0).expect("should return latest");
assert_eq!(result.entities[0].position[0], 10.0);
}
// Interpolation needs a bracket, so 0 or 1 snapshots yield None.
#[test]
fn test_too_few_snapshots_returns_none() {
let mut buf = InterpolationBuffer::new(0.1);
assert!(buf.interpolate(0.0).is_none());
buf.push(0.0, make_snapshot(0, 0.0));
assert!(buf.interpolate(0.0).is_none());
}
// A render_time older than everything buffered also yields None.
#[test]
fn test_render_time_before_all_snapshots() {
let mut buf = InterpolationBuffer::new(0.1);
buf.push(1.0, make_snapshot(10, 10.0));
buf.push(2.0, make_snapshot(20, 20.0));
// render_time before the first snapshot
let result = buf.interpolate(0.0);
assert!(result.is_none());
}
// With three snapshots, the bracket [1.0, 2.0] must be chosen, not [0.0, 1.0].
#[test]
fn test_multiple_snapshots_picks_correct_bracket() {
let mut buf = InterpolationBuffer::new(0.1);
buf.push(0.0, make_snapshot(0, 0.0));
buf.push(1.0, make_snapshot(1, 10.0));
buf.push(2.0, make_snapshot(2, 20.0));
// Should interpolate between snapshot at t=1 and t=2
let result = buf.interpolate(1.5).unwrap();
let x = result.entities[0].position[0];
assert!(
(x - 15.0).abs() < 0.01,
"Expected ~15.0, got {}",
x
);
}
}

View File

@@ -0,0 +1,140 @@
use std::collections::VecDeque;
/// A timestamped snapshot of entity positions for lag compensation.
#[derive(Debug, Clone)]
pub struct HistoryEntry {
    pub tick: u64,
    pub timestamp: f32, // seconds
    pub positions: Vec<([f32; 3], u32)>, // (position, entity_id)
}

/// Stores recent world state history for server-side lag compensation.
pub struct LagCompensation {
    history: VecDeque<HistoryEntry>,
    pub max_history_ms: f32, // max history duration (e.g., 200ms)
}

impl LagCompensation {
    /// Create a compensator retaining roughly `max_history_ms` of history.
    pub fn new(max_history_ms: f32) -> Self {
        LagCompensation { history: VecDeque::new(), max_history_ms }
    }

    /// Record current world state and prune entries older than the window.
    ///
    /// The prune cutoff is derived from the newest (back) entry, so
    /// timestamps are assumed to be recorded in non-decreasing order.
    pub fn record(&mut self, entry: HistoryEntry) {
        self.history.push_back(entry);
        // Prune old entries
        let cutoff = self.history.back().map(|e| e.timestamp - self.max_history_ms / 1000.0).unwrap_or(0.0);
        while let Some(front) = self.history.front() {
            if front.timestamp < cutoff {
                self.history.pop_front();
            } else {
                break;
            }
        }
    }

    /// Find the history entry whose timestamp is closest to `timestamp`.
    pub fn rewind(&self, timestamp: f32) -> Option<&HistoryEntry> {
        let mut best: Option<&HistoryEntry> = None;
        let mut best_diff = f32::MAX;
        for entry in &self.history {
            let diff = (entry.timestamp - timestamp).abs();
            if diff < best_diff {
                best_diff = diff;
                best = Some(entry);
            }
        }
        best
    }

    /// Interpolate entity positions at `timestamp` between the two entries
    /// bracketing it.
    ///
    /// Positions are paired by entity id (an entity present in only one of
    /// the two entries is omitted from the result). Falls back to the
    /// closest single entry when no bracket exists.
    pub fn rewind_interpolated(&self, timestamp: f32) -> Option<Vec<([f32; 3], u32)>> {
        use std::collections::HashMap;
        if self.history.len() < 2 { return self.rewind(timestamp).map(|e| e.positions.clone()); }
        // Find the two entries bracketing the timestamp
        let mut before: Option<&HistoryEntry> = None;
        let mut after: Option<&HistoryEntry> = None;
        for entry in &self.history {
            if entry.timestamp <= timestamp {
                before = Some(entry);
            } else {
                after = Some(entry);
                break;
            }
        }
        match (before, after) {
            (Some(b), Some(a)) => {
                let t = if (a.timestamp - b.timestamp).abs() > 1e-6 {
                    (timestamp - b.timestamp) / (a.timestamp - b.timestamp)
                } else { 0.0 };
                // Fix: pair positions by entity id, not by vector index —
                // the two entries may list entities in different orders, in
                // which case index pairing blended unrelated entities.
                let later: HashMap<u32, &[f32; 3]> =
                    a.positions.iter().map(|(pos, id)| (*id, pos)).collect();
                let mut result = Vec::new();
                for (pos_b, id) in &b.positions {
                    if let Some(pos_a) = later.get(id) {
                        let lerped = [
                            pos_b[0] + (pos_a[0] - pos_b[0]) * t,
                            pos_b[1] + (pos_a[1] - pos_b[1]) * t,
                            pos_b[2] + (pos_a[2] - pos_b[2]) * t,
                        ];
                        result.push((lerped, *id));
                    }
                }
                Some(result)
            }
            _ => self.rewind(timestamp).map(|e| e.positions.clone()),
        }
    }

    /// Number of retained history entries.
    pub fn history_len(&self) -> usize { self.history.len() }
}
#[cfg(test)]
mod tests {
use super::*;
// rewind(0.0) must return the entry recorded at t=0.0 (tick 1).
#[test]
fn test_record_and_rewind() {
let mut lc = LagCompensation::new(200.0);
lc.record(HistoryEntry { tick: 1, timestamp: 0.0, positions: vec![([1.0, 0.0, 0.0], 0)] });
lc.record(HistoryEntry { tick: 2, timestamp: 0.016, positions: vec![([2.0, 0.0, 0.0], 0)] });
let entry = lc.rewind(0.0).unwrap();
assert_eq!(entry.tick, 1);
}
// Recording past the 100ms window must evict the oldest entries.
#[test]
fn test_prune_old() {
let mut lc = LagCompensation::new(100.0); // 100ms
for i in 0..20 {
lc.record(HistoryEntry { tick: i, timestamp: i as f32 * 0.016, positions: vec![] });
}
// At t=0.304, cutoff = 0.304 - 0.1 = 0.204
// Entries before t=0.204 should be pruned
assert!(lc.history_len() < 20);
}
// rewind picks the entry with the smallest absolute time difference.
#[test]
fn test_rewind_closest() {
let mut lc = LagCompensation::new(500.0);
lc.record(HistoryEntry { tick: 1, timestamp: 0.0, positions: vec![] });
lc.record(HistoryEntry { tick: 2, timestamp: 0.1, positions: vec![] });
lc.record(HistoryEntry { tick: 3, timestamp: 0.2, positions: vec![] });
let entry = lc.rewind(0.09).unwrap();
assert_eq!(entry.tick, 2); // closest to 0.1
}
// Halfway between x=0 (t=0.0) and x=10 (t=0.1) the position is ~5.
#[test]
fn test_rewind_interpolated() {
let mut lc = LagCompensation::new(500.0);
lc.record(HistoryEntry { tick: 1, timestamp: 0.0, positions: vec![([0.0, 0.0, 0.0], 0)] });
lc.record(HistoryEntry { tick: 2, timestamp: 0.1, positions: vec![([10.0, 0.0, 0.0], 0)] });
let interp = lc.rewind_interpolated(0.05).unwrap();
assert!((interp[0].0[0] - 5.0).abs() < 0.1); // midpoint
}
// With nothing recorded there is nothing to rewind to.
#[test]
fn test_empty_history() {
let lc = LagCompensation::new(200.0);
assert!(lc.rewind(0.0).is_none());
}
}

View File

@@ -0,0 +1,19 @@
pub mod packet;
pub mod socket;
pub mod server;
pub mod client;
pub mod reliable;
pub mod snapshot;
pub mod interpolation;
pub mod lag_compensation;
pub mod encryption;
pub use packet::Packet;
pub use socket::NetSocket;
pub use server::{NetServer, ServerEvent, ClientInfo};
pub use client::{NetClient, ClientEvent};
pub use reliable::{ReliableChannel, OrderedChannel};
pub use snapshot::{Snapshot, EntityState, serialize_snapshot, deserialize_snapshot, diff_snapshots, apply_diff};
pub use interpolation::InterpolationBuffer;
pub use lag_compensation::LagCompensation;
pub use encryption::{PacketCipher, AuthToken};

View File

@@ -0,0 +1,304 @@
/// Packet type IDs
const TYPE_CONNECT: u8 = 1;
const TYPE_ACCEPT: u8 = 2;
const TYPE_DISCONNECT: u8 = 3;
const TYPE_PING: u8 = 4;
const TYPE_PONG: u8 = 5;
const TYPE_USER_DATA: u8 = 6;
const TYPE_RELIABLE: u8 = 7;
const TYPE_ACK: u8 = 8;
const TYPE_SNAPSHOT: u8 = 9;
const TYPE_SNAPSHOT_DELTA: u8 = 10;
/// Header size: type_id(1) + payload_len(2 LE) + reserved(1) = 4 bytes
const HEADER_SIZE: usize = 4;
/// All packet variants for the Voltex network protocol.
#[derive(Debug, Clone, PartialEq)]
pub enum Packet {
Connect { client_name: String },
Accept { client_id: u32 },
Disconnect { client_id: u32 },
Ping { timestamp: u64 },
Pong { timestamp: u64 },
UserData { client_id: u32, data: Vec<u8> },
Reliable { sequence: u16, data: Vec<u8> },
Ack { sequence: u16 },
Snapshot { tick: u32, data: Vec<u8> },
SnapshotDelta { base_tick: u32, tick: u32, data: Vec<u8> },
}
impl Packet {
/// Serialize the packet into bytes: [type_id(1), payload_len(2 LE), reserved(1), payload...]
pub fn to_bytes(&self) -> Vec<u8> {
let payload = self.encode_payload();
let payload_len = payload.len() as u16;
let mut buf = Vec::with_capacity(HEADER_SIZE + payload.len());
buf.push(self.type_id());
buf.extend_from_slice(&payload_len.to_le_bytes());
buf.push(0u8); // reserved
buf.extend_from_slice(&payload);
buf
}
/// Deserialize a packet from bytes.
pub fn from_bytes(data: &[u8]) -> Result<Packet, String> {
if data.len() < HEADER_SIZE {
return Err(format!(
"Buffer too short for header: {} bytes",
data.len()
));
}
let type_id = data[0];
let payload_len = u16::from_le_bytes([data[1], data[2]]) as usize;
// data[3] is reserved, ignored
if data.len() < HEADER_SIZE + payload_len {
return Err(format!(
"Buffer too short: expected {} bytes, got {}",
HEADER_SIZE + payload_len,
data.len()
));
}
let payload = &data[HEADER_SIZE..HEADER_SIZE + payload_len];
match type_id {
TYPE_CONNECT => {
if payload.len() < 2 {
return Err("Connect payload too short".to_string());
}
let name_len = u16::from_le_bytes([payload[0], payload[1]]) as usize;
if payload.len() < 2 + name_len {
return Err("Connect name bytes too short".to_string());
}
let client_name = String::from_utf8(payload[2..2 + name_len].to_vec())
.map_err(|e| format!("Invalid UTF-8 in client_name: {}", e))?;
Ok(Packet::Connect { client_name })
}
TYPE_ACCEPT => {
if payload.len() < 4 {
return Err("Accept payload too short".to_string());
}
let client_id = u32::from_le_bytes([payload[0], payload[1], payload[2], payload[3]]);
Ok(Packet::Accept { client_id })
}
TYPE_DISCONNECT => {
if payload.len() < 4 {
return Err("Disconnect payload too short".to_string());
}
let client_id = u32::from_le_bytes([payload[0], payload[1], payload[2], payload[3]]);
Ok(Packet::Disconnect { client_id })
}
TYPE_PING => {
if payload.len() < 8 {
return Err("Ping payload too short".to_string());
}
let timestamp = u64::from_le_bytes([
payload[0], payload[1], payload[2], payload[3],
payload[4], payload[5], payload[6], payload[7],
]);
Ok(Packet::Ping { timestamp })
}
TYPE_PONG => {
if payload.len() < 8 {
return Err("Pong payload too short".to_string());
}
let timestamp = u64::from_le_bytes([
payload[0], payload[1], payload[2], payload[3],
payload[4], payload[5], payload[6], payload[7],
]);
Ok(Packet::Pong { timestamp })
}
TYPE_USER_DATA => {
if payload.len() < 4 {
return Err("UserData payload too short".to_string());
}
let client_id = u32::from_le_bytes([payload[0], payload[1], payload[2], payload[3]]);
let data = payload[4..].to_vec();
Ok(Packet::UserData { client_id, data })
}
TYPE_RELIABLE => {
if payload.len() < 2 {
return Err("Reliable payload too short".to_string());
}
let sequence = u16::from_le_bytes([payload[0], payload[1]]);
let data = payload[2..].to_vec();
Ok(Packet::Reliable { sequence, data })
}
TYPE_ACK => {
if payload.len() < 2 {
return Err("Ack payload too short".to_string());
}
let sequence = u16::from_le_bytes([payload[0], payload[1]]);
Ok(Packet::Ack { sequence })
}
TYPE_SNAPSHOT => {
if payload.len() < 4 {
return Err("Snapshot payload too short".to_string());
}
let tick = u32::from_le_bytes([payload[0], payload[1], payload[2], payload[3]]);
let data = payload[4..].to_vec();
Ok(Packet::Snapshot { tick, data })
}
TYPE_SNAPSHOT_DELTA => {
if payload.len() < 8 {
return Err("SnapshotDelta payload too short".to_string());
}
let base_tick = u32::from_le_bytes([payload[0], payload[1], payload[2], payload[3]]);
let tick = u32::from_le_bytes([payload[4], payload[5], payload[6], payload[7]]);
let data = payload[8..].to_vec();
Ok(Packet::SnapshotDelta { base_tick, tick, data })
}
_ => Err(format!("Unknown packet type_id: {}", type_id)),
}
}
fn type_id(&self) -> u8 {
match self {
Packet::Connect { .. } => TYPE_CONNECT,
Packet::Accept { .. } => TYPE_ACCEPT,
Packet::Disconnect { .. } => TYPE_DISCONNECT,
Packet::Ping { .. } => TYPE_PING,
Packet::Pong { .. } => TYPE_PONG,
Packet::UserData { .. } => TYPE_USER_DATA,
Packet::Reliable { .. } => TYPE_RELIABLE,
Packet::Ack { .. } => TYPE_ACK,
Packet::Snapshot { .. } => TYPE_SNAPSHOT,
Packet::SnapshotDelta { .. } => TYPE_SNAPSHOT_DELTA,
}
}
fn encode_payload(&self) -> Vec<u8> {
match self {
Packet::Connect { client_name } => {
let name_bytes = client_name.as_bytes();
let name_len = name_bytes.len() as u16;
let mut buf = Vec::with_capacity(2 + name_bytes.len());
buf.extend_from_slice(&name_len.to_le_bytes());
buf.extend_from_slice(name_bytes);
buf
}
Packet::Accept { client_id } => client_id.to_le_bytes().to_vec(),
Packet::Disconnect { client_id } => client_id.to_le_bytes().to_vec(),
Packet::Ping { timestamp } => timestamp.to_le_bytes().to_vec(),
Packet::Pong { timestamp } => timestamp.to_le_bytes().to_vec(),
Packet::UserData { client_id, data } => {
let mut buf = Vec::with_capacity(4 + data.len());
buf.extend_from_slice(&client_id.to_le_bytes());
buf.extend_from_slice(data);
buf
}
Packet::Reliable { sequence, data } => {
let mut buf = Vec::with_capacity(2 + data.len());
buf.extend_from_slice(&sequence.to_le_bytes());
buf.extend_from_slice(data);
buf
}
Packet::Ack { sequence } => sequence.to_le_bytes().to_vec(),
Packet::Snapshot { tick, data } => {
let mut buf = Vec::with_capacity(4 + data.len());
buf.extend_from_slice(&tick.to_le_bytes());
buf.extend_from_slice(data);
buf
}
Packet::SnapshotDelta { base_tick, tick, data } => {
let mut buf = Vec::with_capacity(8 + data.len());
buf.extend_from_slice(&base_tick.to_le_bytes());
buf.extend_from_slice(&tick.to_le_bytes());
buf.extend_from_slice(data);
buf
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
fn roundtrip(packet: Packet) {
let bytes = packet.to_bytes();
let decoded = Packet::from_bytes(&bytes).expect("roundtrip failed");
assert_eq!(packet, decoded);
}
#[test]
fn test_connect_roundtrip() {
roundtrip(Packet::Connect {
client_name: "Alice".to_string(),
});
}
#[test]
fn test_accept_roundtrip() {
roundtrip(Packet::Accept { client_id: 42 });
}
#[test]
fn test_disconnect_roundtrip() {
roundtrip(Packet::Disconnect { client_id: 7 });
}
#[test]
fn test_ping_roundtrip() {
roundtrip(Packet::Ping {
timestamp: 1_234_567_890_u64,
});
}
#[test]
fn test_pong_roundtrip() {
roundtrip(Packet::Pong {
timestamp: 9_876_543_210_u64,
});
}
#[test]
fn test_user_data_roundtrip() {
roundtrip(Packet::UserData {
client_id: 3,
data: vec![0xDE, 0xAD, 0xBE, 0xEF],
});
}
#[test]
fn test_reliable_roundtrip() {
roundtrip(Packet::Reliable {
sequence: 42,
data: vec![0xCA, 0xFE],
});
}
#[test]
fn test_ack_roundtrip() {
roundtrip(Packet::Ack { sequence: 100 });
}
#[test]
fn test_snapshot_roundtrip() {
roundtrip(Packet::Snapshot {
tick: 999,
data: vec![1, 2, 3],
});
}
#[test]
fn test_snapshot_delta_roundtrip() {
roundtrip(Packet::SnapshotDelta {
base_tick: 10,
tick: 15,
data: vec![4, 5, 6],
});
}
#[test]
fn test_invalid_type_returns_error() {
// Build a packet with type_id = 99 (unknown)
let bytes = vec![99u8, 0, 0, 0]; // type=99, payload_len=0, reserved=0
let result = Packet::from_bytes(&bytes);
assert!(result.is_err(), "Expected error for unknown type_id");
}
}

View File

@@ -0,0 +1,318 @@
use std::collections::{HashMap, HashSet};
use std::time::{Duration, Instant};
/// A channel that provides reliable delivery over unreliable transport.
///
/// Assigns sequence numbers, tracks ACKs, estimates RTT,
/// and retransmits unacknowledged packets after 2x RTT.
pub struct ReliableChannel {
    next_sequence: u16,
    pending_acks: HashMap<u16, (Instant, Vec<u8>)>,
    received_seqs: HashSet<u16>,
    rtt: Duration,
    /// Outgoing ACK packets that need to be sent by the caller.
    outgoing_acks: Vec<u16>,
}

impl ReliableChannel {
    /// Create a channel with a 100 ms initial RTT estimate.
    pub fn new() -> Self {
        Self {
            next_sequence: 0,
            pending_acks: HashMap::new(),
            received_seqs: HashSet::new(),
            rtt: Duration::from_millis(100), // initial estimate
            outgoing_acks: Vec::new(),
        }
    }

    /// Current smoothed RTT estimate.
    pub fn rtt(&self) -> Duration {
        self.rtt
    }

    /// Number of packets still awaiting acknowledgement.
    pub fn pending_count(&self) -> usize {
        self.pending_acks.len()
    }

    /// Prepare a reliable send. Returns (sequence_number, wrapped_data).
    /// The caller is responsible for actually transmitting the wrapped data.
    pub fn send_reliable(&mut self, data: &[u8]) -> (u16, Vec<u8>) {
        let seq = self.next_sequence;
        self.next_sequence = seq.wrapping_add(1);
        // Wire format: [seq(2 LE), data...]
        let mut wrapped = Vec::with_capacity(2 + data.len());
        wrapped.extend_from_slice(&seq.to_le_bytes());
        wrapped.extend_from_slice(data);
        self.pending_acks.insert(seq, (Instant::now(), wrapped.clone()));
        (seq, wrapped)
    }

    /// Process a received reliable packet. Queues an ACK (even for
    /// duplicates) and returns the payload unless it was already seen.
    ///
    /// NOTE(review): `received_seqs` grows without bound, so once the u16
    /// sequence space wraps, fresh packets read as duplicates — long-lived
    /// sessions would need a sliding dedup window. Confirm session lengths.
    pub fn receive_and_ack(&mut self, sequence: u16, data: &[u8]) -> Option<Vec<u8>> {
        self.outgoing_acks.push(sequence);
        // `insert` returns false when the sequence was already present.
        if self.received_seqs.insert(sequence) {
            Some(data.to_vec())
        } else {
            None
        }
    }

    /// Process an incoming ACK for a sequence we sent, folding the RTT
    /// sample into an exponential moving average (7/8 old, 1/8 new).
    pub fn process_ack(&mut self, sequence: u16) {
        if let Some((sent_at, _)) = self.pending_acks.remove(&sequence) {
            let sample = sent_at.elapsed();
            self.rtt = Duration::from_secs_f64(
                0.875 * self.rtt.as_secs_f64() + 0.125 * sample.as_secs_f64(),
            );
        }
    }

    /// Drain any pending outgoing ACK sequence numbers.
    pub fn drain_acks(&mut self) -> Vec<u16> {
        std::mem::take(&mut self.outgoing_acks)
    }

    /// Return the data of packets unacknowledged for >= 2x RTT, resetting
    /// their send timestamps so they wait a full timeout before resending.
    pub fn update(&mut self) -> Vec<Vec<u8>> {
        let deadline = self.rtt * 2;
        let now = Instant::now();
        let mut resend = Vec::new();
        for (sent_at, payload) in self.pending_acks.values_mut() {
            if now.duration_since(*sent_at) >= deadline {
                resend.push(payload.clone());
                *sent_at = now;
            }
        }
        resend
    }
}
/// A channel that delivers packets in order, built on top of ReliableChannel.
pub struct OrderedChannel {
    /// Underlying reliability layer (dedup, ACKs, retransmission).
    reliable: ReliableChannel,
    /// The next sequence number eligible for delivery.
    next_deliver: u16,
    /// Out-of-order payloads held until their predecessors arrive.
    buffer: HashMap<u16, Vec<u8>>,
}

impl OrderedChannel {
    /// Create an ordered channel that begins delivery at sequence 0.
    pub fn new() -> Self {
        Self {
            reliable: ReliableChannel::new(),
            next_deliver: 0,
            buffer: HashMap::new(),
        }
    }

    /// Access the underlying reliable channel (e.g., for send_reliable, process_ack, update).
    pub fn reliable(&self) -> &ReliableChannel {
        &self.reliable
    }

    /// Access the underlying reliable channel mutably.
    pub fn reliable_mut(&mut self) -> &mut ReliableChannel {
        &mut self.reliable
    }

    /// Prepare a reliable, ordered send.
    pub fn send(&mut self, data: &[u8]) -> (u16, Vec<u8>) {
        self.reliable.send_reliable(data)
    }

    /// Receive a packet. Buffers out-of-order packets and returns all
    /// packets that can now be delivered in sequence order.
    pub fn receive(&mut self, sequence: u16, data: &[u8]) -> Vec<Vec<u8>> {
        // Let the reliable layer dedup and queue the ACK; stash fresh payloads.
        if let Some(payload) = self.reliable.receive_and_ack(sequence, data) {
            self.buffer.insert(sequence, payload);
        }
        // Flush the longest run of consecutive sequences starting at next_deliver.
        let mut ready = Vec::new();
        loop {
            match self.buffer.remove(&self.next_deliver) {
                Some(chunk) => {
                    ready.push(chunk);
                    self.next_deliver = self.next_deliver.wrapping_add(1);
                }
                None => break,
            }
        }
        ready
    }
}
/// Unit tests for the reliable and ordered channel layers.
///
/// The retransmission/RTT tests use short real-time sleeps, so they are
/// fast but mildly sensitive to machine load.
#[cfg(test)]
mod tests {
    use super::*;

    // ---- ReliableChannel tests ----

    #[test]
    fn test_send_receive_ack_roundtrip() {
        let mut sender = ReliableChannel::new();
        let mut receiver = ReliableChannel::new();
        let original = b"hello world";
        let (seq, _buf) = sender.send_reliable(original);
        assert_eq!(seq, 0);
        assert_eq!(sender.pending_count(), 1);
        // Receiver gets the packet
        let result = receiver.receive_and_ack(seq, original);
        assert_eq!(result, Some(original.to_vec()));
        // Receiver queued an ack
        let acks = receiver.drain_acks();
        assert_eq!(acks, vec![0]);
        // Sender processes the ack
        sender.process_ack(seq);
        assert_eq!(sender.pending_count(), 0);
    }

    #[test]
    fn test_duplicate_rejection() {
        let mut receiver = ReliableChannel::new();
        let data = b"payload";
        let result1 = receiver.receive_and_ack(0, data);
        assert!(result1.is_some());
        let result2 = receiver.receive_and_ack(0, data);
        assert!(result2.is_none(), "Duplicate should be rejected");
        // But ACK is still queued for both
        let acks = receiver.drain_acks();
        assert_eq!(acks.len(), 2);
    }

    #[test]
    fn test_sequence_numbers_increment() {
        let mut channel = ReliableChannel::new();
        let (s0, _) = channel.send_reliable(b"a");
        let (s1, _) = channel.send_reliable(b"b");
        let (s2, _) = channel.send_reliable(b"c");
        assert_eq!(s0, 0);
        assert_eq!(s1, 1);
        assert_eq!(s2, 2);
        assert_eq!(channel.pending_count(), 3);
    }

    #[test]
    fn test_retransmission_on_timeout() {
        let mut channel = ReliableChannel::new();
        // Set a very short RTT so timeout (2*RTT) triggers quickly
        // (private-field access is fine: this module lives in the same file).
        channel.rtt = Duration::from_millis(1);
        let (_seq, _buf) = channel.send_reliable(b"data");
        assert_eq!(channel.pending_count(), 1);
        // Wait for timeout
        std::thread::sleep(Duration::from_millis(10));
        let retransmits = channel.update();
        assert_eq!(retransmits.len(), 1, "Should retransmit 1 packet");
        // Packet is still pending (not acked)
        assert_eq!(channel.pending_count(), 1);
    }

    #[test]
    fn test_no_retransmission_before_timeout() {
        let mut channel = ReliableChannel::new();
        // Default RTT = 100ms, so timeout = 200ms
        let (_seq, _buf) = channel.send_reliable(b"data");
        // Immediately check — should not retransmit
        let retransmits = channel.update();
        assert!(retransmits.is_empty());
    }

    #[test]
    fn test_rtt_estimation() {
        let mut channel = ReliableChannel::new();
        let initial_rtt = channel.rtt();
        let (seq, _) = channel.send_reliable(b"x");
        std::thread::sleep(Duration::from_millis(5));
        channel.process_ack(seq);
        // RTT should have changed from initial value
        let new_rtt = channel.rtt();
        assert_ne!(initial_rtt, new_rtt, "RTT should be updated after ACK");
    }

    #[test]
    fn test_wrapping_sequence() {
        let mut channel = ReliableChannel::new();
        // Force the counter to the edge so the next two sends straddle the wrap.
        channel.next_sequence = u16::MAX;
        let (s1, _) = channel.send_reliable(b"a");
        assert_eq!(s1, u16::MAX);
        let (s2, _) = channel.send_reliable(b"b");
        assert_eq!(s2, 0); // wrapped
    }

    // ---- OrderedChannel tests ----

    #[test]
    fn test_ordered_in_order_delivery() {
        let mut channel = OrderedChannel::new();
        let delivered0 = channel.receive(0, b"first");
        assert_eq!(delivered0, vec![b"first".to_vec()]);
        let delivered1 = channel.receive(1, b"second");
        assert_eq!(delivered1, vec![b"second".to_vec()]);
    }

    #[test]
    fn test_ordered_out_of_order_delivery() {
        let mut channel = OrderedChannel::new();
        // Receive seq 1 first (out of order)
        let delivered = channel.receive(1, b"second");
        assert!(delivered.is_empty(), "Seq 1 should be buffered, waiting for 0");
        // Receive seq 2 (still missing 0)
        let delivered = channel.receive(2, b"third");
        assert!(delivered.is_empty());
        // Receive seq 0 — should deliver 0, 1, 2 in order
        let delivered = channel.receive(0, b"first");
        assert_eq!(delivered.len(), 3);
        assert_eq!(delivered[0], b"first");
        assert_eq!(delivered[1], b"second");
        assert_eq!(delivered[2], b"third");
    }

    #[test]
    fn test_ordered_gap_handling() {
        let mut channel = OrderedChannel::new();
        // Deliver 0
        let d = channel.receive(0, b"a");
        assert_eq!(d.len(), 1);
        // Skip 1, deliver 2
        let d = channel.receive(2, b"c");
        assert!(d.is_empty(), "Can't deliver 2 without 1");
        // Now deliver 1 — should flush both 1 and 2
        let d = channel.receive(1, b"b");
        assert_eq!(d.len(), 2);
        assert_eq!(d[0], b"b");
        assert_eq!(d[1], b"c");
    }
}

View File

@@ -0,0 +1,195 @@
use std::collections::HashMap;
use std::net::SocketAddr;
use crate::packet::Packet;
use crate::socket::NetSocket;
/// Information about a connected client.
pub struct ClientInfo {
    /// Server-assigned id (handed out sequentially, starting at 1).
    pub id: u32,
    /// Address the client connected from; incoming packets from this
    /// address are attributed to this client.
    pub addr: SocketAddr,
    /// Display name supplied in the client's Connect packet.
    pub name: String,
}
/// Events produced by the server during polling.
pub enum ServerEvent {
    /// A client completed the Connect handshake and was assigned an id.
    ClientConnected { client_id: u32, name: String },
    /// A client sent a Disconnect packet and was removed.
    ClientDisconnected { client_id: u32 },
    /// A non-handshake packet arrived from a known client address.
    PacketReceived { client_id: u32, packet: Packet },
}
/// A non-blocking UDP server that manages multiple clients.
pub struct NetServer {
    // Non-blocking UDP socket shared by all clients.
    socket: NetSocket,
    // Connected clients keyed by their assigned id.
    clients: HashMap<u32, ClientInfo>,
    // Reverse index: source address -> client id, used to route packets.
    addr_to_id: HashMap<SocketAddr, u32>,
    // Next id to hand out (ids start at 1).
    next_id: u32,
}
impl NetServer {
    /// Bind the server to the given address.
    ///
    /// # Errors
    /// Returns a message if the underlying socket cannot be bound.
    pub fn new(addr: &str) -> Result<Self, String> {
        let socket = NetSocket::bind(addr)?;
        Ok(NetServer {
            socket,
            clients: HashMap::new(),
            addr_to_id: HashMap::new(),
            next_id: 1,
        })
    }

    /// Return the local address the server is listening on.
    pub fn local_addr(&self) -> SocketAddr {
        self.socket.local_addr()
    }

    /// Poll for incoming packets and return any resulting server events.
    ///
    /// Handshake handling:
    /// - `Connect`: registers the peer and replies with `Accept`. A duplicate
    ///   `Connect` from an already-registered address (e.g. a client retry over
    ///   lossy UDP) re-sends `Accept` with the existing id instead of leaking
    ///   the old entry and allocating a second one.
    /// - `Disconnect`: removes the client, but only when the datagram comes
    ///   from the client's registered address (prevents trivially spoofed
    ///   kicks, since the packet body carries an arbitrary client id).
    /// - Everything else is forwarded as `PacketReceived` when the source
    ///   address maps to a known client.
    pub fn poll(&mut self) -> Vec<ServerEvent> {
        let mut events = Vec::new();
        while let Some((packet, addr)) = self.socket.recv_from() {
            match &packet {
                Packet::Connect { client_name } => {
                    // Duplicate Connect: don't allocate a second id for the
                    // same address — just re-send the (possibly lost) Accept.
                    if let Some(&existing) = self.addr_to_id.get(&addr) {
                        let accept = Packet::Accept { client_id: existing };
                        if let Err(e) = self.socket.send_to(&accept, addr) {
                            eprintln!("[NetServer] Failed to re-send Accept to {}: {}", addr, e);
                        }
                        continue;
                    }
                    // Assign a new id and send Accept
                    let id = self.next_id;
                    self.next_id += 1;
                    let name = client_name.clone();
                    let info = ClientInfo {
                        id,
                        addr,
                        name: name.clone(),
                    };
                    self.clients.insert(id, info);
                    self.addr_to_id.insert(addr, id);
                    let accept = Packet::Accept { client_id: id };
                    if let Err(e) = self.socket.send_to(&accept, addr) {
                        eprintln!("[NetServer] Failed to send Accept to {}: {}", addr, e);
                    }
                    events.push(ServerEvent::ClientConnected { client_id: id, name });
                }
                Packet::Disconnect { client_id } => {
                    let id = *client_id;
                    // Only honor a Disconnect sent from the client's own
                    // registered address; otherwise any peer could remove
                    // any id just by naming it in the packet.
                    let legit = self.clients.get(&id).map_or(false, |info| info.addr == addr);
                    if legit {
                        if let Some(info) = self.clients.remove(&id) {
                            self.addr_to_id.remove(&info.addr);
                            events.push(ServerEvent::ClientDisconnected { client_id: id });
                        }
                    } else {
                        eprintln!("[NetServer] Ignoring Disconnect for {} from {}", id, addr);
                    }
                }
                _ => {
                    // Map address to client id
                    if let Some(&client_id) = self.addr_to_id.get(&addr) {
                        events.push(ServerEvent::PacketReceived {
                            client_id,
                            packet: packet.clone(),
                        });
                    } else {
                        eprintln!("[NetServer] Packet from unknown addr {}", addr);
                    }
                }
            }
        }
        events
    }

    /// Send a packet to every connected client.
    pub fn broadcast(&self, packet: &Packet) {
        for info in self.clients.values() {
            if let Err(e) = self.socket.send_to(packet, info.addr) {
                eprintln!("[NetServer] broadcast failed for client {}: {}", info.id, e);
            }
        }
    }

    /// Send a packet to a specific client by id. Unknown ids are ignored.
    pub fn send_to_client(&self, id: u32, packet: &Packet) {
        if let Some(info) = self.clients.get(&id) {
            if let Err(e) = self.socket.send_to(packet, info.addr) {
                eprintln!("[NetServer] send_to_client {} failed: {}", id, e);
            }
        }
    }

    /// Returns an iterator over all connected clients.
    /// (Previous doc said "slice" — the return type is an iterator.)
    pub fn clients(&self) -> impl Iterator<Item = &ClientInfo> {
        self.clients.values()
    }

    /// Returns the number of connected clients.
    pub fn client_count(&self) -> usize {
        self.clients.len()
    }
}
/// Loopback integration test: real UDP sockets on 127.0.0.1 with OS-assigned
/// ports, using short sleeps to give datagrams time to arrive locally.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::client::{ClientEvent, NetClient};
    use std::time::Duration;

    #[test]
    fn test_integration_connect_and_userdata() {
        // Step 1: Start server on OS-assigned port
        let mut server = NetServer::new("127.0.0.1:0").expect("server bind failed");
        let server_addr = server.local_addr();
        // Step 2: Create client on OS-assigned port, point at server
        let mut client = NetClient::new("127.0.0.1:0", server_addr, "TestClient")
            .expect("client bind failed");
        // Step 3: Client sends Connect
        client.connect().expect("connect send failed");
        // Step 4: Give the packet time to travel
        std::thread::sleep(Duration::from_millis(50));
        // Step 5: Server poll → should get ClientConnected
        let server_events = server.poll();
        let mut connected_id = None;
        for event in &server_events {
            if let ServerEvent::ClientConnected { client_id, name } = event {
                connected_id = Some(*client_id);
                assert_eq!(name, "TestClient");
            }
        }
        assert!(connected_id.is_some(), "Server did not receive ClientConnected");
        // Step 6: Client poll → should get Connected
        std::thread::sleep(Duration::from_millis(50));
        let client_events = client.poll();
        let mut got_connected = false;
        for event in &client_events {
            if let ClientEvent::Connected { client_id } = event {
                assert_eq!(Some(*client_id), connected_id);
                got_connected = true;
            }
        }
        assert!(got_connected, "Client did not receive Connected event");
        // Step 7: Client sends UserData, server should receive it
        let cid = client.client_id().unwrap();
        let user_packet = Packet::UserData {
            client_id: cid,
            data: vec![1, 2, 3, 4],
        };
        client.send(user_packet.clone()).expect("send userdata failed");
        std::thread::sleep(Duration::from_millis(50));
        let server_events2 = server.poll();
        let mut got_packet = false;
        for event in server_events2 {
            if let ServerEvent::PacketReceived { client_id, packet } = event {
                assert_eq!(client_id, cid);
                assert_eq!(packet, user_packet);
                got_packet = true;
            }
        }
        assert!(got_packet, "Server did not receive UserData packet");
        // Cleanup: disconnect
        client.disconnect().expect("disconnect send failed");
    }
}

View File

@@ -0,0 +1,378 @@
/// State of a single entity at a point in time.
#[derive(Debug, Clone, PartialEq)]
pub struct EntityState {
    pub id: u32,
    pub position: [f32; 3],
    pub rotation: [f32; 3],
    pub velocity: [f32; 3],
}

/// A snapshot of the world at a given tick.
#[derive(Debug, Clone, PartialEq)]
pub struct Snapshot {
    pub tick: u32,
    pub entities: Vec<EntityState>,
}

/// Binary size of one entity: id(4) + pos(12) + rot(12) + vel(12) = 40 bytes
const ENTITY_SIZE: usize = 4 + 12 + 12 + 12;

/// Append one f32 to `buf` in little-endian byte order.
fn write_f32_le(buf: &mut Vec<u8>, v: f32) {
    buf.extend_from_slice(&v.to_le_bytes());
}

/// Decode one little-endian f32 at `offset`. Panics if fewer than
/// four bytes remain past `offset`.
fn read_f32_le(data: &[u8], offset: usize) -> f32 {
    let raw: [u8; 4] = data[offset..offset + 4].try_into().expect("4 bytes for f32");
    f32::from_le_bytes(raw)
}

/// Append the three components of `v` as little-endian f32s.
fn write_f32x3(buf: &mut Vec<u8>, v: &[f32; 3]) {
    for &component in v {
        write_f32_le(buf, component);
    }
}

/// Decode a 3-component f32 vector starting at `offset` (12 bytes).
fn read_f32x3(data: &[u8], offset: usize) -> [f32; 3] {
    let mut out = [0.0f32; 3];
    for (i, slot) in out.iter_mut().enumerate() {
        *slot = read_f32_le(data, offset + i * 4);
    }
    out
}

/// Append the 40-byte wire form of an entity: id(4 LE) + pos + rot + vel.
fn serialize_entity(buf: &mut Vec<u8>, e: &EntityState) {
    buf.extend_from_slice(&e.id.to_le_bytes());
    write_f32x3(buf, &e.position);
    write_f32x3(buf, &e.rotation);
    write_f32x3(buf, &e.velocity);
}

/// Decode one entity from its 40-byte wire form at `offset`.
/// Panics if the buffer is too short (callers validate length first).
fn deserialize_entity(data: &[u8], offset: usize) -> EntityState {
    let raw_id: [u8; 4] = data[offset..offset + 4].try_into().expect("4 bytes for id");
    EntityState {
        id: u32::from_le_bytes(raw_id),
        position: read_f32x3(data, offset + 4),
        rotation: read_f32x3(data, offset + 16),
        velocity: read_f32x3(data, offset + 28),
    }
}
/// Serialize a snapshot into compact binary format.
/// Layout: tick(4 LE) + entity_count(4 LE) + entities...
pub fn serialize_snapshot(snapshot: &Snapshot) -> Vec<u8> {
    // Header is 8 bytes; each entity contributes exactly ENTITY_SIZE bytes.
    let mut out = Vec::with_capacity(8 + ENTITY_SIZE * snapshot.entities.len());
    out.extend_from_slice(&snapshot.tick.to_le_bytes());
    out.extend_from_slice(&(snapshot.entities.len() as u32).to_le_bytes());
    for entity in snapshot.entities.iter() {
        serialize_entity(&mut out, entity);
    }
    out
}
/// Deserialize a snapshot from binary data.
///
/// # Errors
/// Returns `Err` if the buffer is shorter than the 8-byte header, if the
/// declared entity count overflows the expected-size computation, or if the
/// buffer is shorter than the declared payload. Trailing bytes beyond the
/// declared payload are ignored (lenient by design).
pub fn deserialize_snapshot(data: &[u8]) -> Result<Snapshot, String> {
    if data.len() < 8 {
        return Err("Snapshot data too short for header".to_string());
    }
    let tick = u32::from_le_bytes([data[0], data[1], data[2], data[3]]);
    let count = u32::from_le_bytes([data[4], data[5], data[6], data[7]]) as usize;
    // `count` is untrusted network input. Use checked arithmetic so a hostile
    // count cannot wrap `expected` (possible on 32-bit usize, where
    // count * 40 overflows) and slip past the length check below, which would
    // then panic inside deserialize_entity.
    let expected = count
        .checked_mul(ENTITY_SIZE)
        .and_then(|n| n.checked_add(8))
        .ok_or_else(|| format!("Snapshot entity count {} overflows size check", count))?;
    if data.len() < expected {
        return Err(format!(
            "Snapshot data too short: expected {} bytes, got {}",
            expected,
            data.len()
        ));
    }
    // Length is validated above, so with_capacity is bounded by data.len().
    let mut entities = Vec::with_capacity(count);
    for i in 0..count {
        entities.push(deserialize_entity(data, 8 + i * ENTITY_SIZE));
    }
    Ok(Snapshot { tick, entities })
}
/// Compute a delta between two snapshots.
/// Format: new_tick(4) + count(4) + [id(4) + flags(1) + changed_fields...]
/// Flags bitmask: 0x01 = position, 0x02 = rotation, 0x04 = velocity, 0x80 = new entity (full)
///
/// NOTE(review): entities present in `old` but absent from `new` are not
/// encoded — the format has no removal record, so `apply_diff` keeps them.
/// Confirm whether entity removal is signalled elsewhere in the protocol.
pub fn diff_snapshots(old: &Snapshot, new: &Snapshot) -> Vec<u8> {
    use std::collections::HashMap;
    // Index the old snapshot by id for O(1) lookups.
    let previous: HashMap<u32, &EntityState> =
        old.entities.iter().map(|e| (e.id, e)).collect();
    let mut body: Vec<u8> = Vec::new();
    let mut changed: u32 = 0;
    for ent in &new.entities {
        match previous.get(&ent.id) {
            Some(prev) => {
                // Known entity: encode only the fields that differ, in the
                // fixed flag order (position, rotation, velocity).
                let mut flags: u8 = 0;
                let mut payload = Vec::new();
                if ent.position != prev.position {
                    flags |= 0x01;
                    write_f32x3(&mut payload, &ent.position);
                }
                if ent.rotation != prev.rotation {
                    flags |= 0x02;
                    write_f32x3(&mut payload, &ent.rotation);
                }
                if ent.velocity != prev.velocity {
                    flags |= 0x04;
                    write_f32x3(&mut payload, &ent.velocity);
                }
                if flags == 0 {
                    continue; // unchanged entity contributes nothing
                }
                body.extend_from_slice(&ent.id.to_le_bytes());
                body.push(flags);
                body.extend_from_slice(&payload);
                changed += 1;
            }
            None => {
                // New entity — send full state
                body.extend_from_slice(&ent.id.to_le_bytes());
                body.push(0x80); // "new entity" flag
                write_f32x3(&mut body, &ent.position);
                write_f32x3(&mut body, &ent.rotation);
                write_f32x3(&mut body, &ent.velocity);
                changed += 1;
            }
        }
    }
    // Prepend the header now that the entry count is known.
    let mut out = Vec::with_capacity(8 + body.len());
    out.extend_from_slice(&new.tick.to_le_bytes());
    out.extend_from_slice(&changed.to_le_bytes());
    out.extend_from_slice(&body);
    out
}
/// Apply a delta to a base snapshot to produce an updated snapshot.
///
/// The diff layout mirrors `diff_snapshots`: `tick(4 LE) + count(4 LE)`
/// followed by `count` entries of `id(4 LE) + flags(1) + payload`, where
/// flag 0x80 carries a full 36-byte state and flags 0x01/0x02/0x04 each
/// carry one 12-byte vector, always in position/rotation/velocity order.
///
/// # Errors
/// Returns `Err` if the buffer is truncated mid-entry or if a delta entry
/// references an entity id not present in `base`.
///
/// NOTE(review): entities are never removed — the format has no "entity
/// left" record, so stale entities from `base` persist in the result.
pub fn apply_diff(base: &Snapshot, diff: &[u8]) -> Result<Snapshot, String> {
    if diff.len() < 8 {
        return Err("Diff data too short for header".to_string());
    }
    let tick = u32::from_le_bytes([diff[0], diff[1], diff[2], diff[3]]);
    let count = u32::from_le_bytes([diff[4], diff[5], diff[6], diff[7]]) as usize;
    // Start from a clone of the base
    let mut entities: Vec<EntityState> = base.entities.clone();
    let mut offset = 8;
    for _ in 0..count {
        // Every entry begins with a 5-byte header: id(4) + flags(1).
        if offset + 5 > diff.len() {
            return Err("Diff truncated at entry header".to_string());
        }
        let id = u32::from_le_bytes([diff[offset], diff[offset + 1], diff[offset + 2], diff[offset + 3]]);
        let flags = diff[offset + 4];
        offset += 5;
        if flags & 0x80 != 0 {
            // New entity — full state (36 bytes: three 12-byte vectors)
            if offset + 36 > diff.len() {
                return Err("Diff truncated at new entity data".to_string());
            }
            let position = read_f32x3(diff, offset);
            let rotation = read_f32x3(diff, offset + 12);
            let velocity = read_f32x3(diff, offset + 24);
            offset += 36;
            // Add or replace: a 0x80 record for an already-known id
            // overwrites that entity's state in place.
            if let Some(ent) = entities.iter_mut().find(|e| e.id == id) {
                ent.position = position;
                ent.rotation = rotation;
                ent.velocity = velocity;
            } else {
                entities.push(EntityState { id, position, rotation, velocity });
            }
        } else {
            // Delta update — find existing entity
            let ent = entities.iter_mut().find(|e| e.id == id)
                .ok_or_else(|| format!("Diff references unknown entity {}", id))?;
            // Field payloads appear in fixed flag order; `offset` advances in
            // lockstep with each consumed 12-byte vector, so these three
            // checks must stay in exactly this order.
            if flags & 0x01 != 0 {
                if offset + 12 > diff.len() {
                    return Err("Diff truncated at position".to_string());
                }
                ent.position = read_f32x3(diff, offset);
                offset += 12;
            }
            if flags & 0x02 != 0 {
                if offset + 12 > diff.len() {
                    return Err("Diff truncated at rotation".to_string());
                }
                ent.rotation = read_f32x3(diff, offset);
                offset += 12;
            }
            if flags & 0x04 != 0 {
                if offset + 12 > diff.len() {
                    return Err("Diff truncated at velocity".to_string());
                }
                ent.velocity = read_f32x3(diff, offset);
                offset += 12;
            }
        }
    }
    Ok(Snapshot { tick, entities })
}
/// Unit tests for snapshot serialization, deltas, and delta application.
#[cfg(test)]
mod tests {
    use super::*;

    // Helper: entity at the given position with zero rotation/velocity.
    fn make_entity(id: u32, px: f32, py: f32, pz: f32) -> EntityState {
        EntityState {
            id,
            position: [px, py, pz],
            rotation: [0.0, 0.0, 0.0],
            velocity: [0.0, 0.0, 0.0],
        }
    }

    #[test]
    fn test_snapshot_roundtrip() {
        let snap = Snapshot {
            tick: 42,
            entities: vec![
                make_entity(1, 1.0, 2.0, 3.0),
                make_entity(2, 4.0, 5.0, 6.0),
            ],
        };
        let bytes = serialize_snapshot(&snap);
        let decoded = deserialize_snapshot(&bytes).expect("deserialize failed");
        assert_eq!(snap, decoded);
    }

    #[test]
    fn test_snapshot_empty() {
        let snap = Snapshot { tick: 0, entities: vec![] };
        let bytes = serialize_snapshot(&snap);
        assert_eq!(bytes.len(), 8); // just header
        let decoded = deserialize_snapshot(&bytes).unwrap();
        assert_eq!(snap, decoded);
    }

    #[test]
    fn test_diff_no_changes() {
        let snap = Snapshot {
            tick: 10,
            entities: vec![make_entity(1, 1.0, 2.0, 3.0)],
        };
        let snap2 = Snapshot {
            tick: 11,
            entities: vec![make_entity(1, 1.0, 2.0, 3.0)],
        };
        let diff = diff_snapshots(&snap, &snap2);
        // Header only: tick(4) + count(4) = 8, count = 0
        assert_eq!(diff.len(), 8);
        let count = u32::from_le_bytes([diff[4], diff[5], diff[6], diff[7]]);
        assert_eq!(count, 0);
    }

    #[test]
    fn test_diff_position_changed() {
        let old = Snapshot {
            tick: 10,
            entities: vec![make_entity(1, 0.0, 0.0, 0.0)],
        };
        let new = Snapshot {
            tick: 11,
            entities: vec![EntityState {
                id: 1,
                position: [1.0, 2.0, 3.0],
                rotation: [0.0, 0.0, 0.0],
                velocity: [0.0, 0.0, 0.0],
            }],
        };
        let diff = diff_snapshots(&old, &new);
        let result = apply_diff(&old, &diff).expect("apply_diff failed");
        assert_eq!(result.tick, 11);
        assert_eq!(result.entities.len(), 1);
        assert_eq!(result.entities[0].position, [1.0, 2.0, 3.0]);
        assert_eq!(result.entities[0].rotation, [0.0, 0.0, 0.0]); // unchanged
    }

    #[test]
    fn test_diff_new_entity() {
        let old = Snapshot {
            tick: 10,
            entities: vec![make_entity(1, 0.0, 0.0, 0.0)],
        };
        let new = Snapshot {
            tick: 11,
            entities: vec![
                make_entity(1, 0.0, 0.0, 0.0),
                make_entity(2, 5.0, 6.0, 7.0),
            ],
        };
        let diff = diff_snapshots(&old, &new);
        let result = apply_diff(&old, &diff).expect("apply_diff failed");
        assert_eq!(result.entities.len(), 2);
        assert_eq!(result.entities[1].id, 2);
        assert_eq!(result.entities[1].position, [5.0, 6.0, 7.0]);
    }

    #[test]
    fn test_diff_multiple_fields_changed() {
        let old = Snapshot {
            tick: 10,
            entities: vec![EntityState {
                id: 1,
                position: [0.0, 0.0, 0.0],
                rotation: [0.0, 0.0, 0.0],
                velocity: [0.0, 0.0, 0.0],
            }],
        };
        let new = Snapshot {
            tick: 11,
            entities: vec![EntityState {
                id: 1,
                position: [1.0, 1.0, 1.0],
                rotation: [2.0, 2.0, 2.0],
                velocity: [3.0, 3.0, 3.0],
            }],
        };
        let diff = diff_snapshots(&old, &new);
        let result = apply_diff(&old, &diff).unwrap();
        assert_eq!(result.entities[0].position, [1.0, 1.0, 1.0]);
        assert_eq!(result.entities[0].rotation, [2.0, 2.0, 2.0]);
        assert_eq!(result.entities[0].velocity, [3.0, 3.0, 3.0]);
    }

    #[test]
    fn test_diff_is_compact() {
        // Only position changes — diff should be smaller than full snapshot
        let old = Snapshot {
            tick: 10,
            entities: vec![make_entity(1, 0.0, 0.0, 0.0)],
        };
        let new = Snapshot {
            tick: 11,
            entities: vec![EntityState {
                id: 1,
                position: [1.0, 2.0, 3.0],
                rotation: [0.0, 0.0, 0.0],
                velocity: [0.0, 0.0, 0.0],
            }],
        };
        let full_bytes = serialize_snapshot(&new);
        let diff_bytes = diff_snapshots(&old, &new);
        assert!(
            diff_bytes.len() < full_bytes.len(),
            "Diff ({} bytes) should be smaller than full snapshot ({} bytes)",
            diff_bytes.len(),
            full_bytes.len()
        );
    }
}

View File

@@ -0,0 +1,58 @@
use std::net::{SocketAddr, UdpSocket};
use crate::Packet;
/// Maximum UDP datagram size we'll allocate for receiving.
const MAX_PACKET_SIZE: usize = 65535;

/// A non-blocking UDP socket wrapper.
pub struct NetSocket {
    // Underlying socket; switched to non-blocking mode in `bind`.
    inner: UdpSocket,
}
impl NetSocket {
    /// Bind a new non-blocking UDP socket to the given address.
    ///
    /// # Errors
    /// Returns a message if binding or enabling non-blocking mode fails.
    pub fn bind(addr: &str) -> Result<Self, String> {
        let socket = UdpSocket::bind(addr)
            .map_err(|e| format!("UdpSocket::bind({}) failed: {}", addr, e))?;
        socket
            .set_nonblocking(true)
            .map_err(|e| format!("set_nonblocking failed: {}", e))?;
        Ok(NetSocket { inner: socket })
    }

    /// Returns the local address this socket is bound to.
    pub fn local_addr(&self) -> SocketAddr {
        self.inner.local_addr().expect("local_addr unavailable")
    }

    /// Serialize and send a packet to the given address.
    pub fn send_to(&self, packet: &Packet, addr: SocketAddr) -> Result<(), String> {
        let bytes = packet.to_bytes();
        self.inner
            .send_to(&bytes, addr)
            .map_err(|e| format!("send_to failed: {}", e))?;
        Ok(())
    }

    /// Try to receive one packet. Returns None when no data is available.
    ///
    /// Malformed datagrams are logged and skipped rather than ending the
    /// read: previously a single unparseable datagram returned `None`, which
    /// made callers' `while let Some(...)` poll loops stop early and stall
    /// every valid packet queued behind it until the next poll.
    pub fn recv_from(&self) -> Option<(Packet, SocketAddr)> {
        let mut buf = vec![0u8; MAX_PACKET_SIZE];
        loop {
            match self.inner.recv_from(&mut buf) {
                Ok((len, addr)) => match Packet::from_bytes(&buf[..len]) {
                    Ok(packet) => return Some((packet, addr)),
                    Err(e) => {
                        // Skip the bad datagram and try the next one.
                        eprintln!("[NetSocket] Failed to parse packet from {}: {}", addr, e);
                    }
                },
                // Non-blocking socket with nothing queued: normal end of poll.
                Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => return None,
                Err(e) => {
                    eprintln!("[NetSocket] recv_from error: {}", e);
                    return None;
                }
            }
        }
    }
}

View File

@@ -0,0 +1,8 @@
[package]
name = "voltex_physics"
version = "0.1.0"
edition = "2021"

[dependencies]
# Sibling workspace crates; versions are managed at the workspace root.
voltex_math.workspace = true
voltex_ecs.workspace = true

View File

@@ -0,0 +1,377 @@
use voltex_ecs::Entity;
use voltex_math::{AABB, Ray};
use crate::ray::ray_vs_aabb;
/// A node in the flat BVH array.
#[derive(Debug)]
enum BvhNode {
    /// A single entity with its bounding box.
    Leaf { entity: Entity, aabb: AABB },
    /// An inner node; `left`/`right` index into `BvhTree::nodes`.
    Internal { aabb: AABB, left: usize, right: usize },
}

/// Bounding volume hierarchy over entity AABBs.
///
/// Nodes live in a flat Vec; children are pushed before their parent during
/// construction, so the root is always the LAST element of `nodes`.
#[derive(Debug)]
pub struct BvhTree {
    nodes: Vec<BvhNode>,
}
impl BvhTree {
pub fn build(entries: &[(Entity, AABB)]) -> Self {
let mut tree = BvhTree { nodes: Vec::new() };
if !entries.is_empty() {
let mut sorted: Vec<(Entity, AABB)> = entries.to_vec();
tree.build_recursive(&mut sorted);
}
tree
}
fn build_recursive(&mut self, entries: &mut [(Entity, AABB)]) -> usize {
if entries.len() == 1 {
let idx = self.nodes.len();
self.nodes.push(BvhNode::Leaf {
entity: entries[0].0,
aabb: entries[0].1,
});
return idx;
}
// Compute bounding AABB
let mut combined = entries[0].1;
for e in entries.iter().skip(1) {
combined = combined.merged(&e.1);
}
// Find longest axis
let extent = combined.max - combined.min;
let axis = if extent.x >= extent.y && extent.x >= extent.z {
0
} else if extent.y >= extent.z {
1
} else {
2
};
// Sort by axis center
entries.sort_by(|a, b| {
let ca = a.1.center();
let cb = b.1.center();
let va = match axis { 0 => ca.x, 1 => ca.y, _ => ca.z };
let vb = match axis { 0 => cb.x, 1 => cb.y, _ => cb.z };
va.partial_cmp(&vb).unwrap()
});
let mid = entries.len() / 2;
let (left_entries, right_entries) = entries.split_at_mut(mid);
let left = self.build_recursive(left_entries);
let right = self.build_recursive(right_entries);
let idx = self.nodes.len();
self.nodes.push(BvhNode::Internal {
aabb: combined,
left,
right,
});
idx
}
/// Query all overlapping pairs using recursive tree traversal (replaces N² brute force).
pub fn query_pairs(&self) -> Vec<(Entity, Entity)> {
let mut pairs = Vec::new();
if self.nodes.is_empty() {
return pairs;
}
let root = self.nodes.len() - 1;
self.query_pairs_recursive(root, root, &mut pairs);
pairs
}
fn query_pairs_recursive(&self, a: usize, b: usize, pairs: &mut Vec<(Entity, Entity)>) {
let aabb_a = self.node_aabb(a);
let aabb_b = self.node_aabb(b);
if !aabb_a.intersects(&aabb_b) {
return;
}
match (&self.nodes[a], &self.nodes[b]) {
(BvhNode::Leaf { entity: ea, aabb: aabb_a }, BvhNode::Leaf { entity: eb, aabb: aabb_b }) => {
if a != b && ea.id <= eb.id && aabb_a.intersects(aabb_b) {
pairs.push((*ea, *eb));
}
}
(BvhNode::Leaf { .. }, BvhNode::Internal { left, right, .. }) => {
self.query_pairs_recursive(a, *left, pairs);
self.query_pairs_recursive(a, *right, pairs);
}
(BvhNode::Internal { left, right, .. }, BvhNode::Leaf { .. }) => {
self.query_pairs_recursive(*left, b, pairs);
self.query_pairs_recursive(*right, b, pairs);
}
(BvhNode::Internal { left: la, right: ra, .. }, BvhNode::Internal { left: lb, right: rb, .. }) => {
if a == b {
// Same node: check children against each other and themselves
let la = *la;
let ra = *ra;
self.query_pairs_recursive(la, la, pairs);
self.query_pairs_recursive(ra, ra, pairs);
self.query_pairs_recursive(la, ra, pairs);
} else {
let la = *la;
let ra = *ra;
let lb = *lb;
let rb = *rb;
self.query_pairs_recursive(la, lb, pairs);
self.query_pairs_recursive(la, rb, pairs);
self.query_pairs_recursive(ra, lb, pairs);
self.query_pairs_recursive(ra, rb, pairs);
}
}
}
}
fn node_aabb(&self, idx: usize) -> &AABB {
match &self.nodes[idx] {
BvhNode::Leaf { aabb, .. } => aabb,
BvhNode::Internal { aabb, .. } => aabb,
}
}
/// Query ray against BVH, returning all (Entity, t) hits sorted by t.
pub fn query_ray(&self, ray: &Ray, max_t: f32) -> Vec<(Entity, f32)> {
let mut hits = Vec::new();
if self.nodes.is_empty() {
return hits;
}
let root = self.nodes.len() - 1;
self.query_ray_recursive(root, ray, max_t, &mut hits);
hits.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
hits
}
fn query_ray_recursive(&self, idx: usize, ray: &Ray, max_t: f32, hits: &mut Vec<(Entity, f32)>) {
let aabb = self.node_aabb(idx);
match ray_vs_aabb(ray, aabb) {
Some(t) if t <= max_t => {}
_ => return,
}
match &self.nodes[idx] {
BvhNode::Leaf { entity, aabb } => {
if let Some(t) = ray_vs_aabb(ray, aabb) {
if t <= max_t {
hits.push((*entity, t));
}
}
}
BvhNode::Internal { left, right, .. } => {
self.query_ray_recursive(*left, ray, max_t, hits);
self.query_ray_recursive(*right, ray, max_t, hits);
}
}
}
/// Refit the BVH: update leaf AABBs and propagate changes to parents.
/// `updated` maps entity → new AABB. Leaves not in map are unchanged.
pub fn refit(&mut self, updated: &[(Entity, AABB)]) {
if self.nodes.is_empty() {
return;
}
let root = self.nodes.len() - 1;
self.refit_recursive(root, updated);
}
fn refit_recursive(&mut self, idx: usize, updated: &[(Entity, AABB)]) -> AABB {
match self.nodes[idx] {
BvhNode::Leaf { entity, ref mut aabb } => {
if let Some((_, new_aabb)) = updated.iter().find(|(e, _)| *e == entity) {
*aabb = *new_aabb;
}
*aabb
}
BvhNode::Internal { left, right, aabb: _ } => {
let left = left;
let right = right;
let left_aabb = self.refit_recursive(left, updated);
let right_aabb = self.refit_recursive(right, updated);
let new_aabb = left_aabb.merged(&right_aabb);
// Update the internal node's AABB
if let BvhNode::Internal { ref mut aabb, .. } = self.nodes[idx] {
*aabb = new_aabb;
}
new_aabb
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use voltex_math::Vec3;
fn make_entity(id: u32) -> Entity {
Entity { id, generation: 0 }
}
#[test]
fn test_build_empty() {
let tree = BvhTree::build(&[]);
assert!(tree.query_pairs().is_empty());
}
#[test]
fn test_build_single() {
let entries = vec![
(make_entity(0), AABB::new(Vec3::ZERO, Vec3::ONE)),
];
let tree = BvhTree::build(&entries);
assert!(tree.query_pairs().is_empty());
}
#[test]
fn test_overlapping_pair() {
let entries = vec![
(make_entity(0), AABB::new(Vec3::ZERO, Vec3::new(2.0, 2.0, 2.0))),
(make_entity(1), AABB::new(Vec3::ONE, Vec3::new(3.0, 3.0, 3.0))),
];
let tree = BvhTree::build(&entries);
let pairs = tree.query_pairs();
assert_eq!(pairs.len(), 1);
}
#[test]
fn test_separated_pair() {
let entries = vec![
(make_entity(0), AABB::new(Vec3::ZERO, Vec3::ONE)),
(make_entity(1), AABB::new(Vec3::new(5.0, 5.0, 5.0), Vec3::new(6.0, 6.0, 6.0))),
];
let tree = BvhTree::build(&entries);
assert!(tree.query_pairs().is_empty());
}
#[test]
fn test_multiple_entities() {
let entries = vec![
(make_entity(0), AABB::new(Vec3::ZERO, Vec3::new(2.0, 2.0, 2.0))),
(make_entity(1), AABB::new(Vec3::ONE, Vec3::new(3.0, 3.0, 3.0))),
(make_entity(2), AABB::new(Vec3::new(10.0, 10.0, 10.0), Vec3::new(11.0, 11.0, 11.0))),
];
let tree = BvhTree::build(&entries);
let pairs = tree.query_pairs();
assert_eq!(pairs.len(), 1);
let (a, b) = pairs[0];
assert!((a.id == 0 && b.id == 1) || (a.id == 1 && b.id == 0));
}
// --- query_pairs: verify recursive gives same results as brute force ---
#[test]
fn test_query_pairs_matches_brute_force() {
let entries = vec![
(make_entity(0), AABB::new(Vec3::ZERO, Vec3::new(2.0, 2.0, 2.0))),
(make_entity(1), AABB::new(Vec3::ONE, Vec3::new(3.0, 3.0, 3.0))),
(make_entity(2), AABB::new(Vec3::new(2.5, 2.5, 2.5), Vec3::new(4.0, 4.0, 4.0))),
(make_entity(3), AABB::new(Vec3::new(10.0, 10.0, 10.0), Vec3::new(11.0, 11.0, 11.0))),
(make_entity(4), AABB::new(Vec3::new(10.5, 10.5, 10.5), Vec3::new(12.0, 12.0, 12.0))),
];
let tree = BvhTree::build(&entries);
let mut pairs = tree.query_pairs();
pairs.sort_by_key(|(a, b)| (a.id.min(b.id), a.id.max(b.id)));
// Brute force
let mut brute: Vec<(Entity, Entity)> = Vec::new();
for i in 0..entries.len() {
for j in (i + 1)..entries.len() {
if entries[i].1.intersects(&entries[j].1) {
let a = entries[i].0;
let b = entries[j].0;
brute.push(if a.id <= b.id { (a, b) } else { (b, a) });
}
}
}
brute.sort_by_key(|(a, b)| (a.id, b.id));
assert_eq!(pairs.len(), brute.len(), "pair count mismatch: tree={}, brute={}", pairs.len(), brute.len());
for (tree_pair, brute_pair) in pairs.iter().zip(brute.iter()) {
let t = (tree_pair.0.id.min(tree_pair.1.id), tree_pair.0.id.max(tree_pair.1.id));
let b = (brute_pair.0.id, brute_pair.1.id);
assert_eq!(t, b);
}
}
// --- query_ray tests ---
#[test]
fn test_query_ray_basic() {
let entries = vec![
(make_entity(0), AABB::new(Vec3::new(4.0, -1.0, -1.0), Vec3::new(6.0, 1.0, 1.0))),
(make_entity(1), AABB::new(Vec3::new(10.0, 10.0, 10.0), Vec3::new(11.0, 11.0, 11.0))),
];
let tree = BvhTree::build(&entries);
let ray = Ray::new(Vec3::ZERO, Vec3::X);
let hits = tree.query_ray(&ray, 100.0);
assert_eq!(hits.len(), 1);
assert_eq!(hits[0].0.id, 0);
}
#[test]
fn test_query_ray_multiple() {
let entries = vec![
(make_entity(0), AABB::new(Vec3::new(2.0, -1.0, -1.0), Vec3::new(4.0, 1.0, 1.0))),
(make_entity(1), AABB::new(Vec3::new(6.0, -1.0, -1.0), Vec3::new(8.0, 1.0, 1.0))),
];
let tree = BvhTree::build(&entries);
let ray = Ray::new(Vec3::ZERO, Vec3::X);
let hits = tree.query_ray(&ray, 100.0);
assert_eq!(hits.len(), 2);
assert!(hits[0].1 < hits[1].1, "should be sorted by distance");
}
#[test]
fn test_query_ray_miss() {
let entries = vec![
(make_entity(0), AABB::new(Vec3::new(4.0, 4.0, 4.0), Vec3::new(6.0, 6.0, 6.0))),
];
let tree = BvhTree::build(&entries);
let ray = Ray::new(Vec3::ZERO, Vec3::X);
let hits = tree.query_ray(&ray, 100.0);
assert!(hits.is_empty());
}
// --- refit tests ---
#[test]
fn test_refit_updates_leaf() {
    // Refitting a leaf AABB must propagate so query_pairs sees the new overlap.
    let entries = vec![
        (make_entity(0), AABB::new(Vec3::ZERO, Vec3::ONE)),
        (make_entity(1), AABB::new(Vec3::new(5.0, 5.0, 5.0), Vec3::new(6.0, 6.0, 6.0))),
    ];
    let mut tree = BvhTree::build(&entries);
    // Initially separated
    assert!(tree.query_pairs().is_empty());
    // Move entity 1 to overlap entity 0
    tree.refit(&[(make_entity(1), AABB::new(Vec3::new(0.5, 0.5, 0.5), Vec3::new(1.5, 1.5, 1.5)))]);
    let pairs = tree.query_pairs();
    assert_eq!(pairs.len(), 1, "after refit, overlapping entities should be found");
}
#[test]
fn test_refit_separates_entities() {
    // The inverse of the previous test: refit must also be able to break a pair.
    let entries = vec![
        (make_entity(0), AABB::new(Vec3::ZERO, Vec3::new(2.0, 2.0, 2.0))),
        (make_entity(1), AABB::new(Vec3::ONE, Vec3::new(3.0, 3.0, 3.0))),
    ];
    let mut tree = BvhTree::build(&entries);
    // Initially overlapping
    assert_eq!(tree.query_pairs().len(), 1);
    // Move entity 1 far away
    tree.refit(&[(make_entity(1), AABB::new(Vec3::new(100.0, 100.0, 100.0), Vec3::new(101.0, 101.0, 101.0)))]);
    assert!(tree.query_pairs().is_empty(), "after refit, separated entities should not overlap");
}
}

View File

@@ -0,0 +1,118 @@
use voltex_math::{Vec3, AABB, Ray};
use crate::ray::ray_vs_aabb;
/// Swept sphere vs AABB continuous collision detection.
/// Expands the AABB by the sphere radius, then tests a ray from start to end.
/// Returns t in [0,1] of first contact, or None if no contact.
pub fn swept_sphere_vs_aabb(start: Vec3, end: Vec3, radius: f32, aabb: &AABB) -> Option<f32> {
    // Minkowski-style reduction: sweeping a sphere against `aabb` is equivalent
    // to sweeping a point against the AABB grown by `radius` on every side.
    let grow = Vec3::new(radius, radius, radius);
    let expanded = AABB::new(aabb.min - grow, aabb.max + grow);
    let delta = end - start;
    let travel = delta.length();
    if travel < 1e-10 {
        // Degenerate sweep: no motion, so the only possible contact is overlap at t = 0.
        return if expanded.contains_point(start) { Some(0.0) } else { None };
    }
    // Cast a unit-direction ray along the motion, then convert the hit distance
    // back into the [0,1] parametric range of the sweep.
    let ray = Ray::new(start, delta * (1.0 / travel));
    let hit_distance = ray_vs_aabb(&ray, &expanded)?;
    let fraction = hit_distance / travel;
    if fraction <= 1.0 { Some(fraction) } else { None }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Loose tolerance suited to sweep fractions derived from f32 arithmetic.
    fn approx(a: f32, b: f32) -> bool {
        (a - b).abs() < 1e-3
    }
    #[test]
    fn test_swept_sphere_hits_aabb() {
        // Straight sweep along +X into a box; first contact happens against the
        // radius-expanded face, not the raw AABB face.
        let start = Vec3::new(-10.0, 0.0, 0.0);
        let end = Vec3::new(10.0, 0.0, 0.0);
        let radius = 0.5;
        let aabb = AABB::new(Vec3::new(4.0, -1.0, -1.0), Vec3::new(6.0, 1.0, 1.0));
        let t = swept_sphere_vs_aabb(start, end, radius, &aabb).unwrap();
        // Expanded AABB min.x = 3.5, start.x = -10, direction = 20
        // t = (3.5 - (-10)) / 20 = 13.5 / 20 = 0.675
        assert!(t > 0.0 && t < 1.0);
        assert!(approx(t, 0.675));
    }
    #[test]
    fn test_swept_sphere_misses_aabb() {
        // Same sweep shifted to y = 10 — well above the box plus radius.
        let start = Vec3::new(-10.0, 10.0, 0.0);
        let end = Vec3::new(10.0, 10.0, 0.0);
        let radius = 0.5;
        let aabb = AABB::new(Vec3::new(4.0, -1.0, -1.0), Vec3::new(6.0, 1.0, 1.0));
        assert!(swept_sphere_vs_aabb(start, end, radius, &aabb).is_none());
    }
    #[test]
    fn test_swept_sphere_starts_inside() {
        // Start point already overlaps: contact must be reported at t = 0.
        let start = Vec3::new(5.0, 0.0, 0.0);
        let end = Vec3::new(10.0, 0.0, 0.0);
        let radius = 0.5;
        let aabb = AABB::new(Vec3::new(4.0, -1.0, -1.0), Vec3::new(6.0, 1.0, 1.0));
        let t = swept_sphere_vs_aabb(start, end, radius, &aabb).unwrap();
        assert!(approx(t, 0.0));
    }
    #[test]
    fn test_swept_sphere_tunneling_detection() {
        // Fast sphere that would tunnel through a thin wall
        // (the case a discrete overlap test would miss entirely).
        let start = Vec3::new(-100.0, 0.0, 0.0);
        let end = Vec3::new(100.0, 0.0, 0.0);
        let radius = 0.1;
        // Thin wall at x=0
        let aabb = AABB::new(Vec3::new(-0.05, -10.0, -10.0), Vec3::new(0.05, 10.0, 10.0));
        let t = swept_sphere_vs_aabb(start, end, radius, &aabb);
        assert!(t.is_some(), "should detect tunneling through thin wall");
        let t = t.unwrap();
        assert!(t > 0.0 && t < 1.0);
    }
    #[test]
    fn test_swept_sphere_no_movement() {
        // Zero-length sweep exercises the degenerate branch.
        let start = Vec3::new(5.0, 0.0, 0.0);
        let end = Vec3::new(5.0, 0.0, 0.0);
        let radius = 0.5;
        let aabb = AABB::new(Vec3::new(4.0, -1.0, -1.0), Vec3::new(6.0, 1.0, 1.0));
        // Inside expanded AABB, so should return 0
        let t = swept_sphere_vs_aabb(start, end, radius, &aabb).unwrap();
        assert!(approx(t, 0.0));
    }
    #[test]
    fn test_swept_sphere_beyond_range() {
        // Contact exists on the infinite ray but past the end of the sweep (t > 1).
        let start = Vec3::new(-10.0, 0.0, 0.0);
        let end = Vec3::new(-5.0, 0.0, 0.0);
        let radius = 0.5;
        let aabb = AABB::new(Vec3::new(4.0, -1.0, -1.0), Vec3::new(6.0, 1.0, 1.0));
        // AABB is at x=4..6, moving from -10 to -5 won't reach it
        assert!(swept_sphere_vs_aabb(start, end, radius, &aabb).is_none());
    }
}

View File

@@ -0,0 +1,133 @@
use voltex_math::{Vec3, AABB};
/// A convex hull collider defined by a set of vertices.
#[derive(Debug, Clone)]
pub struct ConvexHull {
    pub vertices: Vec<Vec3>,
}
impl ConvexHull {
    /// Wraps a vertex cloud as a hull. The points are assumed to already be
    /// the hull's vertices; no convexity check is performed.
    pub fn new(vertices: Vec<Vec3>) -> Self {
        ConvexHull { vertices }
    }
    /// Support function for GJK: returns the vertex farthest in the given direction.
    ///
    /// # Panics
    /// Panics if the hull has no vertices.
    pub fn support(&self, direction: Vec3) -> Vec3 {
        let mut points = self.vertices.iter().copied();
        let mut best = points.next().expect("convex hull needs at least one vertex");
        let mut best_dot = best.dot(direction);
        for candidate in points {
            let score = candidate.dot(direction);
            if score > best_dot {
                best_dot = score;
                best = candidate;
            }
        }
        best
    }
}
/// Collision shape attached to an entity. The visible collision routines
/// place every shape at the entity's position; rotation is not applied.
#[derive(Debug, Clone)]
pub enum Collider {
    /// Sphere of the given radius, centered on the entity position.
    Sphere { radius: f32 },
    /// Axis-aligned box described by half extents along each axis.
    Box { half_extents: Vec3 },
    /// Y-axis-aligned capsule: segment of length 2*half_height with
    /// hemispherical caps of the given radius (see `aabb` and the GJK support fn).
    Capsule { radius: f32, half_height: f32 },
    /// Arbitrary convex point cloud; collided via GJK/EPA.
    ConvexHull(ConvexHull),
}
impl Collider {
    /// World-space axis-aligned bounding box for this shape placed at `position`.
    pub fn aabb(&self, position: Vec3) -> AABB {
        match self {
            Collider::Sphere { radius } => {
                let extent = Vec3::new(*radius, *radius, *radius);
                AABB::new(position - extent, position + extent)
            }
            Collider::Box { half_extents } => {
                AABB::new(position - *half_extents, position + *half_extents)
            }
            Collider::Capsule { radius, half_height } => {
                // Vertically the capsule reaches half_height + radius; laterally just radius.
                let extent = Vec3::new(*radius, *half_height + *radius, *radius);
                AABB::new(position - extent, position + extent)
            }
            Collider::ConvexHull(hull) => {
                // Fold every translated vertex into a running (min, max) corner pair.
                let first = position + hull.vertices[0];
                let (min, max) = hull.vertices[1..]
                    .iter()
                    .fold((first, first), |(lo, hi), &v| {
                        let p = position + v;
                        (
                            Vec3::new(lo.x.min(p.x), lo.y.min(p.y), lo.z.min(p.z)),
                            Vec3::new(hi.x.max(p.x), hi.y.max(p.y), hi.z.max(p.z)),
                        )
                    });
                AABB::new(min, max)
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_sphere_aabb() {
        // AABB of a sphere is the center offset by the radius on every axis.
        let c = Collider::Sphere { radius: 2.0 };
        let aabb = c.aabb(Vec3::new(1.0, 0.0, 0.0));
        assert_eq!(aabb.min, Vec3::new(-1.0, -2.0, -2.0));
        assert_eq!(aabb.max, Vec3::new(3.0, 2.0, 2.0));
    }
    #[test]
    fn test_box_aabb() {
        // Box AABB is just position ± half extents.
        let c = Collider::Box { half_extents: Vec3::new(1.0, 2.0, 3.0) };
        let aabb = c.aabb(Vec3::ZERO);
        assert_eq!(aabb.min, Vec3::new(-1.0, -2.0, -3.0));
        assert_eq!(aabb.max, Vec3::new(1.0, 2.0, 3.0));
    }
    #[test]
    fn test_capsule_aabb() {
        // Vertical extent = half_height + radius = 1.5; lateral extent = radius = 0.5.
        let c = Collider::Capsule { radius: 0.5, half_height: 1.0 };
        let aabb = c.aabb(Vec3::new(1.0, 2.0, 3.0));
        assert_eq!(aabb.min, Vec3::new(0.5, 0.5, 2.5));
        assert_eq!(aabb.max, Vec3::new(1.5, 3.5, 3.5));
    }
    #[test]
    fn test_convex_hull_support() {
        // Tetrahedron-like hull with a single apex at y = 1.
        let hull = ConvexHull::new(vec![
            Vec3::new(-1.0, -1.0, -1.0),
            Vec3::new(1.0, -1.0, -1.0),
            Vec3::new(0.0, 1.0, 0.0),
            Vec3::new(0.0, -1.0, 1.0),
        ]);
        // Support in +Y direction should return the top vertex
        let s = hull.support(Vec3::new(0.0, 1.0, 0.0));
        assert!((s.y - 1.0).abs() < 1e-6);
    }
    #[test]
    fn test_convex_hull_support_negative() {
        let hull = ConvexHull::new(vec![
            Vec3::new(0.0, 0.0, 0.0),
            Vec3::new(1.0, 0.0, 0.0),
            Vec3::new(0.0, 1.0, 0.0),
        ]);
        let s = hull.support(Vec3::new(-1.0, 0.0, 0.0));
        assert!((s.x - 0.0).abs() < 1e-6); // origin is farthest in -X
    }
    #[test]
    fn test_convex_hull_in_collider() {
        // Test that ConvexHull variant works with existing collision system
        // (unit-cube corner cloud; construction only, no narrow-phase here).
        let hull = ConvexHull::new(vec![
            Vec3::new(-1.0, -1.0, -1.0),
            Vec3::new(1.0, -1.0, -1.0),
            Vec3::new(1.0, 1.0, -1.0),
            Vec3::new(-1.0, 1.0, -1.0),
            Vec3::new(-1.0, -1.0, 1.0),
            Vec3::new(1.0, -1.0, 1.0),
            Vec3::new(1.0, 1.0, 1.0),
            Vec3::new(-1.0, 1.0, 1.0),
        ]);
        // Just verify construction works
        assert_eq!(hull.vertices.len(), 8);
    }
}

View File

@@ -0,0 +1,187 @@
use voltex_ecs::{World, Entity};
use voltex_ecs::Transform;
use voltex_math::Vec3;
use crate::collider::Collider;
use crate::contact::ContactPoint;
use crate::bvh::BvhTree;
use crate::narrow;
use crate::gjk;
/// Full collision pass: broad phase over AABBs, then per-pair narrow phase.
/// Returns one contact per overlapping candidate pair.
pub fn detect_collisions(world: &World) -> Vec<ContactPoint> {
    // Snapshot every entity that carries both a Transform and a Collider.
    let bodies: Vec<(Entity, Vec3, Collider)> = world
        .query2::<Transform, Collider>()
        .into_iter()
        .map(|(e, t, c)| (e, t.position, c.clone()))
        .collect();
    if bodies.len() < 2 {
        return Vec::new();
    }
    // Broad phase: BVH over world-space AABBs yields candidate pairs.
    let entries: Vec<(Entity, voltex_math::AABB)> = bodies
        .iter()
        .map(|(e, pos, col)| (*e, col.aabb(*pos)))
        .collect();
    let candidate_pairs = BvhTree::build(&entries).query_pairs();
    // Linear lookup of a body's snapshot by entity. NOTE(review): O(n) per pair;
    // a hash map keyed by entity would be faster if Entity implements Hash — confirm.
    let find = |entity: Entity| -> Option<(Vec3, Collider)> {
        bodies
            .iter()
            .find(|(e, _, _)| *e == entity)
            .map(|(_, p, c)| (*p, c.clone()))
    };
    let mut contacts = Vec::new();
    for (ea, eb) in candidate_pairs {
        let Some((pos_a, col_a)) = find(ea) else { continue };
        let Some((pos_b, col_b)) = find(eb) else { continue };
        // Narrow phase: exact test for the shape pairing.
        let result = match (&col_a, &col_b) {
            (Collider::Sphere { radius: ra }, Collider::Sphere { radius: rb }) => {
                narrow::sphere_vs_sphere(pos_a, *ra, pos_b, *rb)
            }
            (Collider::Sphere { radius }, Collider::Box { half_extents }) => {
                narrow::sphere_vs_box(pos_a, *radius, pos_b, *half_extents)
            }
            (Collider::Box { half_extents }, Collider::Sphere { radius }) => {
                // Run sphere-first, then flip normal and witness points back into A/B order.
                narrow::sphere_vs_box(pos_b, *radius, pos_a, *half_extents)
                    .map(|(n, d, pa, pb)| (-n, d, pb, pa))
            }
            (Collider::Box { half_extents: ha }, Collider::Box { half_extents: hb }) => {
                narrow::box_vs_box(pos_a, *ha, pos_b, *hb)
            }
            // Any pairing involving a capsule or convex hull falls back to GJK/EPA.
            (Collider::Capsule { .. }, _)
            | (_, Collider::Capsule { .. })
            | (Collider::ConvexHull(_), _)
            | (_, Collider::ConvexHull(_)) => gjk::gjk_epa(&col_a, pos_a, &col_b, pos_b),
        };
        if let Some((normal, depth, point_on_a, point_on_b)) = result {
            contacts.push(ContactPoint {
                entity_a: ea,
                entity_b: eb,
                normal,
                depth,
                point_on_a,
                point_on_b,
            });
        }
    }
    contacts
}
#[cfg(test)]
mod tests {
    use super::*;
    use voltex_ecs::World;
    use voltex_ecs::Transform;
    use voltex_math::Vec3;
    use crate::Collider;
    #[test]
    fn test_no_colliders() {
        // Empty world must short-circuit without touching the BVH.
        let world = World::new();
        let contacts = detect_collisions(&world);
        assert!(contacts.is_empty());
    }
    #[test]
    fn test_single_entity() {
        // Fewer than two bodies can never produce a pair.
        let mut world = World::new();
        let e = world.spawn();
        world.add(e, Transform::from_position(Vec3::ZERO));
        world.add(e, Collider::Sphere { radius: 1.0 });
        let contacts = detect_collisions(&world);
        assert!(contacts.is_empty());
    }
    #[test]
    fn test_two_spheres_colliding() {
        // Radii sum to 2.0, centers 1.5 apart => expected depth 0.5.
        let mut world = World::new();
        let a = world.spawn();
        world.add(a, Transform::from_position(Vec3::ZERO));
        world.add(a, Collider::Sphere { radius: 1.0 });
        let b = world.spawn();
        world.add(b, Transform::from_position(Vec3::new(1.5, 0.0, 0.0)));
        world.add(b, Collider::Sphere { radius: 1.0 });
        let contacts = detect_collisions(&world);
        assert_eq!(contacts.len(), 1);
        assert!((contacts[0].depth - 0.5).abs() < 1e-5);
    }
    #[test]
    fn test_two_spheres_separated() {
        let mut world = World::new();
        let a = world.spawn();
        world.add(a, Transform::from_position(Vec3::ZERO));
        world.add(a, Collider::Sphere { radius: 1.0 });
        let b = world.spawn();
        world.add(b, Transform::from_position(Vec3::new(10.0, 0.0, 0.0)));
        world.add(b, Collider::Sphere { radius: 1.0 });
        let contacts = detect_collisions(&world);
        assert!(contacts.is_empty());
    }
    #[test]
    fn test_sphere_vs_box_collision() {
        // Exercises the mixed-shape dispatch (sphere-first ordering).
        let mut world = World::new();
        let a = world.spawn();
        world.add(a, Transform::from_position(Vec3::ZERO));
        world.add(a, Collider::Sphere { radius: 1.0 });
        let b = world.spawn();
        world.add(b, Transform::from_position(Vec3::new(1.5, 0.0, 0.0)));
        world.add(b, Collider::Box { half_extents: Vec3::ONE });
        let contacts = detect_collisions(&world);
        assert_eq!(contacts.len(), 1);
        assert!(contacts[0].depth > 0.0);
    }
    #[test]
    fn test_box_vs_box_collision() {
        // Unit boxes 1.5 apart overlap 0.5 along X.
        let mut world = World::new();
        let a = world.spawn();
        world.add(a, Transform::from_position(Vec3::ZERO));
        world.add(a, Collider::Box { half_extents: Vec3::ONE });
        let b = world.spawn();
        world.add(b, Transform::from_position(Vec3::new(1.5, 0.0, 0.0)));
        world.add(b, Collider::Box { half_extents: Vec3::ONE });
        let contacts = detect_collisions(&world);
        assert_eq!(contacts.len(), 1);
        assert!((contacts[0].depth - 0.5).abs() < 1e-5);
    }
    #[test]
    fn test_three_entities_mixed() {
        // Third body is far away: broad phase must cull it, leaving one contact.
        let mut world = World::new();
        let a = world.spawn();
        world.add(a, Transform::from_position(Vec3::ZERO));
        world.add(a, Collider::Sphere { radius: 1.0 });
        let b = world.spawn();
        world.add(b, Transform::from_position(Vec3::new(1.5, 0.0, 0.0)));
        world.add(b, Collider::Sphere { radius: 1.0 });
        let c = world.spawn();
        world.add(c, Transform::from_position(Vec3::new(100.0, 0.0, 0.0)));
        world.add(c, Collider::Box { half_extents: Vec3::ONE });
        let contacts = detect_collisions(&world);
        assert_eq!(contacts.len(), 1);
    }
}

View File

@@ -0,0 +1,12 @@
use voltex_ecs::Entity;
use voltex_math::Vec3;
/// A single contact produced by the narrow phase between two entities.
#[derive(Debug, Clone, Copy)]
pub struct ContactPoint {
    /// First entity of the pair (ordering comes from the broad phase).
    pub entity_a: Entity,
    /// Second entity of the pair.
    pub entity_b: Entity,
    /// Contact normal. NOTE(review): presumably points from A toward B —
    /// confirm against the narrow-phase routines that fill this in.
    pub normal: Vec3,
    /// Penetration depth along `normal`.
    pub depth: f32,
    /// Witness point on shape A, in world space.
    pub point_on_a: Vec3,
    /// Witness point on shape B, in world space.
    pub point_on_b: Vec3,
}

View File

@@ -0,0 +1,524 @@
use voltex_math::Vec3;
use crate::collider::Collider;
/// Returns the farthest point on a convex collider in a given direction.
fn support(collider: &Collider, position: Vec3, direction: Vec3) -> Vec3 {
    match collider {
        Collider::Sphere { radius } => {
            // Farthest point is the center pushed out by the radius along `direction`.
            let len = direction.length();
            if len < 1e-10 {
                // Degenerate direction: every surface point ties; return the center.
                position
            } else {
                position + direction * (*radius / len)
            }
        }
        Collider::Box { half_extents } => {
            // Pick the corner whose sign pattern matches the direction's signs.
            let sx = if direction.x >= 0.0 { half_extents.x } else { -half_extents.x };
            let sy = if direction.y >= 0.0 { half_extents.y } else { -half_extents.y };
            let sz = if direction.z >= 0.0 { half_extents.z } else { -half_extents.z };
            Vec3::new(position.x + sx, position.y + sy, position.z + sz)
        }
        Collider::Capsule { radius, half_height } => {
            // Choose the segment endpoint on the direction's side of the Y axis,
            // then push out by the cap radius exactly like the sphere case.
            let y_offset = if direction.y >= 0.0 { *half_height } else { -*half_height };
            let tip = Vec3::new(position.x, position.y + y_offset, position.z);
            let len = direction.length();
            if len < 1e-10 {
                tip
            } else {
                tip + direction * (*radius / len)
            }
        }
        Collider::ConvexHull(hull) => position + hull.support(direction),
    }
}
/// Minkowski difference support: support_A(dir) - support_B(-dir)
fn support_minkowski(a: &Collider, pos_a: Vec3, b: &Collider, pos_b: Vec3, dir: Vec3) -> Vec3 {
    let farthest_on_a = support(a, pos_a, dir);
    let farthest_on_b = support(b, pos_b, -dir);
    farthest_on_a - farthest_on_b
}
/// GJK: returns Some(simplex) if shapes overlap, None if separated.
pub fn gjk(a: &Collider, pos_a: Vec3, b: &Collider, pos_b: Vec3) -> Option<Vec<Vec3>> {
    // Initial search direction: center-to-center (a cheap heuristic);
    // fall back to +X when the centers coincide.
    let mut dir = pos_b - pos_a;
    if dir.length_squared() < 1e-10 {
        dir = Vec3::X;
    }
    let mut simplex: Vec<Vec3> = Vec::new();
    let s = support_minkowski(a, pos_a, b, pos_b, dir);
    simplex.push(s);
    // Next search direction: from the first support point toward the origin.
    dir = -s;
    // Bounded iteration: each pass adds one Minkowski support point and
    // shrinks the simplex toward the origin via process_simplex.
    for _ in 0..64 {
        if dir.length_squared() < 1e-20 {
            // Search direction collapsed: origin lies on the current simplex.
            // Treat as touching and pad the simplex up to a tetrahedron for EPA.
            return build_tetrahedron(&simplex, a, pos_a, b, pos_b);
        }
        let new_point = support_minkowski(a, pos_a, b, pos_b, dir);
        // If the new point didn't pass the origin, no intersection
        if new_point.dot(dir) < 0.0 {
            return None;
        }
        simplex.push(new_point);
        if process_simplex(&mut simplex, &mut dir) {
            // Simplex encloses the origin — shapes overlap.
            return build_tetrahedron(&simplex, a, pos_a, b, pos_b);
        }
    }
    // Iteration cap hit without a verdict. NOTE(review): treating a 4-point
    // simplex as a hit here is a heuristic for non-converging cases — confirm intended.
    if simplex.len() >= 4 {
        Some(simplex)
    } else {
        None
    }
}
/// Pads a (possibly degenerate) GJK simplex up to 4 points by probing the
/// six axis directions, so EPA can start from a tetrahedron whenever possible.
fn build_tetrahedron(
    simplex: &[Vec3],
    a: &Collider, pos_a: Vec3,
    b: &Collider, pos_b: Vec3,
) -> Option<Vec<Vec3>> {
    let mut pts = simplex.to_vec();
    if pts.len() < 4 {
        let probes = [Vec3::X, Vec3::Y, Vec3::Z, -Vec3::X, -Vec3::Y, -Vec3::Z];
        for &probe in &probes {
            if pts.len() >= 4 {
                break;
            }
            let candidate = support_minkowski(a, pos_a, b, pos_b, probe);
            // Skip support points that duplicate an existing simplex vertex.
            let is_duplicate = pts.iter().any(|p| (*p - candidate).length_squared() < 1e-10);
            if !is_duplicate {
                pts.push(candidate);
            }
        }
    }
    if pts.len() >= 4 {
        Some(pts[..4].to_vec())
    } else {
        // Could not reach 4 distinct points; hand back whatever we have.
        Some(pts)
    }
}
/// Dispatches to the handler for the current simplex size.
/// Returns true once the simplex encloses the origin (only possible at size 4).
fn process_simplex(simplex: &mut Vec<Vec3>, dir: &mut Vec3) -> bool {
    match simplex.len() {
        2 => process_line(simplex, dir),
        3 => process_triangle(simplex, dir),
        4 => process_tetrahedron(simplex, dir),
        // Size 0/1 never reaches here in practice; answer "not enclosing".
        _ => false,
    }
}
// Simplex is [B, A] where A is the most recently added point.
// Keeps the sub-simplex closest to the origin and points `dir` toward it.
// Always returns false — a segment cannot enclose the origin.
fn process_line(simplex: &mut Vec<Vec3>, dir: &mut Vec3) -> bool {
    let newest = simplex[1]; // A
    let oldest = simplex[0]; // B
    let ab = oldest - newest;
    let to_origin = -newest;
    if ab.dot(to_origin) <= 0.0 {
        // Origin lies behind A: drop B and search straight toward the origin.
        *simplex = vec![newest];
        *dir = to_origin;
        return false;
    }
    // Origin projects onto the segment: search perpendicular to AB, toward the origin.
    let perp = ab.cross(to_origin).cross(ab);
    *dir = if perp.length_squared() < 1e-20 {
        // Origin sits exactly on the AB line; any perpendicular direction works.
        perpendicular_to(ab)
    } else {
        perp
    };
    false
}
// Simplex is [C, B, A] where A is the most recently added.
// Classifies the origin into a Voronoi region of the triangle: an edge/vertex
// sub-simplex is kept (with a new search direction), or the search continues
// above/below the plane. Always returns false — a triangle cannot enclose the
// origin in 3D.
fn process_triangle(simplex: &mut Vec<Vec3>, dir: &mut Vec3) -> bool {
    let a = simplex[2];
    let b = simplex[1];
    let c = simplex[0];
    let ab = b - a;
    let ac = c - a;
    let ao = -a;
    let abc = ab.cross(ac); // triangle normal
    // Check if origin is outside edge AB (on the side away from C)
    let ab_normal = ab.cross(abc); // perpendicular to AB, pointing away from triangle interior
    if ab_normal.dot(ao) > 0.0 {
        // Origin is outside AB edge
        if ab.dot(ao) > 0.0 {
            // Keep edge AB; new direction is perpendicular to AB, toward the origin.
            *simplex = vec![b, a];
            *dir = ab.cross(ao).cross(ab);
            if dir.length_squared() < 1e-20 {
                // Origin exactly on the AB line: pick any perpendicular.
                *dir = perpendicular_to(ab);
            }
        } else {
            // Closest feature is vertex A alone.
            *simplex = vec![a];
            *dir = ao;
        }
        return false;
    }
    // Check if origin is outside edge AC (on the side away from B)
    let ac_normal = abc.cross(ac); // perpendicular to AC, pointing away from triangle interior
    if ac_normal.dot(ao) > 0.0 {
        // Origin is outside AC edge
        if ac.dot(ao) > 0.0 {
            // Keep edge AC; search perpendicular to it, toward the origin.
            *simplex = vec![c, a];
            *dir = ac.cross(ao).cross(ac);
            if dir.length_squared() < 1e-20 {
                *dir = perpendicular_to(ac);
            }
        } else {
            *simplex = vec![a];
            *dir = ao;
        }
        return false;
    }
    // Origin is within the triangle prism — check above or below
    if abc.dot(ao) > 0.0 {
        // Origin is above the triangle
        *dir = abc;
    } else {
        // Origin is below the triangle — flip winding
        // so the tetrahedron step sees a consistently wound base.
        *simplex = vec![b, c, a];
        *dir = -abc;
    }
    false
}
// Simplex is [D, C, B, A] where A is the most recently added.
// Tests the origin against the three faces that contain A (the face BCD was
// already ruled out when A was chosen past the origin). If the origin lies
// outside a face, drop the opposite vertex and keep searching; otherwise the
// tetrahedron encloses the origin and GJK reports an intersection.
fn process_tetrahedron(simplex: &mut Vec<Vec3>, dir: &mut Vec3) -> bool {
    let a = simplex[3];
    let b = simplex[2];
    let c = simplex[1];
    let d = simplex[0];
    let ab = b - a;
    let ac = c - a;
    let ad = d - a;
    let ao = -a;
    // Face normals, oriented outward from the tetrahedron
    let abc = ab.cross(ac);
    let acd = ac.cross(ad);
    let adb = ad.cross(ab);
    // Ensure normals point outward: ABC normal should point away from D
    // (each face normal is flipped if it points toward the opposite vertex).
    let abc_out = if abc.dot(ad) < 0.0 { abc } else { -abc };
    let acd_out = if acd.dot(ab) < 0.0 { acd } else { -acd };
    let adb_out = if adb.dot(ac) < 0.0 { adb } else { -adb };
    if abc_out.dot(ao) > 0.0 {
        // Origin above face ABC — reduce to triangle ABC
        *simplex = vec![c, b, a];
        *dir = abc_out;
        return false;
    }
    if acd_out.dot(ao) > 0.0 {
        // Origin above face ACD — reduce to triangle ACD
        *simplex = vec![d, c, a];
        *dir = acd_out;
        return false;
    }
    if adb_out.dot(ao) > 0.0 {
        // Origin above face ADB — reduce to triangle ADB
        *simplex = vec![b, d, a];
        *dir = adb_out;
        return false;
    }
    // Origin is inside the tetrahedron
    true
}
/// Any vector perpendicular to `v` (not normalized).
fn perpendicular_to(v: Vec3) -> Vec3 {
    // Cross with whichever axis is least aligned with v, so the result
    // cannot be near-zero for a non-zero input.
    if v.x.abs() < 0.9 {
        v.cross(Vec3::X)
    } else {
        v.cross(Vec3::Y)
    }
}
// ==================== EPA ====================
/// Hard cap on EPA polytope-refinement iterations.
const EPA_MAX_ITER: usize = 64;
/// Stop refining once the polytope grows by less than this along the
/// closest face's normal (convergence threshold, in world units).
const EPA_TOLERANCE: f32 = 0.0001;
/// One triangular face of the expanding EPA polytope.
#[derive(Clone)]
struct EpaFace {
    /// Indices into the polytope vertex list.
    indices: [usize; 3],
    /// Unit face normal (see make_face), oriented away from the origin.
    normal: Vec3,
    /// Distance from the origin to the face plane along `normal`.
    distance: f32,
}
/// EPA: given a simplex containing the origin, find penetration info.
/// Returns (normal, depth, point_on_a, point_on_b).
pub fn epa(
    a: &Collider, pos_a: Vec3,
    b: &Collider, pos_b: Vec3,
    simplex: &[Vec3],
) -> (Vec3, f32, Vec3, Vec3) {
    let mut polytope: Vec<Vec3> = simplex.to_vec();
    if polytope.len() < 4 {
        // Degenerate input (GJK could not pad to a tetrahedron): fall back to a
        // center-to-center normal with zero depth.
        let diff = pos_b - pos_a;
        let len = diff.length();
        let normal = if len > 1e-8 { diff * (1.0 / len) } else { Vec3::Y };
        return (normal, 0.0, pos_a, pos_b);
    }
    // Build initial 4 faces of tetrahedron
    let mut faces: Vec<EpaFace> = Vec::new();
    let face_indices: [[usize; 3]; 4] = [
        [0, 1, 2],
        [0, 3, 1],
        [0, 2, 3],
        [1, 3, 2],
    ];
    for indices in &face_indices {
        let mut face = make_face(&polytope, *indices);
        // Ensure outward-pointing normal (away from origin)
        if face.distance < 0.0 {
            face.indices.swap(0, 1);
            face.normal = -face.normal;
            face.distance = -face.distance;
        }
        if face.normal.length_squared() > 1e-10 {
            faces.push(face);
        }
    }
    // NOTE(review): if every initial face were degenerate, `closest_face`
    // keeps distance = f32::MAX and the returned depth is meaningless —
    // confirm upstream always supplies a non-degenerate tetrahedron.
    let mut closest_face = EpaFace {
        indices: [0, 0, 0],
        normal: Vec3::Y,
        distance: f32::MAX,
    };
    // Expand the polytope toward its closest face until the new support point
    // no longer moves the boundary outward by more than EPA_TOLERANCE.
    for _ in 0..EPA_MAX_ITER {
        if faces.is_empty() {
            break;
        }
        // Find closest face
        let mut min_dist = f32::MAX;
        let mut min_idx = 0;
        for (i, face) in faces.iter().enumerate() {
            if face.distance < min_dist {
                min_dist = face.distance;
                min_idx = i;
            }
        }
        closest_face = faces[min_idx].clone();
        let new_point = support_minkowski(a, pos_a, b, pos_b, closest_face.normal);
        let new_dist = new_point.dot(closest_face.normal);
        if new_dist - min_dist < EPA_TOLERANCE {
            // Converged: the Minkowski boundary is at the closest face.
            break;
        }
        // Add new point
        let new_idx = polytope.len();
        polytope.push(new_point);
        // Find and remove visible faces, collect horizon edges.
        // Edges shared by two removed faces cancel in pairs, leaving only the
        // silhouette ("horizon") edges that the new point must be stitched to.
        let mut edges: Vec<[usize; 2]> = Vec::new();
        faces.retain(|face| {
            let v = polytope[face.indices[0]];
            if face.normal.dot(new_point - v) > 0.0 {
                for i in 0..3 {
                    let edge = [face.indices[i], face.indices[(i + 1) % 3]];
                    let rev_idx = edges.iter().position(|e| e[0] == edge[1] && e[1] == edge[0]);
                    if let Some(idx) = rev_idx {
                        edges.remove(idx);
                    } else {
                        edges.push(edge);
                    }
                }
                false
            } else {
                true
            }
        });
        // Create new faces from horizon edges
        for edge in &edges {
            let mut face = make_face(&polytope, [edge[0], edge[1], new_idx]);
            if face.distance < 0.0 {
                face.indices.swap(0, 1);
                face.normal = -face.normal;
                face.distance = -face.distance;
            }
            if face.normal.length_squared() > 1e-10 {
                faces.push(face);
            }
        }
    }
    let normal = closest_face.normal;
    let depth = closest_face.distance.max(0.0);
    // Approximate witness points: farthest point of each shape along ±normal.
    let point_on_a = support(a, pos_a, normal);
    let point_on_b = support(b, pos_b, -normal);
    (normal, depth, point_on_a, point_on_b)
}
/// Builds an `EpaFace` from three polytope indices: a normalized plane normal
/// plus the plane's signed distance from the origin along that normal.
fn make_face(polytope: &[Vec3], indices: [usize; 3]) -> EpaFace {
    let (a, b, c) = (
        polytope[indices[0]],
        polytope[indices[1]],
        polytope[indices[2]],
    );
    let raw = (b - a).cross(c - a);
    let len = raw.length();
    // Degenerate (collinear) triangles get a +Y placeholder normal.
    let normal = if len > 1e-10 { raw * (1.0 / len) } else { Vec3::Y };
    EpaFace {
        indices,
        normal,
        distance: normal.dot(a),
    }
}
/// Combined GJK + EPA: returns (normal, depth, point_on_a, point_on_b) or None.
pub fn gjk_epa(
    a: &Collider, pos_a: Vec3,
    b: &Collider, pos_b: Vec3,
) -> Option<(Vec3, f32, Vec3, Vec3)> {
    // GJK decides overlap; EPA then extracts the penetration data from its simplex.
    gjk(a, pos_a, b, pos_b).map(|simplex| epa(a, pos_a, b, pos_b, &simplex))
}
#[cfg(test)]
mod tests {
    use super::*;
    // Tolerance is caller-supplied because EPA depth converges only to ~EPA_TOLERANCE.
    fn approx(a: f32, b: f32, tol: f32) -> bool {
        (a - b).abs() < tol
    }
    #[test]
    fn test_gjk_spheres_overlapping() {
        // Radii sum 2.0, centers 1.5 apart => expected depth ~0.5 along X.
        let a = Collider::Sphere { radius: 1.0 };
        let b = Collider::Sphere { radius: 1.0 };
        let pos_a = Vec3::ZERO;
        let pos_b = Vec3::new(1.5, 0.0, 0.0);
        let result = gjk_epa(&a, pos_a, &b, pos_b);
        assert!(result.is_some());
        let (normal, depth, _pa, _pb) = result.unwrap();
        assert!(normal.x.abs() > 0.5);
        assert!(approx(depth, 0.5, 0.1));
    }
    #[test]
    fn test_gjk_spheres_separated() {
        let a = Collider::Sphere { radius: 1.0 };
        let b = Collider::Sphere { radius: 1.0 };
        let pos_a = Vec3::ZERO;
        let pos_b = Vec3::new(5.0, 0.0, 0.0);
        let result = gjk(&a, pos_a, &b, pos_b);
        assert!(result.is_none());
    }
    #[test]
    fn test_gjk_capsule_vs_sphere_overlap() {
        let a = Collider::Capsule { radius: 0.5, half_height: 1.0 };
        let b = Collider::Sphere { radius: 1.0 };
        let pos_a = Vec3::ZERO;
        let pos_b = Vec3::new(1.0, 0.0, 0.0);
        let result = gjk_epa(&a, pos_a, &b, pos_b);
        assert!(result.is_some());
        let (_normal, depth, _pa, _pb) = result.unwrap();
        assert!(depth > 0.0);
    }
    #[test]
    fn test_gjk_capsule_vs_sphere_separated() {
        let a = Collider::Capsule { radius: 0.5, half_height: 1.0 };
        let b = Collider::Sphere { radius: 0.5 };
        let pos_a = Vec3::ZERO;
        let pos_b = Vec3::new(5.0, 0.0, 0.0);
        let result = gjk(&a, pos_a, &b, pos_b);
        assert!(result.is_none());
    }
    #[test]
    fn test_gjk_capsule_vs_box_overlap() {
        let a = Collider::Capsule { radius: 0.5, half_height: 1.0 };
        let b = Collider::Box { half_extents: Vec3::ONE };
        let pos_a = Vec3::ZERO;
        let pos_b = Vec3::new(1.0, 0.0, 0.0);
        let result = gjk_epa(&a, pos_a, &b, pos_b);
        assert!(result.is_some());
        let (_normal, depth, _pa, _pb) = result.unwrap();
        assert!(depth > 0.0);
    }
    #[test]
    fn test_gjk_capsule_vs_capsule_overlap() {
        let a = Collider::Capsule { radius: 0.5, half_height: 1.0 };
        let b = Collider::Capsule { radius: 0.5, half_height: 1.0 };
        let pos_a = Vec3::ZERO;
        let pos_b = Vec3::new(0.5, 0.0, 0.0);
        let result = gjk_epa(&a, pos_a, &b, pos_b);
        assert!(result.is_some());
        let (_normal, depth, _pa, _pb) = result.unwrap();
        assert!(depth > 0.0);
    }
    #[test]
    fn test_gjk_capsule_vs_capsule_separated() {
        let a = Collider::Capsule { radius: 0.5, half_height: 1.0 };
        let b = Collider::Capsule { radius: 0.5, half_height: 1.0 };
        let pos_a = Vec3::ZERO;
        let pos_b = Vec3::new(5.0, 0.0, 0.0);
        let result = gjk(&a, pos_a, &b, pos_b);
        assert!(result.is_none());
    }
    // Direct support-function checks (the primitives GJK is built on).
    #[test]
    fn test_support_sphere() {
        let c = Collider::Sphere { radius: 2.0 };
        let p = support(&c, Vec3::ZERO, Vec3::X);
        assert!(approx(p.x, 2.0, 1e-5));
        assert!(approx(p.y, 0.0, 1e-5));
        assert!(approx(p.z, 0.0, 1e-5));
    }
    #[test]
    fn test_support_box() {
        // Corner selection follows the direction's sign per axis.
        let c = Collider::Box { half_extents: Vec3::new(1.0, 2.0, 3.0) };
        let p = support(&c, Vec3::ZERO, Vec3::new(1.0, -1.0, 1.0));
        assert!(approx(p.x, 1.0, 1e-5));
        assert!(approx(p.y, -2.0, 1e-5));
        assert!(approx(p.z, 3.0, 1e-5));
    }
    #[test]
    fn test_support_capsule() {
        // Top of the capsule = half_height + radius = 1.5.
        let c = Collider::Capsule { radius: 0.5, half_height: 1.0 };
        let p = support(&c, Vec3::ZERO, Vec3::Y);
        assert!(approx(p.y, 1.5, 1e-5));
    }
}

View File

@@ -0,0 +1,331 @@
use voltex_ecs::World;
use voltex_ecs::Transform;
use voltex_math::Vec3;
use crate::collider::Collider;
use crate::rigid_body::{RigidBody, PhysicsConfig, SLEEP_VELOCITY_THRESHOLD, SLEEP_TIME_THRESHOLD};
/// Compute diagonal inertia tensor for a collider shape.
/// Returns Vec3 where each component is the moment of inertia about that axis.
pub fn inertia_tensor(collider: &Collider, mass: f32) -> Vec3 {
    match collider {
        Collider::Sphere { radius } => {
            // Solid sphere: I = 2/5 m r^2, identical on every axis.
            let i = (2.0 / 5.0) * mass * radius * radius;
            Vec3::new(i, i, i)
        }
        Collider::Box { half_extents } => {
            // Solid cuboid: I_axis = m/12 * (sum of squared side lengths of the
            // two perpendicular dimensions).
            let w = half_extents.x * 2.0;
            let h = half_extents.y * 2.0;
            let d = half_extents.z * 2.0;
            let factor = mass / 12.0;
            Vec3::new(
                factor * (h * h + d * d),
                factor * (w * w + d * d),
                factor * (w * w + h * h),
            )
        }
        Collider::Capsule { radius, half_height } => {
            // Approximate as a solid cylinder of height 2*half_height; the
            // hemispherical caps are ignored (this formula is pinned by the tests).
            let r = *radius;
            let h = half_height * 2.0;
            let ix = mass * (3.0 * r * r + h * h) / 12.0;
            let iy = mass * r * r / 2.0;
            let iz = ix;
            Vec3::new(ix, iy, iz)
        }
        Collider::ConvexHull(hull) => {
            // Approximate using AABB of the hull vertices
            let mut min = hull.vertices[0];
            let mut max = min;
            for &v in &hull.vertices[1..] {
                min = Vec3::new(min.x.min(v.x), min.y.min(v.y), min.z.min(v.z));
                max = Vec3::new(max.x.max(v.x), max.y.max(v.y), max.z.max(v.z));
            }
            let size = max - min;
            let w = size.x;
            let h = size.y;
            let d = size.z;
            let factor = mass / 12.0;
            Vec3::new(
                factor * (h * h + d * d),
                factor * (w * w + d * d),
                factor * (w * w + h * h),
            )
        }
    }
}
/// Compute inverse inertia (component-wise 1/I). Returns zero for zero-mass or zero-inertia.
pub fn inv_inertia(inertia: Vec3) -> Vec3 {
    // Guard each axis independently, so a locked/degenerate axis simply
    // produces zero angular response instead of an infinity.
    let invert = |i: f32| if i > 1e-10 { 1.0 / i } else { 0.0 };
    Vec3::new(invert(inertia.x), invert(inertia.y), invert(inertia.z))
}
/// Advance all awake dynamic bodies by one fixed timestep, then update
/// sleep timers. Velocity is stepped before position (semi-implicit Euler).
pub fn integrate(world: &mut World, config: &PhysicsConfig) {
    // 1. Collect linear + angular updates
    //    (computed against a read-only query; static and sleeping bodies skipped).
    let updates: Vec<(voltex_ecs::Entity, Vec3, Vec3, Vec3)> = world
        .query2::<Transform, RigidBody>()
        .into_iter()
        .filter(|(_, _, rb)| !rb.is_static() && !rb.is_sleeping)
        .map(|(entity, transform, rb)| {
            let new_velocity = rb.velocity + config.gravity * rb.gravity_scale * config.fixed_dt;
            // Position uses the *new* velocity — semi-implicit Euler.
            let new_position = transform.position + new_velocity * config.fixed_dt;
            // Rotation accumulated as a vector — assumes Transform.rotation is
            // Euler angles rather than a quaternion; TODO confirm.
            let new_rotation = transform.rotation + rb.angular_velocity * config.fixed_dt;
            (entity, new_velocity, new_position, new_rotation)
        })
        .collect();
    // 2. Apply (second phase so the query borrow above is released before mutation)
    for (entity, new_velocity, new_position, new_rotation) in updates {
        if let Some(rb) = world.get_mut::<RigidBody>(entity) {
            rb.velocity = new_velocity;
        }
        if let Some(t) = world.get_mut::<Transform>(entity) {
            t.position = new_position;
            t.rotation = new_rotation;
        }
    }
    // 3. Update sleep timers
    update_sleep_timers(world, config);
}
/// Accumulate per-body rest time; bodies slower than SLEEP_VELOCITY_THRESHOLD
/// for longer than SLEEP_TIME_THRESHOLD are put to sleep (velocities zeroed).
/// Any faster motion resets the timer.
fn update_sleep_timers(world: &mut World, config: &PhysicsConfig) {
    // Phase 1: decide per body from a read-only query (borrow released before mutation).
    let sleep_updates: Vec<(voltex_ecs::Entity, bool, f32)> = world
        .query::<RigidBody>()
        .filter(|(_, rb)| !rb.is_static())
        .map(|(entity, rb)| {
            // Cheap "speed" proxy: sum of linear and angular speeds.
            let speed = rb.velocity.length() + rb.angular_velocity.length();
            if speed < SLEEP_VELOCITY_THRESHOLD {
                let new_timer = rb.sleep_timer + config.fixed_dt;
                let should_sleep = new_timer >= SLEEP_TIME_THRESHOLD;
                (entity, should_sleep, new_timer)
            } else {
                // Moving: reset the timer and stay awake.
                (entity, false, 0.0)
            }
        })
        .collect();
    // Phase 2: write the decisions back.
    for (entity, should_sleep, timer) in sleep_updates {
        if let Some(rb) = world.get_mut::<RigidBody>(entity) {
            rb.sleep_timer = timer;
            if should_sleep {
                rb.is_sleeping = true;
                // Zero residual velocities so the body stays put while asleep.
                rb.velocity = Vec3::ZERO;
                rb.angular_velocity = Vec3::ZERO;
            }
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use voltex_ecs::World;
use voltex_ecs::Transform;
use voltex_math::Vec3;
use crate::{RigidBody, Collider};
fn approx(a: f32, b: f32) -> bool {
(a - b).abs() < 1e-4
}
#[test]
fn test_gravity_fall() {
let mut world = World::new();
let e = world.spawn();
world.add(e, Transform::from_position(Vec3::new(0.0, 10.0, 0.0)));
world.add(e, RigidBody::dynamic(1.0));
let config = PhysicsConfig::default();
integrate(&mut world, &config);
let rb = world.get::<RigidBody>(e).unwrap();
let t = world.get::<Transform>(e).unwrap();
let expected_vy = -9.81 * config.fixed_dt;
assert!(approx(rb.velocity.y, expected_vy));
let expected_py = 10.0 + expected_vy * config.fixed_dt;
assert!(approx(t.position.y, expected_py));
}
#[test]
fn test_static_unchanged() {
let mut world = World::new();
let e = world.spawn();
world.add(e, Transform::from_position(Vec3::new(0.0, 5.0, 0.0)));
world.add(e, RigidBody::statik());
let config = PhysicsConfig::default();
integrate(&mut world, &config);
let t = world.get::<Transform>(e).unwrap();
assert!(approx(t.position.y, 5.0));
let rb = world.get::<RigidBody>(e).unwrap();
assert!(approx(rb.velocity.y, 0.0));
}
#[test]
fn test_initial_velocity() {
let mut world = World::new();
let e = world.spawn();
world.add(e, Transform::from_position(Vec3::ZERO));
let mut rb = RigidBody::dynamic(1.0);
rb.velocity = Vec3::new(5.0, 0.0, 0.0);
rb.gravity_scale = 0.0;
world.add(e, rb);
let config = PhysicsConfig::default();
integrate(&mut world, &config);
let t = world.get::<Transform>(e).unwrap();
let expected_x = 5.0 * config.fixed_dt;
assert!(approx(t.position.x, expected_x));
}
// --- Inertia tensor tests ---
#[test]
fn test_inertia_tensor_sphere() {
let c = Collider::Sphere { radius: 1.0 };
let i = inertia_tensor(&c, 1.0);
let expected = 2.0 / 5.0;
assert!(approx(i.x, expected));
assert!(approx(i.y, expected));
assert!(approx(i.z, expected));
}
#[test]
fn test_inertia_tensor_box() {
let c = Collider::Box { half_extents: Vec3::ONE };
let i = inertia_tensor(&c, 12.0);
// w=2, h=2, d=2, factor=1 => ix = 4+4=8, etc
assert!(approx(i.x, 8.0));
assert!(approx(i.y, 8.0));
assert!(approx(i.z, 8.0));
}
#[test]
fn test_inertia_tensor_capsule() {
let c = Collider::Capsule { radius: 0.5, half_height: 1.0 };
let i = inertia_tensor(&c, 1.0);
// Approximate as cylinder: r=0.5, h=2.0
// ix = m*(3*r^2 + h^2)/12 = (3*0.25 + 4)/12 = 4.75/12
assert!(approx(i.x, 4.75 / 12.0));
// iy = m*r^2/2 = 0.25/2 = 0.125
assert!(approx(i.y, 0.125));
}
// --- Angular velocity integration tests ---
// One fixed step with angular velocity ~PI rad/s about Y rotates the
// transform by PI * dt about Y and must not move the position (no linear
// velocity, gravity scaled to zero).
#[test]
fn test_spinning_sphere() {
let mut world = World::new();
let e = world.spawn();
world.add(e, Transform::from_position(Vec3::ZERO));
let mut rb = RigidBody::dynamic(1.0);
rb.angular_velocity = Vec3::new(0.0, 3.14159, 0.0); // ~PI rad/s around Y
rb.gravity_scale = 0.0;
world.add(e, rb);
let config = PhysicsConfig::default();
integrate(&mut world, &config);
let t = world.get::<Transform>(e).unwrap();
let expected_rot_y = 3.14159 * config.fixed_dt;
assert!(approx(t.rotation.y, expected_rot_y));
// Position should not change (no linear velocity, no gravity)
assert!(approx(t.position.x, 0.0));
assert!(approx(t.position.y, 0.0));
}
// Angular velocity is not damped between steps: two fixed steps at
// 1 rad/s about X accumulate a rotation of 2 * dt.
#[test]
fn test_angular_velocity_persists() {
let mut world = World::new();
let e = world.spawn();
world.add(e, Transform::from_position(Vec3::ZERO));
let mut rb = RigidBody::dynamic(1.0);
rb.angular_velocity = Vec3::new(1.0, 0.0, 0.0);
rb.gravity_scale = 0.0;
world.add(e, rb);
let config = PhysicsConfig::default();
integrate(&mut world, &config);
integrate(&mut world, &config);
let t = world.get::<Transform>(e).unwrap();
let expected_rot_x = 2.0 * config.fixed_dt;
assert!(approx(t.rotation.x, expected_rot_x));
}
// --- Sleep system tests ---
// A body at rest for 60 steps of dt = 1/60 has been idle for 1 s, which is
// past SLEEP_TIME_THRESHOLD (0.5 s), so the integrator should flag it asleep.
#[test]
fn test_body_sleeps_after_resting() {
let mut world = World::new();
let e = world.spawn();
world.add(e, Transform::from_position(Vec3::ZERO));
let mut rb = RigidBody::dynamic(1.0);
rb.velocity = Vec3::ZERO;
rb.gravity_scale = 0.0;
world.add(e, rb);
let config = PhysicsConfig {
gravity: Vec3::ZERO,
fixed_dt: 1.0 / 60.0,
solver_iterations: 4,
};
// Integrate many times until sleep timer exceeds threshold
for _ in 0..60 {
integrate(&mut world, &config);
}
let rb = world.get::<RigidBody>(e).unwrap();
assert!(rb.is_sleeping, "body should be sleeping after resting");
}
// A body pre-flagged as sleeping must be skipped by the integrator: its
// transform stays exactly where it started even under default gravity.
#[test]
fn test_sleeping_body_not_integrated() {
let mut world = World::new();
let e = world.spawn();
world.add(e, Transform::from_position(Vec3::new(0.0, 10.0, 0.0)));
let mut rb = RigidBody::dynamic(1.0);
rb.is_sleeping = true;
world.add(e, rb);
let config = PhysicsConfig::default();
integrate(&mut world, &config);
let t = world.get::<Transform>(e).unwrap();
assert!(approx(t.position.y, 10.0), "sleeping body should not move");
}
// A body moving well above SLEEP_VELOCITY_THRESHOLD must never be put to
// sleep, no matter how many steps elapse.
#[test]
fn test_moving_body_does_not_sleep() {
let mut world = World::new();
let e = world.spawn();
world.add(e, Transform::from_position(Vec3::ZERO));
let mut rb = RigidBody::dynamic(1.0);
rb.velocity = Vec3::new(5.0, 0.0, 0.0);
rb.gravity_scale = 0.0;
world.add(e, rb);
let config = PhysicsConfig {
gravity: Vec3::ZERO,
fixed_dt: 1.0 / 60.0,
solver_iterations: 4,
};
for _ in 0..60 {
integrate(&mut world, &config);
}
let rb = world.get::<RigidBody>(e).unwrap();
assert!(!rb.is_sleeping, "fast-moving body should not sleep");
}
}

View File

@@ -0,0 +1,25 @@
//! Physics crate root.
//!
//! Declares the collision, raycast, rigid-body and integration submodules and
//! flattens their primary types and entry points into the crate namespace.
// Submodule declarations.
pub mod bvh;
pub mod ray;
pub mod collider;
pub mod contact;
pub mod narrow;
pub mod gjk;
pub mod collision;
pub mod rigid_body;
pub mod integrator;
pub mod solver;
pub mod raycast;
pub mod ccd;
pub mod mesh_collider;
// Flattened public API re-exports.
pub use bvh::BvhTree;
pub use collider::{Collider, ConvexHull};
pub use contact::ContactPoint;
pub use collision::detect_collisions;
pub use rigid_body::{RigidBody, PhysicsConfig};
pub use integrator::{integrate, inertia_tensor, inv_inertia};
pub use solver::{resolve_collisions, physics_step};
pub use raycast::{RayHit, raycast, raycast_all};
pub use ray::ray_vs_triangle;
pub use ccd::swept_sphere_vs_aabb;
pub use mesh_collider::{MeshCollider, MeshHit, ray_vs_mesh, ray_vs_mesh_all};

View File

@@ -0,0 +1,143 @@
use voltex_math::{Vec3, Ray};
use crate::ray::ray_vs_triangle;
/// A triangle mesh collider defined by vertices and triangle indices.
#[derive(Debug, Clone)]
pub struct MeshCollider {
/// Vertex positions referenced by `indices`.
pub vertices: Vec<Vec3>,
/// Triangles as triplets of indices into `vertices`.
pub indices: Vec<[u32; 3]>,
}
/// Result of a ray-mesh intersection test.
#[derive(Debug, Clone, Copy)]
pub struct MeshHit {
/// Ray parameter t of the hit (distance along the ray direction).
pub distance: f32,
/// World-space intersection point (`ray.at(distance)`).
pub point: Vec3,
/// Face normal of the hit triangle.
pub normal: Vec3,
/// Index into `MeshCollider::indices` of the hit triangle.
pub triangle_index: usize,
}
/// Cast a ray against a triangle mesh. Returns the closest hit, if any.
///
/// Every triangle is tested; the hit with the smallest ray parameter wins
/// (ties keep the earliest triangle).
pub fn ray_vs_mesh(ray: &Ray, mesh: &MeshCollider) -> Option<MeshHit> {
    let mut best: Option<MeshHit> = None;
    for (tri_idx, tri) in mesh.indices.iter().enumerate() {
        let (a, b, c) = (
            mesh.vertices[tri[0] as usize],
            mesh.vertices[tri[1] as usize],
            mesh.vertices[tri[2] as usize],
        );
        if let Some((t, n)) = ray_vs_triangle(ray, a, b, c) {
            // Only a strictly nearer hit replaces the current best.
            if best.map_or(true, |h| t < h.distance) {
                best = Some(MeshHit {
                    distance: t,
                    point: ray.at(t),
                    normal: n,
                    triangle_index: tri_idx,
                });
            }
        }
    }
    best
}
/// Cast a ray against a triangle mesh. Returns all hits sorted by distance.
///
/// Unlike [`ray_vs_mesh`] this does not stop at the closest triangle; every
/// intersected triangle produces a [`MeshHit`].
pub fn ray_vs_mesh_all(ray: &Ray, mesh: &MeshCollider) -> Vec<MeshHit> {
    let mut hits = Vec::new();
    for (i, tri) in mesh.indices.iter().enumerate() {
        let v0 = mesh.vertices[tri[0] as usize];
        let v1 = mesh.vertices[tri[1] as usize];
        let v2 = mesh.vertices[tri[2] as usize];
        if let Some((t, normal)) = ray_vs_triangle(ray, v0, v1, v2) {
            let point = ray.at(t);
            hits.push(MeshHit {
                distance: t,
                point,
                normal,
                triangle_index: i,
            });
        }
    }
    // total_cmp is a total order on f32 (NaN sorts deterministically), so the
    // sort cannot panic the way partial_cmp(..).unwrap() would on NaN input.
    hits.sort_by(|a, b| a.distance.total_cmp(&b.distance));
    hits
}
#[cfg(test)]
mod tests {
use super::*;
// A downward ray through the middle of a unit quad in the y=0 plane must
// hit at t = 1 from height 1.
#[test]
fn test_ray_vs_mesh_hit() {
// Simple quad (2 triangles)
let mesh = MeshCollider {
vertices: vec![
Vec3::new(-1.0, 0.0, -1.0),
Vec3::new(1.0, 0.0, -1.0),
Vec3::new(1.0, 0.0, 1.0),
Vec3::new(-1.0, 0.0, 1.0),
],
indices: vec![[0, 1, 2], [0, 2, 3]],
};
let ray = Ray::new(Vec3::new(0.0, 1.0, 0.0), Vec3::new(0.0, -1.0, 0.0));
let hit = ray_vs_mesh(&ray, &mesh);
assert!(hit.is_some());
assert!((hit.unwrap().distance - 1.0).abs() < 0.01);
}
// A ray laterally offset from the only triangle must report no hit.
#[test]
fn test_ray_vs_mesh_miss() {
let mesh = MeshCollider {
vertices: vec![
Vec3::new(-1.0, 0.0, -1.0),
Vec3::new(1.0, 0.0, -1.0),
Vec3::new(0.0, 0.0, 1.0),
],
indices: vec![[0, 1, 2]],
};
let ray = Ray::new(Vec3::new(5.0, 1.0, 0.0), Vec3::new(0.0, -1.0, 0.0));
assert!(ray_vs_mesh(&ray, &mesh).is_none());
}
// With triangles at y=0 and y=2 and a ray from y=5 pointing down, the
// closest hit must be the y=2 triangle (t = 3).
#[test]
fn test_ray_vs_mesh_closest() {
// Two triangles at different heights
let mesh = MeshCollider {
vertices: vec![
Vec3::new(-1.0, 0.0, -1.0),
Vec3::new(1.0, 0.0, -1.0),
Vec3::new(0.0, 0.0, 1.0),
Vec3::new(-1.0, 2.0, -1.0),
Vec3::new(1.0, 2.0, -1.0),
Vec3::new(0.0, 2.0, 1.0),
],
indices: vec![[0, 1, 2], [3, 4, 5]],
};
let ray = Ray::new(Vec3::new(0.0, 5.0, 0.0), Vec3::new(0.0, -1.0, 0.0));
let hit = ray_vs_mesh(&ray, &mesh).unwrap();
assert!((hit.distance - 3.0).abs() < 0.01); // hits y=2 first
}
// ray_vs_mesh_all must report both triangles, ordered near-to-far.
#[test]
fn test_ray_vs_mesh_all() {
let mesh = MeshCollider {
vertices: vec![
Vec3::new(-1.0, 0.0, -1.0),
Vec3::new(1.0, 0.0, -1.0),
Vec3::new(0.0, 0.0, 1.0),
Vec3::new(-1.0, 2.0, -1.0),
Vec3::new(1.0, 2.0, -1.0),
Vec3::new(0.0, 2.0, 1.0),
],
indices: vec![[0, 1, 2], [3, 4, 5]],
};
let ray = Ray::new(Vec3::new(0.0, 5.0, 0.0), Vec3::new(0.0, -1.0, 0.0));
let hits = ray_vs_mesh_all(&ray, &mesh);
assert_eq!(hits.len(), 2);
assert!(hits[0].distance < hits[1].distance); // sorted
}
}

View File

@@ -0,0 +1,236 @@
use voltex_math::Vec3;
/// Sphere-vs-sphere overlap test.
///
/// Returns `(normal A→B, depth, point_on_a, point_on_b)` or `None` if the
/// spheres do not overlap. Touching spheres count as a zero-depth contact.
pub fn sphere_vs_sphere(
    pos_a: Vec3, radius_a: f32,
    pos_b: Vec3, radius_b: f32,
) -> Option<(Vec3, f32, Vec3, Vec3)> {
    let center_delta = pos_b - pos_a;
    let combined = radius_a + radius_b;
    let sep_sq = center_delta.length_squared();
    if sep_sq > combined * combined {
        return None;
    }
    let separation = sep_sq.sqrt();
    // Coincident centers have no defined axis; fall back to +Y.
    let axis = if separation > 1e-8 {
        center_delta * (1.0 / separation)
    } else {
        Vec3::Y
    };
    Some((
        axis,
        combined - separation,
        pos_a + axis * radius_a,
        pos_b - axis * radius_b,
    ))
}
/// Sphere-vs-AABB overlap test.
///
/// Returns `(normal A→B, depth, point_on_a, point_on_b)` where A is the
/// sphere and B the box, or `None` when they do not overlap. Handles both
/// the sphere center outside the box (closest-point test) and inside the
/// box (nearest-face ejection).
pub fn sphere_vs_box(
sphere_pos: Vec3, radius: f32,
box_pos: Vec3, half_extents: Vec3,
) -> Option<(Vec3, f32, Vec3, Vec3)> {
let bmin = box_pos - half_extents;
let bmax = box_pos + half_extents;
// Closest point on the box (surface or interior) to the sphere center.
let closest = Vec3::new(
sphere_pos.x.clamp(bmin.x, bmax.x),
sphere_pos.y.clamp(bmin.y, bmax.y),
sphere_pos.z.clamp(bmin.z, bmax.z),
);
let diff = sphere_pos - closest;
let dist_sq = diff.length_squared();
// Sphere center outside box
if dist_sq > 1e-8 {
let dist = dist_sq.sqrt();
if dist > radius {
return None;
}
let normal = diff * (-1.0 / dist); // sphere→box direction
let depth = radius - dist;
let point_on_a = sphere_pos + normal * radius; // sphere surface toward box
let point_on_b = closest;
return Some((normal, depth, point_on_a, point_on_b));
}
// Sphere center inside box — find nearest face
// Distances from the center to each of the six faces; the smallest one
// picks the ejection axis. Later checks use strict <, so ties keep the
// earlier face (-X first).
let dx_min = sphere_pos.x - bmin.x;
let dx_max = bmax.x - sphere_pos.x;
let dy_min = sphere_pos.y - bmin.y;
let dy_max = bmax.y - sphere_pos.y;
let dz_min = sphere_pos.z - bmin.z;
let dz_max = bmax.z - sphere_pos.z;
let mut min_dist = dx_min;
let mut normal = Vec3::new(-1.0, 0.0, 0.0);
let mut closest_face = Vec3::new(bmin.x, sphere_pos.y, sphere_pos.z);
if dx_max < min_dist {
min_dist = dx_max;
normal = Vec3::new(1.0, 0.0, 0.0);
closest_face = Vec3::new(bmax.x, sphere_pos.y, sphere_pos.z);
}
if dy_min < min_dist {
min_dist = dy_min;
normal = Vec3::new(0.0, -1.0, 0.0);
closest_face = Vec3::new(sphere_pos.x, bmin.y, sphere_pos.z);
}
if dy_max < min_dist {
min_dist = dy_max;
normal = Vec3::new(0.0, 1.0, 0.0);
closest_face = Vec3::new(sphere_pos.x, bmax.y, sphere_pos.z);
}
if dz_min < min_dist {
min_dist = dz_min;
normal = Vec3::new(0.0, 0.0, -1.0);
closest_face = Vec3::new(sphere_pos.x, sphere_pos.y, bmin.z);
}
if dz_max < min_dist {
min_dist = dz_max;
normal = Vec3::new(0.0, 0.0, 1.0);
closest_face = Vec3::new(sphere_pos.x, sphere_pos.y, bmax.z);
}
// Center is embedded: full radius plus the distance to the nearest face.
let depth = min_dist + radius;
let point_on_a = sphere_pos + normal * radius;
let point_on_b = closest_face;
Some((normal, depth, point_on_a, point_on_b))
}
/// AABB-vs-AABB overlap test (separating-axis on the three world axes).
///
/// Returns `(normal A→B, depth, point_on_a, point_on_b)` resolved along the
/// axis of minimum penetration (ties prefer X, then Y), or `None` when a
/// separating axis exists.
pub fn box_vs_box(
    pos_a: Vec3, half_a: Vec3,
    pos_b: Vec3, half_b: Vec3,
) -> Option<(Vec3, f32, Vec3, Vec3)> {
    let delta = pos_b - pos_a;
    // Per-axis penetration; any negative value means the boxes are separated.
    let ox = (half_a.x + half_b.x) - delta.x.abs();
    if ox < 0.0 {
        return None;
    }
    let oy = (half_a.y + half_b.y) - delta.y.abs();
    if oy < 0.0 {
        return None;
    }
    let oz = (half_a.z + half_b.z) - delta.z.abs();
    if oz < 0.0 {
        return None;
    }
    // +1 when B lies on the positive side of A along the axis, else -1.
    let axis_sign = |v: f32| if v >= 0.0 { 1.0 } else { -1.0 };
    let (normal, depth) = if ox <= oy && ox <= oz {
        (Vec3::new(axis_sign(delta.x), 0.0, 0.0), ox)
    } else if oy <= oz {
        (Vec3::new(0.0, axis_sign(delta.y), 0.0), oy)
    } else {
        (Vec3::new(0.0, 0.0, axis_sign(delta.z)), oz)
    };
    // Contact points: each box's face point along the contact normal.
    let support = |h: Vec3| Vec3::new(normal.x * h.x, normal.y * h.y, normal.z * h.z);
    Some((normal, depth, pos_a + support(half_a), pos_b - support(half_b)))
}
#[cfg(test)]
mod tests {
use super::*;
fn approx(a: f32, b: f32) -> bool { (a - b).abs() < 1e-5 }
fn approx_vec(a: Vec3, b: Vec3) -> bool { approx(a.x, b.x) && approx(a.y, b.y) && approx(a.z, b.z) }
// sphere_vs_sphere tests
// Centers 5 apart with radii summing to 2: no contact.
#[test]
fn test_sphere_sphere_separated() {
let r = sphere_vs_sphere(Vec3::ZERO, 1.0, Vec3::new(5.0, 0.0, 0.0), 1.0);
assert!(r.is_none());
}
// Centers 1.5 apart, radii 1+1: depth 0.5, normal +X, surface points on
// each sphere along the axis.
#[test]
fn test_sphere_sphere_overlapping() {
let r = sphere_vs_sphere(Vec3::ZERO, 1.0, Vec3::new(1.5, 0.0, 0.0), 1.0);
let (normal, depth, pa, pb) = r.unwrap();
assert!(approx_vec(normal, Vec3::X));
assert!(approx(depth, 0.5));
assert!(approx_vec(pa, Vec3::new(1.0, 0.0, 0.0)));
assert!(approx_vec(pb, Vec3::new(0.5, 0.0, 0.0)));
}
// Exactly touching spheres report a zero-depth contact, not a miss.
#[test]
fn test_sphere_sphere_touching() {
let r = sphere_vs_sphere(Vec3::ZERO, 1.0, Vec3::new(2.0, 0.0, 0.0), 1.0);
let (normal, depth, _pa, _pb) = r.unwrap();
assert!(approx_vec(normal, Vec3::X));
assert!(approx(depth, 0.0));
}
// Coincident centers: degenerate axis, depth equals the radius sum.
#[test]
fn test_sphere_sphere_coincident() {
let r = sphere_vs_sphere(Vec3::ZERO, 1.0, Vec3::ZERO, 1.0);
let (_normal, depth, _pa, _pb) = r.unwrap();
assert!(approx(depth, 2.0));
}
// sphere_vs_box tests
// Sphere 5 units away from a unit box: no contact.
#[test]
fn test_sphere_box_separated() {
let r = sphere_vs_box(Vec3::new(5.0, 0.0, 0.0), 1.0, Vec3::ZERO, Vec3::ONE);
assert!(r.is_none());
}
// Sphere overlapping the +X face: normal points sphere→box (-X), contact
// point sits on the box face at x = 1.
#[test]
fn test_sphere_box_face_overlap() {
let r = sphere_vs_box(Vec3::new(1.5, 0.0, 0.0), 1.0, Vec3::ZERO, Vec3::ONE);
let (normal, depth, _pa, pb) = r.unwrap();
assert!(approx(normal.x, -1.0));
assert!(approx(depth, 0.5));
assert!(approx(pb.x, 1.0));
}
// Sphere center fully inside the box takes the nearest-face branch and
// must still produce a positive-depth contact.
#[test]
fn test_sphere_box_center_inside() {
let r = sphere_vs_box(Vec3::ZERO, 0.5, Vec3::ZERO, Vec3::ONE);
assert!(r.is_some());
let (_normal, depth, _pa, _pb) = r.unwrap();
assert!(depth > 0.0);
}
// box_vs_box tests
#[test]
fn test_box_box_separated() {
let r = box_vs_box(Vec3::ZERO, Vec3::ONE, Vec3::new(5.0, 0.0, 0.0), Vec3::ONE);
assert!(r.is_none());
}
// Overlap of 0.5 along X resolves along +X.
#[test]
fn test_box_box_overlapping() {
let r = box_vs_box(Vec3::ZERO, Vec3::ONE, Vec3::new(1.5, 0.0, 0.0), Vec3::ONE);
let (normal, depth, _pa, _pb) = r.unwrap();
assert!(approx_vec(normal, Vec3::X));
assert!(approx(depth, 0.5));
}
// Face-to-face touching boxes report a zero-depth contact.
#[test]
fn test_box_box_touching() {
let r = box_vs_box(Vec3::ZERO, Vec3::ONE, Vec3::new(2.0, 0.0, 0.0), Vec3::ONE);
let (_normal, depth, _pa, _pb) = r.unwrap();
assert!(approx(depth, 0.0));
}
// Penetration smallest along Y: resolution axis must be +Y.
#[test]
fn test_box_box_y_axis() {
let r = box_vs_box(Vec3::ZERO, Vec3::ONE, Vec3::new(0.0, 1.5, 0.0), Vec3::ONE);
let (normal, depth, _pa, _pb) = r.unwrap();
assert!(approx_vec(normal, Vec3::Y));
assert!(approx(depth, 0.5));
}
}

View File

@@ -0,0 +1,362 @@
use voltex_math::{Vec3, Ray, AABB};
/// Ray vs AABB (slab method). Returns t of nearest intersection, or None.
/// If ray starts inside AABB, returns Some(0.0).
pub fn ray_vs_aabb(ray: &Ray, aabb: &AABB) -> Option<f32> {
    let origin = [ray.origin.x, ray.origin.y, ray.origin.z];
    let dir = [ray.direction.x, ray.direction.y, ray.direction.z];
    let lo = [aabb.min.x, aabb.min.y, aabb.min.z];
    let hi = [aabb.max.x, aabb.max.y, aabb.max.z];
    let mut t_enter = f32::NEG_INFINITY;
    let mut t_exit = f32::INFINITY;
    for axis in 0..3 {
        if dir[axis].abs() < 1e-8 {
            // Ray runs parallel to this slab: origin must lie between the
            // two planes or the ray can never enter the box.
            if origin[axis] < lo[axis] || origin[axis] > hi[axis] {
                return None;
            }
            continue;
        }
        let inv = 1.0 / dir[axis];
        let (near, far) = {
            let t1 = (lo[axis] - origin[axis]) * inv;
            let t2 = (hi[axis] - origin[axis]) * inv;
            if t1 <= t2 { (t1, t2) } else { (t2, t1) }
        };
        t_enter = t_enter.max(near);
        t_exit = t_exit.min(far);
        // Slab intervals stopped overlapping: the ray misses the box.
        if t_enter > t_exit {
            return None;
        }
    }
    // Whole intersection interval is behind the ray origin.
    if t_exit < 0.0 {
        return None;
    }
    Some(t_enter.max(0.0))
}
/// Ray vs Sphere. Returns (t, normal) or None.
///
/// Solves the quadratic |origin + t*dir - center|^2 = radius^2 and keeps
/// the nearest non-negative root.
pub fn ray_vs_sphere(ray: &Ray, center: Vec3, radius: f32) -> Option<(f32, Vec3)> {
    let offset = ray.origin - center;
    let a = ray.direction.dot(ray.direction);
    let b = 2.0 * offset.dot(ray.direction);
    let c = offset.dot(offset) - radius * radius;
    let disc = b * b - 4.0 * a * c;
    if disc < 0.0 {
        return None;
    }
    let root = disc.sqrt();
    let near = (-b - root) / (2.0 * a);
    let far = (-b + root) / (2.0 * a);
    // Prefer the near root; the far root covers a ray starting inside the
    // sphere. Both negative means the sphere is entirely behind the ray.
    let t = if near >= 0.0 {
        near
    } else if far >= 0.0 {
        far
    } else {
        return None;
    };
    let surface = ray.at(t);
    Some((t, (surface - center).normalize()))
}
/// Ray vs axis-aligned Box. Returns (t, normal) or None.
///
/// Uses the slab test for t, then reconstructs the face normal from the
/// hit point. For a ray starting inside the box (t == 0) the normal of the
/// nearest face is returned instead.
pub fn ray_vs_box(ray: &Ray, center: Vec3, half_extents: Vec3) -> Option<(f32, Vec3)> {
let aabb = AABB::from_center_half_extents(center, half_extents);
let t = ray_vs_aabb(ray, &aabb)?;
if t == 0.0 {
// Ray starts inside box
// Pick the face with the smallest distance to the origin; ties keep
// the earlier entry (-X first).
let bmin = aabb.min;
let bmax = aabb.max;
let p = ray.origin;
let faces: [(f32, Vec3); 6] = [
(p.x - bmin.x, Vec3::new(-1.0, 0.0, 0.0)),
(bmax.x - p.x, Vec3::new(1.0, 0.0, 0.0)),
(p.y - bmin.y, Vec3::new(0.0, -1.0, 0.0)),
(bmax.y - p.y, Vec3::new(0.0, 1.0, 0.0)),
(p.z - bmin.z, Vec3::new(0.0, 0.0, -1.0)),
(bmax.z - p.z, Vec3::new(0.0, 0.0, 1.0)),
];
let mut min_dist = f32::INFINITY;
let mut normal = Vec3::Y;
for (dist, n) in &faces {
if *dist < min_dist {
min_dist = *dist;
normal = *n;
}
}
return Some((0.0, normal));
}
// Classify the hit point against each face plane (1e-4 tolerance); the
// final else catches the -Z face.
let hit = ray.at(t);
let rel = hit - center;
let hx = half_extents.x;
let hy = half_extents.y;
let hz = half_extents.z;
let normal = if (rel.x - hx).abs() < 1e-4 {
Vec3::X
} else if (rel.x + hx).abs() < 1e-4 {
Vec3::new(-1.0, 0.0, 0.0)
} else if (rel.y - hy).abs() < 1e-4 {
Vec3::Y
} else if (rel.y + hy).abs() < 1e-4 {
Vec3::new(0.0, -1.0, 0.0)
} else if (rel.z - hz).abs() < 1e-4 {
Vec3::Z
} else {
Vec3::new(0.0, 0.0, -1.0)
};
Some((t, normal))
}
/// Ray vs Capsule (Y-axis aligned). Returns (t, normal) or None.
/// Capsule = cylinder of given half_height along Y + hemisphere caps of given radius.
///
/// Strategy: intersect the infinite cylinder in the XZ plane and keep hits
/// whose Y lies within the cylindrical section, then test each hemisphere
/// cap as a full sphere and keep hits on the correct side; the smallest t
/// among all candidates wins.
pub fn ray_vs_capsule(ray: &Ray, center: Vec3, radius: f32, half_height: f32) -> Option<(f32, Vec3)> {
// Test the two hemisphere caps
let top_center = Vec3::new(center.x, center.y + half_height, center.z);
let bot_center = Vec3::new(center.x, center.y - half_height, center.z);
let mut best: Option<(f32, Vec3)> = None;
// Test infinite cylinder (ignore Y component for the 2D circle test)
// Ray in XZ plane relative to capsule center
let ox = ray.origin.x - center.x;
let oz = ray.origin.z - center.z;
let dx = ray.direction.x;
let dz = ray.direction.z;
let a = dx * dx + dz * dz;
let b = 2.0 * (ox * dx + oz * dz);
let c = ox * ox + oz * oz - radius * radius;
// a ~ 0 means the ray is vertical; only the sphere caps can be hit then.
if a > 1e-10 {
let disc = b * b - 4.0 * a * c;
if disc >= 0.0 {
let sqrt_d = disc.sqrt();
// Both quadratic roots are candidates (near and far wall).
for sign in [-1.0f32, 1.0] {
let t = (-b + sign * sqrt_d) / (2.0 * a);
if t >= 0.0 {
let hit_y = ray.origin.y + ray.direction.y * t;
// Check if hit is within the cylinder portion
if hit_y >= center.y - half_height && hit_y <= center.y + half_height {
let hit = ray.at(t);
// Cylinder wall normal is radial, with no Y component.
let normal = Vec3::new(hit.x - center.x, 0.0, hit.z - center.z).normalize();
if best.is_none() || t < best.unwrap().0 {
best = Some((t, normal));
}
}
}
}
}
}
// Test top hemisphere
if let Some((t, normal)) = ray_vs_sphere(ray, top_center, radius) {
let hit = ray.at(t);
// Only accept sphere hits above the cylindrical section.
if hit.y >= center.y + half_height {
if best.is_none() || t < best.unwrap().0 {
best = Some((t, normal));
}
}
}
// Test bottom hemisphere
if let Some((t, normal)) = ray_vs_sphere(ray, bot_center, radius) {
let hit = ray.at(t);
// Only accept sphere hits below the cylindrical section.
if hit.y <= center.y - half_height {
if best.is_none() || t < best.unwrap().0 {
best = Some((t, normal));
}
}
}
best
}
/// Ray vs Triangle (Möller–Trumbore algorithm).
/// Returns (t, normal) where normal is the triangle face normal.
///
/// The normal follows the winding order (edge1 × edge2); it is not flipped
/// to face the ray. Back-face hits are still reported.
pub fn ray_vs_triangle(ray: &Ray, v0: Vec3, v1: Vec3, v2: Vec3) -> Option<(f32, Vec3)> {
let edge1 = v1 - v0;
let edge2 = v2 - v0;
let h = ray.direction.cross(edge2);
let a = edge1.dot(h);
if a.abs() < 1e-8 {
return None; // Ray is parallel to triangle
}
let f = 1.0 / a;
let s = ray.origin - v0;
// u, v are barycentric coordinates; the hit is inside the triangle when
// u >= 0, v >= 0 and u + v <= 1.
let u = f * s.dot(h);
if u < 0.0 || u > 1.0 {
return None;
}
let q = s.cross(edge1);
let v = f * ray.direction.dot(q);
if v < 0.0 || u + v > 1.0 {
return None;
}
let t = f * edge2.dot(q);
if t < 0.0 {
return None; // Triangle is behind the ray
}
let normal = edge1.cross(edge2).normalize();
Some((t, normal))
}
#[cfg(test)]
mod tests {
use super::*;
fn approx(a: f32, b: f32) -> bool {
(a - b).abs() < 1e-4
}
fn approx_vec(a: Vec3, b: Vec3) -> bool {
approx(a.x, b.x) && approx(a.y, b.y) && approx(a.z, b.z)
}
// Ray from x=-5 into a unit box enters at x=-1, so t = 4.
#[test]
fn test_aabb_hit() {
let ray = Ray::new(Vec3::new(-5.0, 0.0, 0.0), Vec3::X);
let aabb = AABB::new(Vec3::new(-1.0, -1.0, -1.0), Vec3::ONE);
let t = ray_vs_aabb(&ray, &aabb).unwrap();
assert!(approx(t, 4.0));
}
// Ray offset above the box never intersects the Y slab.
#[test]
fn test_aabb_miss() {
let ray = Ray::new(Vec3::new(-5.0, 5.0, 0.0), Vec3::X);
let aabb = AABB::new(Vec3::new(-1.0, -1.0, -1.0), Vec3::ONE);
assert!(ray_vs_aabb(&ray, &aabb).is_none());
}
// A ray starting inside the AABB reports t = 0 by contract.
#[test]
fn test_aabb_inside() {
let ray = Ray::new(Vec3::ZERO, Vec3::X);
let aabb = AABB::new(Vec3::new(-1.0, -1.0, -1.0), Vec3::ONE);
let t = ray_vs_aabb(&ray, &aabb).unwrap();
assert!(approx(t, 0.0));
}
// Unit sphere hit at x=-1 (t=4); surface normal faces back at the ray.
#[test]
fn test_sphere_hit() {
let ray = Ray::new(Vec3::new(-5.0, 0.0, 0.0), Vec3::X);
let (t, normal) = ray_vs_sphere(&ray, Vec3::ZERO, 1.0).unwrap();
assert!(approx(t, 4.0));
assert!(approx_vec(normal, Vec3::new(-1.0, 0.0, 0.0)));
}
#[test]
fn test_sphere_miss() {
let ray = Ray::new(Vec3::new(-5.0, 5.0, 0.0), Vec3::X);
assert!(ray_vs_sphere(&ray, Vec3::ZERO, 1.0).is_none());
}
// A grazing ray (discriminant == 0) still counts as a hit.
#[test]
fn test_sphere_tangent() {
let ray = Ray::new(Vec3::new(-5.0, 1.0, 0.0), Vec3::X);
assert!(ray_vs_sphere(&ray, Vec3::ZERO, 1.0).is_some());
}
// Origin inside the sphere: the far root (exit point, t=2) is used.
#[test]
fn test_sphere_inside() {
let ray = Ray::new(Vec3::ZERO, Vec3::X);
let (t, _normal) = ray_vs_sphere(&ray, Vec3::ZERO, 2.0).unwrap();
assert!(approx(t, 2.0));
}
// Box face hit: t = 4 and the -X face normal is reconstructed.
#[test]
fn test_box_hit_face() {
let ray = Ray::new(Vec3::new(-5.0, 0.0, 0.0), Vec3::X);
let (t, normal) = ray_vs_box(&ray, Vec3::ZERO, Vec3::ONE).unwrap();
assert!(approx(t, 4.0));
assert!(approx_vec(normal, Vec3::new(-1.0, 0.0, 0.0)));
}
#[test]
fn test_box_miss() {
let ray = Ray::new(Vec3::new(-5.0, 5.0, 0.0), Vec3::X);
assert!(ray_vs_box(&ray, Vec3::ZERO, Vec3::ONE).is_none());
}
// Ray starting inside the box takes the t == 0 branch.
#[test]
fn test_box_inside() {
let ray = Ray::new(Vec3::ZERO, Vec3::X);
let (t, _normal) = ray_vs_box(&ray, Vec3::ZERO, Vec3::ONE).unwrap();
assert!(approx(t, 0.0));
}
// --- ray_vs_triangle tests ---
// Triangle in the z=5 plane hit dead-on: t = 5, normal along ±Z.
#[test]
fn test_triangle_hit() {
let v0 = Vec3::new(-1.0, -1.0, 5.0);
let v1 = Vec3::new(1.0, -1.0, 5.0);
let v2 = Vec3::new(0.0, 1.0, 5.0);
let ray = Ray::new(Vec3::ZERO, Vec3::Z);
let (t, normal) = ray_vs_triangle(&ray, v0, v1, v2).unwrap();
assert!(approx(t, 5.0));
// Normal should point toward -Z (facing the ray)
assert!(normal.z.abs() > 0.9);
}
#[test]
fn test_triangle_miss() {
let v0 = Vec3::new(-1.0, -1.0, 5.0);
let v1 = Vec3::new(1.0, -1.0, 5.0);
let v2 = Vec3::new(0.0, 1.0, 5.0);
// Ray pointing away
let ray = Ray::new(Vec3::new(10.0, 10.0, 0.0), Vec3::Z);
assert!(ray_vs_triangle(&ray, v0, v1, v2).is_none());
}
// Triangle at negative t must be rejected, not reported with t < 0.
#[test]
fn test_triangle_behind_ray() {
let v0 = Vec3::new(-1.0, -1.0, -5.0);
let v1 = Vec3::new(1.0, -1.0, -5.0);
let v2 = Vec3::new(0.0, 1.0, -5.0);
let ray = Ray::new(Vec3::ZERO, Vec3::Z);
assert!(ray_vs_triangle(&ray, v0, v1, v2).is_none());
}
// Degenerate determinant (ray parallel to the triangle plane) → None.
#[test]
fn test_triangle_parallel() {
let v0 = Vec3::new(0.0, 0.0, 5.0);
let v1 = Vec3::new(1.0, 0.0, 5.0);
let v2 = Vec3::new(0.0, 0.0, 6.0);
// Ray parallel to triangle (in XZ plane)
let ray = Ray::new(Vec3::ZERO, Vec3::X);
assert!(ray_vs_triangle(&ray, v0, v1, v2).is_none());
}
// Barycentric boundary case: a hit exactly on an edge is inclusive.
#[test]
fn test_triangle_edge_hit() {
// Ray hitting exactly on an edge
let v0 = Vec3::new(-1.0, 0.0, 5.0);
let v1 = Vec3::new(1.0, 0.0, 5.0);
let v2 = Vec3::new(0.0, 2.0, 5.0);
// Hit on the midpoint of v0-v1 edge
let ray = Ray::new(Vec3::new(0.0, 0.0, 0.0), Vec3::Z);
let result = ray_vs_triangle(&ray, v0, v1, v2);
assert!(result.is_some());
let (t, _) = result.unwrap();
assert!(approx(t, 5.0));
}
}

View File

@@ -0,0 +1,289 @@
use voltex_ecs::{World, Entity};
use voltex_ecs::Transform;
use voltex_math::{Vec3, Ray};
use crate::collider::Collider;
use crate::ray as ray_tests;
/// A single raycast result against an entity's collider.
#[derive(Debug, Clone, Copy)]
pub struct RayHit {
/// Entity whose collider was hit.
pub entity: Entity,
/// Ray parameter of the hit (distance along the ray direction).
pub t: f32,
/// World-space hit point (`ray.at(t)`).
pub point: Vec3,
/// Surface normal at the hit (Vec3::Y placeholder for convex hulls).
pub normal: Vec3,
}
/// Cast a ray against every collider in the world and return the closest
/// hit within `max_dist`, or `None`.
pub fn raycast(world: &World, ray: &Ray, max_dist: f32) -> Option<RayHit> {
    // Snapshot (entity, position, collider) so nothing borrows the world
    // while shapes are tested.
    let candidates: Vec<(Entity, Vec3, Collider)> = world
        .query2::<Transform, Collider>()
        .into_iter()
        .map(|(e, t, c)| (e, t.position, c.clone()))
        .collect();
    if candidates.is_empty() {
        return None;
    }
    let mut best: Option<RayHit> = None;
    for (entity, pos, collider) in &candidates {
        // Broad phase: cheap ray-vs-AABB rejection, also range-limited.
        let coarse_t = match ray_tests::ray_vs_aabb(ray, &collider.aabb(*pos)) {
            Some(t) if t <= max_dist => t,
            _ => continue,
        };
        // The AABB entry t lower-bounds the exact hit t, so any candidate
        // whose AABB lies beyond the current best cannot win.
        if best.as_ref().map_or(false, |hit| coarse_t >= hit.t) {
            continue;
        }
        // Narrow phase: exact shape intersection.
        let exact = match collider {
            Collider::Sphere { radius } => ray_tests::ray_vs_sphere(ray, *pos, *radius),
            Collider::Box { half_extents } => ray_tests::ray_vs_box(ray, *pos, *half_extents),
            Collider::Capsule { radius, half_height } => {
                ray_tests::ray_vs_capsule(ray, *pos, *radius, *half_height)
            }
            // Convex hulls fall back to their AABB with a placeholder normal.
            Collider::ConvexHull(_) => {
                ray_tests::ray_vs_aabb(ray, &collider.aabb(*pos)).map(|t| (t, Vec3::Y))
            }
        };
        if let Some((t, normal)) = exact {
            if t <= max_dist && best.as_ref().map_or(true, |hit| t < hit.t) {
                best = Some(RayHit {
                    entity: *entity,
                    t,
                    point: ray.at(t),
                    normal,
                });
            }
        }
    }
    best
}
/// Cast a ray and return ALL hits sorted by distance.
///
/// Every entity with both a `Transform` and a `Collider` is tested; hits
/// beyond `max_dist` are excluded. Returns an empty vector on no hits.
pub fn raycast_all(world: &World, ray: &Ray, max_dist: f32) -> Vec<RayHit> {
    let entities: Vec<(Entity, Vec3, Collider)> = world
        .query2::<Transform, Collider>()
        .into_iter()
        .map(|(e, t, c)| (e, t.position, c.clone()))
        .collect();
    let mut hits = Vec::new();
    for (entity, pos, collider) in &entities {
        // Broad phase: skip entities whose AABB the ray misses entirely or
        // only reaches beyond max_dist.
        let in_range = ray_tests::ray_vs_aabb(ray, &collider.aabb(*pos))
            .map_or(false, |t| t <= max_dist);
        if !in_range {
            continue;
        }
        // Narrow phase: exact shape intersection.
        let result = match collider {
            Collider::Sphere { radius } => {
                ray_tests::ray_vs_sphere(ray, *pos, *radius)
            }
            Collider::Box { half_extents } => {
                ray_tests::ray_vs_box(ray, *pos, *half_extents)
            }
            Collider::Capsule { radius, half_height } => {
                ray_tests::ray_vs_capsule(ray, *pos, *radius, *half_height)
            }
            Collider::ConvexHull(_) => {
                // Use AABB test as approximation for convex hull raycasting
                let aabb = collider.aabb(*pos);
                ray_tests::ray_vs_aabb(ray, &aabb).map(|t| (t, Vec3::Y))
            }
        };
        if let Some((t, normal)) = result {
            if t <= max_dist {
                hits.push(RayHit {
                    entity: *entity,
                    t,
                    point: ray.at(t),
                    normal,
                });
            }
        }
    }
    // total_cmp is a total order on f32, so the sort cannot panic on NaN
    // the way partial_cmp(..).unwrap() would.
    hits.sort_by(|a, b| a.t.total_cmp(&b.t));
    hits
}
#[cfg(test)]
mod tests {
use super::*;
use voltex_ecs::World;
use voltex_ecs::Transform;
use voltex_math::Vec3;
use crate::Collider;
fn approx(a: f32, b: f32) -> bool {
(a - b).abs() < 1e-3
}
// A world with no colliders must return no hit.
#[test]
fn test_empty_world() {
let world = World::new();
let ray = Ray::new(Vec3::ZERO, Vec3::X);
assert!(raycast(&world, &ray, 100.0).is_none());
}
// Unit sphere at x=5: surface at x=4, so t = 4.
#[test]
fn test_hit_sphere() {
let mut world = World::new();
let e = world.spawn();
world.add(e, Transform::from_position(Vec3::new(5.0, 0.0, 0.0)));
world.add(e, Collider::Sphere { radius: 1.0 });
let ray = Ray::new(Vec3::ZERO, Vec3::X);
let hit = raycast(&world, &ray, 100.0).unwrap();
assert_eq!(hit.entity, e);
assert!(approx(hit.t, 4.0));
assert!(approx(hit.point.x, 4.0));
}
// With two spheres along the ray, raycast must pick the nearer entity
// regardless of spawn order.
#[test]
fn test_closest_of_multiple() {
let mut world = World::new();
let far = world.spawn();
world.add(far, Transform::from_position(Vec3::new(10.0, 0.0, 0.0)));
world.add(far, Collider::Sphere { radius: 1.0 });
let near = world.spawn();
world.add(near, Transform::from_position(Vec3::new(3.0, 0.0, 0.0)));
world.add(near, Collider::Sphere { radius: 1.0 });
let ray = Ray::new(Vec3::ZERO, Vec3::X);
let hit = raycast(&world, &ray, 100.0).unwrap();
assert_eq!(hit.entity, near);
assert!(approx(hit.t, 2.0));
}
// A collider beyond max_dist must be filtered out.
#[test]
fn test_max_dist() {
let mut world = World::new();
let e = world.spawn();
world.add(e, Transform::from_position(Vec3::new(50.0, 0.0, 0.0)));
world.add(e, Collider::Sphere { radius: 1.0 });
let ray = Ray::new(Vec3::ZERO, Vec3::X);
assert!(raycast(&world, &ray, 10.0).is_none());
}
// Box colliders go through the box narrow phase: face at x=4, t = 4.
#[test]
fn test_hit_box() {
let mut world = World::new();
let e = world.spawn();
world.add(e, Transform::from_position(Vec3::new(5.0, 0.0, 0.0)));
world.add(e, Collider::Box { half_extents: Vec3::ONE });
let ray = Ray::new(Vec3::ZERO, Vec3::X);
let hit = raycast(&world, &ray, 100.0).unwrap();
assert_eq!(hit.entity, e);
assert!(approx(hit.t, 4.0));
}
// --- raycast_all tests ---
// All three spheres along the ray must be reported, sorted near-to-far.
#[test]
fn test_raycast_all_multiple_hits() {
let mut world = World::new();
let near = world.spawn();
world.add(near, Transform::from_position(Vec3::new(3.0, 0.0, 0.0)));
world.add(near, Collider::Sphere { radius: 0.5 });
let mid = world.spawn();
world.add(mid, Transform::from_position(Vec3::new(6.0, 0.0, 0.0)));
world.add(mid, Collider::Sphere { radius: 0.5 });
let far = world.spawn();
world.add(far, Transform::from_position(Vec3::new(10.0, 0.0, 0.0)));
world.add(far, Collider::Sphere { radius: 0.5 });
let ray = Ray::new(Vec3::ZERO, Vec3::X);
let hits = raycast_all(&world, &ray, 100.0);
assert_eq!(hits.len(), 3);
assert_eq!(hits[0].entity, near);
assert_eq!(hits[1].entity, mid);
assert_eq!(hits[2].entity, far);
assert!(hits[0].t < hits[1].t);
assert!(hits[1].t < hits[2].t);
}
#[test]
fn test_raycast_all_empty() {
let world = World::new();
let ray = Ray::new(Vec3::ZERO, Vec3::X);
let hits = raycast_all(&world, &ray, 100.0);
assert!(hits.is_empty());
}
// Out-of-range colliders are excluded from raycast_all results too.
#[test]
fn test_raycast_all_max_dist() {
let mut world = World::new();
let near = world.spawn();
world.add(near, Transform::from_position(Vec3::new(3.0, 0.0, 0.0)));
world.add(near, Collider::Sphere { radius: 0.5 });
let far = world.spawn();
world.add(far, Transform::from_position(Vec3::new(50.0, 0.0, 0.0)));
world.add(far, Collider::Sphere { radius: 0.5 });
let ray = Ray::new(Vec3::ZERO, Vec3::X);
let hits = raycast_all(&world, &ray, 10.0);
assert_eq!(hits.len(), 1);
assert_eq!(hits[0].entity, near);
}
// Heterogeneous shapes: the nearer box must beat the farther sphere.
#[test]
fn test_mixed_sphere_box() {
let mut world = World::new();
let sphere = world.spawn();
world.add(sphere, Transform::from_position(Vec3::new(10.0, 0.0, 0.0)));
world.add(sphere, Collider::Sphere { radius: 1.0 });
let box_e = world.spawn();
world.add(box_e, Transform::from_position(Vec3::new(3.0, 0.0, 0.0)));
world.add(box_e, Collider::Box { half_extents: Vec3::ONE });
let ray = Ray::new(Vec3::ZERO, Vec3::X);
let hit = raycast(&world, &ray, 100.0).unwrap();
assert_eq!(hit.entity, box_e);
assert!(approx(hit.t, 2.0));
}
}

View File

@@ -0,0 +1,119 @@
use voltex_math::Vec3;
/// Speed below which a body counts as resting.
/// NOTE(review): presumably compared against the body's velocity magnitude
/// by the integrator's sleep logic — confirm against integrator.rs.
pub const SLEEP_VELOCITY_THRESHOLD: f32 = 0.01;
/// Time a body must remain below the velocity threshold before it is put
/// to sleep (the sleep tests rest a body for 1 s at dt = 1/60 to cross it).
pub const SLEEP_TIME_THRESHOLD: f32 = 0.5;
/// Dynamic state and material parameters of a simulated rigid body.
#[derive(Debug, Clone, Copy)]
pub struct RigidBody {
/// Linear velocity.
pub velocity: Vec3,
/// Angular velocity (per-axis rotation rate).
pub angular_velocity: Vec3,
/// Mass; 0.0 marks a static (immovable) body — see `is_static`.
pub mass: f32,
/// Bounciness used on collision response; constructors default to 0.3.
pub restitution: f32,
/// Gravity multiplier (1.0 = full gravity, 0.0 = unaffected).
pub gravity_scale: f32,
pub friction: f32, // Coulomb friction coefficient, default 0.5
/// When true the integrator skips this body entirely.
pub is_sleeping: bool,
/// Seconds accumulated below the sleep velocity threshold.
pub sleep_timer: f32,
}
impl RigidBody {
    /// A movable body with the given mass and default material parameters
    /// (restitution 0.3, friction 0.5, full gravity, awake).
    pub fn dynamic(mass: f32) -> Self {
        Self {
            velocity: Vec3::ZERO,
            angular_velocity: Vec3::ZERO,
            mass,
            restitution: 0.3,
            gravity_scale: 1.0,
            friction: 0.5,
            is_sleeping: false,
            sleep_timer: 0.0,
        }
    }
    /// An immovable body: zero mass and no gravity, otherwise the same
    /// defaults as a dynamic body.
    pub fn statik() -> Self {
        Self {
            mass: 0.0,
            gravity_scale: 0.0,
            ..Self::dynamic(0.0)
        }
    }
    /// Inverse mass; zero for static bodies so impulses leave them fixed.
    pub fn inv_mass(&self) -> f32 {
        if self.is_static() { 0.0 } else { self.mass.recip() }
    }
    /// True when the body cannot move (mass of exactly zero).
    pub fn is_static(&self) -> bool {
        self.mass == 0.0
    }
    /// Wake this body from sleep.
    pub fn wake(&mut self) {
        self.is_sleeping = false;
        self.sleep_timer = 0.0;
    }
}
/// Global physics-simulation parameters.
// Derives added for consistency with RigidBody (same Vec3/f32/u32 field
// types, which already support Debug/Clone/Copy there); lets callers
// debug-print and copy a config instead of rebuilding it.
#[derive(Debug, Clone, Copy)]
pub struct PhysicsConfig {
    /// World-space gravitational acceleration applied to dynamic bodies.
    pub gravity: Vec3,
    /// Fixed timestep in seconds used per integration step.
    pub fixed_dt: f32,
    /// Iteration count for the iterative contact solver.
    pub solver_iterations: u32,
}
impl Default for PhysicsConfig {
    /// Earth-like gravity, 60 Hz fixed timestep, 4 solver iterations.
    fn default() -> Self {
        Self {
            gravity: Vec3::new(0.0, -9.81, 0.0),
            fixed_dt: 1.0 / 60.0,
            solver_iterations: 4,
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
// Constructor defaults for a dynamic body, including inv_mass = 1/mass.
#[test]
fn test_dynamic_body() {
let rb = RigidBody::dynamic(2.0);
assert_eq!(rb.mass, 2.0);
assert!(!rb.is_static());
assert!((rb.inv_mass() - 0.5).abs() < 1e-6);
assert_eq!(rb.velocity, Vec3::ZERO);
assert_eq!(rb.restitution, 0.3);
assert_eq!(rb.gravity_scale, 1.0);
assert!(!rb.is_sleeping);
assert_eq!(rb.sleep_timer, 0.0);
}
// Static bodies have zero mass, zero inverse mass and no gravity.
#[test]
fn test_static_body() {
let rb = RigidBody::statik();
assert_eq!(rb.mass, 0.0);
assert!(rb.is_static());
assert_eq!(rb.inv_mass(), 0.0);
assert_eq!(rb.gravity_scale, 0.0);
}
// Default config: gravity -9.81 on Y, 60 Hz step, 4 solver iterations.
#[test]
fn test_physics_config_default() {
let cfg = PhysicsConfig::default();
assert!((cfg.gravity.y - (-9.81)).abs() < 1e-6);
assert!((cfg.fixed_dt - 1.0 / 60.0).abs() < 1e-6);
assert_eq!(cfg.solver_iterations, 4);
}
// wake() must clear both the sleeping flag and the accumulated timer.
#[test]
fn test_wake() {
let mut rb = RigidBody::dynamic(1.0);
rb.is_sleeping = true;
rb.sleep_timer = 1.0;
rb.wake();
assert!(!rb.is_sleeping);
assert_eq!(rb.sleep_timer, 0.0);
}
}

View File

@@ -0,0 +1,574 @@
use voltex_ecs::{World, Entity};
use voltex_ecs::Transform;
use voltex_math::Vec3;
use crate::collider::Collider;
use crate::contact::ContactPoint;
use crate::rigid_body::{RigidBody, PhysicsConfig};
use crate::collision::detect_collisions;
use crate::integrator::{integrate, inertia_tensor, inv_inertia};
use crate::ccd;
// NOTE(review): these look like Baumgarte-style positional-correction
// parameters (allowed penetration and correction fraction); their use is
// further down in the solver — confirm before relying on these readings.
const POSITION_SLOP: f32 = 0.01;
const POSITION_PERCENT: f32 = 0.4;
/// Resolves all contacts with an iterative (Jacobi-style) impulse solver.
///
/// Per iteration, every contact is evaluated against a snapshot of body state
/// and the resulting linear/angular velocity deltas are accumulated, then
/// applied in one batch at the end of the iteration. Restitution uses the
/// smaller of the two bodies' coefficients; friction is a Coulomb model
/// clamped by `mu * j`. Positional penetration correction runs only on the
/// first iteration.
pub fn resolve_collisions(world: &mut World, contacts: &[ContactPoint], iterations: u32) {
    // Wake sleeping bodies that are in contact
    wake_colliding_bodies(world, contacts);
    for _iter in 0..iterations {
        // Deferred writes: components are only read while computing impulses,
        // then mutated in separate passes below.
        let mut velocity_changes: Vec<(Entity, Vec3, Vec3)> = Vec::new(); // (entity, dv_linear, dv_angular)
        let mut position_changes: Vec<(Entity, Vec3)> = Vec::new();
        for contact in contacts {
            let rb_a = world.get::<RigidBody>(contact.entity_a).copied();
            let rb_b = world.get::<RigidBody>(contact.entity_b).copied();
            let col_a = world.get::<Collider>(contact.entity_a).cloned();
            let col_b = world.get::<Collider>(contact.entity_b).cloned();
            let pos_a = world.get::<Transform>(contact.entity_a).map(|t| t.position);
            let pos_b = world.get::<Transform>(contact.entity_b).map(|t| t.position);
            // Both endpoints need a rigid body to respond.
            let (rb_a, rb_b) = match (rb_a, rb_b) {
                (Some(a), Some(b)) => (a, b),
                _ => continue,
            };
            let inv_mass_a = rb_a.inv_mass();
            let inv_mass_b = rb_b.inv_mass();
            let inv_mass_sum = inv_mass_a + inv_mass_b;
            // Two infinite-mass (static) bodies: nothing can respond.
            if inv_mass_sum == 0.0 {
                continue;
            }
            // Compute lever arms for angular impulse
            let center_a = pos_a.unwrap_or(Vec3::ZERO);
            let center_b = pos_b.unwrap_or(Vec3::ZERO);
            let r_a = contact.point_on_a - center_a;
            let r_b = contact.point_on_b - center_b;
            // Compute inverse inertia (Vec3 = diagonal tensor; no collider => no rotation)
            let inv_i_a = col_a.map(|c| inv_inertia(inertia_tensor(&c, rb_a.mass)))
                .unwrap_or(Vec3::ZERO);
            let inv_i_b = col_b.map(|c| inv_inertia(inertia_tensor(&c, rb_b.mass)))
                .unwrap_or(Vec3::ZERO);
            // Relative velocity at contact point (including angular contribution)
            let v_a = rb_a.velocity + rb_a.angular_velocity.cross(r_a);
            let v_b = rb_b.velocity + rb_b.angular_velocity.cross(r_b);
            let v_rel = v_a - v_b;
            let v_rel_n = v_rel.dot(contact.normal);
            // Effective mass including rotational terms
            let r_a_cross_n = r_a.cross(contact.normal);
            let r_b_cross_n = r_b.cross(contact.normal);
            // ((I^-1 (r x n)) x r) . n, expanded component-wise for the diagonal tensor.
            let angular_term_a = Vec3::new(
                r_a_cross_n.x * inv_i_a.x,
                r_a_cross_n.y * inv_i_a.y,
                r_a_cross_n.z * inv_i_a.z,
            ).cross(r_a).dot(contact.normal);
            let angular_term_b = Vec3::new(
                r_b_cross_n.x * inv_i_b.x,
                r_b_cross_n.y * inv_i_b.y,
                r_b_cross_n.z * inv_i_b.z,
            ).cross(r_b).dot(contact.normal);
            let effective_mass = inv_mass_sum + angular_term_a + angular_term_b;
            // normal points A→B; v_rel_n > 0 means A approaches B → apply impulse
            let j = if v_rel_n > 0.0 {
                // Restitution: take the lower of the two coefficients.
                let e = rb_a.restitution.min(rb_b.restitution);
                let j = (1.0 + e) * v_rel_n / effective_mass;
                // Linear impulse
                velocity_changes.push((contact.entity_a, contact.normal * (-j * inv_mass_a), Vec3::ZERO));
                velocity_changes.push((contact.entity_b, contact.normal * (j * inv_mass_b), Vec3::ZERO));
                // Angular impulse: torque = r × impulse
                let angular_impulse_a = r_a.cross(contact.normal * (-j));
                let angular_impulse_b = r_b.cross(contact.normal * j);
                let dw_a = Vec3::new(
                    angular_impulse_a.x * inv_i_a.x,
                    angular_impulse_a.y * inv_i_a.y,
                    angular_impulse_a.z * inv_i_a.z,
                );
                let dw_b = Vec3::new(
                    angular_impulse_b.x * inv_i_b.x,
                    angular_impulse_b.y * inv_i_b.y,
                    angular_impulse_b.z * inv_i_b.z,
                );
                velocity_changes.push((contact.entity_a, Vec3::ZERO, dw_a));
                velocity_changes.push((contact.entity_b, Vec3::ZERO, dw_b));
                j
            } else {
                // Separating contact: no restitution impulse. A penetration-based
                // pseudo-impulse still bounds the friction clamp below.
                contact.depth / inv_mass_sum
            };
            // Coulomb friction: tangential impulse clamped to mu * normal impulse
            let v_rel_tangent = v_rel - contact.normal * v_rel_n;
            let tangent_len = v_rel_tangent.length();
            if tangent_len > 1e-6 {
                let tangent = v_rel_tangent * (1.0 / tangent_len);
                // Friction coefficient: simple average of the two bodies.
                let mu = (rb_a.friction + rb_b.friction) * 0.5;
                // jt is negative: it opposes A's tangential relative motion.
                let jt = -v_rel_tangent.dot(tangent) / effective_mass;
                let friction_j = if jt.abs() <= j * mu {
                    jt
                } else {
                    j * mu * jt.signum()
                };
                velocity_changes.push((contact.entity_a, tangent * (friction_j * inv_mass_a), Vec3::ZERO));
                velocity_changes.push((contact.entity_b, tangent * (-friction_j * inv_mass_b), Vec3::ZERO));
                // Angular friction impulse
                let angular_fric_a = r_a.cross(tangent * friction_j);
                let angular_fric_b = r_b.cross(tangent * (-friction_j));
                let dw_fric_a = Vec3::new(
                    angular_fric_a.x * inv_i_a.x,
                    angular_fric_a.y * inv_i_a.y,
                    angular_fric_a.z * inv_i_a.z,
                );
                let dw_fric_b = Vec3::new(
                    angular_fric_b.x * inv_i_b.x,
                    angular_fric_b.y * inv_i_b.y,
                    angular_fric_b.z * inv_i_b.z,
                );
                velocity_changes.push((contact.entity_a, Vec3::ZERO, dw_fric_a));
                velocity_changes.push((contact.entity_b, Vec3::ZERO, dw_fric_b));
            }
            // Position correction only on first iteration
            if _iter == 0 {
                // Only correct penetration beyond POSITION_SLOP, scaled by POSITION_PERCENT.
                let correction_mag = (contact.depth - POSITION_SLOP).max(0.0) * POSITION_PERCENT / inv_mass_sum;
                if correction_mag > 0.0 {
                    let correction = contact.normal * correction_mag;
                    // Normal points A→B: push A backwards, B forwards, mass-weighted.
                    position_changes.push((contact.entity_a, correction * (-inv_mass_a)));
                    position_changes.push((contact.entity_b, correction * inv_mass_b));
                }
            }
        }
        // Apply velocity changes
        for (entity, dv, dw) in velocity_changes {
            if let Some(rb) = world.get_mut::<RigidBody>(entity) {
                rb.velocity = rb.velocity + dv;
                rb.angular_velocity = rb.angular_velocity + dw;
            }
        }
        // Apply position corrections
        for (entity, dp) in position_changes {
            if let Some(t) = world.get_mut::<Transform>(entity) {
                t.position = t.position + dp;
            }
        }
    }
}
/// Wakes every sleeping rigid body that appears in the contact list.
///
/// Runs in two passes: entities are collected under an immutable borrow of
/// `world`, then woken under a mutable borrow.
fn wake_colliding_bodies(world: &mut World, contacts: &[ContactPoint]) {
    // Iterate a fixed two-element array per contact instead of allocating a
    // Vec inside flat_map for every contact; order (a then b) is preserved.
    let mut wake_list: Vec<Entity> = Vec::new();
    for c in contacts {
        for entity in [c.entity_a, c.entity_b] {
            if let Some(rb) = world.get::<RigidBody>(entity) {
                if rb.is_sleeping {
                    wake_list.push(entity);
                }
            }
        }
    }
    for entity in wake_list {
        if let Some(rb) = world.get_mut::<RigidBody>(entity) {
            rb.wake();
        }
    }
}
/// Advances the simulation by one fixed timestep.
///
/// Order: CCD pre-pass (clamp fast movers), velocity/position integration,
/// discrete collision detection, then iterative impulse resolution.
pub fn physics_step(world: &mut World, config: &PhysicsConfig) {
    // Clamp fast-moving bodies first so integration cannot tunnel them
    // through thin geometry before discrete detection runs.
    apply_ccd(world, config);
    integrate(world, config);
    let frame_contacts = detect_collisions(world);
    resolve_collisions(world, &frame_contacts, config.solver_iterations);
}
/// Continuous collision detection pre-pass.
///
/// For every awake, non-static body whose displacement this step exceeds its
/// approximate collider radius, a bounding sphere is swept along the velocity
/// against every other collider's AABB; the body is clamped just before the
/// earliest hit and its velocity halved to damp re-tunneling. Discrete
/// detection then produces the actual contact.
///
/// Change vs. previous revision: the redundant `sweep_radius` match (which
/// always evaluated to `collider_radius`) has been removed; behavior is
/// unchanged.
fn apply_ccd(world: &mut World, config: &PhysicsConfig) {
    // Snapshot moving bodies and all collider AABBs up front: the correction
    // pass below needs a mutable borrow of `world`.
    let bodies: Vec<(Entity, Vec3, Vec3, Collider)> = world
        .query3::<Transform, RigidBody, Collider>()
        .into_iter()
        .filter(|(_, _, rb, _)| !rb.is_static() && !rb.is_sleeping)
        .map(|(e, t, rb, c)| (e, t.position, rb.velocity, c.clone()))
        .collect();
    let all_colliders: Vec<(Entity, voltex_math::AABB)> = world
        .query2::<Transform, Collider>()
        .into_iter()
        .map(|(e, t, c)| (e, c.aabb(t.position)))
        .collect();
    let mut ccd_corrections: Vec<(Entity, Vec3)> = Vec::new();
    for (entity, pos, vel, collider) in &bodies {
        let speed = vel.length();
        // Conservative bounding radius, used both for the tunneling test and
        // as the swept-sphere radius.
        let collider_radius = match collider {
            Collider::Sphere { radius } => *radius,
            Collider::Box { half_extents } => half_extents.x.min(half_extents.y).min(half_extents.z),
            Collider::Capsule { radius, .. } => *radius,
            Collider::ConvexHull(hull) => {
                // Use minimum distance from origin to any vertex as approximate radius
                hull.vertices.iter().map(|v| v.length()).fold(f32::MAX, f32::min)
            }
        };
        // Skip slow bodies: discrete detection suffices when the per-step
        // displacement stays within the collider radius.
        if speed * config.fixed_dt <= collider_radius {
            continue;
        }
        let end = *pos + *vel * config.fixed_dt;
        // Earliest normalized time of impact in [0, 1) along this step's sweep.
        let mut earliest_t = 1.0f32;
        for (other_entity, other_aabb) in &all_colliders {
            if *other_entity == *entity {
                continue;
            }
            if let Some(t) = ccd::swept_sphere_vs_aabb(*pos, end, collider_radius, other_aabb) {
                if t < earliest_t {
                    earliest_t = t;
                }
            }
        }
        if earliest_t < 1.0 {
            // Place body just before collision point
            let safe_t = (earliest_t - 0.01).max(0.0);
            let safe_pos = *pos + *vel * config.fixed_dt * safe_t;
            ccd_corrections.push((*entity, safe_pos));
        }
    }
    for (entity, safe_pos) in ccd_corrections {
        if let Some(t) = world.get_mut::<Transform>(entity) {
            t.position = safe_pos;
        }
        if let Some(rb) = world.get_mut::<RigidBody>(entity) {
            // Halve velocity so the clamped body does not immediately tunnel again.
            rb.velocity = rb.velocity * 0.5;
        }
    }
}
#[cfg(test)]
mod tests {
    // Solver-level tests: restitution, position correction, friction,
    // full stepping, angular impulses, iteration stability, and wake-up.
    use super::*;
    use voltex_ecs::World;
    use voltex_ecs::Transform;
    use voltex_math::Vec3;
    use crate::{Collider, RigidBody};
    use crate::collision::detect_collisions;
    // Loose tolerance: results come out of float-heavy solver math.
    fn approx(a: f32, b: f32) -> bool {
        (a - b).abs() < 1e-3
    }
    #[test]
    fn test_two_dynamic_spheres_head_on() {
        // Equal masses, e = 1: a head-on elastic hit exactly swaps velocities.
        let mut world = World::new();
        let a = world.spawn();
        world.add(a, Transform::from_position(Vec3::new(-0.5, 0.0, 0.0)));
        world.add(a, Collider::Sphere { radius: 1.0 });
        let mut rb_a = RigidBody::dynamic(1.0);
        rb_a.velocity = Vec3::new(1.0, 0.0, 0.0);
        rb_a.restitution = 1.0;
        rb_a.gravity_scale = 0.0;
        world.add(a, rb_a);
        let b = world.spawn();
        world.add(b, Transform::from_position(Vec3::new(0.5, 0.0, 0.0)));
        world.add(b, Collider::Sphere { radius: 1.0 });
        let mut rb_b = RigidBody::dynamic(1.0);
        rb_b.velocity = Vec3::new(-1.0, 0.0, 0.0);
        rb_b.restitution = 1.0;
        rb_b.gravity_scale = 0.0;
        world.add(b, rb_b);
        let contacts = detect_collisions(&world);
        assert_eq!(contacts.len(), 1);
        resolve_collisions(&mut world, &contacts, 1);
        let va = world.get::<RigidBody>(a).unwrap().velocity;
        let vb = world.get::<RigidBody>(b).unwrap().velocity;
        assert!(approx(va.x, -1.0));
        assert!(approx(vb.x, 1.0));
    }
    #[test]
    fn test_dynamic_vs_static_floor() {
        // A ball bouncing on a static floor reverses; the floor must not move.
        let mut world = World::new();
        let ball = world.spawn();
        world.add(ball, Transform::from_position(Vec3::new(0.0, 0.5, 0.0)));
        world.add(ball, Collider::Sphere { radius: 1.0 });
        let mut rb = RigidBody::dynamic(1.0);
        rb.velocity = Vec3::new(0.0, -2.0, 0.0);
        rb.restitution = 1.0;
        rb.gravity_scale = 0.0;
        world.add(ball, rb);
        let floor = world.spawn();
        world.add(floor, Transform::from_position(Vec3::new(0.0, -1.0, 0.0)));
        world.add(floor, Collider::Box { half_extents: Vec3::new(10.0, 1.0, 10.0) });
        world.add(floor, RigidBody::statik());
        let contacts = detect_collisions(&world);
        assert_eq!(contacts.len(), 1);
        resolve_collisions(&mut world, &contacts, 1);
        let ball_rb = world.get::<RigidBody>(ball).unwrap();
        let floor_rb = world.get::<RigidBody>(floor).unwrap();
        assert!(ball_rb.velocity.y > 0.0);
        assert!(approx(floor_rb.velocity.y, 0.0));
    }
    #[test]
    fn test_position_correction() {
        // Two overlapping resting spheres must be pushed apart (depth 1.0).
        let mut world = World::new();
        let a = world.spawn();
        world.add(a, Transform::from_position(Vec3::ZERO));
        world.add(a, Collider::Sphere { radius: 1.0 });
        let mut rb_a = RigidBody::dynamic(1.0);
        rb_a.gravity_scale = 0.0;
        world.add(a, rb_a);
        let b = world.spawn();
        world.add(b, Transform::from_position(Vec3::new(1.0, 0.0, 0.0)));
        world.add(b, Collider::Sphere { radius: 1.0 });
        let mut rb_b = RigidBody::dynamic(1.0);
        rb_b.gravity_scale = 0.0;
        world.add(b, rb_b);
        let contacts = detect_collisions(&world);
        assert_eq!(contacts.len(), 1);
        resolve_collisions(&mut world, &contacts, 1);
        let pa = world.get::<Transform>(a).unwrap().position;
        let pb = world.get::<Transform>(b).unwrap().position;
        let dist = (pb - pa).length();
        assert!(dist > 1.0);
    }
    #[test]
    fn test_physics_step_ball_drop() {
        // End-to-end: a dropped ball falls under gravity but never passes the floor.
        let mut world = World::new();
        let ball = world.spawn();
        world.add(ball, Transform::from_position(Vec3::new(0.0, 5.0, 0.0)));
        world.add(ball, Collider::Sphere { radius: 0.5 });
        world.add(ball, RigidBody::dynamic(1.0));
        let floor = world.spawn();
        world.add(floor, Transform::from_position(Vec3::new(0.0, -1.0, 0.0)));
        world.add(floor, Collider::Box { half_extents: Vec3::new(10.0, 1.0, 10.0) });
        world.add(floor, RigidBody::statik());
        let config = PhysicsConfig::default();
        for _ in 0..10 {
            physics_step(&mut world, &config);
        }
        let t = world.get::<Transform>(ball).unwrap();
        assert!(t.position.y < 5.0);
        assert!(t.position.y > -1.0);
    }
    #[test]
    fn test_friction_slows_sliding() {
        // A sphere sliding on the floor loses (but not all) horizontal speed.
        let mut world = World::new();
        let ball = world.spawn();
        world.add(ball, Transform::from_position(Vec3::new(0.0, 0.4, 0.0)));
        world.add(ball, Collider::Sphere { radius: 0.5 });
        let mut rb = RigidBody::dynamic(1.0);
        rb.velocity = Vec3::new(5.0, 0.0, 0.0);
        rb.gravity_scale = 0.0;
        rb.friction = 0.5;
        world.add(ball, rb);
        let floor = world.spawn();
        world.add(floor, Transform::from_position(Vec3::new(0.0, -0.5, 0.0)));
        world.add(floor, Collider::Box { half_extents: Vec3::new(10.0, 0.5, 10.0) });
        let mut floor_rb = RigidBody::statik();
        floor_rb.friction = 0.5;
        world.add(floor, floor_rb);
        let contacts = detect_collisions(&world);
        if !contacts.is_empty() {
            resolve_collisions(&mut world, &contacts, 1);
        }
        let ball_v = world.get::<RigidBody>(ball).unwrap().velocity;
        assert!(ball_v.x < 5.0, "friction should slow horizontal velocity: {}", ball_v.x);
        assert!(ball_v.x > 0.0, "should still be moving: {}", ball_v.x);
    }
    #[test]
    fn test_both_static_no_response() {
        // Two overlapping static bodies: inv_mass_sum == 0, solver must skip.
        let mut world = World::new();
        let a = world.spawn();
        world.add(a, Transform::from_position(Vec3::ZERO));
        world.add(a, Collider::Sphere { radius: 1.0 });
        world.add(a, RigidBody::statik());
        let b = world.spawn();
        world.add(b, Transform::from_position(Vec3::new(0.5, 0.0, 0.0)));
        world.add(b, Collider::Sphere { radius: 1.0 });
        world.add(b, RigidBody::statik());
        let contacts = detect_collisions(&world);
        resolve_collisions(&mut world, &contacts, 1);
        let pa = world.get::<Transform>(a).unwrap().position;
        let pb = world.get::<Transform>(b).unwrap().position;
        assert!(approx(pa.x, 0.0));
        assert!(approx(pb.x, 0.5));
    }
    // --- Angular impulse tests ---
    #[test]
    fn test_off_center_hit_produces_spin() {
        let mut world = World::new();
        // Sphere A moving right, hitting sphere B off-center (offset in Y)
        let a = world.spawn();
        world.add(a, Transform::from_position(Vec3::new(-0.5, 0.5, 0.0)));
        world.add(a, Collider::Sphere { radius: 1.0 });
        let mut rb_a = RigidBody::dynamic(1.0);
        rb_a.velocity = Vec3::new(2.0, 0.0, 0.0);
        rb_a.restitution = 0.5;
        rb_a.gravity_scale = 0.0;
        world.add(a, rb_a);
        let b = world.spawn();
        world.add(b, Transform::from_position(Vec3::new(0.5, -0.5, 0.0)));
        world.add(b, Collider::Sphere { radius: 1.0 });
        let mut rb_b = RigidBody::dynamic(1.0);
        rb_b.gravity_scale = 0.0;
        rb_b.restitution = 0.5;
        world.add(b, rb_b);
        let contacts = detect_collisions(&world);
        assert!(!contacts.is_empty());
        resolve_collisions(&mut world, &contacts, 4);
        let rb_a_after = world.get::<RigidBody>(a).unwrap();
        let rb_b_after = world.get::<RigidBody>(b).unwrap();
        // At least one body should have non-zero angular velocity after off-center collision
        let total_angular = rb_a_after.angular_velocity.length() + rb_b_after.angular_velocity.length();
        assert!(total_angular > 1e-4, "off-center hit should produce angular velocity, got {}", total_angular);
    }
    // --- Sequential impulse tests ---
    #[test]
    fn test_sequential_impulse_stability() {
        // Stack of 3 boxes on floor - with iterations they should be more stable
        let mut world = World::new();
        let floor = world.spawn();
        world.add(floor, Transform::from_position(Vec3::new(0.0, -0.5, 0.0)));
        world.add(floor, Collider::Box { half_extents: Vec3::new(10.0, 0.5, 10.0) });
        world.add(floor, RigidBody::statik());
        let mut boxes = Vec::new();
        for i in 0..3 {
            let e = world.spawn();
            let y = 0.5 + i as f32 * 1.0;
            world.add(e, Transform::from_position(Vec3::new(0.0, y, 0.0)));
            world.add(e, Collider::Box { half_extents: Vec3::new(0.5, 0.5, 0.5) });
            let mut rb = RigidBody::dynamic(1.0);
            rb.gravity_scale = 0.0; // no gravity for stability test
            world.add(e, rb);
            boxes.push(e);
        }
        let config = PhysicsConfig {
            gravity: Vec3::ZERO,
            fixed_dt: 1.0 / 60.0,
            solver_iterations: 4,
        };
        // Run a few steps
        for _ in 0..5 {
            physics_step(&mut world, &config);
        }
        // All boxes should remain roughly in place (no gravity, just resting)
        for (i, e) in boxes.iter().enumerate() {
            let t = world.get::<Transform>(*e).unwrap();
            let expected_y = 0.5 + i as f32 * 1.0;
            assert!((t.position.y - expected_y).abs() < 1.0,
                "box {} moved too much: expected y~{}, got {}", i, expected_y, t.position.y);
        }
    }
    // --- Wake on collision test ---
    #[test]
    fn test_wake_on_collision() {
        let mut world = World::new();
        // Sleeping body
        let a = world.spawn();
        world.add(a, Transform::from_position(Vec3::ZERO));
        world.add(a, Collider::Sphere { radius: 1.0 });
        let mut rb_a = RigidBody::dynamic(1.0);
        rb_a.is_sleeping = true;
        rb_a.gravity_scale = 0.0;
        world.add(a, rb_a);
        // Moving body that collides with sleeping body
        let b = world.spawn();
        world.add(b, Transform::from_position(Vec3::new(1.5, 0.0, 0.0)));
        world.add(b, Collider::Sphere { radius: 1.0 });
        let mut rb_b = RigidBody::dynamic(1.0);
        rb_b.velocity = Vec3::new(-2.0, 0.0, 0.0);
        rb_b.gravity_scale = 0.0;
        world.add(b, rb_b);
        let contacts = detect_collisions(&world);
        assert!(!contacts.is_empty());
        resolve_collisions(&mut world, &contacts, 1);
        let rb_a_after = world.get::<RigidBody>(a).unwrap();
        assert!(!rb_a_after.is_sleeping, "body should wake on collision");
    }
}

View File

@@ -0,0 +1,243 @@
/// Pure CPU exposure calculation logic (testable).
///
/// Converts the average log-luminance back to linear space and derives the
/// exposure multiplier that maps the scene average onto `key_value`, clamped
/// to `[min_exp, max_exp]`.
pub fn calculate_target_exposure(
    avg_log_luminance: f32,
    key_value: f32,
    min_exp: f32,
    max_exp: f32,
) -> f32 {
    // Guard against division by ~zero for pitch-black scenes.
    let linear_average = avg_log_luminance.exp().max(0.0001);
    (key_value / linear_average).clamp(min_exp, max_exp)
}
/// Smooth adaptation over time.
///
/// Exponentially moves `current` toward `target`; `speed` is the rate
/// constant (larger converges faster). With `dt == 0` the value is unchanged,
/// and very large `dt * speed` lands essentially on `target`.
pub fn adapt_exposure(current: f32, target: f32, dt: f32, speed: f32) -> f32 {
    let blend = 1.0 - (-dt * speed).exp();
    current + (target - current) * blend
}
/// GPU-side auto exposure compute + readback.
pub struct AutoExposure {
    // Luminance-accumulation compute pipeline (auto_exposure.wgsl).
    compute_pipeline: wgpu::ComputePipeline,
    bind_group_layout: wgpu::BindGroupLayout,
    // 8-byte STORAGE buffer the shader atomically accumulates into
    // (log-luminance sum + pixel count).
    result_buffer: wgpu::Buffer,
    // MAP_READ staging copy of `result_buffer` for CPU readback.
    staging_buffer: wgpu::Buffer,
    /// Current exposure multiplier (applied during tonemapping).
    pub exposure: f32,
    /// Lower clamp for the computed target exposure.
    pub min_exposure: f32,
    /// Upper clamp for the computed target exposure.
    pub max_exposure: f32,
    /// Rate constant for exponential adaptation (higher = faster).
    pub adaptation_speed: f32,
    /// Middle-grey key value the average luminance is mapped to.
    pub key_value: f32,
    // True between dispatch() and the next update_exposure() call.
    pending_read: bool,
}
impl AutoExposure {
    /// Builds the luminance-reduction pipeline plus its result and staging buffers.
    pub fn new(device: &wgpu::Device) -> Self {
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Auto Exposure Compute"),
            source: wgpu::ShaderSource::Wgsl(include_str!("auto_exposure.wgsl").into()),
        });
        let bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                label: Some("Auto Exposure BGL"),
                entries: &[
                    // binding 0: HDR color texture to reduce.
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Texture {
                            multisampled: false,
                            view_dimension: wgpu::TextureViewDimension::D2,
                            sample_type: wgpu::TextureSampleType::Float { filterable: false },
                        },
                        count: None,
                    },
                    // binding 1: read-write storage buffer of atomics (sum, count).
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::COMPUTE,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Storage { read_only: false },
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    },
                ],
            });
        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("Auto Exposure PL"),
            bind_group_layouts: &[&bind_group_layout],
            immediate_size: 0,
        });
        let compute_pipeline =
            device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
                label: Some("Auto Exposure Pipeline"),
                layout: Some(&pipeline_layout),
                module: &shader,
                entry_point: Some("main"),
                compilation_options: wgpu::PipelineCompilationOptions::default(),
                cache: None,
            });
        // 8 bytes: two u32 atomics written by the shader.
        let result_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Auto Exposure Result"),
            size: 8,
            usage: wgpu::BufferUsages::STORAGE
                | wgpu::BufferUsages::COPY_SRC
                | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let staging_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Auto Exposure Staging"),
            size: 8,
            usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        AutoExposure {
            compute_pipeline,
            bind_group_layout,
            result_buffer,
            staging_buffer,
            exposure: 1.0,
            min_exposure: 0.1,
            max_exposure: 10.0,
            adaptation_speed: 2.0,
            key_value: 0.18,
            pending_read: false,
        }
    }
    /// Dispatch compute shader to calculate luminance. Call once per frame.
    pub fn dispatch(
        &mut self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        encoder: &mut wgpu::CommandEncoder,
        hdr_view: &wgpu::TextureView,
        hdr_width: u32,
        hdr_height: u32,
    ) {
        // Clear result buffer
        queue.write_buffer(&self.result_buffer, 0, &[0u8; 8]);
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Auto Exposure BG"),
            layout: &self.bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(hdr_view),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: self.result_buffer.as_entire_binding(),
                },
            ],
        });
        {
            let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
                label: Some("Auto Exposure Pass"),
                timestamp_writes: None,
            });
            cpass.set_pipeline(&self.compute_pipeline);
            cpass.set_bind_group(0, &bind_group, &[]);
            // Round up to cover the whole texture with 16x16 workgroups.
            let wg_x = (hdr_width + 15) / 16;
            let wg_y = (hdr_height + 15) / 16;
            cpass.dispatch_workgroups(wg_x, wg_y, 1);
        }
        // Copy result to staging for CPU readback
        encoder.copy_buffer_to_buffer(&self.result_buffer, 0, &self.staging_buffer, 0, 8);
        self.pending_read = true;
    }
    /// Read back luminance result and update exposure. Call after queue.submit().
    /// Returns true if exposure was updated.
    ///
    /// NOTE(review): this currently only *initiates* the buffer map and always
    /// returns false — the map callback is never polled to completion, and the
    /// staging buffer is never unmapped. Confirm the render loop polls the
    /// device and unmaps before the next dispatch(), otherwise the next copy
    /// into `staging_buffer` may fail validation.
    pub fn update_exposure(&mut self, _dt: f32) -> bool {
        if !self.pending_read {
            return false;
        }
        self.pending_read = false;
        let slice = self.staging_buffer.slice(..);
        let (tx, rx) = std::sync::mpsc::channel();
        slice.map_async(wgpu::MapMode::Read, move |result| {
            let _ = tx.send(result);
        });
        // The caller must poll the device for the map to complete.
        // For a full integration, use async or poll in the render loop.
        // For now, return false — use set_average_luminance() for CPU-side updates.
        let _ = rx;
        false
    }
    /// Simple CPU-only exposure update without GPU readback.
    /// Use when you have a luminance estimate from other means.
    pub fn set_average_luminance(&mut self, avg_log_lum: f32, dt: f32) {
        let target = calculate_target_exposure(
            avg_log_lum,
            self.key_value,
            self.min_exposure,
            self.max_exposure,
        );
        self.exposure = adapt_exposure(self.exposure, target, dt, self.adaptation_speed);
    }
}
#[cfg(test)]
mod tests {
    // CPU-side exposure math only; the GPU path is not exercised here.
    use super::*;
    #[test]
    fn test_calculate_target_exposure() {
        // avg_log_lum = ln(0.18) -> avg_lum = 0.18 -> target = 0.18/0.18 = 1.0
        let avg_log_lum = 0.18_f32.ln();
        let target = calculate_target_exposure(avg_log_lum, 0.18, 0.1, 10.0);
        assert!((target - 1.0).abs() < 0.01, "target={}", target);
    }
    #[test]
    fn test_target_exposure_bright_scene() {
        // Bright scene: avg_lum = 2.0 -> target = 0.18/2.0 = 0.09 -> clamped to min 0.1
        let avg_log_lum = 2.0_f32.ln();
        let target = calculate_target_exposure(avg_log_lum, 0.18, 0.1, 10.0);
        assert!((target - 0.1).abs() < 0.01);
    }
    #[test]
    fn test_target_exposure_dark_scene() {
        // Dark scene: avg_lum = 0.001 -> target = 0.18/0.001 = 180 -> clamped to max 10.0
        let avg_log_lum = 0.001_f32.ln();
        let target = calculate_target_exposure(avg_log_lum, 0.18, 0.1, 10.0);
        assert!((target - 10.0).abs() < 0.01);
    }
    #[test]
    fn test_adapt_exposure_no_time() {
        let result = adapt_exposure(1.0, 5.0, 0.0, 2.0);
        assert!((result - 1.0).abs() < 0.01); // dt=0 -> no change
    }
    #[test]
    fn test_adapt_exposure_converges() {
        // Repeated small steps must approach the target exponentially.
        let mut exp = 1.0;
        for _ in 0..100 {
            exp = adapt_exposure(exp, 5.0, 0.016, 2.0); // 60fps
        }
        assert!(
            (exp - 5.0).abs() < 0.2,
            "should converge to 5.0, got {}",
            exp
        );
    }
    #[test]
    fn test_adapt_exposure_large_dt() {
        let result = adapt_exposure(1.0, 5.0, 100.0, 2.0);
        assert!((result - 5.0).abs() < 0.01); // large dt -> near target
    }
}

View File

@@ -0,0 +1,18 @@
// Parallel log-luminance accumulation over the HDR frame.
//
// result[0]: sum of per-pixel log-luminance in 1/1000 fixed point. The i32
//            bits are stored via a u32 atomic; wrapping u32 addition of
//            two's-complement values yields the correct signed sum when the
//            CPU reads the total back as i32.
// result[1]: number of accumulated pixels, so the CPU can compute the mean.
@group(0) @binding(0) var hdr_texture: texture_2d<f32>;
@group(0) @binding(1) var<storage, read_write> result: array<atomic<u32>>;
@compute @workgroup_size(16, 16)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let dims = textureDimensions(hdr_texture);
    // Guard threads outside the texture (dispatch is rounded up to 16x16 tiles).
    if (gid.x >= dims.x || gid.y >= dims.y) {
        return;
    }
    let color = textureLoad(hdr_texture, vec2<i32>(gid.xy), 0);
    // Rec. 709 luminance weights.
    let lum = 0.2126 * color.r + 0.7152 * color.g + 0.0722 * color.b;
    // Clamp avoids log(0) = -inf for pure black pixels.
    let log_lum = log(max(lum, 0.0001));
    // Fixed-point (x1000) so the float sum can use integer atomics.
    let fixed = i32(log_lum * 1000.0);
    atomicAdd(&result[0], bitcast<u32>(fixed));
    atomicAdd(&result[1], 1u);
}

View File

@@ -0,0 +1,52 @@
/// Bilateral bloom weight: attenuates blur across brightness edges.
///
/// Multiplies the caller-supplied spatial Gaussian weight by a luminance-range
/// Gaussian, so samples whose brightness differs strongly from the center
/// contribute almost nothing. Symmetric in the two luminance arguments.
pub fn bilateral_bloom_weight(
    center_luminance: f32,
    sample_luminance: f32,
    spatial_weight: f32,
    sigma_luminance: f32,
) -> f32 {
    let delta = (center_luminance - sample_luminance).abs();
    let range_falloff = (-(delta * delta) / (2.0 * sigma_luminance * sigma_luminance)).exp();
    spatial_weight * range_falloff
}
/// Calculate luminance from RGB using the Rec. 709 coefficients.
pub fn luminance(r: f32, g: f32, b: f32) -> f32 {
    let (wr, wg, wb) = (0.2126, 0.7152, 0.0722);
    wr * r + wg * g + wb * b
}
/// 5-tap Gaussian weights for 1D bloom blur (sigma ≈ 1.4).
///
/// Symmetric binomial-style kernel [1, 4, 6, 4, 1] / 16; sums to exactly 1.
pub fn gaussian_5tap() -> [f32; 5] {
    let outer = 1.0 / 16.0;
    let inner = 4.0 / 16.0;
    let center = 6.0 / 16.0;
    [outer, inner, center, inner, outer]
}
#[cfg(test)]
mod tests {
    // Pure-math checks for the bilateral bloom helpers.
    use super::*;
    #[test]
    fn test_bilateral_bloom_same_luminance() {
        let w = bilateral_bloom_weight(0.5, 0.5, 1.0, 0.1);
        assert!((w - 1.0).abs() < 0.01); // same lum → full weight
    }
    #[test]
    fn test_bilateral_bloom_edge() {
        let w = bilateral_bloom_weight(0.1, 0.9, 1.0, 0.1);
        assert!(w < 0.01); // large lum diff → near zero
    }
    #[test]
    fn test_luminance_white() {
        // Rec. 709 weights sum to 1, so pure white has luminance 1.
        let l = luminance(1.0, 1.0, 1.0);
        assert!((l - 1.0).abs() < 0.01);
    }
    #[test]
    fn test_gaussian_5tap_sum() {
        // A blur kernel must be normalized (weights sum to 1).
        let g = gaussian_5tap();
        let sum: f32 = g.iter().sum();
        assert!((sum - 1.0).abs() < 0.01);
    }
}

View File

@@ -0,0 +1,156 @@
/// Depth- and normal-aware (joint bilateral) blur compute pass.
pub struct BilateralBlur {
    // Compute pipeline built from bilateral_blur.wgsl.
    pipeline: wgpu::ComputePipeline,
    // Layout reused to build a fresh bind group per dispatch.
    bind_group_layout: wgpu::BindGroupLayout,
}
impl BilateralBlur {
    /// Compiles the shader and creates the pipeline + bind group layout.
    pub fn new(device: &wgpu::Device) -> Self {
        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: Some("Bilateral Blur Compute"),
            source: wgpu::ShaderSource::Wgsl(include_str!("bilateral_blur.wgsl").into()),
        });
        let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("Bilateral Blur BGL"),
            entries: &[
                // binding 0: input color texture to blur.
                wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Float { filterable: false },
                    },
                    count: None,
                },
                // binding 1: depth texture used as an edge-stopping guide.
                wgpu::BindGroupLayoutEntry {
                    binding: 1,
                    visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Depth,
                    },
                    count: None,
                },
                // binding 2: normal texture, second edge-stopping guide.
                wgpu::BindGroupLayoutEntry {
                    binding: 2,
                    visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::Texture {
                        multisampled: false,
                        view_dimension: wgpu::TextureViewDimension::D2,
                        sample_type: wgpu::TextureSampleType::Float { filterable: false },
                    },
                    count: None,
                },
                // binding 3: write-only rgba16float output.
                wgpu::BindGroupLayoutEntry {
                    binding: 3,
                    visibility: wgpu::ShaderStages::COMPUTE,
                    ty: wgpu::BindingType::StorageTexture {
                        access: wgpu::StorageTextureAccess::WriteOnly,
                        format: wgpu::TextureFormat::Rgba16Float,
                        view_dimension: wgpu::TextureViewDimension::D2,
                    },
                    count: None,
                },
            ],
        });
        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("Bilateral Blur PL"),
            bind_group_layouts: &[&bind_group_layout],
            immediate_size: 0,
        });
        let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
            label: Some("Bilateral Blur Pipeline"),
            layout: Some(&pipeline_layout),
            module: &shader,
            entry_point: Some("main"),
            compilation_options: wgpu::PipelineCompilationOptions::default(),
            cache: None,
        });
        BilateralBlur { pipeline, bind_group_layout }
    }
    /// Records one full-image blur pass into `encoder`.
    ///
    /// `width`/`height` are the output dimensions; the dispatch is rounded
    /// up to cover them with 16x16 workgroups.
    pub fn dispatch(
        &self,
        device: &wgpu::Device,
        encoder: &mut wgpu::CommandEncoder,
        input_view: &wgpu::TextureView,
        depth_view: &wgpu::TextureView,
        normal_view: &wgpu::TextureView,
        output_view: &wgpu::TextureView,
        width: u32,
        height: u32,
    ) {
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Bilateral Blur BG"),
            layout: &self.bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry { binding: 0, resource: wgpu::BindingResource::TextureView(input_view) },
                wgpu::BindGroupEntry { binding: 1, resource: wgpu::BindingResource::TextureView(depth_view) },
                wgpu::BindGroupEntry { binding: 2, resource: wgpu::BindingResource::TextureView(normal_view) },
                wgpu::BindGroupEntry { binding: 3, resource: wgpu::BindingResource::TextureView(output_view) },
            ],
        });
        let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
            label: Some("Bilateral Blur Pass"),
            timestamp_writes: None,
        });
        cpass.set_pipeline(&self.pipeline);
        cpass.set_bind_group(0, &bind_group, &[]);
        cpass.dispatch_workgroups((width + 15) / 16, (height + 15) / 16, 1);
    }
}
/// Pure CPU bilateral weight calculation (for testing).
///
/// Combines three edge-stopping terms — a spatial Gaussian, an exponential
/// depth falloff, and a normal-similarity power term — mirroring the factors
/// used in the WGSL compute shader.
pub fn bilateral_weight(
    spatial_dist: f32,
    depth_diff: f32,
    normal_dot: f32,
    sigma_spatial: f32,
    sigma_depth: f32,
    sigma_normal: f32,
) -> f32 {
    let spatial_term =
        (-(spatial_dist * spatial_dist) / (2.0 * sigma_spatial * sigma_spatial)).exp();
    let depth_term = (-depth_diff.abs() / sigma_depth).exp();
    let normal_term = normal_dot.max(0.0).powf(sigma_normal);
    spatial_term * depth_term * normal_term
}
#[cfg(test)]
mod tests {
    // CPU mirror of the shader's weight function; GPU path not exercised.
    use super::*;
    #[test]
    fn test_bilateral_weight_center() {
        // At center: dist=0, depth_diff=0, normal_dot=1
        let w = bilateral_weight(0.0, 0.0, 1.0, 2.0, 0.1, 16.0);
        assert!((w - 1.0).abs() < 0.01); // all factors = 1
    }
    #[test]
    fn test_bilateral_weight_depth_edge() {
        // Large depth difference -> low weight
        let w = bilateral_weight(0.0, 10.0, 1.0, 2.0, 0.1, 16.0);
        assert!(w < 0.01, "large depth diff should give low weight: {}", w);
    }
    #[test]
    fn test_bilateral_weight_normal_edge() {
        // Perpendicular normals -> low weight
        let w = bilateral_weight(0.0, 0.0, 0.0, 2.0, 0.1, 16.0);
        assert!(w < 0.01, "perpendicular normals should give low weight: {}", w);
    }
    #[test]
    fn test_bilateral_weight_distance() {
        // Far spatial distance -> lower weight than near
        let w_near = bilateral_weight(1.0, 0.0, 1.0, 2.0, 0.1, 16.0);
        let w_far = bilateral_weight(4.0, 0.0, 1.0, 2.0, 0.1, 16.0);
        assert!(w_near > w_far);
    }
}

View File

@@ -0,0 +1,58 @@
// 5x5 joint bilateral blur: smooths `input_tex` while preserving geometric
// edges, using the depth and normal G-Buffer textures as edge-stopping guides.
@group(0) @binding(0) var input_tex: texture_2d<f32>;
@group(0) @binding(1) var depth_tex: texture_depth_2d;
@group(0) @binding(2) var normal_tex: texture_2d<f32>;
@group(0) @binding(3) var output_tex: texture_storage_2d<rgba16float, write>;
// 2 => a (2*2+1)^2 = 5x5 sample footprint.
const KERNEL_RADIUS: i32 = 2;
const SIGMA_SPATIAL: f32 = 2.0;
const SIGMA_DEPTH: f32 = 0.1;
const SIGMA_NORMAL: f32 = 16.0;
fn gaussian(x: f32, sigma: f32) -> f32 {
    return exp(-(x * x) / (2.0 * sigma * sigma));
}
@compute @workgroup_size(16, 16)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    let dims = textureDimensions(input_tex);
    // Guard threads outside the texture (dispatch rounds up to 16x16 tiles).
    if (gid.x >= dims.x || gid.y >= dims.y) { return; }
    let center = vec2<i32>(gid.xy);
    let center_depth = textureLoad(depth_tex, center, 0);
    let center_normal = textureLoad(normal_tex, center, 0).xyz;
    var sum = vec4<f32>(0.0);
    var weight_sum = 0.0;
    for (var dy = -KERNEL_RADIUS; dy <= KERNEL_RADIUS; dy++) {
        for (var dx = -KERNEL_RADIUS; dx <= KERNEL_RADIUS; dx++) {
            let offset = vec2<i32>(dx, dy);
            let sample_pos = center + offset;
            // Skip taps outside the image instead of clamping them.
            if (sample_pos.x < 0 || sample_pos.y < 0 ||
                sample_pos.x >= i32(dims.x) || sample_pos.y >= i32(dims.y)) {
                continue;
            }
            // Spatial Gaussian falloff by tap distance.
            let spatial_dist = sqrt(f32(dx * dx + dy * dy));
            let w_spatial = gaussian(spatial_dist, SIGMA_SPATIAL);
            // Depth edge-stop: exponential falloff with depth difference.
            let sample_depth = textureLoad(depth_tex, sample_pos, 0);
            let depth_diff = abs(center_depth - sample_depth);
            let w_depth = exp(-depth_diff / SIGMA_DEPTH);
            // Normal edge-stop: sharpened cosine similarity.
            let sample_normal = textureLoad(normal_tex, sample_pos, 0).xyz;
            let n_dot = max(dot(center_normal, sample_normal), 0.0);
            let w_normal = pow(n_dot, SIGMA_NORMAL);
            let weight = w_spatial * w_depth * w_normal;
            let sample_color = textureLoad(input_tex, sample_pos, 0);
            sum += sample_color * weight;
            weight_sum += weight;
        }
    }
    // Normalize; fall back to black if every tap was rejected.
    let result = select(vec4<f32>(0.0), sum / weight_sum, weight_sum > 0.0);
    textureStore(output_tex, vec2<i32>(gid.xy), result);
}

View File

@@ -0,0 +1,34 @@
/// Tracks which meshes need BLAS (bottom-level acceleration structure) rebuild.
pub struct BlasTracker {
    /// (mesh_id, needs_rebuild) pairs in registration order.
    dirty: Vec<(u32, bool)>,
}

impl BlasTracker {
    /// Creates an empty tracker.
    pub fn new() -> Self { BlasTracker { dirty: Vec::new() } }

    /// Registers a mesh as clean. Re-registering an already-known id is a
    /// no-op; previously, duplicates inflated `mesh_count` and only the first
    /// copy could ever be flagged by `mark_dirty`.
    pub fn register(&mut self, mesh_id: u32) {
        if !self.dirty.iter().any(|(id, _)| *id == mesh_id) {
            self.dirty.push((mesh_id, false));
        }
    }

    /// Flags a registered mesh as needing a rebuild; unknown ids are ignored.
    pub fn mark_dirty(&mut self, mesh_id: u32) {
        if let Some(entry) = self.dirty.iter_mut().find(|(id, _)| *id == mesh_id) {
            entry.1 = true;
        }
    }

    /// Returns the ids of all flagged meshes, in registration order.
    pub fn dirty_meshes(&self) -> Vec<u32> {
        self.dirty.iter().filter(|(_, d)| *d).map(|(id, _)| *id).collect()
    }

    /// Clears every rebuild flag (call after rebuilding the flagged BLASes).
    pub fn clear_dirty(&mut self) {
        for entry in &mut self.dirty {
            entry.1 = false;
        }
    }

    /// Number of registered meshes.
    pub fn mesh_count(&self) -> usize { self.dirty.len() }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_register_and_dirty() {
        // Only explicitly marked meshes appear in dirty_meshes().
        let mut t = BlasTracker::new();
        t.register(1); t.register(2);
        t.mark_dirty(1);
        assert_eq!(t.dirty_meshes(), vec![1]);
    }
    #[test]
    fn test_clear_dirty() {
        // clear_dirty() resets flags but keeps registrations.
        let mut t = BlasTracker::new();
        t.register(1); t.mark_dirty(1);
        t.clear_dirty();
        assert!(t.dirty_meshes().is_empty());
    }
}

View File

@@ -0,0 +1,145 @@
use bytemuck::{Pod, Zeroable};
use crate::hdr::HDR_FORMAT;
/// Number of bloom mip levels (downsample + upsample chain).
pub const BLOOM_MIP_COUNT: usize = 5;
/// Uniform buffer for the bloom pass.
///
/// `#[repr(C)]` + `Pod`/`Zeroable` so the struct bytes can be uploaded to the
/// GPU verbatim; the trailing padding brings it to 16 bytes (4 × f32).
#[repr(C)]
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
pub struct BloomUniform {
    /// Luminance threshold above which a pixel contributes to bloom.
    pub threshold: f32,
    /// Soft knee for the threshold.
    pub soft_threshold: f32,
    // Pads the struct to a 16-byte, uniform-friendly size.
    pub _padding: [f32; 2],
}
impl Default for BloomUniform {
    fn default() -> Self {
        Self {
            // Only HDR pixels brighter than 1.0 bleed into bloom by default.
            threshold: 1.0,
            soft_threshold: 0.5,
            _padding: [0.0; 2],
        }
    }
}
/// GPU resources for the bloom pass (mip chain + uniform buffer).
pub struct BloomResources {
    /// One `TextureView` per mip level (5 levels).
    pub mip_views: Vec<wgpu::TextureView>,
    // Holds a BloomUniform; writable from the CPU via COPY_DST.
    pub uniform_buffer: wgpu::Buffer,
    /// Blend intensity applied during the tonemap pass.
    pub intensity: f32,
}
impl BloomResources {
pub fn new(device: &wgpu::Device, width: u32, height: u32) -> Self {
let mip_views = create_mip_views(device, width, height);
let _uniform = BloomUniform::default();
let uniform_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Bloom Uniform Buffer"),
size: std::mem::size_of::<BloomUniform>() as u64,
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
// Initialize with defaults via a write at construction time would require a queue;
// callers may write the buffer themselves. The buffer is left zero-initialised here.
Self {
mip_views,
uniform_buffer,
intensity: 0.5,
}
}
/// Recreate the mip-chain textures when the window is resized.
pub fn resize(&mut self, device: &wgpu::Device, width: u32, height: u32) {
self.mip_views = create_mip_views(device, width, height);
}
}
// ── Public helpers ────────────────────────────────────────────────────────────
/// Return the (width, height) of each bloom mip level.
///
/// Level 0 = width/2 × height/2; each subsequent level halves again.
/// Sizes are clamped to a minimum of 1.
pub fn mip_sizes(width: u32, height: u32) -> Vec<(u32, u32)> {
let mut sizes = Vec::with_capacity(BLOOM_MIP_COUNT);
let mut w = (width / 2).max(1);
let mut h = (height / 2).max(1);
for _ in 0..BLOOM_MIP_COUNT {
sizes.push((w, h));
w = (w / 2).max(1);
h = (h / 2).max(1);
}
sizes
}
// ── Internal helpers ──────────────────────────────────────────────────────────
fn create_mip_views(device: &wgpu::Device, width: u32, height: u32) -> Vec<wgpu::TextureView> {
let sizes = mip_sizes(width, height);
sizes
.into_iter()
.enumerate()
.map(|(i, (w, h))| {
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some(&format!("Bloom Mip {} Texture", i)),
size: wgpu::Extent3d {
width: w,
height: h,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: HDR_FORMAT,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT
| wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[],
});
texture.create_view(&wgpu::TextureViewDescriptor::default())
})
.collect()
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;

    /// Full expected chain for a 1920×1080 target.
    #[test]
    fn mip_sizes_1920_1080() {
        let expected: [(u32, u32); 5] =
            [(960, 540), (480, 270), (240, 135), (120, 67), (60, 33)];
        let sizes = mip_sizes(1920, 1080);
        assert_eq!(sizes.len(), BLOOM_MIP_COUNT);
        assert_eq!(sizes, expected);
    }

    /// Power-of-two inputs halve cleanly at every level.
    #[test]
    fn mip_sizes_64_64() {
        let expected: [(u32, u32); 5] = [(32, 32), (16, 16), (8, 8), (4, 4), (2, 2)];
        let sizes = mip_sizes(64, 64);
        assert_eq!(sizes.len(), BLOOM_MIP_COUNT);
        assert_eq!(sizes, expected);
    }

    /// Default uniform carries the documented threshold/knee values.
    #[test]
    fn bloom_uniform_default() {
        let BloomUniform { threshold, soft_threshold, _padding } = BloomUniform::default();
        assert!((threshold - 1.0).abs() < f32::EPSILON);
        assert!((soft_threshold - 0.5).abs() < f32::EPSILON);
        assert_eq!(_padding, [0.0f32; 2]);
    }
}

Some files were not shown because too many files have changed in this diff Show More