feat(renderer): add deferred_demo example with multi-light deferred rendering

Demonstrates two-pass deferred rendering: G-Buffer pass writes position,
normal, albedo, and material to 4 render targets; lighting pass composites
with 8 animated orbiting point lights plus a directional light using a
fullscreen triangle.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-03-25 11:54:01 +09:00
parent a9b263bf57
commit 49bfa31639
3 changed files with 741 additions and 0 deletions

View File

@@ -0,0 +1,15 @@
[package]
name = "deferred_demo"
version = "0.1.0"
edition = "2021"

[dependencies]
# All versions are pinned once at the workspace root; members only opt in.
# Engine crates: math types, windowing/input, renderer (G-Buffer, pipelines).
voltex_math.workspace = true
voltex_platform.workspace = true
voltex_renderer.workspace = true
# GPU API and window/event loop.
wgpu.workspace = true
winit.workspace = true
# Byte-casting for uniform structs, blocking executor for wgpu setup, logging.
bytemuck.workspace = true
pollster.workspace = true
env_logger.workspace = true
log.workspace = true

View File

@@ -0,0 +1,725 @@
use winit::{
application::ApplicationHandler,
event::WindowEvent,
event_loop::{ActiveEventLoop, EventLoop},
keyboard::{KeyCode, PhysicalKey},
window::WindowId,
};
use voltex_math::{Vec3, Mat4};
use voltex_platform::{VoltexWindow, WindowConfig, InputState, GameTimer};
use voltex_renderer::{
GpuContext, Camera, FpsController, CameraUniform, LightsUniform, LightData,
Mesh, GpuTexture, MaterialUniform, generate_sphere,
ShadowMap, ShadowUniform, IblResources,
GBuffer, create_fullscreen_vertex_buffer,
create_gbuffer_pipeline, create_lighting_pipeline,
gbuffer_camera_bind_group_layout,
lighting_gbuffer_bind_group_layout, lighting_lights_bind_group_layout,
lighting_shadow_bind_group_layout,
pbr_texture_bind_group_layout, create_pbr_texture_bind_group,
};
use wgpu::util::DeviceExt;
use bytemuck::{Pod, Zeroable};
// Sphere grid drawn into the G-Buffer: GRID_SIZE x GRID_SIZE instances.
const GRID_SIZE: usize = 5;
// Total instance count; one dynamic-uniform slot per sphere.
const NUM_SPHERES: usize = GRID_SIZE * GRID_SIZE;
// World-space distance between adjacent sphere centers.
const SPACING: f32 = 1.2;
// Animated point lights orbiting the grid (a directional light is added separately).
const NUM_POINT_LIGHTS: usize = 8;
/// Camera position uniform for the lighting pass (16 bytes).
///
/// `Pod`/`Zeroable` allow the struct to be byte-cast with bytemuck for
/// `queue.write_buffer`; `#[repr(C)]` fixes the field layout.
#[repr(C)]
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
struct CameraPositionUniform {
    // World-space eye position read by the lighting shader.
    camera_pos: [f32; 3],
    // Pads the vec3 out to 16 bytes to satisfy uniform-buffer layout rules.
    _padding: f32,
}
/// Round `size` up to the nearest multiple of `alignment`.
///
/// Used to compute the per-slot stride of the dynamic uniform buffers so
/// each slot starts on a `min_uniform_buffer_offset_alignment` boundary.
///
/// # Panics
/// Panics if `alignment` is zero (division by zero), and in debug builds
/// if the rounded-up result does not fit in `u32`.
fn align_up(size: u32, alignment: u32) -> u32 {
    // `div_ceil` avoids the intermediate `size + alignment - 1` addition of
    // the manual formula, which could overflow for sizes near `u32::MAX`.
    size.div_ceil(alignment) * alignment
}
/// winit `ApplicationHandler` shell; `state` stays `None` until the first
/// `resumed` callback creates the window and all GPU resources.
struct DeferredDemoApp {
    state: Option<AppState>,
}
/// Everything built once in `resumed` and used every frame thereafter.
struct AppState {
    window: VoltexWindow,
    gpu: GpuContext,
    // The four color attachments + depth written by pass 1, read by pass 2.
    gbuffer: GBuffer,
    // G-Buffer pass resources
    gbuffer_pipeline: wgpu::RenderPipeline,
    mesh: Mesh,
    camera: Camera,
    fps_controller: FpsController,
    // Dynamic uniform buffers: one aligned slot per sphere, indexed with
    // dynamic offsets at draw time.
    camera_buffer: wgpu::Buffer,
    material_buffer: wgpu::Buffer,
    camera_bind_group: wgpu::BindGroup,
    pbr_texture_bind_group: wgpu::BindGroup,
    material_bind_group: wgpu::BindGroup,
    // Lighting pass resources
    lighting_pipeline: wgpu::RenderPipeline,
    fullscreen_vb: wgpu::Buffer,
    gbuffer_bind_group: wgpu::BindGroup,
    lights_bind_group: wgpu::BindGroup,
    shadow_bind_group: wgpu::BindGroup,
    light_buffer: wgpu::Buffer,
    cam_pos_buffer: wgpu::Buffer,
    // Layouts needed for rebuild on resize
    gbuffer_layout: wgpu::BindGroupLayout,
    // Keep textures alive; they are only referenced via the bind groups above.
    _albedo_tex: GpuTexture,
    _normal_tex: (wgpu::Texture, wgpu::TextureView, wgpu::Sampler),
    _shadow_map: ShadowMap,
    _ibl: IblResources,
    input: InputState,
    timer: GameTimer,
    // Per-slot strides rounded up to min_uniform_buffer_offset_alignment.
    cam_aligned_size: u32,
    mat_aligned_size: u32,
    // Accumulated seconds driving the orbiting-light animation.
    time_elapsed: f32,
}
impl ApplicationHandler for DeferredDemoApp {
    /// One-time setup: window, GPU context, meshes, both pipelines and every
    /// bind group. Runs when the OS delivers the first resume event.
    fn resumed(&mut self, event_loop: &ActiveEventLoop) {
        let config = WindowConfig {
            title: "Voltex - Deferred Rendering Demo".to_string(),
            width: 1280,
            height: 720,
            ..Default::default()
        };
        let window = VoltexWindow::new(event_loop, &config);
        let gpu = GpuContext::new(window.handle.clone());
        // Dynamic uniform buffer alignment: slots must start on multiples of
        // the device's min_uniform_buffer_offset_alignment.
        let alignment = gpu.device.limits().min_uniform_buffer_offset_alignment;
        let cam_aligned_size = align_up(std::mem::size_of::<CameraUniform>() as u32, alignment);
        let mat_aligned_size = align_up(std::mem::size_of::<MaterialUniform>() as u32, alignment);
        // Generate sphere mesh (radius 0.4, 32 longitude x 16 latitude segments)
        let (vertices, indices) = generate_sphere(0.4, 32, 16);
        let mesh = Mesh::new(&gpu.device, &vertices, &indices);
        // Camera
        let aspect = gpu.config.width as f32 / gpu.config.height as f32;
        let camera = Camera::new(Vec3::new(0.0, 0.0, 8.0), aspect);
        let fps_controller = FpsController::new();
        // G-Buffer sized to the surface; resized alongside the window.
        let gbuffer = GBuffer::new(&gpu.device, gpu.config.width, gpu.config.height);
        // ---------------------------------------------------------------
        // G-Buffer pass bind group layouts
        // ---------------------------------------------------------------
        let gbuf_cam_layout = gbuffer_camera_bind_group_layout(&gpu.device);
        let pbr_tex_layout = pbr_texture_bind_group_layout(&gpu.device);
        let mat_layout = MaterialUniform::bind_group_layout(&gpu.device);
        // Camera dynamic uniform buffer (one CameraUniform per sphere)
        let camera_buffer = gpu.device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("GBuf Camera Dynamic UBO"),
            size: (cam_aligned_size as usize * NUM_SPHERES) as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        // Material dynamic uniform buffer
        let material_buffer = gpu.device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("GBuf Material Dynamic UBO"),
            size: (mat_aligned_size as usize * NUM_SPHERES) as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        // Camera bind group (dynamic offset, group 0). The binding size is
        // one unaligned CameraUniform; dynamic offsets select the slot.
        let camera_bind_group = gpu.device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("GBuf Camera Bind Group"),
            layout: &gbuf_cam_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                    buffer: &camera_buffer,
                    offset: 0,
                    size: wgpu::BufferSize::new(std::mem::size_of::<CameraUniform>() as u64),
                }),
            }],
        });
        // PBR textures: white albedo + flat normal (1x1 placeholders)
        let old_tex_layout = GpuTexture::bind_group_layout(&gpu.device);
        let albedo_tex = GpuTexture::white_1x1(&gpu.device, &gpu.queue, &old_tex_layout);
        let normal_tex = GpuTexture::flat_normal_1x1(&gpu.device, &gpu.queue);
        let pbr_texture_bind_group = create_pbr_texture_bind_group(
            &gpu.device,
            &pbr_tex_layout,
            &albedo_tex.view,
            &albedo_tex.sampler,
            &normal_tex.1,
            &normal_tex.2,
        );
        // Material bind group (dynamic offset, group 2)
        let material_bind_group = gpu.device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("GBuf Material Bind Group"),
            layout: &mat_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
                    buffer: &material_buffer,
                    offset: 0,
                    size: wgpu::BufferSize::new(std::mem::size_of::<MaterialUniform>() as u64),
                }),
            }],
        });
        // G-Buffer pipeline
        let gbuffer_pipeline = create_gbuffer_pipeline(
            &gpu.device,
            &gbuf_cam_layout,
            &pbr_tex_layout,
            &mat_layout,
        );
        // ---------------------------------------------------------------
        // Lighting pass bind group layouts
        // ---------------------------------------------------------------
        let gbuffer_layout = lighting_gbuffer_bind_group_layout(&gpu.device);
        let lights_layout = lighting_lights_bind_group_layout(&gpu.device);
        let shadow_layout = lighting_shadow_bind_group_layout(&gpu.device);
        // Fullscreen triangle vertex buffer
        let fullscreen_vb = create_fullscreen_vertex_buffer(&gpu.device);
        // GBuffer bind group (group 0) — non-filtering sampler for Rgba32Float position
        let nearest_sampler = gpu.device.create_sampler(&wgpu::SamplerDescriptor {
            label: Some("GBuffer Nearest Sampler"),
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Nearest,
            min_filter: wgpu::FilterMode::Nearest,
            mipmap_filter: wgpu::MipmapFilterMode::Nearest,
            ..Default::default()
        });
        let gbuffer_bind_group = create_gbuffer_bind_group(
            &gpu.device,
            &gbuffer_layout,
            &gbuffer,
            &nearest_sampler,
        );
        // Lights uniform buffer (rewritten every frame)
        let light_buffer = gpu.device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Lights Uniform Buffer"),
            size: std::mem::size_of::<LightsUniform>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        // Camera position uniform buffer (for lighting pass)
        let cam_pos_buffer = gpu.device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Camera Position Uniform Buffer"),
            size: std::mem::size_of::<CameraPositionUniform>() as u64,
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        // Lights bind group (group 1)
        let lights_bind_group = gpu.device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Lighting Lights Bind Group"),
            layout: &lights_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: light_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: cam_pos_buffer.as_entire_binding(),
                },
            ],
        });
        // Shadow + IBL bind group (group 2). Shadows are unused in this demo
        // — the uniform is zeroed and never updated (no COPY_DST usage) — but
        // the lighting pipeline's layout still requires the bindings.
        let shadow_map = ShadowMap::new(&gpu.device);
        let ibl = IblResources::new(&gpu.device, &gpu.queue);
        let shadow_uniform = ShadowUniform {
            light_view_proj: [[0.0; 4]; 4],
            shadow_map_size: 0.0,
            shadow_bias: 0.0,
            _padding: [0.0; 2],
        };
        let shadow_uniform_buffer = gpu.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Shadow Uniform Buffer"),
            contents: bytemuck::cast_slice(&[shadow_uniform]),
            usage: wgpu::BufferUsages::UNIFORM,
        });
        let shadow_bind_group = gpu.device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Lighting Shadow Bind Group"),
            layout: &shadow_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(&shadow_map.view),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::Sampler(&shadow_map.sampler),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: shadow_uniform_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 3,
                    resource: wgpu::BindingResource::TextureView(&ibl.brdf_lut_view),
                },
                wgpu::BindGroupEntry {
                    binding: 4,
                    resource: wgpu::BindingResource::Sampler(&ibl.brdf_lut_sampler),
                },
            ],
        });
        // Lighting pipeline
        let lighting_pipeline = create_lighting_pipeline(
            &gpu.device,
            gpu.surface_format,
            &gbuffer_layout,
            &lights_layout,
            &shadow_layout,
        );
        self.state = Some(AppState {
            window,
            gpu,
            gbuffer,
            gbuffer_pipeline,
            mesh,
            camera,
            fps_controller,
            camera_buffer,
            material_buffer,
            camera_bind_group,
            pbr_texture_bind_group,
            material_bind_group,
            lighting_pipeline,
            fullscreen_vb,
            gbuffer_bind_group,
            lights_bind_group,
            shadow_bind_group,
            light_buffer,
            cam_pos_buffer,
            gbuffer_layout,
            _albedo_tex: albedo_tex,
            _normal_tex: normal_tex,
            _shadow_map: shadow_map,
            _ibl: ibl,
            input: InputState::new(),
            timer: GameTimer::new(60),
            cam_aligned_size,
            mat_aligned_size,
            time_elapsed: 0.0,
        });
    }

    /// Per-window event dispatch: input bookkeeping, resize handling, and the
    /// whole per-frame update + two-pass render on `RedrawRequested`.
    fn window_event(
        &mut self,
        event_loop: &ActiveEventLoop,
        _window_id: WindowId,
        event: WindowEvent,
    ) {
        // Ignore events delivered before `resumed` has built the state.
        let state = match &mut self.state {
            Some(s) => s,
            None => return,
        };
        match event {
            WindowEvent::CloseRequested => event_loop.exit(),
            WindowEvent::KeyboardInput {
                event:
                    winit::event::KeyEvent {
                        physical_key: PhysicalKey::Code(key_code),
                        state: key_state,
                        ..
                    },
                ..
            } => {
                let pressed = key_state == winit::event::ElementState::Pressed;
                state.input.process_key(key_code, pressed);
                if key_code == KeyCode::Escape && pressed {
                    event_loop.exit();
                }
            }
            WindowEvent::Resized(size) => {
                // NOTE(review): gpu.resize runs even for 0x0 (minimized)
                // frames — presumably it guards internally; confirm.
                state.gpu.resize(size.width, size.height);
                if size.width > 0 && size.height > 0 {
                    state.camera.aspect = size.width as f32 / size.height as f32;
                    // Resize G-Buffer
                    state.gbuffer.resize(&state.gpu.device, size.width, size.height);
                    // Recreate gbuffer bind group with new texture views
                    let nearest_sampler = state.gpu.device.create_sampler(&wgpu::SamplerDescriptor {
                        label: Some("GBuffer Nearest Sampler"),
                        address_mode_u: wgpu::AddressMode::ClampToEdge,
                        address_mode_v: wgpu::AddressMode::ClampToEdge,
                        address_mode_w: wgpu::AddressMode::ClampToEdge,
                        mag_filter: wgpu::FilterMode::Nearest,
                        min_filter: wgpu::FilterMode::Nearest,
                        mipmap_filter: wgpu::MipmapFilterMode::Nearest,
                        ..Default::default()
                    });
                    state.gbuffer_bind_group = create_gbuffer_bind_group(
                        &state.gpu.device,
                        &state.gbuffer_layout,
                        &state.gbuffer,
                        &nearest_sampler,
                    );
                }
            }
            WindowEvent::CursorMoved { position, .. } => {
                state.input.process_mouse_move(position.x, position.y);
            }
            WindowEvent::MouseInput {
                state: btn_state,
                button,
                ..
            } => {
                let pressed = btn_state == winit::event::ElementState::Pressed;
                state.input.process_mouse_button(button, pressed);
            }
            WindowEvent::MouseWheel { delta, .. } => {
                let y = match delta {
                    winit::event::MouseScrollDelta::LineDelta(_, y) => y,
                    winit::event::MouseScrollDelta::PixelDelta(pos) => pos.y as f32,
                };
                state.input.process_scroll(y);
            }
            WindowEvent::RedrawRequested => {
                state.timer.tick();
                let dt = state.timer.frame_dt();
                state.time_elapsed += dt;
                // Camera input: mouse-look only while right button is held.
                if state.input.is_mouse_button_pressed(winit::event::MouseButton::Right) {
                    let (dx, dy) = state.input.mouse_delta();
                    state.fps_controller.process_mouse(&mut state.camera, dx, dy);
                }
                // WASD + Space/Shift fly controls, summed into three axes.
                let mut forward = 0.0f32;
                let mut right = 0.0f32;
                let mut up = 0.0f32;
                if state.input.is_key_pressed(KeyCode::KeyW) { forward += 1.0; }
                if state.input.is_key_pressed(KeyCode::KeyS) { forward -= 1.0; }
                if state.input.is_key_pressed(KeyCode::KeyD) { right += 1.0; }
                if state.input.is_key_pressed(KeyCode::KeyA) { right -= 1.0; }
                if state.input.is_key_pressed(KeyCode::Space) { up += 1.0; }
                if state.input.is_key_pressed(KeyCode::ShiftLeft) { up -= 1.0; }
                state.fps_controller.process_movement(&mut state.camera, forward, right, up, dt);
                state.input.begin_frame();
                // Compute view-projection
                let view_proj = state.camera.view_projection();
                let cam_pos = [
                    state.camera.position.x,
                    state.camera.position.y,
                    state.camera.position.z,
                ];
                let cam_aligned = state.cam_aligned_size as usize;
                let mat_aligned = state.mat_aligned_size as usize;
                // Build staging data for camera and material uniforms: each
                // sphere gets one aligned slot in each buffer.
                let cam_total = NUM_SPHERES * cam_aligned;
                let mat_total = NUM_SPHERES * mat_aligned;
                let mut cam_staging = vec![0u8; cam_total];
                let mut mat_staging = vec![0u8; mat_total];
                // Half the grid extent, used to center the grid on the origin.
                let half_grid = (GRID_SIZE as f32 - 1.0) * SPACING * 0.5;
                for row in 0..GRID_SIZE {
                    for col in 0..GRID_SIZE {
                        let i = row * GRID_SIZE + col;
                        let x = col as f32 * SPACING - half_grid;
                        let y = row as f32 * SPACING - half_grid;
                        let model = Mat4::translation(x, y, 0.0);
                        let cam_uniform = CameraUniform {
                            view_proj: view_proj.cols,
                            model: model.cols,
                            camera_pos: cam_pos,
                            _padding: 0.0,
                        };
                        let bytes = bytemuck::bytes_of(&cam_uniform);
                        let offset = i * cam_aligned;
                        cam_staging[offset..offset + bytes.len()].copy_from_slice(bytes);
                        // Material: metallic varies with col (0..1), roughness
                        // with row (0.05..1.0) — a classic PBR swatch grid.
                        let metallic = col as f32 / (GRID_SIZE as f32 - 1.0);
                        let roughness = 0.05 + row as f32 * (0.95 / (GRID_SIZE as f32 - 1.0));
                        let mat_uniform = MaterialUniform::with_params(
                            [0.8, 0.2, 0.2, 1.0],
                            metallic,
                            roughness,
                        );
                        let bytes = bytemuck::bytes_of(&mat_uniform);
                        let offset = i * mat_aligned;
                        mat_staging[offset..offset + bytes.len()].copy_from_slice(bytes);
                    }
                }
                state.gpu.queue.write_buffer(&state.camera_buffer, 0, &cam_staging);
                state.gpu.queue.write_buffer(&state.material_buffer, 0, &mat_staging);
                // Build lights: 1 directional + 8 animated point lights
                let mut lights_uniform = LightsUniform::new();
                lights_uniform.add_light(LightData::directional(
                    [-0.5, -1.0, -0.5],
                    [1.0, 1.0, 1.0],
                    0.3,
                ));
                let t = state.time_elapsed;
                let orbit_radius = 3.5;
                for i in 0..NUM_POINT_LIGHTS {
                    // Evenly spaced phase offsets around the orbit, rotating
                    // at 0.8 rad/s, bobbing vertically per-light.
                    let angle = t * 0.8 + (i as f32) * std::f32::consts::TAU / NUM_POINT_LIGHTS as f32;
                    let px = orbit_radius * angle.cos();
                    let pz = orbit_radius * angle.sin();
                    let py = 1.0 * ((t * 1.5 + i as f32 * 0.7).sin());
                    // Animated color cycling through hues
                    let hue = (i as f32 / NUM_POINT_LIGHTS as f32 + t * 0.1) % 1.0;
                    let (r, g, b) = hue_to_rgb(hue);
                    lights_uniform.add_light(LightData::point(
                        [px, py, pz],
                        [r, g, b],
                        2.0,
                        10.0,
                    ));
                }
                state.gpu.queue.write_buffer(
                    &state.light_buffer,
                    0,
                    bytemuck::cast_slice(&[lights_uniform]),
                );
                // Camera position for lighting pass
                let cam_pos_uniform = CameraPositionUniform {
                    camera_pos: cam_pos,
                    _padding: 0.0,
                };
                state.gpu.queue.write_buffer(
                    &state.cam_pos_buffer,
                    0,
                    bytemuck::cast_slice(&[cam_pos_uniform]),
                );
                // Render: acquire the swapchain image, handling lost/OOM.
                let output = match state.gpu.surface.get_current_texture() {
                    Ok(t) => t,
                    Err(wgpu::SurfaceError::Lost) => {
                        // Surface lost: reconfigure and skip this frame.
                        let (w, h) = state.window.inner_size();
                        state.gpu.resize(w, h);
                        return;
                    }
                    Err(wgpu::SurfaceError::OutOfMemory) => {
                        event_loop.exit();
                        return;
                    }
                    Err(_) => return,
                };
                let view = output
                    .texture
                    .create_view(&wgpu::TextureViewDescriptor::default());
                let mut encoder = state.gpu.device.create_command_encoder(
                    &wgpu::CommandEncoderDescriptor {
                        label: Some("Deferred Render Encoder"),
                    },
                );
                // ---- Pass 1: G-Buffer ----
                // Writes position/normal/albedo/material to four targets,
                // clearing each to transparent black, with depth testing.
                {
                    let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                        label: Some("GBuffer Pass"),
                        color_attachments: &[
                            Some(wgpu::RenderPassColorAttachment {
                                view: &state.gbuffer.position_view,
                                resolve_target: None,
                                depth_slice: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Clear(wgpu::Color { r: 0.0, g: 0.0, b: 0.0, a: 0.0 }),
                                    store: wgpu::StoreOp::Store,
                                },
                            }),
                            Some(wgpu::RenderPassColorAttachment {
                                view: &state.gbuffer.normal_view,
                                resolve_target: None,
                                depth_slice: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Clear(wgpu::Color { r: 0.0, g: 0.0, b: 0.0, a: 0.0 }),
                                    store: wgpu::StoreOp::Store,
                                },
                            }),
                            Some(wgpu::RenderPassColorAttachment {
                                view: &state.gbuffer.albedo_view,
                                resolve_target: None,
                                depth_slice: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Clear(wgpu::Color { r: 0.0, g: 0.0, b: 0.0, a: 0.0 }),
                                    store: wgpu::StoreOp::Store,
                                },
                            }),
                            Some(wgpu::RenderPassColorAttachment {
                                view: &state.gbuffer.material_view,
                                resolve_target: None,
                                depth_slice: None,
                                ops: wgpu::Operations {
                                    load: wgpu::LoadOp::Clear(wgpu::Color { r: 0.0, g: 0.0, b: 0.0, a: 0.0 }),
                                    store: wgpu::StoreOp::Store,
                                },
                            }),
                        ],
                        depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                            view: &state.gbuffer.depth_view,
                            depth_ops: Some(wgpu::Operations {
                                load: wgpu::LoadOp::Clear(1.0),
                                store: wgpu::StoreOp::Store,
                            }),
                            stencil_ops: None,
                        }),
                        occlusion_query_set: None,
                        timestamp_writes: None,
                        multiview_mask: None,
                    });
                    rpass.set_pipeline(&state.gbuffer_pipeline);
                    rpass.set_bind_group(1, &state.pbr_texture_bind_group, &[]);
                    rpass.set_vertex_buffer(0, state.mesh.vertex_buffer.slice(..));
                    rpass.set_index_buffer(
                        state.mesh.index_buffer.slice(..),
                        wgpu::IndexFormat::Uint32,
                    );
                    // One draw per sphere, selecting its uniform slots via
                    // dynamic offsets into the shared camera/material buffers.
                    for i in 0..NUM_SPHERES {
                        let cam_offset = (i as u32) * state.cam_aligned_size;
                        let mat_offset = (i as u32) * state.mat_aligned_size;
                        rpass.set_bind_group(0, &state.camera_bind_group, &[cam_offset]);
                        rpass.set_bind_group(2, &state.material_bind_group, &[mat_offset]);
                        rpass.draw_indexed(0..state.mesh.num_indices, 0, 0..1);
                    }
                }
                // ---- Pass 2: Lighting (fullscreen) ----
                // Samples the G-Buffer and composites all lights onto the
                // swapchain image with a single fullscreen triangle.
                {
                    let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                        label: Some("Lighting Pass"),
                        color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                            view: &view,
                            resolve_target: None,
                            depth_slice: None,
                            ops: wgpu::Operations {
                                load: wgpu::LoadOp::Clear(wgpu::Color {
                                    r: 0.02,
                                    g: 0.02,
                                    b: 0.03,
                                    a: 1.0,
                                }),
                                store: wgpu::StoreOp::Store,
                            },
                        })],
                        depth_stencil_attachment: None,
                        occlusion_query_set: None,
                        timestamp_writes: None,
                        multiview_mask: None,
                    });
                    rpass.set_pipeline(&state.lighting_pipeline);
                    rpass.set_bind_group(0, &state.gbuffer_bind_group, &[]);
                    rpass.set_bind_group(1, &state.lights_bind_group, &[]);
                    rpass.set_bind_group(2, &state.shadow_bind_group, &[]);
                    rpass.set_vertex_buffer(0, state.fullscreen_vb.slice(..));
                    rpass.draw(0..3, 0..1);
                }
                state.gpu.queue.submit(std::iter::once(encoder.finish()));
                output.present();
            }
            _ => {}
        }
    }

    /// Continuous rendering: request another redraw whenever the loop idles.
    fn about_to_wait(&mut self, _event_loop: &ActiveEventLoop) {
        if let Some(state) = &self.state {
            state.window.request_redraw();
        }
    }
}
/// Helper: create the lighting pass G-Buffer bind group.
///
/// Bindings 0-3 are the four G-Buffer attachment views (position, normal,
/// albedo, material); binding 4 is the sampler used to read all of them.
fn create_gbuffer_bind_group(
    device: &wgpu::Device,
    layout: &wgpu::BindGroupLayout,
    gbuffer: &GBuffer,
    sampler: &wgpu::Sampler,
) -> wgpu::BindGroup {
    // Local constructor so each texture binding reads as a single line.
    fn tex_entry<'a>(binding: u32, view: &'a wgpu::TextureView) -> wgpu::BindGroupEntry<'a> {
        wgpu::BindGroupEntry {
            binding,
            resource: wgpu::BindingResource::TextureView(view),
        }
    }
    let entries = [
        tex_entry(0, &gbuffer.position_view),
        tex_entry(1, &gbuffer.normal_view),
        tex_entry(2, &gbuffer.albedo_view),
        tex_entry(3, &gbuffer.material_view),
        wgpu::BindGroupEntry {
            binding: 4,
            resource: wgpu::BindingResource::Sampler(sampler),
        },
    ];
    device.create_bind_group(&wgpu::BindGroupDescriptor {
        label: Some("Lighting GBuffer Bind Group"),
        layout,
        entries: &entries,
    })
}
/// Convert an HSV hue in [0, 1) (full saturation and value) to RGB.
fn hue_to_rgb(h: f32) -> (f32, f32, f32) {
    // Scale the hue onto the six 60-degree sectors of the color wheel.
    let scaled = h * 6.0;
    // Rising/falling ramps within the current sector.
    let rising = scaled.fract();
    let falling = 1.0 - rising;
    match scaled.floor() as i32 % 6 {
        0 => (1.0, rising, 0.0),  // red -> yellow
        1 => (falling, 1.0, 0.0), // yellow -> green
        2 => (0.0, 1.0, rising),  // green -> cyan
        3 => (0.0, falling, 1.0), // cyan -> blue
        4 => (rising, 0.0, 1.0),  // blue -> magenta
        _ => (1.0, 0.0, falling), // magenta -> red
    }
}
/// Entry point: install the logger, build the app shell, and hand control
/// to winit's event loop (blocks until the loop exits).
fn main() {
    env_logger::init();
    let mut app = DeferredDemoApp { state: None };
    let event_loop = EventLoop::new().unwrap();
    event_loop.run_app(&mut app).unwrap();
}