1#![allow(dead_code)]
14
15use alloc::{vec, vec::Vec};
16use core::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering};
17
18use spin::Mutex;
19
20use super::PixelFormat;
21
/// Number of fractional bits in the 16.16 fixed-point representation used
/// throughout the software rasterizer.
const FP_SHIFT: u32 = 16;

/// The value 1.0 in 16.16 fixed point.
const FP_ONE: i32 = 1 << FP_SHIFT;

/// Converts an integer to 16.16 fixed point.
///
/// Inputs with magnitude >= 2^15 overflow the integer part; callers are
/// expected to keep values well inside that range.
#[inline]
fn fp_from_int(v: i32) -> i32 {
    v << FP_SHIFT
}

/// Truncates a 16.16 fixed-point value to its integer part.
///
/// Arithmetic right shift, so negative values round toward negative infinity.
#[inline]
fn fp_to_int(v: i32) -> i32 {
    v >> FP_SHIFT
}

/// Multiplies two 16.16 fixed-point values, clamping the result to i32 range.
#[inline]
fn fp_mul(a: i32, b: i32) -> i32 {
    // i32 x i32 always fits in i64, so a plain widening multiply suffices.
    // The previous `checked_mul(..).unwrap_or(i64::MAX)` fallback was
    // unreachable — and, had it ever fired, would have saturated negative
    // products to +MAX. Removed as misleading dead code.
    let shifted = ((a as i64) * (b as i64)) >> FP_SHIFT;
    shifted.clamp(i32::MIN as i64, i32::MAX as i64) as i32
}

/// Divides two 16.16 fixed-point values.
///
/// Division by zero saturates toward the sign of the numerator instead of
/// trapping; the result is clamped to i32 range.
#[inline]
fn fp_div(a: i32, b: i32) -> i32 {
    if b == 0 {
        return if a >= 0 { i32::MAX } else { i32::MIN };
    }
    let numerator = (a as i64) << FP_SHIFT;
    (numerator / (b as i64)).clamp(i32::MIN as i64, i32::MAX as i64) as i32
}

/// Linear interpolation `a + t * (b - a)` with fixed-point `t` in [0, FP_ONE].
#[inline]
fn fp_lerp(a: i32, b: i32, t: i32) -> i32 {
    a + fp_mul(t, b - a)
}
68
/// Kind of 3D resource backing a virgl object.
///
/// Discriminants are explicit; presumably they mirror the host protocol's
/// target values — TODO confirm against the virgl wire format.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u32)]
pub enum Virgl3dResourceType {
    Texture2D = 1,
    Texture3D = 2,
    TextureCube = 3,
    Buffer = 4,
    Renderbuffer = 5,
    TextureArray = 6,
}
90
/// Pixel/texel formats supported for virgl resources.
///
/// Discriminants are explicit; NOTE(review): these look like a local
/// enumeration rather than the upstream virgl format ids — confirm before
/// passing them over the wire.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u32)]
pub enum VirglFormat {
    B8G8R8A8Unorm = 1,
    B8G8R8X8Unorm = 2,
    R8G8B8A8Unorm = 3,
    R8Unorm = 4,
    R16G16B16A16Float = 5,
    Z24UnormS8Uint = 6,
    Z32Float = 7,
    R32Uint = 8,
}
104
/// Descriptor for a host-side 3D resource, keyed by `resource_id`.
#[derive(Debug, Clone)]
pub struct Virgl3dResource {
    pub resource_id: u32,
    pub resource_type: Virgl3dResourceType,
    pub format: VirglFormat,
    pub width: u32,
    pub height: u32,
    pub depth: u32,
    pub array_size: u32,
    // Highest mip level (0 = no mipmaps).
    pub last_level: u32,
    pub nr_samples: u32,
    // Opaque bind-usage bitmask forwarded to the host.
    pub bind_flags: u32,
}
119
/// A guest rendering context: a fixed-size debug name plus the set of
/// resource ids currently attached to it.
#[derive(Debug)]
pub struct VirglContext {
    pub ctx_id: u32,
    pub name: [u8; 64],
    pub name_len: usize,
    pub resources: Vec<u32>,
    pub active: bool,
}

impl VirglContext {
    /// Creates an active context; `name` is copied and truncated to 64 bytes.
    pub fn new(ctx_id: u32, name: &[u8]) -> Self {
        let mut buf = [0u8; 64];
        let copied = name.len().min(buf.len());
        buf[..copied].copy_from_slice(&name[..copied]);
        VirglContext {
            ctx_id,
            name: buf,
            name_len: copied,
            resources: Vec::new(),
            active: true,
        }
    }

    /// Records `resource_id` as attached; attaching twice is a no-op.
    pub(crate) fn attach_resource(&mut self, resource_id: u32) {
        let already_attached = self.resources.iter().any(|&r| r == resource_id);
        if !already_attached {
            self.resources.push(resource_id);
        }
    }

    /// Removes every occurrence of `resource_id` from the attached set.
    pub(crate) fn detach_resource(&mut self, resource_id: u32) {
        self.resources.retain(|existing| *existing != resource_id);
    }
}
156
/// Commands accumulated in the driver's queue for later submission to the
/// host renderer (see `VirglDriver::flush`).
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum VirglCommand {
    /// Allocate a 3D resource with the given type, format, and geometry.
    CreateResource3d {
        resource_id: u32,
        resource_type: Virgl3dResourceType,
        format: VirglFormat,
        width: u32,
        height: u32,
        depth: u32,
        array_size: u32,
        last_level: u32,
        nr_samples: u32,
        bind_flags: u32,
    },
    /// Copy a 3D sub-region between guest memory and the host resource.
    Transfer3d {
        resource_id: u32,
        level: u32,
        x: u32,
        y: u32,
        z: u32,
        width: u32,
        height: u32,
        depth: u32,
        stride: u32,
        layer_stride: u32,
        direction: TransferDirection,
    },
    /// Create a rendering context (name bytes travel out of band).
    CtxCreate { ctx_id: u32, name_len: u32 },
    /// Tear down a rendering context.
    CtxDestroy { ctx_id: u32 },
    /// Submit a 3D command buffer of `data_len` bytes for `ctx_id`.
    SubmitCommandBuffer { ctx_id: u32, data_len: u32 },
    /// Install a fence that the host signals when prior work completes.
    CreateFence { fence_id: u64, ctx_id: u32 },
}
196
/// Direction of a `Transfer3d` copy relative to the host.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TransferDirection {
    // Guest memory -> host resource.
    ToHost,
    // Host resource -> guest memory.
    FromHost,
}
205
/// Synchronization fence: starts unsignaled and is flipped exactly once
/// when the host reports completion.
#[derive(Debug)]
pub struct VirglFence {
    pub fence_id: u64,
    pub ctx_id: u32,
    pub signaled: AtomicBool,
}

impl VirglFence {
    /// Creates an unsignaled fence bound to `ctx_id`.
    pub fn new(fence_id: u64, ctx_id: u32) -> Self {
        VirglFence {
            fence_id,
            ctx_id,
            signaled: AtomicBool::new(false),
        }
    }

    /// Marks the fence complete. The Release store pairs with the Acquire
    /// load in `is_signaled` so waiters observe prior writes.
    pub(crate) fn signal(&self) {
        self.signaled.store(true, Ordering::Release);
    }

    /// Whether `signal` has been called.
    pub(crate) fn is_signaled(&self) -> bool {
        self.signaled.load(Ordering::Acquire)
    }
}
231
/// Top-level virgl driver state: live contexts, resources, fences, and the
/// pending command queue. Id counters start at 1 (0 acts as "invalid").
pub struct VirglDriver {
    pub contexts: Vec<VirglContext>,
    pub resources: Vec<Virgl3dResource>,
    pub fences: Vec<VirglFence>,
    // Commands accumulated since the last `flush`.
    pub command_queue: Vec<VirglCommand>,
    next_ctx_id: u32,
    next_resource_id: u32,
    next_fence_id: u64,
}
242
243impl VirglDriver {
244 pub fn new() -> Self {
245 Self {
246 contexts: Vec::new(),
247 resources: Vec::new(),
248 fences: Vec::new(),
249 command_queue: Vec::new(),
250 next_ctx_id: 1,
251 next_resource_id: 1,
252 next_fence_id: 1,
253 }
254 }
255
256 pub(crate) fn create_context(&mut self, name: &[u8]) -> u32 {
258 let ctx_id = self.next_ctx_id;
259 self.next_ctx_id += 1;
260 let ctx = VirglContext::new(ctx_id, name);
261 self.command_queue.push(VirglCommand::CtxCreate {
262 ctx_id,
263 name_len: ctx.name_len as u32,
264 });
265 self.contexts.push(ctx);
266 ctx_id
267 }
268
269 pub(crate) fn destroy_context(&mut self, ctx_id: u32) -> bool {
271 if let Some(pos) = self.contexts.iter().position(|c| c.ctx_id == ctx_id) {
272 self.contexts[pos].active = false;
273 self.command_queue.push(VirglCommand::CtxDestroy { ctx_id });
274 self.contexts.remove(pos);
275 true
276 } else {
277 false
278 }
279 }
280
281 #[allow(clippy::too_many_arguments)]
283 pub(crate) fn create_resource_3d(
284 &mut self,
285 resource_type: Virgl3dResourceType,
286 format: VirglFormat,
287 width: u32,
288 height: u32,
289 depth: u32,
290 array_size: u32,
291 last_level: u32,
292 nr_samples: u32,
293 bind_flags: u32,
294 ) -> u32 {
295 let resource_id = self.next_resource_id;
296 self.next_resource_id += 1;
297
298 let resource = Virgl3dResource {
299 resource_id,
300 resource_type,
301 format,
302 width,
303 height,
304 depth,
305 array_size,
306 last_level,
307 nr_samples,
308 bind_flags,
309 };
310
311 self.command_queue.push(VirglCommand::CreateResource3d {
312 resource_id,
313 resource_type,
314 format,
315 width,
316 height,
317 depth,
318 array_size,
319 last_level,
320 nr_samples,
321 bind_flags,
322 });
323
324 self.resources.push(resource);
325 resource_id
326 }
327
328 #[allow(clippy::too_many_arguments)]
330 pub(crate) fn transfer_3d(
331 &mut self,
332 resource_id: u32,
333 level: u32,
334 x: u32,
335 y: u32,
336 z: u32,
337 width: u32,
338 height: u32,
339 depth: u32,
340 stride: u32,
341 layer_stride: u32,
342 direction: TransferDirection,
343 ) -> bool {
344 if !self.resources.iter().any(|r| r.resource_id == resource_id) {
345 return false;
346 }
347
348 self.command_queue.push(VirglCommand::Transfer3d {
349 resource_id,
350 level,
351 x,
352 y,
353 z,
354 width,
355 height,
356 depth,
357 stride,
358 layer_stride,
359 direction,
360 });
361 true
362 }
363
364 pub(crate) fn submit_command_buffer(&mut self, ctx_id: u32, data_len: u32) -> bool {
366 if !self.contexts.iter().any(|c| c.ctx_id == ctx_id && c.active) {
367 return false;
368 }
369 self.command_queue
370 .push(VirglCommand::SubmitCommandBuffer { ctx_id, data_len });
371 true
372 }
373
374 pub(crate) fn create_fence(&mut self, ctx_id: u32) -> u64 {
376 let fence_id = self.next_fence_id;
377 self.next_fence_id += 1;
378
379 self.command_queue
380 .push(VirglCommand::CreateFence { fence_id, ctx_id });
381 self.fences.push(VirglFence::new(fence_id, ctx_id));
382 fence_id
383 }
384
385 pub(crate) fn is_fence_signaled(&self, fence_id: u64) -> Option<bool> {
387 self.fences
388 .iter()
389 .find(|f| f.fence_id == fence_id)
390 .map(|f| f.is_signaled())
391 }
392
393 pub(crate) fn signal_fence(&self, fence_id: u64) -> bool {
395 if let Some(fence) = self.fences.iter().find(|f| f.fence_id == fence_id) {
396 fence.signal();
397 true
398 } else {
399 false
400 }
401 }
402
403 pub(crate) fn flush(&mut self) -> usize {
405 let count = self.command_queue.len();
406 self.command_queue.clear();
407 count
408 }
409
410 pub(crate) fn find_resource(&self, resource_id: u32) -> Option<&Virgl3dResource> {
412 self.resources.iter().find(|r| r.resource_id == resource_id)
413 }
414
415 pub(crate) fn destroy_resource(&mut self, resource_id: u32) -> bool {
417 for ctx in &mut self.contexts {
419 ctx.detach_resource(resource_id);
420 }
421 if let Some(pos) = self
422 .resources
423 .iter()
424 .position(|r| r.resource_id == resource_id)
425 {
426 self.resources.remove(pos);
427 true
428 } else {
429 false
430 }
431 }
432}
433
434impl Default for VirglDriver {
435 fn default() -> Self {
436 Self::new()
437 }
438}
439
// Capacity limits for the software pipeline. None of these are referenced
// in this chunk — presumably reserved for a vertex-layout/uniform path
// elsewhere in the file; confirm before removing.
const MAX_VERTEX_ATTRIBS: usize = 8;

const MAX_UNIFORMS: usize = 32;

const MAX_TEXTURE_SIZE: u32 = 4096;
452
/// Primitive topologies for draw calls.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PrimitiveType {
    Triangles,
    TriangleStrip,
    TriangleFan,
    Lines,
    LineStrip,
    Points,
}
463
/// Fragment blend equations (see `SoftwareRasterizer::alpha_blend`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BlendMode {
    // Source overwrites destination.
    None,
    // Classic src-alpha / one-minus-src-alpha blending.
    Alpha,
    // Saturating per-channel addition.
    Additive,
    // Per-channel multiply.
    Multiply,
}
472
/// Depth-test comparison: a fragment passes when `new_z <op> stored_z`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DepthFunc {
    Never,
    Less,
    Equal,
    LessEqual,
    Greater,
    NotEqual,
    GreaterEqual,
    Always,
}
485
/// Texture sampling filter for `SoftTexture::sample`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TextureFilter {
    Nearest,
    Bilinear,
}
492
/// Four-component vector with 16.16 fixed-point components.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct Vec4 {
    pub x: i32,
    pub y: i32,
    pub z: i32,
    pub w: i32,
}
502
503impl Vec4 {
504 pub const ZERO: Self = Self {
505 x: 0,
506 y: 0,
507 z: 0,
508 w: 0,
509 };
510
511 pub const fn new(x: i32, y: i32, z: i32, w: i32) -> Self {
512 Self { x, y, z, w }
513 }
514
515 pub(crate) fn from_ints(x: i32, y: i32, z: i32, w: i32) -> Self {
517 Self {
518 x: fp_from_int(x),
519 y: fp_from_int(y),
520 z: fp_from_int(z),
521 w: fp_from_int(w),
522 }
523 }
524
525 pub(crate) fn lerp(a: &Vec4, b: &Vec4, t: i32) -> Vec4 {
527 Vec4 {
528 x: fp_lerp(a.x, b.x, t),
529 y: fp_lerp(a.y, b.y, t),
530 z: fp_lerp(a.z, b.z, t),
531 w: fp_lerp(a.w, b.w, t),
532 }
533 }
534}
535
/// A single vertex: clip-space position, RGBA color (fixed-point 0..255
/// per channel — see `vec4_to_argb`), and fixed-point UV coordinates.
#[derive(Debug, Clone, Copy, Default)]
pub struct Vertex {
    pub position: Vec4,
    pub color: Vec4,
    pub texcoord_u: i32,
    pub texcoord_v: i32,
}
547
/// Software texture: row-major ARGB8888 pixels plus the active filter.
pub struct SoftTexture {
    pub width: u32,
    pub height: u32,
    pub pixels: Vec<u32>,
    pub filter: TextureFilter,
}
555
impl SoftTexture {
    /// Allocates a zero-filled texture with nearest filtering.
    /// An overflowing `width * height` collapses to an empty pixel store
    /// rather than panicking.
    pub fn new(width: u32, height: u32) -> Self {
        let size = (width as usize).checked_mul(height as usize).unwrap_or(0);
        Self {
            width,
            height,
            pixels: vec![0u32; size],
            filter: TextureFilter::Nearest,
        }
    }

    /// Samples at fixed-point UV coordinates (FP_ONE == 1.0), dispatching
    /// on the configured filter. Returns 0 for degenerate textures.
    pub(crate) fn sample(&self, u: i32, v: i32) -> u32 {
        if self.width == 0 || self.height == 0 {
            return 0;
        }
        match self.filter {
            TextureFilter::Nearest => self.sample_nearest(u, v),
            TextureFilter::Bilinear => self.sample_bilinear(u, v),
        }
    }

    /// Nearest-neighbor sample with repeat (wrap) addressing.
    fn sample_nearest(&self, u: i32, v: i32) -> u32 {
        // Double-modulo wraps negative coordinates into [0, FP_ONE).
        let u_wrapped = ((u % FP_ONE) + FP_ONE) % FP_ONE;
        let v_wrapped = ((v % FP_ONE) + FP_ONE) % FP_ONE;

        // Scale the fractional coordinate to texel space, then truncate.
        let x = (fp_mul(u_wrapped, fp_from_int(self.width as i32)) >> FP_SHIFT) as u32;
        let y = (fp_mul(v_wrapped, fp_from_int(self.height as i32)) >> FP_SHIFT) as u32;
        // Clamp to the last texel in case rounding lands exactly on the edge.
        let x = x.min(self.width.saturating_sub(1));
        let y = y.min(self.height.saturating_sub(1));

        // Checked index math; any overflow falls back to texel 0.
        let idx = (y as usize)
            .checked_mul(self.width as usize)
            .and_then(|v| v.checked_add(x as usize))
            .unwrap_or(0);

        self.pixels.get(idx).copied().unwrap_or(0)
    }

    /// Bilinear sample with repeat addressing. Textures smaller than 2x2
    /// have no neighbor to blend with and fall back to nearest.
    fn sample_bilinear(&self, u: i32, v: i32) -> u32 {
        if self.width < 2 || self.height < 2 {
            return self.sample_nearest(u, v);
        }

        let u_wrapped = ((u % FP_ONE) + FP_ONE) % FP_ONE;
        let v_wrapped = ((v % FP_ONE) + FP_ONE) % FP_ONE;

        // Back off half a texel so interpolation is centered on texel centers.
        let tx = fp_mul(u_wrapped, fp_from_int(self.width as i32)) - (FP_ONE / 2);
        let ty = fp_mul(v_wrapped, fp_from_int(self.height as i32)) - (FP_ONE / 2);

        // Top-left texel of the 2x2 footprint, clamped so x1/y1 stay in bounds.
        let x0 = (fp_to_int(tx).max(0) as u32).min(self.width - 2);
        let y0 = (fp_to_int(ty).max(0) as u32).min(self.height - 2);
        let x1 = x0 + 1;
        let y1 = y0 + 1;

        // Fractional blend weights (low 16 bits of the fixed-point coords).
        // NOTE(review): when x0/y0 were clamped above, these fractions no
        // longer match the clamped texel, so edge blending is slightly
        // off — confirm this is acceptable.
        let fx = tx & (FP_ONE - 1); let fy = ty & (FP_ONE - 1);

        let w = self.width as usize;
        // Fetch the 2x2 neighborhood; out-of-range indices read as 0.
        let p00 = self
            .pixels
            .get(y0 as usize * w + x0 as usize)
            .copied()
            .unwrap_or(0);
        let p10 = self
            .pixels
            .get(y0 as usize * w + x1 as usize)
            .copied()
            .unwrap_or(0);
        let p01 = self
            .pixels
            .get(y1 as usize * w + x0 as usize)
            .copied()
            .unwrap_or(0);
        let p11 = self
            .pixels
            .get(y1 as usize * w + x1 as usize)
            .copied()
            .unwrap_or(0);

        bilinear_pixel(p00, p10, p01, p11, fx, fy)
    }
}
642
643fn bilinear_pixel(p00: u32, p10: u32, p01: u32, p11: u32, fx: i32, fy: i32) -> u32 {
645 let inv_fx = FP_ONE - fx;
646 let inv_fy = FP_ONE - fy;
647
648 let mut result = 0u32;
649 for shift in [0u32, 8, 16, 24] {
650 let c00 = ((p00 >> shift) & 0xFF) as i32;
651 let c10 = ((p10 >> shift) & 0xFF) as i32;
652 let c01 = ((p01 >> shift) & 0xFF) as i32;
653 let c11 = ((p11 >> shift) & 0xFF) as i32;
654
655 let top = fp_mul(c00 << FP_SHIFT, inv_fx) + fp_mul(c10 << FP_SHIFT, fx);
657 let bot = fp_mul(c01 << FP_SHIFT, inv_fx) + fp_mul(c11 << FP_SHIFT, fx);
658 let val = fp_mul(top, inv_fy) + fp_mul(bot, fy);
659 let byte = fp_to_int(val).clamp(0, 255) as u32;
660 result |= byte << shift;
661 }
662 result
663}
664
/// Axis-aligned scissor rectangle in framebuffer pixel coordinates.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct ScissorRect {
    pub x: i32,
    pub y: i32,
    pub width: u32,
    pub height: u32,
}
673
/// Viewport mapping from NDC to framebuffer pixels, with a fixed-point
/// depth range (`near`..`far`, FP_ONE == 1.0).
#[derive(Debug, Clone, Copy)]
pub struct Viewport {
    pub x: i32,
    pub y: i32,
    pub width: u32,
    pub height: u32,
    pub near: i32,
    pub far: i32,
}
686
687impl Default for Viewport {
688 fn default() -> Self {
689 Self {
690 x: 0,
691 y: 0,
692 width: 640,
693 height: 480,
694 near: 0,
695 far: FP_ONE,
696 }
697 }
698}
699
/// Fixed-function "shader" state consumed by the rasterizer.
#[derive(Debug, Clone)]
pub struct ShaderState {
    // Row-major 4x4 model-view-projection matrix in fixed point.
    pub mvp: [Vec4; 4],
    // Generic uniform slots (not read in this chunk — TODO confirm use).
    pub uniforms: Vec<Vec4>,
    pub color_enabled: bool,
    // When set and a texture is bound, texels replace vertex color.
    pub texcoord_enabled: bool,
}
712
713impl Default for ShaderState {
714 fn default() -> Self {
715 Self {
716 mvp: [
717 Vec4::new(FP_ONE, 0, 0, 0),
718 Vec4::new(0, FP_ONE, 0, 0),
719 Vec4::new(0, 0, FP_ONE, 0),
720 Vec4::new(0, 0, 0, FP_ONE),
721 ],
722 uniforms: Vec::new(),
723 color_enabled: true,
724 texcoord_enabled: false,
725 }
726 }
727}
728
/// CPU triangle rasterizer with fixed-point math, ARGB8888 color buffer,
/// and an i32 depth buffer.
pub struct SoftwareRasterizer {
    pub color_buffer: Vec<u32>,
    pub depth_buffer: Vec<i32>,
    pub width: u32,
    pub height: u32,
    pub viewport: Viewport,
    // `None` disables scissor testing.
    pub scissor: Option<ScissorRect>,
    pub depth_func: DepthFunc,
    pub depth_write: bool,
    pub depth_test: bool,
    pub blend_mode: BlendMode,
    // Single bound texture slot.
    pub texture: Option<SoftTexture>,
    pub shader: ShaderState,
    // ARGB value written by `clear_color_buffer`.
    pub clear_color: u32,
    // Depth value written by `clear_depth_buffer`.
    pub clear_depth: i32,
}
760
impl SoftwareRasterizer {
    /// Creates a rasterizer for a `width` x `height` target.
    ///
    /// The color buffer starts zeroed, depth is cleared to `i32::MAX`
    /// (matching the default `Less` depth test), and the viewport covers
    /// the whole target.
    pub fn new(width: u32, height: u32) -> Self {
        // An overflowing width*height collapses to empty buffers instead of panicking.
        let pixel_count = (width as usize).checked_mul(height as usize).unwrap_or(0);
        Self {
            color_buffer: vec![0u32; pixel_count],
            depth_buffer: vec![i32::MAX; pixel_count],
            width,
            height,
            viewport: Viewport {
                x: 0,
                y: 0,
                width,
                height,
                near: 0,
                far: FP_ONE,
            },
            scissor: None,
            depth_func: DepthFunc::Less,
            depth_write: true,
            depth_test: true,
            blend_mode: BlendMode::None,
            texture: None,
            shader: ShaderState::default(),
            // Opaque black in ARGB8888.
            clear_color: 0xFF000000,
            clear_depth: i32::MAX,
        }
    }

    /// Fills the color buffer with `clear_color`.
    pub(crate) fn clear_color_buffer(&mut self) {
        for px in &mut self.color_buffer {
            *px = self.clear_color;
        }
    }

    /// Fills the depth buffer with `clear_depth`.
    pub(crate) fn clear_depth_buffer(&mut self) {
        for d in &mut self.depth_buffer {
            *d = self.clear_depth;
        }
    }

    /// Multiplies `pos` by the row-major MVP matrix (fixed-point dot products).
    fn transform_vertex(&self, pos: &Vec4) -> Vec4 {
        let m = &self.shader.mvp;
        Vec4 {
            x: fp_mul(m[0].x, pos.x)
                + fp_mul(m[0].y, pos.y)
                + fp_mul(m[0].z, pos.z)
                + fp_mul(m[0].w, pos.w),
            y: fp_mul(m[1].x, pos.x)
                + fp_mul(m[1].y, pos.y)
                + fp_mul(m[1].z, pos.z)
                + fp_mul(m[1].w, pos.w),
            z: fp_mul(m[2].x, pos.x)
                + fp_mul(m[2].y, pos.y)
                + fp_mul(m[2].z, pos.z)
                + fp_mul(m[2].w, pos.w),
            w: fp_mul(m[3].x, pos.x)
                + fp_mul(m[3].y, pos.y)
                + fp_mul(m[3].z, pos.z)
                + fp_mul(m[3].w, pos.w),
        }
    }

    /// Maps an NDC position ([-1, 1] in fixed point) to screen space:
    /// x/y into viewport pixels, z lerped into the [near, far] depth range.
    fn viewport_transform(&self, ndc: &Vec4) -> Vec4 {
        let vp = &self.viewport;
        let half_w = fp_from_int(vp.width as i32) / 2;
        let half_h = fp_from_int(vp.height as i32) / 2;

        Vec4 {
            x: fp_mul(ndc.x, half_w) + fp_from_int(vp.x) + half_w,
            y: fp_mul(ndc.y, half_h) + fp_from_int(vp.y) + half_h,
            // (z + 1) / 2 remaps [-1, 1] to [0, 1] before the depth lerp.
            z: fp_lerp(vp.near, vp.far, (ndc.z + FP_ONE) / 2),
            w: ndc.w,
        }
    }

    /// Returns whether a fragment at `new_z` passes the depth comparison
    /// against the stored `old_z`.
    fn depth_test_pass(&self, new_z: i32, old_z: i32) -> bool {
        match self.depth_func {
            DepthFunc::Never => false,
            DepthFunc::Less => new_z < old_z,
            DepthFunc::Equal => new_z == old_z,
            DepthFunc::LessEqual => new_z <= old_z,
            DepthFunc::Greater => new_z > old_z,
            DepthFunc::NotEqual => new_z != old_z,
            DepthFunc::GreaterEqual => new_z >= old_z,
            DepthFunc::Always => true,
        }
    }

    /// Returns whether pixel (x, y) is inside the scissor rect
    /// (always true when no scissor is set).
    fn scissor_test(&self, x: i32, y: i32) -> bool {
        match &self.scissor {
            None => true,
            Some(s) => {
                x >= s.x && x < s.x + s.width as i32 && y >= s.y && y < s.y + s.height as i32
            }
        }
    }

    /// Combines `src` over `dst` according to the active blend mode.
    /// All pixels are ARGB8888; per-channel math rounds via the +127 bias.
    fn alpha_blend(&self, src: u32, dst: u32) -> u32 {
        match self.blend_mode {
            BlendMode::None => src,
            BlendMode::Alpha => {
                let sa = (src >> 24) & 0xFF;
                // Fast paths: fully opaque or fully transparent source.
                if sa == 255 {
                    return src;
                }
                if sa == 0 {
                    return dst;
                }
                let inv_sa = 255 - sa;
                let mut result = 0u32;
                // Blend B, G, R channels (shifts 0/8/16); alpha handled below.
                for shift in [0u32, 8, 16] {
                    let sc = (src >> shift) & 0xFF;
                    let dc = (dst >> shift) & 0xFF;
                    let blended = (sc * sa + dc * inv_sa + 127) / 255;
                    result |= blended.min(255) << shift;
                }
                // out_a = sa + da * (1 - sa), the standard "over" alpha.
                let da = (dst >> 24) & 0xFF;
                let out_a = (sa + (da * inv_sa + 127) / 255).min(255);
                result |= out_a << 24;
                result
            }
            BlendMode::Additive => {
                // Saturating add on all four channels, including alpha.
                let mut result = 0u32;
                for shift in [0u32, 8, 16, 24] {
                    let sc = (src >> shift) & 0xFF;
                    let dc = (dst >> shift) & 0xFF;
                    result |= (sc + dc).min(255) << shift;
                }
                result
            }
            BlendMode::Multiply => {
                // Multiply color channels; alpha takes the max of src/dst.
                let mut result = 0u32;
                for shift in [0u32, 8, 16] {
                    let sc = (src >> shift) & 0xFF;
                    let dc = (dst >> shift) & 0xFF;
                    result |= ((sc * dc + 127) / 255) << shift;
                }
                let sa = (src >> 24) & 0xFF;
                let da = (dst >> 24) & 0xFF;
                result |= sa.max(da) << 24;
                result
            }
        }
    }

    /// Writes one fragment: bounds check, scissor, depth test/write, blend.
    /// Note the depth buffer is updated before blending, so a blended
    /// fragment still claims its depth.
    fn write_fragment(&mut self, x: i32, y: i32, z: i32, color: u32) {
        if x < 0 || y < 0 || x >= self.width as i32 || y >= self.height as i32 {
            return;
        }
        if !self.scissor_test(x, y) {
            return;
        }

        let idx = y as usize * self.width as usize + x as usize;
        if idx >= self.color_buffer.len() {
            return;
        }

        if self.depth_test {
            if !self.depth_test_pass(z, self.depth_buffer[idx]) {
                return;
            }
            if self.depth_write {
                self.depth_buffer[idx] = z;
            }
        }

        let final_color = if self.blend_mode != BlendMode::None {
            self.alpha_blend(color, self.color_buffer[idx])
        } else {
            color
        };
        self.color_buffer[idx] = final_color;
    }

    /// Packs a fixed-point color vector (each channel expected in [0, 255]
    /// after truncation) into ARGB8888, clamping out-of-range channels.
    fn vec4_to_argb(color: &Vec4) -> u32 {
        let r = (fp_to_int(color.x).clamp(0, 255) as u32) & 0xFF;
        let g = (fp_to_int(color.y).clamp(0, 255) as u32) & 0xFF;
        let b = (fp_to_int(color.z).clamp(0, 255) as u32) & 0xFF;
        let a = (fp_to_int(color.w).clamp(0, 255) as u32) & 0xFF;
        (a << 24) | (r << 16) | (g << 8) | b
    }

    /// Rasterizes one triangle: MVP transform, perspective divide, viewport
    /// map, then a bounding-box scan with edge-function inside tests.
    ///
    /// Attribute interpolation uses screen-space barycentrics (no
    /// perspective correction), and both front- and back-facing triangles
    /// are drawn (sign of `area` only flips the inside test).
    pub(crate) fn rasterize_triangle(&mut self, v0: &Vertex, v1: &Vertex, v2: &Vertex) {
        let pos0 = self.transform_vertex(&v0.position);
        let pos1 = self.transform_vertex(&v1.position);
        let pos2 = self.transform_vertex(&v2.position);

        // Perspective divide; skipped when w == 1 (identity) or w == 0
        // (degenerate — the position is passed through unchanged).
        let ndc0 = if pos0.w != 0 && pos0.w != FP_ONE {
            Vec4::new(
                fp_div(pos0.x, pos0.w),
                fp_div(pos0.y, pos0.w),
                fp_div(pos0.z, pos0.w),
                FP_ONE,
            )
        } else {
            pos0
        };
        let ndc1 = if pos1.w != 0 && pos1.w != FP_ONE {
            Vec4::new(
                fp_div(pos1.x, pos1.w),
                fp_div(pos1.y, pos1.w),
                fp_div(pos1.z, pos1.w),
                FP_ONE,
            )
        } else {
            pos1
        };
        let ndc2 = if pos2.w != 0 && pos2.w != FP_ONE {
            Vec4::new(
                fp_div(pos2.x, pos2.w),
                fp_div(pos2.y, pos2.w),
                fp_div(pos2.z, pos2.w),
                FP_ONE,
            )
        } else {
            pos2
        };

        let s0 = self.viewport_transform(&ndc0);
        let s1 = self.viewport_transform(&ndc1);
        let s2 = self.viewport_transform(&ndc2);

        // Integer screen coordinates for the edge functions.
        let x0 = fp_to_int(s0.x);
        let y0 = fp_to_int(s0.y);
        let x1 = fp_to_int(s1.x);
        let y1 = fp_to_int(s1.y);
        let x2 = fp_to_int(s2.x);
        let y2 = fp_to_int(s2.y);

        // Bounding box clipped to the framebuffer.
        let min_x = x0.min(x1).min(x2).max(0);
        let max_x = x0.max(x1).max(x2).min(self.width as i32 - 1);
        let min_y = y0.min(y1).min(y2).max(0);
        let max_y = y0.max(y1).max(y2).min(self.height as i32 - 1);

        if min_x > max_x || min_y > max_y {
            return;
        }

        // Twice the signed triangle area; zero means degenerate.
        let area = (x1 - x0) as i64 * (y2 - y0) as i64 - (x2 - x0) as i64 * (y1 - y0) as i64;
        if area == 0 {
            return;
        }

        let mut py = min_y;
        while py <= max_y {
            let mut px = min_x;
            while px <= max_x {
                // Edge functions: w0/w1/w2 are (twice) the sub-areas opposite
                // v0/v1/v2 — the unnormalized barycentric weights.
                let w0 = (px - x1) as i64 * (y2 - y1) as i64 - (py - y1) as i64 * (x2 - x1) as i64;
                let w1 = (px - x2) as i64 * (y0 - y2) as i64 - (py - y2) as i64 * (x0 - x2) as i64;
                let w2 = (px - x0) as i64 * (y1 - y0) as i64 - (py - y0) as i64 * (x1 - x0) as i64;

                // Accept either winding by matching the sign of `area`.
                let inside = if area > 0 {
                    w0 >= 0 && w1 >= 0 && w2 >= 0
                } else {
                    w0 <= 0 && w1 <= 0 && w2 <= 0
                };

                if inside {
                    // Normalized fixed-point barycentrics; bary2 is derived
                    // so the three always sum to exactly FP_ONE.
                    let bary0 = ((w0 << FP_SHIFT) / area) as i32;
                    let bary1 = ((w1 << FP_SHIFT) / area) as i32;
                    let bary2 = FP_ONE - bary0 - bary1;

                    let z = fp_mul(bary0, s0.z) + fp_mul(bary1, s1.z) + fp_mul(bary2, s2.z);

                    // Screen-space interpolation of the vertex colors.
                    let color = Vec4 {
                        x: fp_mul(bary0, v0.color.x)
                            + fp_mul(bary1, v1.color.x)
                            + fp_mul(bary2, v2.color.x),
                        y: fp_mul(bary0, v0.color.y)
                            + fp_mul(bary1, v1.color.y)
                            + fp_mul(bary2, v2.color.y),
                        z: fp_mul(bary0, v0.color.z)
                            + fp_mul(bary1, v1.color.z)
                            + fp_mul(bary2, v2.color.z),
                        w: fp_mul(bary0, v0.color.w)
                            + fp_mul(bary1, v1.color.w)
                            + fp_mul(bary2, v2.color.w),
                    };

                    let mut frag_color = Self::vec4_to_argb(&color);

                    // Texturing replaces (not modulates) the vertex color.
                    if self.shader.texcoord_enabled {
                        if let Some(ref tex) = self.texture {
                            let u = fp_mul(bary0, v0.texcoord_u)
                                + fp_mul(bary1, v1.texcoord_u)
                                + fp_mul(bary2, v2.texcoord_u);
                            let v = fp_mul(bary0, v0.texcoord_v)
                                + fp_mul(bary1, v1.texcoord_v)
                                + fp_mul(bary2, v2.texcoord_v);
                            frag_color = tex.sample(u, v);
                        }
                    }

                    self.write_fragment(px, py, z, frag_color);
                }
                px += 1;
            }
            py += 1;
        }
    }

    /// Draws consecutive vertex triples as triangles; a trailing partial
    /// triple is ignored.
    pub(crate) fn draw_triangles(&mut self, vertices: &[Vertex]) {
        let count = vertices.len() / 3;
        let mut i = 0;
        while i < count {
            let base = i * 3;
            self.rasterize_triangle(&vertices[base], &vertices[base + 1], &vertices[base + 2]);
            i += 1;
        }
    }

    /// Draws indexed triangles; triples containing an out-of-range index
    /// are silently skipped.
    pub(crate) fn draw_indexed_triangles(&mut self, vertices: &[Vertex], indices: &[u32]) {
        let count = indices.len() / 3;
        let mut i = 0;
        while i < count {
            let i0 = indices[i * 3] as usize;
            let i1 = indices[i * 3 + 1] as usize;
            let i2 = indices[i * 3 + 2] as usize;
            if i0 < vertices.len() && i1 < vertices.len() && i2 < vertices.len() {
                self.rasterize_triangle(&vertices[i0], &vertices[i1], &vertices[i2]);
            }
            i += 1;
        }
    }
}
1116
/// Memory domain a GEM buffer currently resides in.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MemoryDomain {
    Cpu,
    Gpu,
    Gtt,
    Shared,
}

/// CPU caching policy for a mapped GEM buffer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CacheMode {
    Cached,
    WriteCombine,
    Uncached,
}

/// A reference-counted GEM (graphics execution manager) buffer object
/// with an in-memory backing store and a short debug name.
pub struct GemBufferObject {
    pub handle: u32,
    pub size: usize,
    pub domain: MemoryDomain,
    pub cache_mode: CacheMode,
    ref_count: AtomicU32,
    pub pinned: bool,
    pub data: Vec<u8>,
    pub name: [u8; 32],
    pub name_len: usize,
}

impl GemBufferObject {
    /// Allocates a zero-filled CPU-domain buffer holding one reference.
    pub fn new(handle: u32, size: usize) -> Self {
        GemBufferObject {
            handle,
            size,
            domain: MemoryDomain::Cpu,
            cache_mode: CacheMode::Cached,
            ref_count: AtomicU32::new(1),
            pinned: false,
            data: vec![0u8; size],
            name: [0u8; 32],
            name_len: 0,
        }
    }

    /// Stores a debug name, truncated to the 32-byte field.
    pub(crate) fn set_name(&mut self, name: &[u8]) {
        let copied = name.len().min(self.name.len());
        self.name[..copied].copy_from_slice(&name[..copied]);
        self.name_len = copied;
    }

    /// Current reference count.
    pub(crate) fn ref_count(&self) -> u32 {
        self.ref_count.load(Ordering::Acquire)
    }

    /// Increments the reference count and returns the new value.
    pub(crate) fn add_ref(&self) -> u32 {
        self.ref_count.fetch_add(1, Ordering::AcqRel) + 1
    }

    /// Decrements the reference count and returns the new value.
    /// Callers must balance each `release` with a prior reference.
    pub(crate) fn release(&self) -> u32 {
        self.ref_count.fetch_sub(1, Ordering::AcqRel) - 1
    }
}
1199
/// Tracks all GEM buffers plus a byte budget (`max_allocation`) that
/// `create_buffer` enforces against `total_allocated`.
pub struct GemManager {
    pub buffers: Vec<GemBufferObject>,
    next_handle: u32,
    pub total_allocated: usize,
    pub max_allocation: usize,
}
1209
1210impl GemManager {
1211 pub fn new(max_allocation: usize) -> Self {
1212 Self {
1213 buffers: Vec::new(),
1214 next_handle: 1,
1215 total_allocated: 0,
1216 max_allocation,
1217 }
1218 }
1219
1220 pub(crate) fn create_buffer(&mut self, size: usize) -> Option<u32> {
1222 if size == 0 || self.total_allocated + size > self.max_allocation {
1223 return None;
1224 }
1225
1226 let handle = self.next_handle;
1227 self.next_handle += 1;
1228 let bo = GemBufferObject::new(handle, size);
1229 self.total_allocated += size;
1230 self.buffers.push(bo);
1231 Some(handle)
1232 }
1233
1234 pub(crate) fn destroy_buffer(&mut self, handle: u32) -> bool {
1236 if let Some(pos) = self.buffers.iter().position(|b| b.handle == handle) {
1237 let remaining = self.buffers[pos].release();
1238 if remaining == 0 {
1239 let bo = self.buffers.remove(pos);
1240 self.total_allocated = self.total_allocated.saturating_sub(bo.size);
1241 return true;
1242 }
1243 }
1244 false
1245 }
1246
1247 pub(crate) fn find_buffer(&self, handle: u32) -> Option<&GemBufferObject> {
1249 self.buffers.iter().find(|b| b.handle == handle)
1250 }
1251
1252 pub(crate) fn find_buffer_mut(&mut self, handle: u32) -> Option<&mut GemBufferObject> {
1254 self.buffers.iter_mut().find(|b| b.handle == handle)
1255 }
1256
1257 pub(crate) fn pin_buffer(&mut self, handle: u32) -> bool {
1259 if let Some(bo) = self.find_buffer_mut(handle) {
1260 bo.pinned = true;
1261 true
1262 } else {
1263 false
1264 }
1265 }
1266
1267 pub(crate) fn unpin_buffer(&mut self, handle: u32) -> bool {
1269 if let Some(bo) = self.find_buffer_mut(handle) {
1270 bo.pinned = false;
1271 true
1272 } else {
1273 false
1274 }
1275 }
1276
1277 pub(crate) fn set_domain(&mut self, handle: u32, domain: MemoryDomain) -> bool {
1279 if let Some(bo) = self.find_buffer_mut(handle) {
1280 bo.domain = domain;
1281 true
1282 } else {
1283 false
1284 }
1285 }
1286
1287 pub(crate) fn set_cache_mode(&mut self, handle: u32, mode: CacheMode) -> bool {
1289 if let Some(bo) = self.find_buffer_mut(handle) {
1290 bo.cache_mode = mode;
1291 true
1292 } else {
1293 false
1294 }
1295 }
1296
1297 pub(crate) fn add_ref(&self, handle: u32) -> bool {
1299 if let Some(bo) = self.buffers.iter().find(|b| b.handle == handle) {
1300 bo.add_ref();
1301 true
1302 } else {
1303 false
1304 }
1305 }
1306
1307 pub(crate) fn buffer_count(&self) -> usize {
1309 self.buffers.len()
1310 }
1311}
1312
1313impl Default for GemManager {
1314 fn default() -> Self {
1315 Self::new(256 * 1024 * 1024) }
1317}
1318
/// Physical (or virtual) display connector type.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ConnectorType {
    Hdmi,
    DisplayPort,
    Vga,
    // Embedded DisplayPort (internal panels).
    Edp,
    Dvi,
    Lvds,
    Virtual,
}
1334
/// Hotplug/probe state of a connector.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ConnectorStatus {
    Connected,
    Disconnected,
    Unknown,
}
1342
/// A display modeline: visible resolution plus sync/total timing.
///
/// `vrefresh_mhz` holds the nominal refresh rate in millihertz
/// (60000 == 60 Hz), matching `calculated_refresh_mhz`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DisplayMode {
    pub hdisplay: u32,
    pub vdisplay: u32,
    pub clock_khz: u32,
    pub hsync_start: u32,
    pub hsync_end: u32,
    pub htotal: u32,
    pub vsync_start: u32,
    pub vsync_end: u32,
    pub vtotal: u32,
    pub vrefresh_mhz: u32,
}

impl DisplayMode {
    /// 1920x1080 @ 60 Hz timing.
    pub(crate) fn mode_1080p60() -> Self {
        Self {
            hdisplay: 1920,
            vdisplay: 1080,
            clock_khz: 148500,
            hsync_start: 2008,
            hsync_end: 2052,
            htotal: 2200,
            vsync_start: 1084,
            vsync_end: 1089,
            vtotal: 1125,
            vrefresh_mhz: 60000,
        }
    }

    /// 1280x720 @ 60 Hz timing.
    pub(crate) fn mode_720p60() -> Self {
        Self {
            hdisplay: 1280,
            vdisplay: 720,
            clock_khz: 74250,
            hsync_start: 1390,
            hsync_end: 1430,
            htotal: 1650,
            vsync_start: 725,
            vsync_end: 730,
            vtotal: 750,
            vrefresh_mhz: 60000,
        }
    }

    /// 1280x800 (WXGA) @ 60 Hz timing.
    pub(crate) fn mode_wxga60() -> Self {
        Self {
            hdisplay: 1280,
            vdisplay: 800,
            clock_khz: 83500,
            hsync_start: 1352,
            hsync_end: 1480,
            htotal: 1680,
            vsync_start: 803,
            vsync_end: 809,
            vtotal: 831,
            vrefresh_mhz: 60000,
        }
    }

    /// Sanity-checks the timings; `true` when internally consistent:
    /// nonzero sizes and clock, ordered sync windows inside the totals,
    /// visible area inside the totals, and at most 8K (7680x4320).
    pub(crate) fn validate(&self) -> bool {
        let nonzero = self.hdisplay != 0
            && self.vdisplay != 0
            && self.htotal != 0
            && self.vtotal != 0
            && self.clock_khz != 0;
        let h_ok = self.hsync_start <= self.hsync_end
            && self.hsync_end <= self.htotal
            && self.hdisplay <= self.htotal;
        let v_ok = self.vsync_start <= self.vsync_end
            && self.vsync_end <= self.vtotal
            && self.vdisplay <= self.vtotal;
        let bounded = self.hdisplay <= 7680 && self.vdisplay <= 4320;
        nonzero && h_ok && v_ok && bounded
    }

    /// Refresh rate in millihertz derived from the pixel clock:
    /// `clock_khz * 1_000_000 / (htotal * vtotal)`. Returns 0 for
    /// degenerate timings.
    pub(crate) fn calculated_refresh_mhz(&self) -> u32 {
        if self.htotal == 0 || self.vtotal == 0 {
            return 0;
        }
        let pixels_per_frame = u64::from(self.htotal)
            .checked_mul(u64::from(self.vtotal))
            .unwrap_or(1);
        let scaled_clock = u64::from(self.clock_khz).checked_mul(1_000_000).unwrap_or(0);
        (scaled_clock / pixels_per_frame) as u32
    }
}
1467
/// A scanout framebuffer description backed by a GEM buffer.
#[derive(Debug, Clone)]
pub struct DrmFramebuffer {
    pub fb_id: u32,
    pub width: u32,
    pub height: u32,
    // Bytes (presumably) per scanline — derived from format.bpp() in `new`;
    // confirm units against PixelFormat::bpp().
    pub pitch: u32,
    pub offset: u32,
    pub format: PixelFormat,
    pub gem_handle: u32,
}
1485
1486impl DrmFramebuffer {
1487 pub fn new(fb_id: u32, width: u32, height: u32, format: PixelFormat, gem_handle: u32) -> Self {
1488 let pitch = width * format.bpp();
1489 Self {
1490 fb_id,
1491 width,
1492 height,
1493 pitch,
1494 offset: 0,
1495 format,
1496 gem_handle,
1497 }
1498 }
1499
1500 pub(crate) fn size_bytes(&self) -> usize {
1502 (self.pitch as usize)
1503 .checked_mul(self.height as usize)
1504 .unwrap_or(0)
1505 }
1506}
1507
/// Signal encoder type feeding a connector.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum EncoderType {
    None,
    Dac,
    Tmds,
    Lvds,
    DpMst,
    Virtual,
}
1518
/// An encoder instance and its current CRTC binding.
#[derive(Debug, Clone)]
pub struct DrmEncoder {
    pub encoder_id: u32,
    pub encoder_type: EncoderType,
    // CRTC currently driving this encoder, if any.
    pub crtc_id: Option<u32>,
    // Bitmask of CRTC indices this encoder can be driven by.
    pub possible_crtcs: u32,
}
1529
/// A display connector, its probe status, and the modes it advertises.
#[derive(Debug, Clone)]
pub struct DrmConnector {
    pub connector_id: u32,
    pub connector_type: ConnectorType,
    pub status: ConnectorStatus,
    // Encoder currently bound to this connector, if any.
    pub encoder_id: Option<u32>,
    pub modes: Vec<DisplayMode>,
}
1541
1542impl DrmConnector {
1543 pub fn new(connector_id: u32, connector_type: ConnectorType) -> Self {
1544 Self {
1545 connector_id,
1546 connector_type,
1547 status: ConnectorStatus::Disconnected,
1548 encoder_id: None,
1549 modes: Vec::new(),
1550 }
1551 }
1552}
1553
/// A CRTC (display pipe): current framebuffer, mode, and gamma table size.
#[derive(Debug, Clone)]
pub struct DrmCrtc {
    pub crtc_id: u32,
    pub fb_id: Option<u32>,
    pub mode: Option<DisplayMode>,
    pub active: bool,
    pub gamma_size: u32,
}
1567
1568impl DrmCrtc {
1569 pub fn new(crtc_id: u32) -> Self {
1570 Self {
1571 crtc_id,
1572 fb_id: None,
1573 mode: None,
1574 active: false,
1575 gamma_size: 256,
1576 }
1577 }
1578}
1579
/// Parameters for a single atomic modeset (see `KmsManager::atomic_commit`).
#[derive(Debug, Clone)]
pub struct AtomicCommit {
    /// CRTC to program.
    pub crtc_id: u32,
    /// Connector that must exist for the commit to validate.
    pub connector_id: u32,
    /// Framebuffer to bind for scanout.
    pub fb_id: u32,
    /// Mode to program on the CRTC.
    pub mode: DisplayMode,
    /// When true, validate only — no state is changed.
    pub test_only: bool,
}
1594
/// Kernel mode-setting state: all CRTC / connector / encoder /
/// framebuffer objects plus a monotonically increasing id allocator
/// for each object class (ids start at 1).
pub struct KmsManager {
    pub crtcs: Vec<DrmCrtc>,
    pub connectors: Vec<DrmConnector>,
    pub encoders: Vec<DrmEncoder>,
    pub framebuffers: Vec<DrmFramebuffer>,
    // Next ids to hand out; never reused after destruction.
    next_crtc_id: u32,
    next_connector_id: u32,
    next_encoder_id: u32,
    next_fb_id: u32,
}
1606
1607impl KmsManager {
1608 pub fn new() -> Self {
1609 Self {
1610 crtcs: Vec::new(),
1611 connectors: Vec::new(),
1612 encoders: Vec::new(),
1613 framebuffers: Vec::new(),
1614 next_crtc_id: 1,
1615 next_connector_id: 1,
1616 next_encoder_id: 1,
1617 next_fb_id: 1,
1618 }
1619 }
1620
1621 pub(crate) fn add_crtc(&mut self) -> u32 {
1623 let id = self.next_crtc_id;
1624 self.next_crtc_id += 1;
1625 self.crtcs.push(DrmCrtc::new(id));
1626 id
1627 }
1628
1629 pub(crate) fn add_connector(&mut self, connector_type: ConnectorType) -> u32 {
1631 let id = self.next_connector_id;
1632 self.next_connector_id += 1;
1633 self.connectors.push(DrmConnector::new(id, connector_type));
1634 id
1635 }
1636
1637 pub(crate) fn add_encoder(&mut self, encoder_type: EncoderType, possible_crtcs: u32) -> u32 {
1639 let id = self.next_encoder_id;
1640 self.next_encoder_id += 1;
1641 self.encoders.push(DrmEncoder {
1642 encoder_id: id,
1643 encoder_type,
1644 crtc_id: None,
1645 possible_crtcs,
1646 });
1647 id
1648 }
1649
1650 pub(crate) fn create_framebuffer(
1652 &mut self,
1653 width: u32,
1654 height: u32,
1655 format: PixelFormat,
1656 gem_handle: u32,
1657 ) -> u32 {
1658 let id = self.next_fb_id;
1659 self.next_fb_id += 1;
1660 self.framebuffers
1661 .push(DrmFramebuffer::new(id, width, height, format, gem_handle));
1662 id
1663 }
1664
1665 pub(crate) fn destroy_framebuffer(&mut self, fb_id: u32) -> bool {
1667 if let Some(pos) = self.framebuffers.iter().position(|f| f.fb_id == fb_id) {
1668 self.framebuffers.remove(pos);
1669 true
1670 } else {
1671 false
1672 }
1673 }
1674
1675 pub(crate) fn bind_encoder(&mut self, encoder_id: u32, crtc_id: u32) -> bool {
1677 if !self.crtcs.iter().any(|c| c.crtc_id == crtc_id) {
1679 return false;
1680 }
1681 if let Some(enc) = self
1682 .encoders
1683 .iter_mut()
1684 .find(|e| e.encoder_id == encoder_id)
1685 {
1686 let crtc_idx = self
1688 .crtcs
1689 .iter()
1690 .position(|c| c.crtc_id == crtc_id)
1691 .unwrap_or(0);
1692 if (enc.possible_crtcs >> crtc_idx) & 1 == 0 {
1693 return false;
1694 }
1695 enc.crtc_id = Some(crtc_id);
1696 true
1697 } else {
1698 false
1699 }
1700 }
1701
1702 pub(crate) fn connect_encoder(&mut self, connector_id: u32, encoder_id: u32) -> bool {
1704 if !self.encoders.iter().any(|e| e.encoder_id == encoder_id) {
1705 return false;
1706 }
1707 if let Some(conn) = self
1708 .connectors
1709 .iter_mut()
1710 .find(|c| c.connector_id == connector_id)
1711 {
1712 conn.encoder_id = Some(encoder_id);
1713 true
1714 } else {
1715 false
1716 }
1717 }
1718
1719 pub(crate) fn set_connector_status(
1721 &mut self,
1722 connector_id: u32,
1723 status: ConnectorStatus,
1724 modes: Vec<DisplayMode>,
1725 ) -> bool {
1726 if let Some(conn) = self
1727 .connectors
1728 .iter_mut()
1729 .find(|c| c.connector_id == connector_id)
1730 {
1731 conn.status = status;
1732 conn.modes = modes;
1733 true
1734 } else {
1735 false
1736 }
1737 }
1738
1739 pub(crate) fn atomic_commit(&mut self, commit: &AtomicCommit) -> bool {
1741 if !commit.mode.validate() {
1743 return false;
1744 }
1745
1746 if !self.framebuffers.iter().any(|f| f.fb_id == commit.fb_id) {
1748 return false;
1749 }
1750
1751 if !self
1753 .connectors
1754 .iter()
1755 .any(|c| c.connector_id == commit.connector_id)
1756 {
1757 return false;
1758 }
1759
1760 if commit.test_only {
1761 return true; }
1763
1764 if let Some(crtc) = self.crtcs.iter_mut().find(|c| c.crtc_id == commit.crtc_id) {
1766 crtc.fb_id = Some(commit.fb_id);
1767 crtc.mode = Some(commit.mode);
1768 crtc.active = true;
1769 true
1770 } else {
1771 false
1772 }
1773 }
1774
1775 pub(crate) fn find_framebuffer(&self, fb_id: u32) -> Option<&DrmFramebuffer> {
1777 self.framebuffers.iter().find(|f| f.fb_id == fb_id)
1778 }
1779
1780 pub(crate) fn find_crtc(&self, crtc_id: u32) -> Option<&DrmCrtc> {
1782 self.crtcs.iter().find(|c| c.crtc_id == crtc_id)
1783 }
1784}
1785
1786impl Default for KmsManager {
1787 fn default() -> Self {
1788 Self::new()
1789 }
1790}
1791
/// Per-CRTC page-flip state machine.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FlipState {
    /// No flip outstanding.
    Idle,
    /// A flip is queued and waiting for the next vblank.
    Pending,
    /// Transient: set while a vblank completes a flip, then returned
    /// to `Idle` before `handle_vblank` returns.
    Completed,
}
1806
/// Event emitted when a vblank completes a pending page flip.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct VblankEvent {
    /// Vblank counter value at the moment the flip completed.
    pub sequence: u64,
    /// Timestamp of the vblank, in nanoseconds.
    pub timestamp_ns: u64,
    /// CRTC the flip completed on.
    pub crtc_id: u32,
}
1817
/// A request to present `fb_id` on `crtc_id` at the next vblank.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PageFlipRequest {
    /// Target CRTC (must be registered with `PageFlipManager`).
    pub crtc_id: u32,
    /// Framebuffer to promote to the front buffer.
    pub fb_id: u32,
    /// Caller-supplied cookie; carried but not interpreted here.
    pub user_data: u64,
}
1828
1829pub struct PageFlipManager {
1831 pub front_buffers: Vec<(u32, u32)>, pub back_buffers: Vec<(u32, u32)>,
1835 pub pending_flips: Vec<PageFlipRequest>,
1837 pub flip_states: Vec<(u32, FlipState)>,
1839 pub vblank_counters: Vec<(u32, AtomicU64)>,
1841 pub vblank_timestamps: Vec<(u32, AtomicU64)>,
1843 pub vblank_events: Vec<VblankEvent>,
1845}
1846
impl PageFlipManager {
    /// Creates a manager with no CRTCs registered.
    pub fn new() -> Self {
        Self {
            front_buffers: Vec::new(),
            back_buffers: Vec::new(),
            pending_flips: Vec::new(),
            flip_states: Vec::new(),
            vblank_counters: Vec::new(),
            vblank_timestamps: Vec::new(),
            vblank_events: Vec::new(),
        }
    }

    /// Registers a CRTC for flip tracking with `initial_fb` as its
    /// front buffer. Idempotent: re-registering keeps existing state.
    pub(crate) fn register_crtc(&mut self, crtc_id: u32, initial_fb: u32) {
        if self.front_buffers.iter().any(|(id, _)| *id == crtc_id) {
            return;
        }
        self.front_buffers.push((crtc_id, initial_fb));
        self.back_buffers.push((crtc_id, 0));
        self.flip_states.push((crtc_id, FlipState::Idle));
        self.vblank_counters.push((crtc_id, AtomicU64::new(0)));
        self.vblank_timestamps.push((crtc_id, AtomicU64::new(0)));
    }

    /// Queues a page flip for the request's CRTC.
    ///
    /// Fails if the CRTC was never registered or already has a flip
    /// pending (at most one outstanding flip per CRTC). On success the
    /// requested fb becomes the back buffer and the flip waits for the
    /// next vblank on that CRTC.
    pub(crate) fn request_flip(&mut self, request: PageFlipRequest) -> bool {
        let state_idx = self
            .flip_states
            .iter()
            .position(|(id, _)| *id == request.crtc_id);
        let state_idx = match state_idx {
            Some(i) => i,
            None => return false,
        };

        // Reject double-flips: one pending flip per CRTC.
        if self.flip_states[state_idx].1 == FlipState::Pending {
            return false;
        }

        if let Some(back) = self
            .back_buffers
            .iter_mut()
            .find(|(id, _)| *id == request.crtc_id)
        {
            back.1 = request.fb_id;
        }
        self.flip_states[state_idx].1 = FlipState::Pending;
        self.pending_flips.push(request);
        true
    }

    /// Handles a vblank on `crtc_id` at `timestamp_ns`.
    ///
    /// Bumps the vblank counter, records the timestamp, completes any
    /// pending flip for this CRTC (promoting back buffer to front and
    /// emitting a `VblankEvent`), then settles the flip state to Idle.
    pub(crate) fn handle_vblank(&mut self, crtc_id: u32, timestamp_ns: u64) {
        if let Some((_, counter)) = self.vblank_counters.iter().find(|(id, _)| *id == crtc_id) {
            counter.fetch_add(1, Ordering::Relaxed);
        }

        if let Some((_, ts)) = self.vblank_timestamps.iter().find(|(id, _)| *id == crtc_id) {
            ts.store(timestamp_ns, Ordering::Relaxed);
        }

        // Collect completed indices first: pending_flips cannot be
        // mutated while it is being iterated.
        let mut completed_indices = Vec::new();
        for (i, flip) in self.pending_flips.iter().enumerate() {
            if flip.crtc_id == crtc_id {
                // Promote the staged back buffer to the front.
                if let Some(front) = self.front_buffers.iter_mut().find(|(id, _)| *id == crtc_id) {
                    if let Some(back) = self.back_buffers.iter().find(|(id, _)| *id == crtc_id) {
                        front.1 = back.1;
                    }
                }

                if let Some(state) = self.flip_states.iter_mut().find(|(id, _)| *id == crtc_id) {
                    state.1 = FlipState::Completed;
                }

                // Sequence is the counter value *after* this vblank's
                // increment above.
                let seq = self
                    .vblank_counters
                    .iter()
                    .find(|(id, _)| *id == crtc_id)
                    .map(|(_, c)| c.load(Ordering::Relaxed))
                    .unwrap_or(0);

                self.vblank_events.push(VblankEvent {
                    sequence: seq,
                    timestamp_ns,
                    crtc_id,
                });

                completed_indices.push(i);
            }
        }

        // Remove back-to-front so earlier indices remain valid.
        for &idx in completed_indices.iter().rev() {
            if idx < self.pending_flips.len() {
                self.pending_flips.remove(idx);
            }
        }

        // Completed is transient within this call; settle to Idle.
        if let Some(state) = self.flip_states.iter_mut().find(|(id, _)| *id == crtc_id) {
            if state.1 == FlipState::Completed {
                state.1 = FlipState::Idle;
            }
        }
    }

    /// Current front (scanned-out) framebuffer for a CRTC.
    pub(crate) fn front_buffer(&self, crtc_id: u32) -> Option<u32> {
        self.front_buffers
            .iter()
            .find(|(id, _)| *id == crtc_id)
            .map(|(_, fb)| *fb)
    }

    /// Vblank counter for a CRTC, or None if unregistered.
    pub(crate) fn vblank_count(&self, crtc_id: u32) -> Option<u64> {
        self.vblank_counters
            .iter()
            .find(|(id, _)| *id == crtc_id)
            .map(|(_, c)| c.load(Ordering::Relaxed))
    }

    /// Timestamp (ns) of the last vblank on a CRTC, or None if unregistered.
    pub(crate) fn vblank_timestamp(&self, crtc_id: u32) -> Option<u64> {
        self.vblank_timestamps
            .iter()
            .find(|(id, _)| *id == crtc_id)
            .map(|(_, ts)| ts.load(Ordering::Relaxed))
    }

    /// Removes and returns all accumulated vblank events.
    pub(crate) fn drain_events(&mut self) -> Vec<VblankEvent> {
        core::mem::take(&mut self.vblank_events)
    }

    /// True while a flip is queued but not yet completed on this CRTC.
    pub(crate) fn is_flip_pending(&self, crtc_id: u32) -> bool {
        self.flip_states
            .iter()
            .find(|(id, _)| *id == crtc_id)
            .map(|(_, s)| *s == FlipState::Pending)
            .unwrap_or(false)
    }
}
2002
2003impl Default for PageFlipManager {
2004 fn default() -> Self {
2005 Self::new()
2006 }
2007}
2008
/// Maximum supported cursor edge length in pixels.
/// NOTE(review): not referenced in this chunk — presumably enforced by
/// callers or hardware setup elsewhere; confirm before removing.
const MAX_CURSOR_SIZE: u32 = 64;
2015
/// Supported cursor image formats (square ARGB images).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CursorFormat {
    /// 32x32 ARGB cursor.
    Argb32x32,
    /// 64x64 ARGB cursor.
    Argb64x64,
}
2024
2025impl CursorFormat {
2026 pub(crate) fn width(&self) -> u32 {
2027 match self {
2028 CursorFormat::Argb32x32 => 32,
2029 CursorFormat::Argb64x64 => 64,
2030 }
2031 }
2032
2033 pub(crate) fn height(&self) -> u32 {
2034 self.width() }
2036
2037 pub(crate) fn pixel_count(&self) -> usize {
2038 let w = self.width() as usize;
2039 w.checked_mul(w).unwrap_or(0)
2040 }
2041}
2042
/// Software model of a hardware cursor plane.
pub struct HardwareCursor {
    /// ARGB pixel data, row-major, `format.pixel_count()` entries.
    pub image: Vec<u32>,
    /// Cursor image dimensions/format.
    pub format: CursorFormat,
    /// Screen X of the hotspot (may be negative while partially off-screen).
    pub x: i32,
    /// Screen Y of the hotspot (may be negative while partially off-screen).
    pub y: i32,
    /// Hotspot offset from the image's top-left, clamped to the image.
    pub hotspot_x: u32,
    /// Hotspot offset from the image's top-left, clamped to the image.
    pub hotspot_y: u32,
    /// Whether the cursor is drawn by `composite_onto`.
    pub enabled: bool,
    /// Set on every state change; cleared by whichever consumer uploads
    /// the cursor state (not visible in this chunk).
    pub dirty: bool,
}
2062
2063impl HardwareCursor {
2064 pub fn new(format: CursorFormat) -> Self {
2065 let pixel_count = format.pixel_count();
2066 Self {
2067 image: vec![0u32; pixel_count],
2068 format,
2069 x: 0,
2070 y: 0,
2071 hotspot_x: 0,
2072 hotspot_y: 0,
2073 enabled: false,
2074 dirty: true,
2075 }
2076 }
2077
2078 pub(crate) fn set_image(&mut self, pixels: &[u32]) -> bool {
2080 let expected = self.format.pixel_count();
2081 if pixels.len() != expected {
2082 return false;
2083 }
2084 self.image.copy_from_slice(pixels);
2085 self.dirty = true;
2086 true
2087 }
2088
2089 pub(crate) fn set_position(&mut self, x: i32, y: i32) {
2091 self.x = x;
2092 self.y = y;
2093 self.dirty = true;
2094 }
2095
2096 pub(crate) fn set_hotspot(&mut self, x: u32, y: u32) {
2098 self.hotspot_x = x.min(self.format.width().saturating_sub(1));
2099 self.hotspot_y = y.min(self.format.height().saturating_sub(1));
2100 self.dirty = true;
2101 }
2102
2103 pub(crate) fn enable(&mut self) {
2105 self.enabled = true;
2106 self.dirty = true;
2107 }
2108
2109 pub(crate) fn disable(&mut self) {
2111 self.enabled = false;
2112 self.dirty = true;
2113 }
2114
2115 pub(crate) fn composite_onto(&self, target: &mut [u32], target_width: u32, target_height: u32) {
2120 if !self.enabled {
2121 return;
2122 }
2123
2124 let cursor_w = self.format.width() as i32;
2125 let cursor_h = self.format.height() as i32;
2126 let draw_x = self.x - self.hotspot_x as i32;
2127 let draw_y = self.y - self.hotspot_y as i32;
2128
2129 let mut cy = 0i32;
2130 while cy < cursor_h {
2131 let screen_y = draw_y + cy;
2132 if screen_y >= 0 && screen_y < target_height as i32 {
2133 let mut cx = 0i32;
2134 while cx < cursor_w {
2135 let screen_x = draw_x + cx;
2136 if screen_x >= 0 && screen_x < target_width as i32 {
2137 let cursor_idx = cy as usize * cursor_w as usize + cx as usize;
2138 let target_idx =
2139 screen_y as usize * target_width as usize + screen_x as usize;
2140
2141 if cursor_idx < self.image.len() && target_idx < target.len() {
2142 let src = self.image[cursor_idx];
2143 let sa = (src >> 24) & 0xFF;
2144
2145 if sa == 255 {
2146 target[target_idx] = src;
2147 } else if sa > 0 {
2148 let dst = target[target_idx];
2149 let inv_sa = 255 - sa;
2150 let mut result = 0u32;
2151 let mut shift = 0u32;
2153 while shift <= 16 {
2154 let sc = (src >> shift) & 0xFF;
2155 let dc = (dst >> shift) & 0xFF;
2156 let blended = (sc * sa + dc * inv_sa + 127) / 255;
2157 result |= blended.min(255) << shift;
2158 shift += 8;
2159 }
2160 let da = (dst >> 24) & 0xFF;
2162 let out_a = (sa + (da * inv_sa + 127) / 255).min(255);
2163 result |= out_a << 24;
2164 target[target_idx] = result;
2165 }
2166 }
2167 }
2168 cx += 1;
2169 }
2170 }
2171 cy += 1;
2172 }
2173 }
2174
2175 pub(crate) fn load_default_arrow(&mut self) {
2177 let w = self.format.width() as usize;
2179 let _h = self.format.height() as usize;
2180
2181 for px in &mut self.image {
2183 *px = 0x00000000;
2184 }
2185
2186 let arrow_height = 16.min(w);
2188 let mut row = 0usize;
2189 while row < arrow_height {
2190 let row_width = row + 1;
2191 let mut col = 0usize;
2192 while col < row_width && col < w {
2193 let idx = row * w + col;
2194 if idx < self.image.len() {
2195 if col == 0 || col == row_width - 1 || row == arrow_height - 1 {
2197 self.image[idx] = 0xFF000000; } else {
2199 self.image[idx] = 0xFFFFFFFF; }
2201 }
2202 col += 1;
2203 }
2204 row += 1;
2205 }
2206
2207 self.hotspot_x = 0;
2208 self.hotspot_y = 0;
2209 self.dirty = true;
2210 }
2211}
2212
2213impl Default for HardwareCursor {
2214 fn default() -> Self {
2215 Self::new(CursorFormat::Argb32x32)
2216 }
2217}
2218
// Global singletons guarded by spin locks. Each slot is `None` until
// `init()` populates it; the `with_*` accessors below return `None`
// when called before initialization.
static VIRGL_DRIVER: Mutex<Option<VirglDriver>> = Mutex::new(None);
static GEM_MANAGER: Mutex<Option<GemManager>> = Mutex::new(None);
static KMS_MANAGER: Mutex<Option<KmsManager>> = Mutex::new(None);
static PAGE_FLIP_MANAGER: Mutex<Option<PageFlipManager>> = Mutex::new(None);
static HARDWARE_CURSOR: Mutex<Option<HardwareCursor>> = Mutex::new(None);
2228
2229pub fn init() {
2231 *VIRGL_DRIVER.lock() = Some(VirglDriver::new());
2232 *GEM_MANAGER.lock() = Some(GemManager::default());
2233 *KMS_MANAGER.lock() = Some(KmsManager::new());
2234 *PAGE_FLIP_MANAGER.lock() = Some(PageFlipManager::new());
2235 *HARDWARE_CURSOR.lock() = Some(HardwareCursor::default());
2236}
2237
2238pub fn with_virgl<R, F: FnOnce(&mut VirglDriver) -> R>(f: F) -> Option<R> {
2240 VIRGL_DRIVER.lock().as_mut().map(f)
2241}
2242
2243pub fn with_gem<R, F: FnOnce(&mut GemManager) -> R>(f: F) -> Option<R> {
2245 GEM_MANAGER.lock().as_mut().map(f)
2246}
2247
2248pub fn with_kms<R, F: FnOnce(&mut KmsManager) -> R>(f: F) -> Option<R> {
2250 KMS_MANAGER.lock().as_mut().map(f)
2251}
2252
2253pub fn with_page_flip<R, F: FnOnce(&mut PageFlipManager) -> R>(f: F) -> Option<R> {
2255 PAGE_FLIP_MANAGER.lock().as_mut().map(f)
2256}
2257
2258pub fn with_cursor<R, F: FnOnce(&mut HardwareCursor) -> R>(f: F) -> Option<R> {
2260 HARDWARE_CURSOR.lock().as_mut().map(f)
2261}
2262
#[cfg(test)]
mod tests {
    #[allow(unused_imports)]
    use alloc::vec;

    use super::*;

    // --- fixed-point math helpers ---

    #[test]
    fn test_fp_from_int_and_back() {
        assert_eq!(fp_to_int(fp_from_int(42)), 42);
        assert_eq!(fp_to_int(fp_from_int(-7)), -7);
        assert_eq!(fp_to_int(fp_from_int(0)), 0);
    }

    #[test]
    fn test_fp_mul_basic() {
        let two = fp_from_int(2);
        let three = fp_from_int(3);
        let result = fp_mul(two, three);
        assert_eq!(fp_to_int(result), 6);
    }

    #[test]
    fn test_fp_mul_fractional() {
        // 0.5 * 0.5 == 0.25 in 16.16 fixed point.
        let half = FP_ONE / 2;
        let quarter = fp_mul(half, half);
        assert_eq!(quarter, FP_ONE / 4);
    }

    #[test]
    fn test_fp_div_basic() {
        let six = fp_from_int(6);
        let three = fp_from_int(3);
        assert_eq!(fp_to_int(fp_div(six, three)), 2);
    }

    #[test]
    fn test_fp_div_by_zero() {
        // Division by zero saturates rather than panicking.
        let result = fp_div(fp_from_int(1), 0);
        assert_eq!(result, i32::MAX);
    }

    #[test]
    fn test_fp_lerp() {
        let a = fp_from_int(0);
        let b = fp_from_int(10);
        let mid = fp_lerp(a, b, FP_ONE / 2);
        assert_eq!(fp_to_int(mid), 5);
    }

    // --- virgl driver ---

    #[test]
    fn test_virgl_create_context() {
        let mut driver = VirglDriver::new();
        let ctx_id = driver.create_context(b"test_ctx");
        assert_eq!(ctx_id, 1);
        assert_eq!(driver.contexts.len(), 1);
        assert!(driver.contexts[0].active);
    }

    #[test]
    fn test_virgl_destroy_context() {
        let mut driver = VirglDriver::new();
        let ctx_id = driver.create_context(b"test");
        assert!(driver.destroy_context(ctx_id));
        assert_eq!(driver.contexts.len(), 0);
        // Destroying twice must fail.
        assert!(!driver.destroy_context(ctx_id));
    }

    #[test]
    fn test_virgl_create_resource_3d() {
        let mut driver = VirglDriver::new();
        let rid = driver.create_resource_3d(
            Virgl3dResourceType::Texture2D,
            VirglFormat::R8G8B8A8Unorm,
            256,
            256,
            1,
            1,
            0,
            1,
            0,
        );
        assert_eq!(rid, 1);
        let res = driver.find_resource(rid).unwrap();
        assert_eq!(res.width, 256);
        assert_eq!(res.resource_type, Virgl3dResourceType::Texture2D);
    }

    #[test]
    fn test_virgl_transfer_3d() {
        let mut driver = VirglDriver::new();
        let rid = driver.create_resource_3d(
            Virgl3dResourceType::Buffer,
            VirglFormat::R32Uint,
            1024,
            1,
            1,
            1,
            0,
            1,
            0,
        );
        assert!(driver.transfer_3d(
            rid,
            0,
            0,
            0,
            0,
            512,
            1,
            1,
            4096,
            0,
            TransferDirection::ToHost
        ));
        // Unknown resource id must be rejected.
        assert!(!driver.transfer_3d(999, 0, 0, 0, 0, 1, 1, 1, 0, 0, TransferDirection::ToHost));
    }

    #[test]
    fn test_virgl_fence() {
        let mut driver = VirglDriver::new();
        let ctx_id = driver.create_context(b"fence_ctx");
        let fence_id = driver.create_fence(ctx_id);
        assert_eq!(driver.is_fence_signaled(fence_id), Some(false));
        assert!(driver.signal_fence(fence_id));
        assert_eq!(driver.is_fence_signaled(fence_id), Some(true));
    }

    #[test]
    fn test_virgl_flush() {
        let mut driver = VirglDriver::new();
        driver.create_context(b"a");
        driver.create_resource_3d(
            Virgl3dResourceType::Buffer,
            VirglFormat::R32Uint,
            64,
            1,
            1,
            1,
            0,
            1,
            0,
        );
        let count = driver.flush();
        // Context creation + resource creation each queue at least one command.
        assert!(count >= 2);
        assert_eq!(driver.command_queue.len(), 0);
    }

    // --- software rasterizer ---

    #[test]
    fn test_rasterizer_clear() {
        let mut rast = SoftwareRasterizer::new(4, 4);
        rast.clear_color = 0xFFFF0000;
        rast.clear_color_buffer();
        assert_eq!(rast.color_buffer[0], 0xFFFF0000);
        assert_eq!(rast.color_buffer[15], 0xFFFF0000);
    }

    #[test]
    fn test_rasterizer_depth_test() {
        let rast = SoftwareRasterizer::new(4, 4);
        assert!(rast.depth_test_pass(5, 10));
        assert!(!rast.depth_test_pass(10, 5));
    }

    #[test]
    fn test_rasterizer_scissor() {
        let mut rast = SoftwareRasterizer::new(100, 100);
        rast.scissor = Some(ScissorRect {
            x: 10,
            y: 10,
            width: 20,
            height: 20,
        });
        assert!(rast.scissor_test(15, 15));
        assert!(!rast.scissor_test(5, 5));
        assert!(!rast.scissor_test(35, 35));
    }

    #[test]
    fn test_rasterizer_alpha_blend() {
        let mut rast = SoftwareRasterizer::new(1, 1);
        rast.blend_mode = BlendMode::Alpha;
        // Fully opaque source replaces the destination.
        let result = rast.alpha_blend(0xFFFF0000, 0xFF00FF00);
        assert_eq!(result, 0xFFFF0000);
        // Fully transparent source leaves the destination untouched.
        let result = rast.alpha_blend(0x00FF0000, 0xFF00FF00);
        assert_eq!(result, 0xFF00FF00);
    }

    #[test]
    fn test_texture_nearest_sampling() {
        let mut tex = SoftTexture::new(2, 2);
        tex.pixels[0] = 0xFFFF0000;
        tex.pixels[1] = 0xFF00FF00;
        tex.pixels[2] = 0xFF0000FF;
        tex.pixels[3] = 0xFFFFFFFF;
        tex.filter = TextureFilter::Nearest;
        let c = tex.sample(0, 0);
        assert_eq!(c, 0xFFFF0000);
    }

    #[test]
    fn test_vec4_lerp() {
        let a = Vec4::from_ints(0, 0, 0, 0);
        let b = Vec4::from_ints(10, 20, 30, 40);
        let mid = Vec4::lerp(&a, &b, FP_ONE / 2);
        assert_eq!(fp_to_int(mid.x), 5);
        assert_eq!(fp_to_int(mid.y), 10);
    }

    // --- GEM buffer manager ---

    #[test]
    fn test_gem_create_destroy() {
        let mut gem = GemManager::new(1024);
        let h = gem.create_buffer(256).unwrap();
        assert_eq!(gem.buffer_count(), 1);
        assert_eq!(gem.total_allocated, 256);
        assert!(gem.destroy_buffer(h));
        assert_eq!(gem.buffer_count(), 0);
        assert_eq!(gem.total_allocated, 0);
    }

    #[test]
    fn test_gem_over_allocation() {
        let mut gem = GemManager::new(100);
        assert!(gem.create_buffer(50).is_some());
        assert!(gem.create_buffer(50).is_some());
        // Pool exhausted: one more byte must fail.
        assert!(gem.create_buffer(1).is_none());
    }

    #[test]
    fn test_gem_pin_unpin() {
        let mut gem = GemManager::new(1024);
        let h = gem.create_buffer(64).unwrap();
        assert!(gem.pin_buffer(h));
        assert!(gem.find_buffer(h).unwrap().pinned);
        assert!(gem.unpin_buffer(h));
        assert!(!gem.find_buffer(h).unwrap().pinned);
    }

    #[test]
    fn test_gem_domain_change() {
        let mut gem = GemManager::new(1024);
        let h = gem.create_buffer(64).unwrap();
        assert_eq!(gem.find_buffer(h).unwrap().domain, MemoryDomain::Cpu);
        assert!(gem.set_domain(h, MemoryDomain::Gpu));
        assert_eq!(gem.find_buffer(h).unwrap().domain, MemoryDomain::Gpu);
    }

    #[test]
    fn test_gem_ref_counting() {
        let mut gem = GemManager::new(1024);
        let h = gem.create_buffer(64).unwrap();
        assert_eq!(gem.find_buffer(h).unwrap().ref_count(), 1);
        gem.add_ref(h);
        assert_eq!(gem.find_buffer(h).unwrap().ref_count(), 2);
        // First destroy only drops a reference; buffer stays alive.
        assert!(!gem.destroy_buffer(h));
        assert_eq!(gem.buffer_count(), 1);
        // Second destroy releases the final reference.
        assert!(gem.destroy_buffer(h));
        assert_eq!(gem.buffer_count(), 0);
    }

    // --- display modes & KMS ---

    #[test]
    fn test_display_mode_validation() {
        let mode = DisplayMode::mode_1080p60();
        assert!(mode.validate());

        let bad_mode = DisplayMode {
            hdisplay: 0,
            vdisplay: 0,
            clock_khz: 0,
            hsync_start: 0,
            hsync_end: 0,
            htotal: 0,
            vsync_start: 0,
            vsync_end: 0,
            vtotal: 0,
            vrefresh_mhz: 0,
        };
        assert!(!bad_mode.validate());
    }

    #[test]
    fn test_kms_atomic_commit() {
        let mut kms = KmsManager::new();
        let crtc_id = kms.add_crtc();
        let conn_id = kms.add_connector(ConnectorType::Hdmi);
        let fb_id = kms.create_framebuffer(1920, 1080, PixelFormat::Xrgb8888, 1);

        let commit = AtomicCommit {
            crtc_id,
            connector_id: conn_id,
            fb_id,
            mode: DisplayMode::mode_1080p60(),
            test_only: false,
        };
        assert!(kms.atomic_commit(&commit));
        let crtc = kms.find_crtc(crtc_id).unwrap();
        assert!(crtc.active);
        assert_eq!(crtc.fb_id, Some(fb_id));
    }

    #[test]
    fn test_kms_test_only_commit() {
        let mut kms = KmsManager::new();
        let crtc_id = kms.add_crtc();
        let conn_id = kms.add_connector(ConnectorType::DisplayPort);
        let fb_id = kms.create_framebuffer(1280, 720, PixelFormat::Xrgb8888, 1);

        let commit = AtomicCommit {
            crtc_id,
            connector_id: conn_id,
            fb_id,
            mode: DisplayMode::mode_720p60(),
            test_only: true,
        };
        assert!(kms.atomic_commit(&commit));
        // test_only must leave the CRTC untouched.
        let crtc = kms.find_crtc(crtc_id).unwrap();
        assert!(!crtc.active);
    }

    // --- page flipping ---

    #[test]
    fn test_page_flip_basic() {
        let mut pfm = PageFlipManager::new();
        pfm.register_crtc(1, 10);
        let req = PageFlipRequest {
            crtc_id: 1,
            fb_id: 20,
            user_data: 0,
        };
        assert!(pfm.request_flip(req));
        assert!(pfm.is_flip_pending(1));

        pfm.handle_vblank(1, 16_666_666);
        assert!(!pfm.is_flip_pending(1));
        assert_eq!(pfm.front_buffer(1), Some(20));
        assert_eq!(pfm.vblank_count(1), Some(1));
    }

    #[test]
    fn test_page_flip_reject_double() {
        let mut pfm = PageFlipManager::new();
        pfm.register_crtc(1, 10);

        let req = PageFlipRequest {
            crtc_id: 1,
            fb_id: 20,
            user_data: 0,
        };
        assert!(pfm.request_flip(req));
        // A second flip while one is pending must be rejected.
        let req2 = PageFlipRequest {
            crtc_id: 1,
            fb_id: 30,
            user_data: 0,
        };
        assert!(!pfm.request_flip(req2));
    }

    // --- hardware cursor ---

    #[test]
    fn test_cursor_set_image() {
        let mut cursor = HardwareCursor::new(CursorFormat::Argb32x32);
        let pixels = vec![0xFFFFFFFF; 32 * 32];
        assert!(cursor.set_image(&pixels));
        // Wrong-sized image must be rejected.
        let bad = vec![0u32; 10];
        assert!(!cursor.set_image(&bad));
    }

    #[test]
    fn test_cursor_composite() {
        let mut cursor = HardwareCursor::new(CursorFormat::Argb32x32);
        cursor.enable();
        cursor.set_position(0, 0);

        // Single opaque white pixel; everything else transparent.
        let mut pixels = vec![0x00000000u32; 32 * 32];
        pixels[0] = 0xFFFFFFFF;
        cursor.set_image(&pixels);

        let mut target = vec![0xFF000000u32; 64 * 64];
        cursor.composite_onto(&mut target, 64, 64);
        // Opaque pixel replaces the target; transparent neighbors don't.
        assert_eq!(target[0], 0xFFFFFFFF);
        assert_eq!(target[1], 0xFF000000);
    }

    #[test]
    fn test_cursor_default_arrow() {
        let mut cursor = HardwareCursor::new(CursorFormat::Argb32x32);
        cursor.load_default_arrow();
        // Top-left of the arrow is the black outline.
        assert_eq!(cursor.image[0], 0xFF000000);
        assert_eq!(cursor.hotspot_x, 0);
        assert_eq!(cursor.hotspot_y, 0);
    }

    #[test]
    fn test_cursor_hotspot() {
        let mut cursor = HardwareCursor::new(CursorFormat::Argb64x64);
        cursor.set_hotspot(16, 16);
        assert_eq!(cursor.hotspot_x, 16);
        assert_eq!(cursor.hotspot_y, 16);
        // Out-of-range hotspot clamps to the last pixel.
        cursor.set_hotspot(100, 100);
        assert_eq!(cursor.hotspot_x, 63);
        assert_eq!(cursor.hotspot_y, 63);
    }

    #[test]
    fn test_display_mode_refresh_calc() {
        let mode = DisplayMode::mode_1080p60();
        let refresh = mode.calculated_refresh_mhz();
        assert_eq!(refresh, 60000);
    }

    #[test]
    fn test_framebuffer_size() {
        let fb = DrmFramebuffer::new(1, 1920, 1080, PixelFormat::Xrgb8888, 1);
        assert_eq!(fb.pitch, 1920 * 4);
        assert_eq!(fb.size_bytes(), 1920 * 4 * 1080);
    }

    #[test]
    fn test_pixel_format_bpp() {
        assert_eq!(PixelFormat::Xrgb8888.bpp(), 4);
        assert_eq!(PixelFormat::Rgb565.bpp(), 2);
        assert_eq!(PixelFormat::Argb8888.bpp(), 4);
    }
}