1#![allow(unexpected_cfgs)]
2#![allow(dead_code)]
9
10#[cfg(feature = "alloc")]
11use alloc::collections::BTreeMap;
12#[cfg(feature = "alloc")]
13use alloc::collections::VecDeque;
14#[cfg(feature = "alloc")]
15use alloc::vec::Vec;
16
/// Default upper bound on the number of messages a channel may buffer.
const MAX_CHANNEL_CAPACITY: usize = 256;

/// Capability bit: holder may send on the channel.
const RIGHT_SEND: u32 = 1 << 0;
/// Capability bit: holder may receive from the channel.
const RIGHT_RECV: u32 = 1 << 1;
23
/// Kind of message carried through the IPC model.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MessageType {
    /// Small-payload message.
    Small = 0,
    /// Large-payload message.
    Large = 1,
    /// Asynchronous notification.
    Notification = 2,
    /// Message that transfers a capability.
    CapabilityTransfer = 3,
}
36
/// A single message traveling through an [`IpcChannelModel`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct IpcMessage {
    /// Monotonic per-channel sequence number assigned at send time.
    pub sequence: u64,
    /// Payload, modeled as a single 64-bit word.
    pub payload: u64,
    /// Kind of message.
    pub msg_type: MessageType,
    /// Sender identifier supplied by the caller of `send`.
    pub sender: u64,
    /// Id of the channel that accepted the message.
    pub channel_id: u64,
}
51
/// Abstract model of an IPC channel: a bounded FIFO queue of
/// [`IpcMessage`]s plus bookkeeping counters used by invariant checks.
#[derive(Debug, Clone)]
pub struct IpcChannelModel {
    /// Unique channel identifier; stamped into every queued message.
    pub id: u64,
    /// Maximum number of messages that may be queued at once.
    pub capacity: usize,
    /// Pending messages; front is the oldest (FIFO order).
    #[cfg(feature = "alloc")]
    pub messages: VecDeque<IpcMessage>,
    /// Number of sender endpoints attached (bookkeeping only in this model).
    pub sender_count: u32,
    /// Number of receiver endpoints attached (bookkeeping only in this model).
    pub receiver_count: u32,
    /// Total messages ever accepted by `send`.
    pub total_sent: u64,
    /// Total messages ever returned by `receive`.
    pub total_received: u64,
    /// Sequence number that will be assigned to the next sent message.
    next_sequence: u64,
}
73
#[cfg(feature = "alloc")]
impl Default for IpcChannelModel {
    /// Returns an idle channel: id 0, the maximum capacity, an empty queue,
    /// and no traffic recorded.
    ///
    /// Unlike [`IpcChannelModel::new`], no sender or receiver endpoints are
    /// counted as attached.
    fn default() -> Self {
        IpcChannelModel {
            id: 0,
            capacity: MAX_CHANNEL_CAPACITY,
            messages: VecDeque::new(),
            sender_count: 0,
            receiver_count: 0,
            total_sent: 0,
            total_received: 0,
            next_sequence: 0,
        }
    }
}
89
/// Error and invariant-violation conditions reported by the IPC model.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum IpcModelError {
    /// The channel queue is at capacity; the message was not enqueued.
    ChannelFull,
    /// No message is pending on the channel.
    ChannelEmpty,
    /// Caller lacks the `RIGHT_SEND` capability bit.
    NoSendCapability,
    /// Caller lacks the `RIGHT_RECV` capability bit.
    NoRecvCapability,
    /// The referenced channel does not exist or is not usable.
    InvalidChannel,
    /// Queued sequence numbers are not strictly increasing.
    FifoViolation,
    /// Counters and queue length disagree: a message went missing.
    MessageLoss,
    /// The queue exceeded its declared capacity.
    BufferOverflow,
    /// The wait-for graph contains a cycle.
    DeadlockDetected,
    /// A message's type did not match expectations.
    TypeMismatch,
    /// A message carries a foreign channel id.
    IsolationBreach,
    /// Two shared-memory regions overlap.
    RegionOverlap,
}
118
#[cfg(feature = "alloc")]
impl IpcChannelModel {
    /// Creates a channel with the given `id` and message `capacity`,
    /// pre-attaching one sender and one receiver endpoint.
    pub fn new(id: u64, capacity: usize) -> Self {
        Self {
            id,
            capacity,
            messages: VecDeque::new(),
            sender_count: 1,
            receiver_count: 1,
            total_sent: 0,
            total_received: 0,
            next_sequence: 0,
        }
    }

    /// Enqueues a message and returns its sequence number.
    ///
    /// Fails with [`IpcModelError::NoSendCapability`] when `rights` lacks
    /// the send bit, and with [`IpcModelError::ChannelFull`] when the queue
    /// is at capacity. The send counter and sequence advance only on success.
    pub fn send(
        &mut self,
        payload: u64,
        msg_type: MessageType,
        sender: u64,
        rights: u32,
    ) -> Result<u64, IpcModelError> {
        // Capability check first: a rightless caller must not observe queue state.
        if rights & RIGHT_SEND == 0 {
            return Err(IpcModelError::NoSendCapability);
        }
        if self.messages.len() >= self.capacity {
            return Err(IpcModelError::ChannelFull);
        }

        let sequence = self.next_sequence;
        self.messages.push_back(IpcMessage {
            sequence,
            payload,
            msg_type,
            sender,
            // Stamp the channel id so isolation checks can detect strays.
            channel_id: self.id,
        });
        self.next_sequence += 1;
        self.total_sent += 1;
        Ok(sequence)
    }

    /// Dequeues the oldest pending message.
    ///
    /// Fails with [`IpcModelError::NoRecvCapability`] when `rights` lacks
    /// the receive bit, and with [`IpcModelError::ChannelEmpty`] when no
    /// message is queued.
    pub fn receive(&mut self, rights: u32) -> Result<IpcMessage, IpcModelError> {
        if rights & RIGHT_RECV == 0 {
            return Err(IpcModelError::NoRecvCapability);
        }

        let msg = self
            .messages
            .pop_front()
            .ok_or(IpcModelError::ChannelEmpty)?;
        self.total_received += 1;
        Ok(msg)
    }

    /// Number of messages currently queued.
    pub fn pending_count(&self) -> usize {
        self.messages.len()
    }
}
190
/// Stateless namespace for invariant checks over [`IpcChannelModel`] and [`WaitGraph`].
pub struct IpcInvariantChecker;
193
#[cfg(feature = "alloc")]
impl IpcInvariantChecker {
    /// Verifies queued sequence numbers are strictly increasing from front
    /// to back (FIFO order preserved).
    ///
    /// Returns [`IpcModelError::FifoViolation`] on the first out-of-order pair.
    pub fn verify_fifo_ordering(channel: &IpcChannelModel) -> Result<(), IpcModelError> {
        // Pairwise scan of adjacent messages; avoids collecting the whole
        // deque into a temporary Vec just to call `.windows(2)`.
        let mut iter = channel.messages.iter();
        if let Some(first) = iter.next() {
            let mut prev_seq = first.sequence;
            for msg in iter {
                if prev_seq >= msg.sequence {
                    return Err(IpcModelError::FifoViolation);
                }
                prev_seq = msg.sequence;
            }
        }
        Ok(())
    }

    /// Verifies conservation: sent - received must equal the queue length.
    pub fn verify_no_message_loss(channel: &IpcChannelModel) -> Result<(), IpcModelError> {
        let expected_pending = channel.total_sent.saturating_sub(channel.total_received);
        if expected_pending != channel.messages.len() as u64 {
            return Err(IpcModelError::MessageLoss);
        }
        Ok(())
    }

    /// Verifies every queued message carries this channel's id (no
    /// cross-channel leakage).
    pub fn verify_channel_isolation(channel: &IpcChannelModel) -> Result<(), IpcModelError> {
        for msg in channel.messages.iter() {
            if msg.channel_id != channel.id {
                return Err(IpcModelError::IsolationBreach);
            }
        }
        Ok(())
    }

    /// Verifies the queue never exceeds its declared capacity.
    pub fn verify_buffer_bounds(channel: &IpcChannelModel) -> Result<(), IpcModelError> {
        if channel.messages.len() > channel.capacity {
            return Err(IpcModelError::BufferOverflow);
        }
        Ok(())
    }

    /// Probes the channel with zero rights and confirms both send and
    /// receive are refused. The probes do not mutate channel state because
    /// the capability check happens before any queue operation.
    pub fn verify_capability_enforcement(
        channel: &mut IpcChannelModel,
    ) -> Result<(), IpcModelError> {
        let result = channel.send(0, MessageType::Small, 0, 0);
        if result != Err(IpcModelError::NoSendCapability) {
            return Err(IpcModelError::NoSendCapability);
        }

        let result = channel.receive(0);
        if result != Err(IpcModelError::NoRecvCapability) {
            return Err(IpcModelError::NoRecvCapability);
        }

        Ok(())
    }

    /// Verifies the wait-for graph is acyclic (deadlock freedom).
    pub fn verify_deadlock_freedom(graph: &WaitGraph) -> Result<(), IpcModelError> {
        if graph.has_cycle() {
            Err(IpcModelError::DeadlockDetected)
        } else {
            Ok(())
        }
    }
}
263
/// Directed wait-for graph between tasks, used for deadlock detection.
#[derive(Debug, Clone, Default)]
pub struct WaitGraph {
    /// Adjacency list: waiting task id -> ids of the tasks it waits on.
    #[cfg(feature = "alloc")]
    edges: BTreeMap<u64, Vec<u64>>,
}
271
#[cfg(feature = "alloc")]
impl WaitGraph {
    /// Creates an empty wait-for graph.
    pub fn new() -> Self {
        Self::default()
    }

    /// Records that `from` is waiting on `to`.
    pub fn add_edge(&mut self, from: u64, to: u64) {
        self.edges.entry(from).or_default().push(to);
    }

    /// Drops every outgoing edge of `from` (e.g. the task stopped waiting).
    pub fn remove_edges(&mut self, from: u64) {
        self.edges.remove(&from);
    }

    /// Returns `true` when the graph contains a directed cycle.
    ///
    /// Only nodes with outgoing edges need to be roots: every node on a
    /// cycle necessarily has an outgoing edge.
    pub fn has_cycle(&self) -> bool {
        // Color map: 1 = on the current DFS path, 2 = fully explored.
        let mut state = BTreeMap::new();

        self.edges
            .keys()
            .any(|&root| !state.contains_key(&root) && self.dfs_cycle(root, &mut state))
    }

    /// Recursive three-color DFS; reports `true` when a back edge (an edge
    /// into a node still on the current path) is found.
    fn dfs_cycle(&self, node: u64, state: &mut BTreeMap<u64, u8>) -> bool {
        state.insert(node, 1); // gray: on the current path

        if let Some(successors) = self.edges.get(&node) {
            for &succ in successors {
                let cycle_found = match state.get(&succ) {
                    Some(&1) => true,  // back edge into the current path
                    Some(&2) => false, // already fully explored
                    _ => self.dfs_cycle(succ, state),
                };
                if cycle_found {
                    return true;
                }
            }
        }

        state.insert(node, 2); // black: subtree fully explored
        false
    }

    /// Number of nodes that currently have outgoing edges.
    pub fn node_count(&self) -> usize {
        self.edges.len()
    }
}
330
/// A contiguous shared-memory region owned by a single task.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct SharedRegion {
    /// Start address of the region.
    pub base: u64,
    /// Length of the region in bytes.
    pub length: u64,
    /// Owning task id.
    pub owner: u64,
}

impl SharedRegion {
    /// Half-open interval intersection test: `[base, base+length)` ranges
    /// overlap iff each starts before the other ends.
    ///
    /// `saturating_add` clamps the end at `u64::MAX` so a region reaching
    /// the top of the address space cannot wrap around.
    pub fn overlaps(&self, other: &SharedRegion) -> bool {
        let self_end = self.base.saturating_add(self.length);
        let other_end = other.base.saturating_add(other.length);
        self.base < other_end && other.base < self_end
    }
}
349
/// Index-only model of a bounded ring buffer (no element storage); tracks
/// producer/consumer cursors and the number of occupied slots.
#[derive(Debug, Clone)]
pub struct AsyncRingBuffer {
    /// Slot count; always a power of two so cursors can wrap with a mask.
    pub capacity: u32,
    /// Producer cursor.
    pub write_idx: u32,
    /// Consumer cursor.
    pub read_idx: u32,
    /// Occupied slots; 0 means empty, `capacity` means full.
    pub count: u32,
}

impl AsyncRingBuffer {
    /// Creates a buffer whose capacity is `capacity` rounded up to the next
    /// power of two (`0` rounds to `1`).
    pub fn new(capacity: u32) -> Self {
        Self {
            capacity: capacity.next_power_of_two(),
            write_idx: 0,
            read_idx: 0,
            count: 0,
        }
    }

    /// Advances the producer cursor; returns `false` when the buffer is full.
    pub fn push(&mut self) -> bool {
        if self.is_full() {
            return false;
        }
        // Power-of-two capacity makes `& (capacity - 1)` a cheap modulo.
        self.write_idx = (self.write_idx + 1) & (self.capacity - 1);
        self.count += 1;
        true
    }

    /// Advances the consumer cursor; returns `false` when the buffer is empty.
    pub fn pop(&mut self) -> bool {
        if self.is_empty() {
            return false;
        }
        self.read_idx = (self.read_idx + 1) & (self.capacity - 1);
        self.count -= 1;
        true
    }

    /// `true` when every slot is occupied.
    pub fn is_full(&self) -> bool {
        self.count >= self.capacity
    }

    /// `true` when no slot is occupied.
    pub fn is_empty(&self) -> bool {
        self.count == 0
    }
}
406
/// Lightweight asynchronous notification record.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Notification {
    /// Id of the receiving task/endpoint.
    pub target: u64,
    /// Notification payload bitmask.
    pub bits: u64,
    /// Whether the notification has been delivered.
    pub delivered: bool,
}
417
#[cfg(kani)]
mod kani_proofs {
    //! Kani proof harnesses for the IPC model invariants.
    //! Compiled only under `cargo kani` (`#[cfg(kani)]`).

    use super::*;

    /// A message sent and immediately received preserves payload, sequence
    /// number, and type.
    #[kani::proof]
    fn proof_fast_path_register_integrity() {
        let payload: u64 = kani::any();
        let msg_type = MessageType::Small;

        let mut channel = IpcChannelModel::new(1, 16);
        let seq = channel.send(payload, msg_type, 1, RIGHT_SEND).unwrap();
        let received = channel.receive(RIGHT_RECV).unwrap();

        assert_eq!(received.payload, payload, "Payload must be preserved");
        assert_eq!(received.sequence, seq, "Sequence must be preserved");
        assert_eq!(
            received.msg_type,
            MessageType::Small,
            "Type must be preserved"
        );
    }

    /// Send/receive round-trip returns the same payload for any input.
    #[kani::proof]
    fn proof_send_receive_roundtrip() {
        let payload: u64 = kani::any();

        let mut channel = IpcChannelModel::new(1, 16);
        channel
            .send(payload, MessageType::Small, 1, RIGHT_SEND)
            .unwrap();
        let msg = channel.receive(RIGHT_RECV).unwrap();

        assert_eq!(msg.payload, payload);
    }

    /// Two messages come out in the order they were sent, with strictly
    /// increasing sequence numbers.
    #[kani::proof]
    fn proof_fifo_ordering() {
        let p1: u64 = kani::any();
        let p2: u64 = kani::any();

        let mut channel = IpcChannelModel::new(1, 16);
        channel.send(p1, MessageType::Small, 1, RIGHT_SEND).unwrap();
        channel.send(p2, MessageType::Small, 1, RIGHT_SEND).unwrap();

        let m1 = channel.receive(RIGHT_RECV).unwrap();
        let m2 = channel.receive(RIGHT_RECV).unwrap();

        assert_eq!(m1.payload, p1);
        assert_eq!(m2.payload, p2);
        assert!(m1.sequence < m2.sequence);
    }

    /// Conservation: sent minus received equals the pending count, for any
    /// bounded number of sends (n bounded to keep the proof tractable).
    #[kani::proof]
    fn proof_no_message_loss() {
        let mut channel = IpcChannelModel::new(1, 16);
        let n: u8 = kani::any();
        kani::assume(n <= 4);

        for i in 0..n {
            let _ = channel.send(i as u64, MessageType::Small, 1, RIGHT_SEND);
        }

        assert_eq!(
            channel.total_sent - channel.total_received,
            channel.pending_count() as u64
        );
    }

    /// Sending past capacity never grows the queue beyond the declared bound.
    #[kani::proof]
    fn proof_channel_capacity_bound() {
        let cap: u8 = kani::any();
        kani::assume(cap > 0 && cap <= 8);

        let mut channel = IpcChannelModel::new(1, cap as usize);

        // Deliberately attempt 2 more sends than capacity; the excess must fail.
        for i in 0..((cap as u16) + 2) {
            let _ = channel.send(i as u64, MessageType::Small, 1, RIGHT_SEND);
        }

        assert!(channel.pending_count() <= cap as usize);
    }

    /// Messages on distinct channels never leak into each other: each
    /// received message carries its own channel's id and payload.
    #[kani::proof]
    fn proof_channel_isolation() {
        let p1: u64 = kani::any();
        let p2: u64 = kani::any();

        let mut ch1 = IpcChannelModel::new(1, 16);
        let mut ch2 = IpcChannelModel::new(2, 16);

        ch1.send(p1, MessageType::Small, 1, RIGHT_SEND).unwrap();
        ch2.send(p2, MessageType::Small, 2, RIGHT_SEND).unwrap();

        let m1 = ch1.receive(RIGHT_RECV).unwrap();
        let m2 = ch2.receive(RIGHT_RECV).unwrap();

        assert_eq!(m1.channel_id, 1);
        assert_eq!(m2.channel_id, 2);
        assert_eq!(m1.payload, p1);
        assert_eq!(m2.payload, p2);
    }

    /// Sending without RIGHT_SEND is always refused, including when the
    /// caller only holds the (wrong) receive right.
    #[kani::proof]
    fn proof_capability_required() {
        let mut channel = IpcChannelModel::new(1, 16);

        let result = channel.send(42, MessageType::Small, 1, 0);
        assert_eq!(result, Err(IpcModelError::NoSendCapability));

        let result = channel.send(42, MessageType::Small, 1, RIGHT_RECV);
        assert_eq!(result, Err(IpcModelError::NoSendCapability));
    }

    /// Two regions constructed to be disjoint (r2 starts at or after r1's
    /// end) are reported as non-overlapping.
    #[kani::proof]
    fn proof_zero_copy_no_overlap() {
        let base1: u64 = kani::any();
        let len1: u64 = kani::any();
        let base2: u64 = kani::any();
        let len2: u64 = kani::any();

        kani::assume(len1 > 0 && len1 < 0x1000);
        kani::assume(len2 > 0 && len2 < 0x1000);
        kani::assume(base1 < 0x8000_0000_0000_0000);
        kani::assume(base2 >= base1.saturating_add(len1));

        let r1 = SharedRegion {
            base: base1,
            length: len1,
            owner: 1,
        };
        let r2 = SharedRegion {
            base: base2,
            length: len2,
            owner: 2,
        };

        assert!(!r1.overlaps(&r2));
    }

    /// An acyclic wait chain (0 -> 1 -> 2) is not reported as a deadlock.
    #[kani::proof]
    fn proof_deadlock_freedom() {
        let mut graph = WaitGraph::new();
        graph.add_edge(0, 1);
        graph.add_edge(1, 2);

        assert!(!graph.has_cycle());
    }

    /// Ring buffer refuses pushes when full and pops when empty, and the
    /// occupancy count returns to zero after draining.
    #[kani::proof]
    fn proof_async_ring_buffer_safety() {
        let mut rb = AsyncRingBuffer::new(4);

        assert!(rb.push());
        assert!(rb.push());
        assert!(rb.push());
        assert!(rb.push());
        assert!(!rb.push()); assert!(rb.pop());
        assert!(rb.pop());
        assert!(rb.pop());
        assert!(rb.pop());
        assert!(!rb.pop()); assert_eq!(rb.count, 0);
    }

    /// Every message type survives a send/receive round-trip unchanged.
    #[kani::proof]
    fn proof_message_type_safety() {
        let type_val: u8 = kani::any();
        kani::assume(type_val < 4);

        let msg_type = match type_val {
            0 => MessageType::Small,
            1 => MessageType::Large,
            2 => MessageType::Notification,
            3 => MessageType::CapabilityTransfer,
            _ => unreachable!(),
        };

        let mut channel = IpcChannelModel::new(1, 16);
        channel.send(0, msg_type, 1, RIGHT_SEND).unwrap();
        let received = channel.receive(RIGHT_RECV).unwrap();

        assert_eq!(received.msg_type, msg_type);
    }

    /// Marking a notification delivered does not disturb its target or bits.
    #[kani::proof]
    fn proof_notification_delivery() {
        let target: u64 = kani::any();
        let bits: u64 = kani::any();

        let mut notif = Notification {
            target,
            bits,
            delivered: false,
        };

        notif.delivered = true;

        assert!(notif.delivered);
        assert_eq!(notif.target, target);
        assert_eq!(notif.bits, bits);
    }
}
648
#[cfg(test)]
mod tests {
    //! Concrete unit tests mirroring the Kani proof obligations.

    use super::*;

    // Basic send/receive round-trip preserves the payload.
    #[cfg(feature = "alloc")]
    #[test]
    fn test_channel_send_receive() {
        let mut ch = IpcChannelModel::new(1, 16);
        ch.send(42, MessageType::Small, 1, RIGHT_SEND).unwrap();
        let msg = ch.receive(RIGHT_RECV).unwrap();
        assert_eq!(msg.payload, 42);
    }

    // Messages come out in send order.
    #[cfg(feature = "alloc")]
    #[test]
    fn test_channel_fifo() {
        let mut ch = IpcChannelModel::new(1, 16);
        ch.send(1, MessageType::Small, 1, RIGHT_SEND).unwrap();
        ch.send(2, MessageType::Small, 1, RIGHT_SEND).unwrap();
        ch.send(3, MessageType::Small, 1, RIGHT_SEND).unwrap();

        assert_eq!(ch.receive(RIGHT_RECV).unwrap().payload, 1);
        assert_eq!(ch.receive(RIGHT_RECV).unwrap().payload, 2);
        assert_eq!(ch.receive(RIGHT_RECV).unwrap().payload, 3);
    }

    // A send beyond capacity fails with ChannelFull.
    #[cfg(feature = "alloc")]
    #[test]
    fn test_channel_capacity() {
        let mut ch = IpcChannelModel::new(1, 2);
        assert!(ch.send(1, MessageType::Small, 1, RIGHT_SEND).is_ok());
        assert!(ch.send(2, MessageType::Small, 1, RIGHT_SEND).is_ok());
        assert_eq!(
            ch.send(3, MessageType::Small, 1, RIGHT_SEND),
            Err(IpcModelError::ChannelFull)
        );
    }

    // Sending requires RIGHT_SEND; RIGHT_RECV alone is not enough.
    #[cfg(feature = "alloc")]
    #[test]
    fn test_no_send_capability() {
        let mut ch = IpcChannelModel::new(1, 16);
        assert_eq!(
            ch.send(42, MessageType::Small, 1, 0),
            Err(IpcModelError::NoSendCapability)
        );
        assert_eq!(
            ch.send(42, MessageType::Small, 1, RIGHT_RECV),
            Err(IpcModelError::NoSendCapability)
        );
    }

    // Receiving requires RIGHT_RECV; RIGHT_SEND alone is not enough.
    #[cfg(feature = "alloc")]
    #[test]
    fn test_no_recv_capability() {
        let mut ch = IpcChannelModel::new(1, 16);
        ch.send(42, MessageType::Small, 1, RIGHT_SEND).unwrap();
        assert_eq!(ch.receive(0), Err(IpcModelError::NoRecvCapability));
        assert_eq!(ch.receive(RIGHT_SEND), Err(IpcModelError::NoRecvCapability));
    }

    // sent - received == pending after a mix of sends and receives.
    #[cfg(feature = "alloc")]
    #[test]
    fn test_message_conservation() {
        let mut ch = IpcChannelModel::new(1, 16);
        for i in 0..5 {
            ch.send(i, MessageType::Small, 1, RIGHT_SEND).unwrap();
        }
        ch.receive(RIGHT_RECV).unwrap();
        ch.receive(RIGHT_RECV).unwrap();

        assert!(IpcInvariantChecker::verify_no_message_loss(&ch).is_ok());
    }

    // Queued messages carry the owning channel's id.
    #[cfg(feature = "alloc")]
    #[test]
    fn test_channel_isolation() {
        let mut ch = IpcChannelModel::new(42, 16);
        ch.send(1, MessageType::Small, 1, RIGHT_SEND).unwrap();
        assert!(IpcInvariantChecker::verify_channel_isolation(&ch).is_ok());
    }

    // A simple chain has no cycle.
    #[cfg(feature = "alloc")]
    #[test]
    fn test_wait_graph_no_cycle() {
        let mut g = WaitGraph::new();
        g.add_edge(0, 1);
        g.add_edge(1, 2);
        assert!(!g.has_cycle());
    }

    // Closing the chain back to 0 creates a detectable cycle.
    #[cfg(feature = "alloc")]
    #[test]
    fn test_wait_graph_with_cycle() {
        let mut g = WaitGraph::new();
        g.add_edge(0, 1);
        g.add_edge(1, 2);
        g.add_edge(2, 0);
        assert!(g.has_cycle());
    }

    // Removing one participant's edges breaks a two-node cycle.
    #[cfg(feature = "alloc")]
    #[test]
    fn test_wait_graph_remove_breaks_cycle() {
        let mut g = WaitGraph::new();
        g.add_edge(0, 1);
        g.add_edge(1, 0);
        assert!(g.has_cycle());

        g.remove_edges(1);
        assert!(!g.has_cycle());
    }

    // Intersecting intervals overlap; disjoint ones do not.
    #[test]
    fn test_shared_region_overlap() {
        let r1 = SharedRegion {
            base: 0x1000,
            length: 0x1000,
            owner: 1,
        };
        let r2 = SharedRegion {
            base: 0x1800,
            length: 0x1000,
            owner: 2,
        };
        assert!(r1.overlaps(&r2));

        let r3 = SharedRegion {
            base: 0x3000,
            length: 0x1000,
            owner: 3,
        };
        assert!(!r1.overlaps(&r3));
    }

    // Ring buffer full/empty transitions and refusal at the boundaries.
    #[test]
    fn test_ring_buffer() {
        let mut rb = AsyncRingBuffer::new(4);
        assert!(rb.is_empty());
        assert!(!rb.is_full());

        assert!(rb.push());
        assert!(rb.push());
        assert!(rb.push());
        assert!(rb.push());
        assert!(rb.is_full());
        assert!(!rb.push());

        assert!(rb.pop());
        assert!(!rb.is_full());
        assert!(rb.push()); }
}