⚠️ VeridianOS Kernel Documentation — low-level kernel code built for a `no_std` environment. Per the project-wide convention, functions are presumed unsafe unless explicitly marked otherwise; note that the items in this particular module are all safe Rust (verification models and proof harnesses) — TODO confirm the convention wording.

Module: veridian_kernel/verification/ipc_proofs.rs

1#![allow(unexpected_cfgs)]
2//! Formally Verified IPC
3//!
4//! Model-checking and proof harnesses for IPC channel correctness, including
5//! FIFO ordering, message conservation, channel isolation, buffer bounds,
6//! capability enforcement, and deadlock freedom via wait-for graph analysis.
7
8#![allow(dead_code)]
9
10#[cfg(feature = "alloc")]
11use alloc::collections::BTreeMap;
12#[cfg(feature = "alloc")]
13use alloc::collections::VecDeque;
14#[cfg(feature = "alloc")]
15use alloc::vec::Vec;
16
/// Maximum channel capacity used to keep verification runs bounded
const MAX_CHANNEL_CAPACITY: usize = 256;

/// Capability right: holder may `send` on a channel (bit 0 of a rights mask)
const RIGHT_SEND: u32 = 1 << 0;
/// Capability right: holder may `receive` from a channel (bit 1 of a rights mask)
const RIGHT_RECV: u32 = 1 << 1;
23
/// Message type tags for type safety verification
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MessageType {
    /// Register-based fast-path message (<= 64 bytes)
    Small = 0,
    /// Large message carried via shared memory
    Large = 1,
    /// Data-less, signal-like notification
    Notification = 2,
    /// Transfer of a capability
    CapabilityTransfer = 3,
}

/// A message in the IPC channel model
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct IpcMessage {
    /// Sequence number, assigned per-channel in strictly increasing order
    pub sequence: u64,
    /// Payload, modeled as a single u64 to keep verification tractable
    pub payload: u64,
    /// Type tag of this message
    pub msg_type: MessageType,
    /// PID of the sending process
    pub sender: u64,
    /// Id of the channel this message was sent on
    pub channel_id: u64,
}
51
/// Model of an IPC channel for verification
///
/// The traffic counters (`total_sent` / `total_received`) only ever
/// increase; invariant checks derive the expected pending count from
/// their difference.
#[derive(Debug, Clone)]
pub struct IpcChannelModel {
    /// Channel identifier
    pub id: u64,
    /// Maximum number of buffered messages
    pub capacity: usize,
    /// FIFO queue of in-flight messages
    #[cfg(feature = "alloc")]
    pub messages: VecDeque<IpcMessage>,
    /// How many senders currently hold access
    pub sender_count: u32,
    /// How many receivers currently hold access
    pub receiver_count: u32,
    /// Monotone count of every message ever sent
    pub total_sent: u64,
    /// Monotone count of every message ever received
    pub total_received: u64,
    /// Sequence number the next sent message will receive
    next_sequence: u64,
}
73
#[cfg(feature = "alloc")]
impl Default for IpcChannelModel {
    /// An unnamed channel (id 0) at the maximum verification capacity,
    /// with an empty queue, no recorded traffic, and — unlike
    /// [`IpcChannelModel::new`] — zero registered endpoints.
    fn default() -> Self {
        Self {
            capacity: MAX_CHANNEL_CAPACITY,
            messages: VecDeque::new(),
            id: 0,
            sender_count: 0,
            receiver_count: 0,
            total_sent: 0,
            total_received: 0,
            next_sequence: 0,
        }
    }
}
89
/// Errors in the IPC model
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum IpcModelError {
    /// The channel buffer is at capacity
    ChannelFull,
    /// The channel buffer has no pending messages
    ChannelEmpty,
    /// The caller's rights mask lacks the send right
    NoSendCapability,
    /// The caller's rights mask lacks the receive right
    NoRecvCapability,
    /// The channel ID does not name a valid channel
    InvalidChannel,
    /// FIFO ordering invariant was violated
    FifoViolation,
    /// A message was lost (conservation invariant failed)
    MessageLoss,
    /// The buffer exceeded its capacity bound
    BufferOverflow,
    /// A cycle was found in the wait-for graph
    DeadlockDetected,
    /// Message type tag did not match
    TypeMismatch,
    /// A message crossed channel boundaries
    IsolationBreach,
    /// Two shared memory regions overlap
    RegionOverlap,
}
118
#[cfg(feature = "alloc")]
impl IpcChannelModel {
    /// Construct channel `id` holding at most `capacity` messages,
    /// starting with one sender and one receiver endpoint.
    pub fn new(id: u64, capacity: usize) -> Self {
        Self {
            id,
            capacity,
            messages: VecDeque::new(),
            sender_count: 1,
            receiver_count: 1,
            total_sent: 0,
            total_received: 0,
            next_sequence: 0,
        }
    }

    /// Enqueue a message and return its sequence number.
    ///
    /// Fails with [`IpcModelError::NoSendCapability`] when `rights` lacks
    /// `RIGHT_SEND`, and with [`IpcModelError::ChannelFull`] when the
    /// buffer already holds `capacity` messages.
    pub fn send(
        &mut self,
        payload: u64,
        msg_type: MessageType,
        sender: u64,
        rights: u32,
    ) -> Result<u64, IpcModelError> {
        // Capability check comes first: an unauthorized sender learns
        // nothing about the buffer state.
        if rights & RIGHT_SEND == 0 {
            return Err(IpcModelError::NoSendCapability);
        }
        if self.messages.len() >= self.capacity {
            return Err(IpcModelError::ChannelFull);
        }

        let sequence = self.next_sequence;
        self.messages.push_back(IpcMessage {
            sequence,
            payload,
            msg_type,
            sender,
            channel_id: self.id,
        });
        self.next_sequence += 1;
        self.total_sent += 1;

        Ok(sequence)
    }

    /// Dequeue the oldest pending message.
    ///
    /// Fails with [`IpcModelError::NoRecvCapability`] when `rights` lacks
    /// `RIGHT_RECV`, and with [`IpcModelError::ChannelEmpty`] when no
    /// message is pending.
    pub fn receive(&mut self, rights: u32) -> Result<IpcMessage, IpcModelError> {
        if rights & RIGHT_RECV == 0 {
            return Err(IpcModelError::NoRecvCapability);
        }

        let msg = self
            .messages
            .pop_front()
            .ok_or(IpcModelError::ChannelEmpty)?;
        self.total_received += 1;
        Ok(msg)
    }

    /// Number of messages currently buffered.
    pub fn pending_count(&self) -> usize {
        self.messages.len()
    }
}
190
/// IPC invariant checker that validates channel properties
pub struct IpcInvariantChecker;

#[cfg(feature = "alloc")]
impl IpcInvariantChecker {
    /// Verify FIFO ordering: pending messages carry strictly increasing
    /// sequence numbers, so they will be dequeued in send order.
    ///
    /// Walks the queue pairwise instead of collecting it into a temporary
    /// `Vec` first — the previous version allocated on every check
    /// (clippy `needless_collect`), which matters in kernel code.
    pub fn verify_fifo_ordering(channel: &IpcChannelModel) -> Result<(), IpcModelError> {
        let mut prev: Option<u64> = None;
        for msg in channel.messages.iter() {
            if let Some(last) = prev {
                if last >= msg.sequence {
                    return Err(IpcModelError::FifoViolation);
                }
            }
            prev = Some(msg.sequence);
        }
        Ok(())
    }

    /// Verify message conservation: `total_sent - total_received` must
    /// equal the number of messages still pending in the buffer.
    pub fn verify_no_message_loss(channel: &IpcChannelModel) -> Result<(), IpcModelError> {
        let expected_pending = channel.total_sent.saturating_sub(channel.total_received);
        if expected_pending != channel.messages.len() as u64 {
            return Err(IpcModelError::MessageLoss);
        }
        Ok(())
    }

    /// Verify channel isolation: every queued message carries this
    /// channel's id; a mismatch means a message leaked in from another
    /// channel.
    pub fn verify_channel_isolation(channel: &IpcChannelModel) -> Result<(), IpcModelError> {
        if channel.messages.iter().any(|msg| msg.channel_id != channel.id) {
            return Err(IpcModelError::IsolationBreach);
        }
        Ok(())
    }

    /// Verify buffer bounds: the pending count never exceeds `capacity`.
    pub fn verify_buffer_bounds(channel: &IpcChannelModel) -> Result<(), IpcModelError> {
        if channel.messages.len() > channel.capacity {
            return Err(IpcModelError::BufferOverflow);
        }
        Ok(())
    }

    /// Verify capability enforcement: a rights mask of 0 must be rejected
    /// by both `send` and `receive`.
    ///
    /// Takes `&mut` because it actively probes the channel; both probes
    /// are expected to fail, so channel state is left unchanged.
    pub fn verify_capability_enforcement(
        channel: &mut IpcChannelModel,
    ) -> Result<(), IpcModelError> {
        // Attempt send without rights
        if channel.send(0, MessageType::Small, 0, 0) != Err(IpcModelError::NoSendCapability) {
            return Err(IpcModelError::NoSendCapability);
        }

        // Attempt receive without rights
        if channel.receive(0) != Err(IpcModelError::NoRecvCapability) {
            return Err(IpcModelError::NoRecvCapability);
        }

        Ok(())
    }

    /// Verify deadlock freedom: the wait-for graph must be acyclic.
    pub fn verify_deadlock_freedom(graph: &WaitGraph) -> Result<(), IpcModelError> {
        if graph.has_cycle() {
            Err(IpcModelError::DeadlockDetected)
        } else {
            Ok(())
        }
    }
}
263
/// Wait-for graph for deadlock detection
#[derive(Debug, Clone, Default)]
pub struct WaitGraph {
    /// Adjacency map: blocked process -> processes it is waiting on
    #[cfg(feature = "alloc")]
    edges: BTreeMap<u64, Vec<u64>>,
}

#[cfg(feature = "alloc")]
impl WaitGraph {
    /// Create a new empty wait-for graph
    pub fn new() -> Self {
        Self::default()
    }

    /// Record that process `from` is blocked waiting on process `to`.
    pub fn add_edge(&mut self, from: u64, to: u64) {
        self.edges.entry(from).or_default().push(to);
    }

    /// Drop every outgoing edge of `from` (it is no longer waiting).
    pub fn remove_edges(&mut self, from: u64) {
        self.edges.remove(&from);
    }

    /// True if the wait-for relation contains a cycle, i.e. a deadlock.
    ///
    /// Only nodes with outgoing edges need to be tried as DFS roots: any
    /// cycle necessarily contains such a node.
    pub fn has_cycle(&self) -> bool {
        let mut state = BTreeMap::new();
        self.edges
            .keys()
            .any(|&node| !state.contains_key(&node) && self.dfs_cycle(node, &mut state))
    }

    /// Recursive three-color DFS cycle detection.
    /// Color: absent = unvisited, 1 = on the current path, 2 = fully explored.
    fn dfs_cycle(&self, node: u64, state: &mut BTreeMap<u64, u8>) -> bool {
        state.insert(node, 1); // On the current DFS path

        if let Some(neighbors) = self.edges.get(&node) {
            for &next in neighbors {
                let cycle_found = match state.get(&next).copied() {
                    Some(1) => true,  // Back edge to the current path = cycle
                    Some(2) => false, // Subtree already proven acyclic
                    _ => self.dfs_cycle(next, state),
                };
                if cycle_found {
                    return true;
                }
            }
        }

        state.insert(node, 2); // Subtree fully explored, provably acyclic
        false
    }

    /// Number of processes that currently have outgoing wait edges.
    pub fn node_count(&self) -> usize {
        self.edges.len()
    }
}
330
/// Model for shared memory regions (zero-copy verification)
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct SharedRegion {
    /// Start address (page-aligned)
    pub base: u64,
    /// Length in bytes
    pub length: u64,
    /// Owner process ID
    pub owner: u64,
}

impl SharedRegion {
    /// True when the byte ranges `[base, base + length)` of the two
    /// regions intersect.
    ///
    /// End addresses are computed with saturating arithmetic so a region
    /// reaching the top of the address space cannot wrap around to a
    /// small end value.
    pub fn overlaps(&self, other: &SharedRegion) -> bool {
        let self_end = self.base.saturating_add(self.length);
        let other_end = other.base.saturating_add(other.length);
        self.base < other_end && other.base < self_end
    }
}
349
/// Async ring buffer model for verification
#[derive(Debug, Clone)]
pub struct AsyncRingBuffer {
    /// Capacity; always a power of two (see [`AsyncRingBuffer::new`])
    pub capacity: u32,
    /// Producer index, wraps modulo capacity
    pub write_idx: u32,
    /// Consumer index, wraps modulo capacity
    pub read_idx: u32,
    /// Items currently buffered
    pub count: u32,
}

impl AsyncRingBuffer {
    /// Create an empty ring buffer; `capacity` is rounded up to the next
    /// power of two so index wrapping can use a bitmask.
    pub fn new(capacity: u32) -> Self {
        Self {
            capacity: capacity.next_power_of_two(),
            write_idx: 0,
            read_idx: 0,
            count: 0,
        }
    }

    /// Advance the producer by one slot; false when the buffer is full.
    pub fn push(&mut self) -> bool {
        if self.is_full() {
            false
        } else {
            // capacity is a power of two, so `& (capacity - 1)` wraps.
            self.write_idx = (self.write_idx + 1) & (self.capacity - 1);
            self.count += 1;
            true
        }
    }

    /// Advance the consumer by one slot; false when the buffer is empty.
    pub fn pop(&mut self) -> bool {
        if self.is_empty() {
            false
        } else {
            self.read_idx = (self.read_idx + 1) & (self.capacity - 1);
            self.count -= 1;
            true
        }
    }

    /// Check if buffer is full
    pub fn is_full(&self) -> bool {
        self.count >= self.capacity
    }

    /// Check if buffer is empty
    pub fn is_empty(&self) -> bool {
        self.count == 0
    }
}
406
/// Notification model for delivery verification
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Notification {
    /// PID of the receiving process
    pub target: u64,
    /// Payload bits of the notification
    pub bits: u64,
    /// Set once the notification has been delivered
    pub delivered: bool,
}
417
418// ============================================================================
419// Kani Proof Harnesses
420// ============================================================================
421
#[cfg(kani)]
mod kani_proofs {
    //! Kani model-checking harnesses for the IPC channel model.
    //!
    //! NOTE(review): harness bodies are deliberately left untouched —
    //! the `kani::any()` / `kani::assume(..)` pairs define the exact
    //! state space each proof explores.

    use super::*;

    /// Proof: Fast path register transfer preserves values
    #[kani::proof]
    fn proof_fast_path_register_integrity() {
        // Symbolic payload: Kani checks the assertions for every u64 value.
        let payload: u64 = kani::any();
        let msg_type = MessageType::Small;

        let mut channel = IpcChannelModel::new(1, 16);
        let seq = channel.send(payload, msg_type, 1, RIGHT_SEND).unwrap();
        let received = channel.receive(RIGHT_RECV).unwrap();

        assert_eq!(received.payload, payload, "Payload must be preserved");
        assert_eq!(received.sequence, seq, "Sequence must be preserved");
        assert_eq!(
            received.msg_type,
            MessageType::Small,
            "Type must be preserved"
        );
    }

    /// Proof: Send then receive returns the same value
    #[kani::proof]
    fn proof_send_receive_roundtrip() {
        let payload: u64 = kani::any();

        let mut channel = IpcChannelModel::new(1, 16);
        channel
            .send(payload, MessageType::Small, 1, RIGHT_SEND)
            .unwrap();
        let msg = channel.receive(RIGHT_RECV).unwrap();

        assert_eq!(msg.payload, payload);
    }

    /// Proof: Messages are dequeued in FIFO order
    #[kani::proof]
    fn proof_fifo_ordering() {
        let p1: u64 = kani::any();
        let p2: u64 = kani::any();

        let mut channel = IpcChannelModel::new(1, 16);
        channel.send(p1, MessageType::Small, 1, RIGHT_SEND).unwrap();
        channel.send(p2, MessageType::Small, 1, RIGHT_SEND).unwrap();

        let m1 = channel.receive(RIGHT_RECV).unwrap();
        let m2 = channel.receive(RIGHT_RECV).unwrap();

        assert_eq!(m1.payload, p1);
        assert_eq!(m2.payload, p2);
        assert!(m1.sequence < m2.sequence);
    }

    /// Proof: No messages are lost (conservation)
    #[kani::proof]
    fn proof_no_message_loss() {
        let mut channel = IpcChannelModel::new(1, 16);
        let n: u8 = kani::any();
        // Bound the symbolic loop count so unrolling stays tractable.
        kani::assume(n <= 4);

        for i in 0..n {
            let _ = channel.send(i as u64, MessageType::Small, 1, RIGHT_SEND);
        }

        assert_eq!(
            channel.total_sent - channel.total_received,
            channel.pending_count() as u64
        );
    }

    /// Proof: Message count never exceeds capacity
    #[kani::proof]
    fn proof_channel_capacity_bound() {
        let cap: u8 = kani::any();
        kani::assume(cap > 0 && cap <= 8);

        let mut channel = IpcChannelModel::new(1, cap as usize);

        // Try to send more than capacity
        // (u16 arithmetic so `cap + 2` cannot overflow the loop bound type)
        for i in 0..((cap as u16) + 2) {
            let _ = channel.send(i as u64, MessageType::Small, 1, RIGHT_SEND);
        }

        assert!(channel.pending_count() <= cap as usize);
    }

    /// Proof: Separate channels don't interfere
    #[kani::proof]
    fn proof_channel_isolation() {
        let p1: u64 = kani::any();
        let p2: u64 = kani::any();

        let mut ch1 = IpcChannelModel::new(1, 16);
        let mut ch2 = IpcChannelModel::new(2, 16);

        ch1.send(p1, MessageType::Small, 1, RIGHT_SEND).unwrap();
        ch2.send(p2, MessageType::Small, 2, RIGHT_SEND).unwrap();

        let m1 = ch1.receive(RIGHT_RECV).unwrap();
        let m2 = ch2.receive(RIGHT_RECV).unwrap();

        assert_eq!(m1.channel_id, 1);
        assert_eq!(m2.channel_id, 2);
        assert_eq!(m1.payload, p1);
        assert_eq!(m2.payload, p2);
    }

    /// Proof: Send without capability fails
    #[kani::proof]
    fn proof_capability_required() {
        let mut channel = IpcChannelModel::new(1, 16);

        // No rights
        let result = channel.send(42, MessageType::Small, 1, 0);
        assert_eq!(result, Err(IpcModelError::NoSendCapability));

        // Only recv right, no send
        let result = channel.send(42, MessageType::Small, 1, RIGHT_RECV);
        assert_eq!(result, Err(IpcModelError::NoSendCapability));
    }

    /// Proof: Shared memory regions don't overlap
    #[kani::proof]
    fn proof_zero_copy_no_overlap() {
        let base1: u64 = kani::any();
        let len1: u64 = kani::any();
        let base2: u64 = kani::any();
        let len2: u64 = kani::any();

        kani::assume(len1 > 0 && len1 < 0x1000);
        kani::assume(len2 > 0 && len2 < 0x1000);
        // Ensure no overlap by placing regions apart
        // (region 2 starts at or after region 1's exclusive end)
        kani::assume(base1 < 0x8000_0000_0000_0000);
        kani::assume(base2 >= base1.saturating_add(len1));

        let r1 = SharedRegion {
            base: base1,
            length: len1,
            owner: 1,
        };
        let r2 = SharedRegion {
            base: base2,
            length: len2,
            owner: 2,
        };

        assert!(!r1.overlaps(&r2));
    }

    /// Proof: No cycles in a DAG wait-for graph
    #[kani::proof]
    fn proof_deadlock_freedom() {
        let mut graph = WaitGraph::new();
        // Linear chain: 0 -> 1 -> 2 (no cycle)
        graph.add_edge(0, 1);
        graph.add_edge(1, 2);

        assert!(!graph.has_cycle());
    }

    /// Proof: Async ring buffer wrapping is correct
    #[kani::proof]
    fn proof_async_ring_buffer_safety() {
        let mut rb = AsyncRingBuffer::new(4);

        // Fill buffer
        assert!(rb.push());
        assert!(rb.push());
        assert!(rb.push());
        assert!(rb.push());
        assert!(!rb.push()); // Full

        // Empty buffer
        assert!(rb.pop());
        assert!(rb.pop());
        assert!(rb.pop());
        assert!(rb.pop());
        assert!(!rb.pop()); // Empty

        // Indices wrapped correctly
        assert_eq!(rb.count, 0);
    }

    /// Proof: Message type tags are preserved through send/receive
    #[kani::proof]
    fn proof_message_type_safety() {
        let type_val: u8 = kani::any();
        kani::assume(type_val < 4);

        let msg_type = match type_val {
            0 => MessageType::Small,
            1 => MessageType::Large,
            2 => MessageType::Notification,
            3 => MessageType::CapabilityTransfer,
            _ => unreachable!(), // excluded by the assume above
        };

        let mut channel = IpcChannelModel::new(1, 16);
        channel.send(0, msg_type, 1, RIGHT_SEND).unwrap();
        let received = channel.receive(RIGHT_RECV).unwrap();

        assert_eq!(received.msg_type, msg_type);
    }

    /// Proof: Notifications reach their target
    #[kani::proof]
    fn proof_notification_delivery() {
        let target: u64 = kani::any();
        let bits: u64 = kani::any();

        let mut notif = Notification {
            target,
            bits,
            delivered: false,
        };

        // Simulate delivery
        notif.delivered = true;

        assert!(notif.delivered);
        assert_eq!(notif.target, target);
        assert_eq!(notif.bits, bits);
    }
}
648
649// ============================================================================
650// Unit Tests
651// ============================================================================
652
#[cfg(test)]
mod tests {
    use super::*;

    #[cfg(feature = "alloc")]
    #[test]
    fn test_channel_send_receive() {
        let mut channel = IpcChannelModel::new(1, 16);
        channel.send(42, MessageType::Small, 1, RIGHT_SEND).unwrap();
        assert_eq!(channel.receive(RIGHT_RECV).unwrap().payload, 42);
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn test_channel_fifo() {
        let mut channel = IpcChannelModel::new(1, 16);
        for payload in 1..=3u64 {
            channel
                .send(payload, MessageType::Small, 1, RIGHT_SEND)
                .unwrap();
        }
        // Messages must come back out in send order.
        for expected in 1..=3u64 {
            assert_eq!(channel.receive(RIGHT_RECV).unwrap().payload, expected);
        }
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn test_channel_capacity() {
        let mut channel = IpcChannelModel::new(1, 2);
        assert!(channel.send(1, MessageType::Small, 1, RIGHT_SEND).is_ok());
        assert!(channel.send(2, MessageType::Small, 1, RIGHT_SEND).is_ok());
        // Third send must be rejected: the buffer only holds two messages.
        assert_eq!(
            channel.send(3, MessageType::Small, 1, RIGHT_SEND),
            Err(IpcModelError::ChannelFull)
        );
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn test_no_send_capability() {
        let mut channel = IpcChannelModel::new(1, 16);
        // Neither an empty rights mask nor a recv-only mask may send.
        for &rights in &[0, RIGHT_RECV] {
            assert_eq!(
                channel.send(42, MessageType::Small, 1, rights),
                Err(IpcModelError::NoSendCapability)
            );
        }
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn test_no_recv_capability() {
        let mut channel = IpcChannelModel::new(1, 16);
        channel.send(42, MessageType::Small, 1, RIGHT_SEND).unwrap();
        // Neither an empty rights mask nor a send-only mask may receive.
        for &rights in &[0, RIGHT_SEND] {
            assert_eq!(channel.receive(rights), Err(IpcModelError::NoRecvCapability));
        }
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn test_message_conservation() {
        let mut channel = IpcChannelModel::new(1, 16);
        for payload in 0..5 {
            channel
                .send(payload, MessageType::Small, 1, RIGHT_SEND)
                .unwrap();
        }
        channel.receive(RIGHT_RECV).unwrap();
        channel.receive(RIGHT_RECV).unwrap();

        // 5 sent - 2 received = 3 pending; the checker must agree.
        assert!(IpcInvariantChecker::verify_no_message_loss(&channel).is_ok());
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn test_channel_isolation() {
        let mut channel = IpcChannelModel::new(42, 16);
        channel.send(1, MessageType::Small, 1, RIGHT_SEND).unwrap();
        assert!(IpcInvariantChecker::verify_channel_isolation(&channel).is_ok());
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn test_wait_graph_no_cycle() {
        let mut graph = WaitGraph::new();
        graph.add_edge(0, 1);
        graph.add_edge(1, 2);
        assert!(!graph.has_cycle());
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn test_wait_graph_with_cycle() {
        let mut graph = WaitGraph::new();
        graph.add_edge(0, 1);
        graph.add_edge(1, 2);
        graph.add_edge(2, 0);
        assert!(graph.has_cycle());
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn test_wait_graph_remove_breaks_cycle() {
        let mut graph = WaitGraph::new();
        graph.add_edge(0, 1);
        graph.add_edge(1, 0);
        assert!(graph.has_cycle());

        // Dropping process 1's outgoing edges breaks the 0 <-> 1 cycle.
        graph.remove_edges(1);
        assert!(!graph.has_cycle());
    }

    #[test]
    fn test_shared_region_overlap() {
        let first = SharedRegion {
            base: 0x1000,
            length: 0x1000,
            owner: 1,
        };
        let second = SharedRegion {
            base: 0x1800,
            length: 0x1000,
            owner: 2,
        };
        assert!(first.overlaps(&second));

        let distant = SharedRegion {
            base: 0x3000,
            length: 0x1000,
            owner: 3,
        };
        assert!(!first.overlaps(&distant));
    }

    #[test]
    fn test_ring_buffer() {
        let mut rb = AsyncRingBuffer::new(4);
        assert!(rb.is_empty());
        assert!(!rb.is_full());

        for _ in 0..4 {
            assert!(rb.push());
        }
        assert!(rb.is_full());
        assert!(!rb.push());

        assert!(rb.pop());
        assert!(!rb.is_full());
        assert!(rb.push()); // Indices wrap around after a full cycle
    }
}