kernel/drivers/virtio/
queue.rs

//! Virtio Queue module.
//!
//! This module implements the split virtqueue used by virtio devices:
//! the descriptor table, the available ring, and the used ring, plus the
//! driver-side operations for allocating descriptors and exchanging
//! buffers with a device.
//!
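//! A typical driver-side flow (illustrative sketch; the device side is
//! simulated by writing the used ring directly, as the tests below do):
//!
//! ```rust,ignore
//! let mut vq = VirtQueue::new(8);
//! vq.init();
//!
//! // Allocate a descriptor, point it at a buffer, and publish it.
//! let idx = vq.alloc_desc().unwrap();
//! vq.desc[idx].addr = 0x1000; // hypothetical buffer address
//! vq.desc[idx].len = 64;
//! vq.push(idx).unwrap();
//!
//! // ...once the device publishes a used entry...
//! if let Some(done) = vq.pop() {
//!     vq.free_desc(done);
//! }
//! ```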
use core::{alloc::Layout, mem};
use alloc::{alloc::alloc_zeroed, vec::Vec};

// Overall memory layout of the virtqueue (kept for reference):
//
// struct RawVirtQueue {
//     pub desc: [Descriptor; 0],  /* Flexible array member */
//     pub avail: RawAvailableRing,
//     pub padding: [u8; 0],       /* Padding to align the used ring */
//     pub used: RawUsedRing,
// }
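//
// With queue_size = 2, for example (see `test_initialize_virtqueue` below),
// the regions work out to:
//   desc:    offset  0, 2 * 16 = 32 bytes
//   avail:   offset 32, 6 + 2 * 2 = 10 bytes (flags, idx, ring[2], used_event)
//   padding: offset 42, 2 bytes (round up to a 4-byte boundary)
//   used:    offset 44, 8 + 2 * 8 = 24 bytes (flags, idx, ring[2], avail_event)
//   total:   68 bytes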

/// VirtQueue structure
///
/// This structure is a safe wrapper around the raw virtqueue memory.
/// It contains the descriptor table, available ring, and used ring.
///
/// # Fields
///
/// * `desc`: A mutable slice of descriptors (the descriptor table).
/// * `avail`: The available ring.
/// * `used`: The used ring.
/// * `free_descriptors`: Indices of descriptors that are currently free.
/// * `last_used_idx`: The index of the last used-ring entry consumed by the driver.
pub struct VirtQueue<'a> {
    pub desc: &'a mut [Descriptor],
    pub avail: AvailableRing<'a>,
    pub used: UsedRing<'a>,
    pub free_descriptors: Vec<usize>,
    pub last_used_idx: usize,
}

impl<'a> VirtQueue<'a> {
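    /// Create a new virtqueue with `queue_size` descriptors.
    ///
    /// Allocates one contiguous, zeroed memory region holding the descriptor
    /// table, the available ring, padding, and the used ring, then builds the
    /// safe wrappers over each part.
    ///
    /// # Panics
    ///
    /// Panics if the memory allocation fails.
    ///
    /// # Example (illustrative sketch; real queue sizes are negotiated with
    /// the device)
    ///
    /// ```rust,ignore
    /// let mut vq = VirtQueue::new(8);
    /// vq.init();
    /// assert_eq!(vq.desc.len(), 8);
    /// ```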
    pub fn new(queue_size: usize) -> Self {
        /* Calculate the size of each part */
        let desc_size = queue_size * mem::size_of::<Descriptor>();
        let avail_size = mem::size_of::<RawAvailableRing>() + queue_size * mem::size_of::<u16>();
        let used_size = mem::size_of::<RawUsedRing>() + queue_size * mem::size_of::<RawUsedRingEntry>();

        /* Round the sum of desc_size and avail_size up to the nearest multiple of 4 */
        let align_size = (desc_size + avail_size + 3) & !3;
        /* Calculate the size of the padding before the used ring */
        let padding_size = align_size - (desc_size + avail_size);

        /* Make the layout for the virtqueue. The size is the sum of the sizes of the */
        /* descriptor table, available ring, padding, and used ring. The virtio */
        /* specification requires the descriptor table to be 16-byte aligned. */
        let layout = Layout::from_size_align(
            desc_size + avail_size + padding_size + used_size,
            16,
        )
        .unwrap();

        /* Allocate zeroed memory for the virtqueue */
        let ptr = unsafe { alloc_zeroed(layout) };
        if ptr.is_null() {
            panic!("Memory allocation failed");
        }

        /* Create the descriptor table */
        let desc_ptr = ptr as *mut Descriptor;
        let desc = unsafe { core::slice::from_raw_parts_mut(desc_ptr, queue_size) };

        /* Create the available ring, which starts right after the descriptor table */
        let avail_ptr = unsafe { desc_ptr.add(queue_size) as *mut RawAvailableRing };
        let avail = unsafe { AvailableRing::new(queue_size, avail_ptr) };

        /* Create the used ring, which starts after the available ring plus padding */
        let used_ptr = unsafe {
            (avail_ptr as *mut u8)
                .add(mem::size_of::<RawAvailableRing>() + queue_size * mem::size_of::<u16>() + padding_size)
                as *mut RawUsedRing
        };
        let used = unsafe { UsedRing::new(queue_size, used_ptr) };

        /* Initially, every descriptor is free */
        let free_descriptors = (0..queue_size).collect();
        let last_used_idx = 0;

        Self { desc, avail, used, free_descriptors, last_used_idx }
    }

    /// Initialize the virtqueue
    ///
    /// This function initializes the descriptor table, available ring, and used ring.
    /// Each descriptor's `next` field is linked to the following descriptor,
    /// wrapping around at the end of the table.
    pub fn init(&mut self) {
        // Initialize the descriptor table
        let len = self.desc.len();
        for (i, desc) in self.desc.iter_mut().enumerate() {
            desc.addr = 0;
            desc.len = 0;
            desc.flags = 0;
            desc.next = ((i + 1) % len) as u16;
        }

        *(self.avail.flags) = 0;
        *(self.avail.idx) = 0;
        *(self.avail.used_event) = 0;
        *(self.used.flags) = 0;
        *(self.used.idx) = 0;
        *(self.used.avail_event) = 0;
    }

    /// Get the raw pointer to the virtqueue
    ///
    /// This function returns a raw pointer to the start of the virtqueue memory.
    /// It can be used to access the memory directly.
    ///
    /// # Returns
    ///
    /// `*const u8`: A raw pointer to the start of the virtqueue memory.
    pub fn get_raw_ptr(&self) -> *const u8 {
        self.desc.as_ptr() as *const u8
    }

    /// Get the size of the raw virtqueue
    ///
    /// This function returns the size of the virtqueue in bytes: the sum of the
    /// sizes of the descriptor table, available ring, padding, and used ring.
    ///
    /// # Returns
    ///
    /// `usize`: The size of the virtqueue in bytes.
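    ///
    /// # Example (sizes taken from `test_initialize_virtqueue` below)
    ///
    /// ```rust,ignore
    /// let vq = VirtQueue::new(2);
    /// // 2 * 16 (desc) + 10 (avail) + 2 (padding) + 24 (used) = 68
    /// assert_eq!(vq.get_raw_size(), 68);
    /// ```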
    pub fn get_raw_size(&self) -> usize {
        let desc_size = self.desc.len() * mem::size_of::<Descriptor>();
        let avail_size = mem::size_of::<RawAvailableRing>() + self.desc.len() * mem::size_of::<u16>();
        let used_size = mem::size_of::<RawUsedRing>() + self.desc.len() * mem::size_of::<RawUsedRingEntry>();
        let align_size = (desc_size + avail_size + 3) & !3;
        let padding_size = align_size - (desc_size + avail_size);
        desc_size + avail_size + padding_size + used_size
    }

    /// Allocate a descriptor
    ///
    /// This function allocates a descriptor from the free list and resets its fields.
    ///
    /// # Returns
    ///
    /// `Option<usize>`: The index of the allocated descriptor, or `None` if no
    /// descriptors are available.
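    ///
    /// # Example (illustrative sketch; `0x1000` is a hypothetical buffer address)
    ///
    /// ```rust,ignore
    /// let idx = vq.alloc_desc().expect("queue exhausted");
    /// vq.desc[idx].addr = 0x1000;
    /// vq.desc[idx].len = 64;
    /// ```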
    pub fn alloc_desc(&mut self) -> Option<usize> {
        let desc_idx = self.free_descriptors.pop()?;
        self.desc[desc_idx].next = 0;
        self.desc[desc_idx].addr = 0;
        self.desc[desc_idx].len = 0;
        self.desc[desc_idx].flags = 0;
        Some(desc_idx)
    }

    /// Free a descriptor
    ///
    /// This function frees a descriptor and adds it back to the free list.
    ///
    /// # Arguments
    ///
    /// * `desc_idx` - The index of the descriptor to free.
    ///
    /// # Panics
    ///
    /// Panics if `desc_idx` is out of range.
    pub fn free_desc(&mut self, desc_idx: usize) {
        if desc_idx < self.desc.len() {
            self.desc[desc_idx].next = 0;
            self.free_descriptors.push(desc_idx);
        } else {
            panic!("Invalid descriptor index");
        }
    }

    /// Allocate a chain of descriptors
    ///
    /// This function allocates a chain of descriptors of the specified length,
    /// linking them through their `next` fields and setting the `Next` flag on
    /// every descriptor except the last. If the free list runs out part-way,
    /// the partial chain is freed and `None` is returned.
    ///
    /// # Arguments
    ///
    /// * `length` - The length of the chain to allocate.
    ///
    /// # Returns
    ///
    /// `Option<usize>`: The index of the first descriptor in the chain, or
    /// `None` if not enough descriptors are available.
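    ///
    /// # Example (illustrative sketch)
    ///
    /// ```rust,ignore
    /// if let Some(head) = vq.alloc_desc_chain(3) {
    ///     // Walk the chain through the `next` links.
    ///     let mut idx = head;
    ///     loop {
    ///         vq.desc[idx].len = 100;
    ///         if !DescriptorFlag::Next.is_set(vq.desc[idx].flags) {
    ///             break;
    ///         }
    ///         idx = vq.desc[idx].next as usize;
    ///     }
    /// }
    /// ```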
    pub fn alloc_desc_chain(&mut self, length: usize) -> Option<usize> {
        let head_idx = self.alloc_desc()?;
        let mut prev_idx = head_idx;

        for _ in 1..length {
            let next_idx = match self.alloc_desc() {
                Some(idx) => idx,
                None => {
                    /* Not enough descriptors: release what was allocated so far */
                    self.free_desc_chain(head_idx);
                    return None;
                }
            };
            self.desc[prev_idx].next = next_idx as u16;
            self.desc[prev_idx].flags = DescriptorFlag::Next as u16;
            prev_idx = next_idx;
        }

        self.desc[prev_idx].next = 0;
        Some(head_idx)
    }

    /// Free a chain of descriptors
    ///
    /// This function frees a chain of descriptors starting from the given index,
    /// following the `next` links until a descriptor without the `Next` flag is reached.
    ///
    /// # Arguments
    ///
    /// * `desc_idx` - The index of the first descriptor in the chain.
    ///
    pub fn free_desc_chain(&mut self, desc_idx: usize) {
        let mut idx = desc_idx;
        while idx < self.desc.len() {
            /* Read the link and flags before freeing resets them */
            let next = self.desc[idx].next;
            let has_next = DescriptorFlag::Next.is_set(self.desc[idx].flags);
            self.free_desc(idx);

            if !has_next {
                break;
            }
            idx = next as usize;
        }
    }

    /// Check if the virtqueue is busy
    ///
    /// This function checks whether the device has published used entries that
    /// the driver has not consumed yet, by comparing the last used index with
    /// the device's current used index.
    ///
    /// # Returns
    ///
    /// `bool`: `true` if the virtqueue is busy, `false` otherwise.
    pub fn is_busy(&self) -> bool {
        self.last_used_idx != *self.used.idx as usize
    }

    /// Push a descriptor index to the available ring
    ///
    /// This function pushes a descriptor index onto the available ring, making
    /// the buffer visible to the device.
    ///
    /// # Arguments
    ///
    /// * `desc_idx` - The index of the descriptor to push. To push a chain of
    ///   descriptors, pass the index of the first descriptor in the chain.
    ///
    /// # Returns
    ///
    /// `Result<(), &'static str>`: `Ok(())` if the push succeeded, or an error
    /// message if the index was invalid.
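    ///
    /// # Example (illustrative sketch, mirroring `test_push_pop` below)
    ///
    /// ```rust,ignore
    /// let idx = vq.alloc_desc().unwrap();
    /// vq.desc[idx].addr = 0x1000; // hypothetical buffer address
    /// vq.desc[idx].len = 100;
    /// vq.push(idx).unwrap();
    /// ```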
    pub fn push(&mut self, desc_idx: usize) -> Result<(), &'static str> {
        if desc_idx >= self.desc.len() {
            return Err("Invalid descriptor index");
        }

        // Publish the descriptor and advance the available index.
        // Note: this implementation wraps the index at the ring size;
        // notifying the device (e.g. a doorbell write) is left to the caller.
        self.avail.ring[*self.avail.idx as usize] = desc_idx as u16;
        *self.avail.idx = (*self.avail.idx + 1) % self.avail.size as u16;
        Ok(())
    }

    /// Pop a buffer from the used ring
    ///
    /// This function retrieves a buffer from the used ring once the device has
    /// finished processing it. The caller is responsible for freeing the
    /// descriptor when it is done with the buffer.
    ///
    /// # Returns
    ///
    /// `Option<usize>`: The index of the descriptor that was used, or `None`
    /// if no used buffers are available.
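    ///
    /// # Example (illustrative sketch; the device side is simulated by writing
    /// the used ring directly, as the tests below do)
    ///
    /// ```rust,ignore
    /// *vq.used.idx = 1;
    /// vq.used.ring[0].id = idx as u32;
    ///
    /// let done = vq.pop().unwrap();
    /// vq.free_desc(done);
    /// ```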
    pub fn pop(&mut self) -> Option<usize> {
        // Check whether the device has published any new used entries
        if self.last_used_idx == *self.used.idx as usize {
            return None;
        }

        // Calculate the slot in the used ring
        let used_idx = self.last_used_idx % self.desc.len();

        // Retrieve the descriptor index from the used ring
        let desc_idx = self.used.ring[used_idx].id as usize;
        // Advance the last used index
        self.last_used_idx = (self.last_used_idx + 1) % self.used.ring.len();

        Some(desc_idx)
    }
}

/// Descriptor structure
///
/// This structure represents a descriptor in the descriptor table.
/// It contains the buffer address, length, flags, and the index of the next
/// descriptor in a chain. It is mapped directly onto physical memory.
#[repr(C)]
pub struct Descriptor {
    pub addr: u64,
    pub len: u32,
    pub flags: u16,
    pub next: u16,
}

/// Descriptor flags
///
/// This enum represents the flags that can be set on a descriptor:
/// `Next` marks a chained descriptor, `Write` marks a device-writable buffer,
/// and `Indirect` marks an indirect descriptor table.
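///
/// # Example (illustrative sketch)
///
/// ```rust,ignore
/// let mut flags: u16 = 0;
/// DescriptorFlag::Next.set(&mut flags);
/// DescriptorFlag::Write.set(&mut flags);
/// assert!(DescriptorFlag::Next.is_set(flags));
/// DescriptorFlag::Write.clear(&mut flags);
/// assert!(!DescriptorFlag::Write.is_set(flags));
/// ```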
#[derive(Clone, Copy)]
pub enum DescriptorFlag {
    Next = 0x1,
    Write = 0x2,
    Indirect = 0x4,
}

impl DescriptorFlag {
    /// Check if the flag is set
    ///
    /// This method checks if this flag is set in the given flags.
    ///
    /// # Arguments
    ///
    /// * `flags` - The flags to check.
    ///
    /// # Returns
    ///
    /// Returns `true` if the flag is set, `false` otherwise.
    pub fn is_set(&self, flags: u16) -> bool {
        (flags & *self as u16) != 0
    }

    /// Set the flag
    ///
    /// This method sets this flag in the given flags.
    ///
    /// # Arguments
    ///
    /// * `flags` - A mutable reference to the flags to modify.
    pub fn set(&self, flags: &mut u16) {
        (*flags) |= *self as u16;
    }

    /// Clear the flag
    ///
    /// This method clears this flag in the given flags.
    ///
    /// # Arguments
    ///
    /// * `flags` - A mutable reference to the flags to modify.
    pub fn clear(&self, flags: &mut u16) {
        (*flags) &= !(*self as u16);
    }

    /// Toggle the flag
    ///
    /// This method toggles this flag in the given flags.
    ///
    /// # Arguments
    ///
    /// * `flags` - A mutable reference to the flags to modify.
    pub fn toggle(&self, flags: &mut u16) {
        (*flags) ^= *self as u16;
    }
}

/// Raw available ring structure
///
/// This structure represents the raw available ring.
/// It contains the flags, index, ring buffer, and used event.
/// It is mapped directly onto physical memory.
#[repr(C, align(2))]
pub struct RawAvailableRing {
    flags: u16,
    idx: u16,
    ring: [u16; 0], /* Flexible array member */
    used_event: u16, /* Located after the ring in memory */
}

/// Available ring structure
///
/// This structure is a wrapper around the `RawAvailableRing` structure.
/// It provides a safe interface to access the available ring entries.
#[repr(C)]
pub struct AvailableRing<'a> {
    size: usize,
    pub flags: &'a mut u16,
    pub idx: &'a mut u16,
    pub ring: &'a mut [u16],
    pub used_event: &'a mut u16,
}

impl<'a> AvailableRing<'a> {
    /// Create a new `AvailableRing` instance
    ///
    /// This function creates a new `AvailableRing` instance from a raw pointer to a `RawAvailableRing`.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it dereferences raw pointers and assumes that the memory layout is correct.
    /// The caller must ensure that the pointer is valid and points to a properly initialized `RawAvailableRing`,
    /// followed by at least `size` ring entries and the `used_event` field.
    ///
    /// # Arguments
    ///
    /// * `size` - The size of the ring.
    /// * `ptr` - A raw pointer to a `RawAvailableRing`.
    ///
    /// # Returns
    ///
    /// `AvailableRing` - A new `AvailableRing` instance.
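    ///
    /// # Example (illustrative sketch; `VirtQueue::new` performs this as part
    /// of its single allocation)
    ///
    /// ```rust,ignore
    /// let size = 8;
    /// let bytes = mem::size_of::<RawAvailableRing>() + size * mem::size_of::<u16>();
    /// let layout = Layout::from_size_align(bytes, 2).unwrap();
    /// let ptr = unsafe { alloc_zeroed(layout) } as *mut RawAvailableRing;
    /// let ring = unsafe { AvailableRing::new(size, ptr) };
    /// ```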
    pub unsafe fn new(size: usize, ptr: *mut RawAvailableRing) -> Self {
        let flags = unsafe { &mut (*ptr).flags };
        let idx = unsafe { &mut (*ptr).idx };
        let ring = unsafe { core::slice::from_raw_parts_mut((*ptr).ring.as_mut_ptr(), size) };
        /* used_event lives immediately after the last ring entry */
        let used_event = unsafe { &mut *((*ptr).ring.as_mut_ptr().add(size) as *mut u16) };

        Self {
            size,
            flags,
            idx,
            ring,
            used_event,
        }
    }
}

/// Raw used ring structure
///
/// This structure represents the raw used ring.
/// It contains the flags, index, ring buffer, and available event.
/// It is mapped directly onto physical memory.
#[repr(C, align(4))]
pub struct RawUsedRing {
    flags: u16,
    idx: u16,
    ring: [RawUsedRingEntry; 0], /* Flexible array member */
    avail_event: u16, /* Located after the ring in memory */
}

/// Raw used ring entry structure
///
/// This structure represents a single entry in the used ring.
/// It contains the ID and length of the used buffer.
/// It is mapped directly onto physical memory.
#[derive(Clone, Default)]
#[repr(C)]
pub struct RawUsedRingEntry {
    pub id: u32,
    pub len: u32,
}

/// Used ring structure
///
/// This structure is a wrapper around the `RawUsedRing` structure.
/// It provides a safe interface to access the used ring entries.
pub struct UsedRing<'a> {
    pub flags: &'a mut u16,
    pub idx: &'a mut u16,
    pub ring: &'a mut [RawUsedRingEntry],
    pub avail_event: &'a mut u16,
}

impl<'a> UsedRing<'a> {
    /// Create a new `UsedRing` instance
    ///
    /// This function creates a new `UsedRing` instance from a raw pointer to a `RawUsedRing`.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it dereferences raw pointers and assumes that the memory layout is correct.
    /// The caller must ensure that the pointer is valid and points to a properly initialized `RawUsedRing`,
    /// followed by at least `size` ring entries and the `avail_event` field
    /// (see [`AvailableRing::new`] for the analogous construction).
    ///
    /// # Arguments
    ///
    /// * `size` - The size of the ring.
    /// * `ptr` - A raw pointer to a `RawUsedRing`.
    ///
    /// # Returns
    ///
    /// `UsedRing` - A new `UsedRing` instance.
    pub unsafe fn new(size: usize, ptr: *mut RawUsedRing) -> Self {
        let flags = unsafe { &mut (*ptr).flags };
        let idx = unsafe { &mut (*ptr).idx };
        let ring_ptr = unsafe { (*ptr).ring.as_mut_ptr() };
        let ring = unsafe { core::slice::from_raw_parts_mut(ring_ptr, size) };
        /* avail_event lives immediately after the last ring entry */
        let avail_event = unsafe { &mut *(ring_ptr.add(size) as *mut u16) };

        Self {
            flags,
            idx,
            ring,
            avail_event,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test_case]
    fn test_used_ring_flags_update() {
        let mut raw = RawUsedRing {
            flags: 0,
            idx: 0,
            ring: [],
            avail_event: 0,
        };

        let used_ring = unsafe { UsedRing::new(0, &mut raw) };

        // Verify initial values
        assert_eq!(raw.flags, 0);
        assert_eq!(*used_ring.flags, 0);

        // Modify flags
        *used_ring.flags = 42;

        // Verify the modification is reflected
        assert_eq!(raw.flags, 42);
        assert_eq!(*used_ring.flags, 42);
    }

    #[test_case]
    fn test_raw_used_ring_direct_access() {
        let queue_size = 2;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // 1. Write values to the UsedRing via the VirtQueue
        *virtqueue.used.flags = 42;
        *virtqueue.used.idx = 1;
        for i in 0..queue_size {
            virtqueue.used.ring[i].id = i as u32;
            virtqueue.used.ring[i].len = 456;
        }

        // 2. Get a pointer to the RawUsedRing
        let raw_used_ptr = virtqueue.used.flags as *mut u16 as *mut RawUsedRing;

        // 3. Directly access the RawUsedRing and verify the values
        let raw_used = unsafe { &*raw_used_ptr };
        assert_eq!(raw_used.flags, 42, "flags mismatch");
        assert_eq!(raw_used.idx, 1, "idx mismatch");

        // 4. Verify the contents of the ring
        unsafe {
            let used_ring = &mut *virtqueue.used.ring.as_mut_ptr();
            let ring = core::slice::from_raw_parts_mut(used_ring, queue_size);

            for i in 0..queue_size {
                assert_eq!(ring[i].id, i as u32, "ring[{}].id mismatch", i);
                assert_eq!(ring[i].len, 456, "ring[{}].len mismatch", i);
            }
        }
    }

    #[test_case]
    fn test_raw_available_ring_direct_access() {
        let queue_size = 16;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // 1. Write values to the AvailableRing via the VirtQueue
        *virtqueue.avail.flags = 24;
        *virtqueue.avail.idx = 1;
        for i in 0..queue_size {
            virtqueue.avail.ring[i] = i as u16;
        }

        // 2. Get a pointer to the RawAvailableRing
        let raw_avail_ptr = virtqueue.avail.flags as *mut u16 as *mut RawAvailableRing;

        // 3. Directly access the RawAvailableRing and verify the values
        let raw_avail = unsafe { &*raw_avail_ptr };
        assert_eq!(raw_avail.flags, 24, "flags mismatch");
        assert_eq!(raw_avail.idx, 1, "idx mismatch");

        // 4. Verify the contents of the ring
        unsafe {
            let avail_ring = &mut *virtqueue.avail.ring.as_mut_ptr();
            let ring = core::slice::from_raw_parts_mut(avail_ring, queue_size);

            for i in 0..queue_size {
                assert_eq!(ring[i], i as u16, "ring[{}] mismatch", i);
            }
        }
    }

    #[test_case]
    fn test_initialize_virtqueue() {
        let queue_size = 2;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // 2 * 16 (desc) + 10 (avail) + 2 (padding) + 24 (used)
        let total = 68;

        assert_eq!(virtqueue.desc.len(), queue_size);
        assert_eq!(*virtqueue.avail.idx, 0);
        assert_eq!(*virtqueue.used.idx, 0);

        // Check the size of the allocated memory
        let allocated_size = virtqueue.get_raw_size();
        assert_eq!(allocated_size, total);

        // Check the next index of each descriptor
        for i in 0..queue_size {
            assert_eq!(virtqueue.desc[i].next, (i as u16 + 1) % queue_size as u16);
            assert_eq!(virtqueue.avail.ring[i], 0);
            assert_eq!(virtqueue.used.ring[i].len, 0);
        }
    }

    #[test_case]
    fn test_alloc_free_desc() {
        let queue_size = 1;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // Allocate a descriptor
        let desc_idx = virtqueue.alloc_desc().unwrap();
        assert_eq!(desc_idx, 0);

        // Free the descriptor
        virtqueue.free_desc(desc_idx);
        assert_eq!(virtqueue.free_descriptors.len(), 1);
    }

    #[test_case]
    fn test_alloc_free_desc_chain() {
        let queue_size = 2;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // Allocate a chain of descriptors
        let desc_idx = virtqueue.alloc_desc_chain(2).unwrap();

        // Free the chain of descriptors
        virtqueue.free_desc_chain(desc_idx);
        assert_eq!(virtqueue.free_descriptors.len(), 2);
    }

    #[test_case]
    fn test_alloc_desc_chain_too_long() {
        let queue_size = 2;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // Allocate a chain of descriptors that is too long
        let desc_idx = virtqueue.alloc_desc_chain(3);
        assert!(desc_idx.is_none());
    }

    #[test_case]
    fn test_push_pop() {
        let queue_size = 2;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // 1. Allocate and configure a descriptor
        let desc_idx = virtqueue.alloc_desc().unwrap();
        virtqueue.desc[desc_idx].addr = 0x1000;
        virtqueue.desc[desc_idx].len = 100;

        // 2. Push to the queue
        assert!(virtqueue.push(desc_idx).is_ok());

        // 3. Simulate the device processing the buffer
        *virtqueue.used.idx = 1;
        virtqueue.used.ring[0].id = desc_idx as u32;

        // 4. Pop the buffer
        let popped = virtqueue.pop();
        assert!(popped.is_some());
        assert_eq!(popped.unwrap(), desc_idx);

        // 5. Verify no more buffers are available
        assert!(virtqueue.pop().is_none());
    }

    #[test_case]
    fn test_push_pop_chain() {
        let queue_size = 4;
        let mut virtqueue = VirtQueue::new(queue_size);
        virtqueue.init();

        // 1. Allocate a chain of descriptors
        let chain_len = 3;
        let desc_idx = virtqueue.alloc_desc_chain(chain_len).unwrap();

        // 2. Configure the descriptors in the chain
        let mut current_idx = desc_idx;
        for i in 0..chain_len {
            virtqueue.desc[current_idx].addr = 0x1000 + (i * 0x100) as u64;
            virtqueue.desc[current_idx].len = 100;

            // Set appropriate flags (except for the last one)
            if i < chain_len - 1 {
                DescriptorFlag::Next.set(&mut virtqueue.desc[current_idx].flags);
                current_idx = virtqueue.desc[current_idx].next as usize;
            }
        }

        // 3. Push the chain to the queue
        assert!(virtqueue.push(desc_idx).is_ok());

        // 4. Simulate the device processing the chain
        *virtqueue.used.idx = 1;
        virtqueue.used.ring[0].id = desc_idx as u32;
        virtqueue.used.ring[0].len = 300; // Total bytes processed (100 per descriptor)

        // 5. Pop the buffer
        let popped = virtqueue.pop();
        assert!(popped.is_some());
        assert_eq!(popped.unwrap(), desc_idx);

        // 6. Verify the chain is intact
        let mut current_idx = desc_idx;
        for i in 0..chain_len {
            // Check each descriptor in the chain
            assert_eq!(virtqueue.desc[current_idx].addr, 0x1000 + (i * 0x100) as u64);
            assert_eq!(virtqueue.desc[current_idx].len, 100);

            if i < chain_len - 1 {
                assert!(DescriptorFlag::Next.is_set(virtqueue.desc[current_idx].flags));
                current_idx = virtqueue.desc[current_idx].next as usize;
            } else {
                // The last descriptor should not have the NEXT flag
                assert!(!DescriptorFlag::Next.is_set(virtqueue.desc[current_idx].flags));
            }
        }

        // 7. Free the chain after processing
        virtqueue.free_desc_chain(desc_idx);
        assert_eq!(virtqueue.free_descriptors.len(), queue_size);

        // 8. Verify no more buffers are available
        assert!(virtqueue.pop().is_none());
    }
}