diff --git a/Cargo.toml b/Cargo.toml
index 78f872160..c495be5e1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -50,33 +50,13 @@ exclude = [
 ]

 [workspace.lints.rust]
-function_casts_as_integer = "allow"
-mismatched_lifetime_syntaxes = "allow"
 missing_crate_level_docs = "warn"
 unexpected_cfgs = { level = "deny", check-cfg = ['cfg(baseline_asterinas)', 'cfg(ktest)'] }
-unpredictable-function-pointer-comparisons = "allow"
 unsafe_op_in_unsafe_fn = "deny"
-unused_parens = "allow"

 [workspace.lints.clippy]
 allow_attributes = "warn"
 collapsible_match = "allow"
-collapsible-if = "allow"
-derivable_impls = "allow"
-explicit_counter_loop = "allow"
-filter_next = "allow"
-implicit_saturating_sub = "allow"
-iter_kv_map = "allow"
-let_and_return = "allow"
-manual_is_multiple_of = "allow"
-manual_saturating_arithmetic = "allow"
-mem-replace-option-with-some = "allow"
-question_mark = "allow"
-unnecessary_cast = "allow"
-unnecessary_option_map_or_else = "allow"
-unnecessary_sort_by = "allow"
-unnecessary_unwrap = "allow"
-while_let_loop = "allow"

 # TODO(arthurp, #48): Enable once the code can pass it.
 # undocumented_unsafe_blocks = "deny"
diff --git a/kernel/comps/block/src/bio.rs b/kernel/comps/block/src/bio.rs
index 10519a5c9..321f60fee 100644
--- a/kernel/comps/block/src/bio.rs
+++ b/kernel/comps/block/src/bio.rs
@@ -881,7 +881,7 @@ impl From for DmaDirection {

 /// Checks if the given offset is aligned to sector.
 pub fn is_sector_aligned(offset: usize) -> bool {
-    offset % SECTOR_SIZE == 0
+    offset.is_multiple_of(SECTOR_SIZE)
 }

 /// An aligned unsigned integer number.
@@ -918,7 +918,7 @@ pub struct AlignedUsize<const N: u16>(usize);
 impl<const N: u16> AlignedUsize<N> {
     /// Constructs a new instance of aligned integer if the given value is aligned.
     pub fn new(val: usize) -> Option<Self> {
-        if val % (N as usize) == 0 {
+        if val.is_multiple_of(N as usize) {
             Some(Self(val))
         } else {
             None
diff --git a/kernel/comps/block/src/request_queue.rs b/kernel/comps/block/src/request_queue.rs
index eb0530f4b..d57b604d8 100644
--- a/kernel/comps/block/src/request_queue.rs
+++ b/kernel/comps/block/src/request_queue.rs
@@ -62,13 +62,12 @@ impl BioRequestSingleQueue {
         }

         let mut queue = self.queue.lock();
-        if let Some(request) = queue.front_mut() {
-            if request.can_merge(&bio)
-                && request.num_segments() + bio.segments().len() <= self.max_nr_segments_per_bio
-            {
-                request.merge_bio(bio);
-                return Ok(());
-            }
+        if let Some(request) = queue.front_mut()
+            && request.can_merge(&bio)
+            && request.num_segments() + bio.segments().len() <= self.max_nr_segments_per_bio
+        {
+            request.merge_bio(bio);
+            return Ok(());
         }

         let new_request = BioRequest::from(bio);
diff --git a/kernel/comps/framebuffer/src/framebuffer.rs b/kernel/comps/framebuffer/src/framebuffer.rs
index faafcee64..9cb49ff4f 100644
--- a/kernel/comps/framebuffer/src/framebuffer.rs
+++ b/kernel/comps/framebuffer/src/framebuffer.rs
@@ -97,7 +97,7 @@ impl FrameBuffer {
     }

     /// Calculates the offset of a pixel at the specified position.
-    pub fn calc_offset(&self, x: usize, y: usize) -> PixelOffset {
+    pub fn calc_offset(&self, x: usize, y: usize) -> PixelOffset<'_> {
         PixelOffset {
             fb: self,
             offset: ((y * self.width + x) * self.pixel_format.nbytes()) as isize,
diff --git a/kernel/comps/mlsdisk/src/layers/0-bio/block_buf.rs b/kernel/comps/mlsdisk/src/layers/0-bio/block_buf.rs
index 817e75f82..1c7273c11 100644
--- a/kernel/comps/mlsdisk/src/layers/0-bio/block_buf.rs
+++ b/kernel/comps/mlsdisk/src/layers/0-bio/block_buf.rs
@@ -99,7 +99,7 @@ impl<'a> TryFrom<&'a [u8]> for BufRef<'a> {
         if buf.is_empty() {
             return_errno_with_msg!(InvalidArgs, "empty buf in `BufRef::try_from`");
         }
-        if buf.len() % BLOCK_SIZE != 0 {
+        if !buf.len().is_multiple_of(BLOCK_SIZE) {
             return_errno_with_msg!(
                 NotBlockSizeAligned,
                 "buf not block size aligned `BufRef::try_from`"
@@ -154,7 +154,7 @@ impl<'a> TryFrom<&'a mut [u8]> for BufMut<'a> {
         if buf.is_empty() {
             return_errno_with_msg!(InvalidArgs, "empty buf in `BufMut::try_from`");
         }
-        if buf.len() % BLOCK_SIZE != 0 {
+        if !buf.len().is_multiple_of(BLOCK_SIZE) {
             return_errno_with_msg!(
                 NotBlockSizeAligned,
                 "buf not block size aligned `BufMut::try_from`"
diff --git a/kernel/comps/mlsdisk/src/layers/0-bio/block_set.rs b/kernel/comps/mlsdisk/src/layers/0-bio/block_set.rs
index ed0604d65..183b6d41b 100644
--- a/kernel/comps/mlsdisk/src/layers/0-bio/block_set.rs
+++ b/kernel/comps/mlsdisk/src/layers/0-bio/block_set.rs
@@ -62,7 +62,7 @@ pub trait BlockSet: Sync + Send {
         blocks.as_mut_slice()[start_offset..end_offset].copy_from_slice(buf);

         // Maybe we should read the last block partially.
-        if end_offset % BLOCK_SIZE != 0 {
+        if !end_offset.is_multiple_of(BLOCK_SIZE) {
             let mut end_block = Buf::alloc(1)?;
             self.read(end_pos, end_block.as_mut())?;
             blocks.as_mut_slice()[end_offset..]
diff --git a/kernel/comps/mlsdisk/src/layers/1-crypto/crypto_log.rs b/kernel/comps/mlsdisk/src/layers/1-crypto/crypto_log.rs
index 5ae560db9..5cbc46879 100644
--- a/kernel/comps/mlsdisk/src/layers/1-crypto/crypto_log.rs
+++ b/kernel/comps/mlsdisk/src/layers/1-crypto/crypto_log.rs
@@ -263,12 +263,9 @@ impl<L: BlockLog> CryptoLog<L> {
         let data_nodes: Vec<Arc<DataNode>> = buf
             .iter()
             .map(|block_buf| {
-                let data_node = {
-                    let mut node = DataNode::new_uninit();
-                    node.0.copy_from_slice(block_buf.as_slice());
-                    Arc::new(node)
-                };
-                data_node
+                let mut node = DataNode::new_uninit();
+                node.0.copy_from_slice(block_buf.as_slice());
+                Arc::new(node)
             })
             .collect();

@@ -489,9 +486,8 @@ impl<L: BlockLog> MhtStorage<L> {
         let num_append = nodes.len();
         let mut node_entries = Vec::with_capacity(num_append);
         let mut cipher_buf = Buf::alloc(num_append)?;
-        let mut pos = self.block_log.nblocks() as BlockId;
-        let start_pos = pos;
-        for (i, node) in nodes.iter().enumerate() {
+        let start_pos = self.block_log.nblocks() as BlockId;
+        for (pos, (i, node)) in (start_pos..).zip(nodes.iter().enumerate()) {
             let plain = node.as_bytes();
             let cipher = &mut cipher_buf.as_mut_slice()[i * BLOCK_SIZE..(i + 1) * BLOCK_SIZE];
             let key = Key::random();
@@ -499,7 +495,6 @@
             node_entries.push(MhtNodeEntry { pos, key, mac });

             self.node_cache.put(pos, node.clone());
-            pos += 1;
         }

         let append_pos = self.block_log.append(cipher_buf.as_ref())?;
@@ -515,15 +510,13 @@
         }

         let mut cipher_buf = Buf::alloc(num_append)?;
-        let mut pos = self.block_log.nblocks() as BlockId;
-        let start_pos = pos;
-        for (i, node) in nodes.iter().enumerate() {
+        let start_pos = self.block_log.nblocks() as BlockId;
+        for (pos, (i, node)) in (start_pos..).zip(nodes.iter().enumerate()) {
             let cipher = &mut cipher_buf.as_mut_slice()[i * BLOCK_SIZE..(i + 1) * BLOCK_SIZE];
             let key = Key::random();
             let mac = Aead::new().encrypt(&node.0, &key, &Iv::new_zeroed(), &[], cipher)?;

             node_entries.push(MhtNodeEntry { pos, key, mac });
-            pos += 1;
         }

         let append_pos = self.block_log.append(cipher_buf.as_ref())?;
@@ -607,7 +600,9 @@ impl MhtNode {
     }

     pub fn num_complete_children(&self) -> usize {
-        if self.num_data_nodes() % MHT_NBRANCHES == 0 || Self::is_lowest_level(self.height()) {
+        if self.num_data_nodes().is_multiple_of(MHT_NBRANCHES)
+            || Self::is_lowest_level(self.height())
+        {
             self.num_valid_entries()
         } else {
             self.num_valid_entries() - 1
diff --git a/kernel/comps/mlsdisk/src/layers/4-lsm/mem_table.rs b/kernel/comps/mlsdisk/src/layers/4-lsm/mem_table.rs
index 254e9ca40..ec5b78893 100644
--- a/kernel/comps/mlsdisk/src/layers/4-lsm/mem_table.rs
+++ b/kernel/comps/mlsdisk/src/layers/4-lsm/mem_table.rs
@@ -136,7 +136,7 @@ impl<K: RecordKey<K>, V: RecordValue> MemTableManager<K, V> {
     }

     /// Gets the immutable `MemTable` instance (read-only).
-    pub fn immutable_memtable(&self) -> RwLockReadGuard<MemTable<K, V>> {
+    pub fn immutable_memtable(&self) -> RwLockReadGuard<'_, MemTable<K, V>> {
         self.immutable.read()
     }
 }
diff --git a/kernel/comps/mlsdisk/src/layers/4-lsm/wal.rs b/kernel/comps/mlsdisk/src/layers/4-lsm/wal.rs
index ba24087bd..499fb4af1 100644
--- a/kernel/comps/mlsdisk/src/layers/4-lsm/wal.rs
+++ b/kernel/comps/mlsdisk/src/layers/4-lsm/wal.rs
@@ -145,7 +145,7 @@ impl WalAppendTx {
         wal_tx: &CurrentTx<'_>,
         log: &Arc<TxLog<D>>,
     ) -> Result<()> {
-        debug_assert!(!record_buf.is_empty() && record_buf.len() % BLOCK_SIZE == 0);
+        debug_assert!(!record_buf.is_empty() && record_buf.len().is_multiple_of(BLOCK_SIZE));
         let res = wal_tx.context(|| {
             let buf = BufRef::try_from(record_buf).unwrap();
             log.append(buf)
diff --git a/kernel/comps/mlsdisk/src/layers/5-disk/bio.rs b/kernel/comps/mlsdisk/src/layers/5-disk/bio.rs
index c8675f393..4b771f806 100644
--- a/kernel/comps/mlsdisk/src/layers/5-disk/bio.rs
+++ b/kernel/comps/mlsdisk/src/layers/5-disk/bio.rs
@@ -159,7 +159,7 @@ impl BioReq {
     /// or accessed by block devices and their users. Each of the extension objects
     /// must have a different type. To avoid conflicts, it is recommended to use only
     /// private types for the extension objects.
-    pub fn ext(&self) -> MutexGuard<HashMap<TypeId, Arc<dyn Any + Send + Sync>>> {
+    pub fn ext(&self) -> MutexGuard<'_, HashMap<TypeId, Arc<dyn Any + Send + Sync>>> {
         self.ext.lock()
     }
diff --git a/kernel/comps/mlsdisk/src/layers/5-disk/mlsdisk.rs b/kernel/comps/mlsdisk/src/layers/5-disk/mlsdisk.rs
index 88814b856..172572793 100644
--- a/kernel/comps/mlsdisk/src/layers/5-disk/mlsdisk.rs
+++ b/kernel/comps/mlsdisk/src/layers/5-disk/mlsdisk.rs
@@ -126,7 +126,7 @@ impl aster_block::BlockDevice for MlsDisk {
         }

         // Read the last unaligned block.
-        if end_offset % BLOCK_SIZE != 0 {
+        if !end_offset.is_multiple_of(BLOCK_SIZE) {
             let offset = buf.as_slice().len() - BLOCK_SIZE;
             let buf_mut = BufMut::try_from(&mut buf.as_mut_slice()[offset..]).unwrap();
             if self.read(end_lba - 1, buf_mut).is_err() {
@@ -434,7 +434,7 @@ impl DiskInner {
         let mut res = range_query_ctx.into_results();

         let record_batches = {
-            res.sort_by(|(_, v1), (_, v2)| v1.hba.cmp(&v2.hba));
+            res.sort_by_key(|(_, v1)| v1.hba);
             res.chunk_by(|(_, v1), (_, v2)| v2.hba - v1.hba == 1)
         };
diff --git a/kernel/comps/mlsdisk/src/util/bitmap.rs b/kernel/comps/mlsdisk/src/util/bitmap.rs
index 3ec513c53..1fe9ac0c2 100644
--- a/kernel/comps/mlsdisk/src/util/bitmap.rs
+++ b/kernel/comps/mlsdisk/src/util/bitmap.rs
@@ -32,7 +32,7 @@ impl BitMap {
         }

         // Set the unused bits in the last u64 with zero.
-        if nbits % 64 != 0 {
+        if !nbits.is_multiple_of(64) {
             bits[vec_len - 1]
                 .iter_ones()
                 .filter(|index| (*index as usize) >= nbits % 64)
diff --git a/kernel/comps/systree/src/node.rs b/kernel/comps/systree/src/node.rs
index 0680caa78..e50628f6c 100644
--- a/kernel/comps/systree/src/node.rs
+++ b/kernel/comps/systree/src/node.rs
@@ -76,7 +76,7 @@ pub trait SysBranchNode: SysNode {
     fn visit_children_with(
         &self,
         min_id: u64,
-        f: &mut dyn for<'a> FnMut(&'a Arc<(dyn SysObj)>) -> Option<()>,
+        f: &mut dyn for<'a> FnMut(&'a Arc<dyn SysObj>) -> Option<()>,
     );

     /// Returns a child with a specified name.
diff --git a/kernel/comps/time/src/tsc.rs b/kernel/comps/time/src/tsc.rs
index be7141c26..d8fb450e0 100644
--- a/kernel/comps/time/src/tsc.rs
+++ b/kernel/comps/time/src/tsc.rs
@@ -79,7 +79,7 @@ fn init_timer() {

     let update = move || {
         let counter = TSC_UPDATE_COUNTER.fetch_add(1, Ordering::Relaxed);
-        if counter % delay_counts == 0 {
+        if counter.is_multiple_of(delay_counts) {
             update_clocksource();
         }
     };
diff --git a/kernel/libs/aster-bigtcp/src/iface/common.rs b/kernel/libs/aster-bigtcp/src/iface/common.rs
index e300f624a..8fbbee9a0 100644
--- a/kernel/libs/aster-bigtcp/src/iface/common.rs
+++ b/kernel/libs/aster-bigtcp/src/iface/common.rs
@@ -171,10 +171,10 @@ impl<E: Ext> IfaceCommon<E> {
     /// Releases the port so that it can be used again (if it is not being reused).
     fn release_port(&self, port: u16) {
         let mut used_ports = self.used_ports.lock();
-        if let Some(used_times) = used_ports.remove(&port) {
-            if used_times != 1 {
-                used_ports.insert(port, used_times - 1);
-            }
+        if let Some(used_times) = used_ports.remove(&port)
+            && used_times != 1
+        {
+            used_ports.insert(port, used_times - 1);
         }
     }
 }
diff --git a/kernel/libs/aster-bigtcp/src/iface/poll.rs b/kernel/libs/aster-bigtcp/src/iface/poll.rs
index e66327aed..6ac908b10 100644
--- a/kernel/libs/aster-bigtcp/src/iface/poll.rs
+++ b/kernel/libs/aster-bigtcp/src/iface/poll.rs
@@ -538,14 +538,14 @@ impl<E: Ext> PollContext<'_, E> {
             }));
         });

-        if let Some((ip_repr, ip_payload)) = deferred {
-            if let Some(reply) = self.parse_and_process_udp(
+        if let Some((ip_repr, ip_payload)) = deferred
+            && let Some(reply) = self.parse_and_process_udp(
                 &ip_repr,
                 &ip_payload,
                 &ChecksumCapabilities::ignored(),
-            ) {
-                dispatch_phy(&reply, self.iface.context_mut(), tx_token.take().unwrap());
-            }
+            )
+        {
+            dispatch_phy(&reply, self.iface.context_mut(), tx_token.take().unwrap());
         }

         if tx_token.is_none() {
diff --git a/kernel/libs/aster-bigtcp/src/iface/poll_iface.rs b/kernel/libs/aster-bigtcp/src/iface/poll_iface.rs
index ea4184ef8..ff0761025 100644
--- a/kernel/libs/aster-bigtcp/src/iface/poll_iface.rs
+++ b/kernel/libs/aster-bigtcp/src/iface/poll_iface.rs
@@ -28,7 +28,7 @@ impl<E: Ext> PollableIface<E> {
         }
     }

-    pub(super) fn as_mut(&mut self) -> PollableIfaceMut<E> {
+    pub(super) fn as_mut(&'_ mut self) -> PollableIfaceMut<'_, E> {
         PollableIfaceMut {
             context: self.interface.context(),
             pending_conns: &mut self.pending_conns,
diff --git a/kernel/libs/aster-bigtcp/src/socket/bound/tcp_conn.rs b/kernel/libs/aster-bigtcp/src/socket/bound/tcp_conn.rs
index 9b0ee1bb2..8a1788eb5 100644
--- a/kernel/libs/aster-bigtcp/src/socket/bound/tcp_conn.rs
+++ b/kernel/libs/aster-bigtcp/src/socket/bound/tcp_conn.rs
@@ -244,7 +244,7 @@ impl<E: Ext> TcpConnectionInner<E> {
         }
     }

-    pub(super) fn lock(&self) -> SpinLockGuard<RawTcpSocketExt<E>, BottomHalfDisabled> {
+    pub(super) fn lock(&self) -> SpinLockGuard<'_, RawTcpSocketExt<E>, BottomHalfDisabled> {
         self.socket.lock()
     }
 }
diff --git a/kernel/libs/aster-bigtcp/src/socket/bound/udp.rs b/kernel/libs/aster-bigtcp/src/socket/bound/udp.rs
index 47da2122c..657fb4e5f 100644
--- a/kernel/libs/aster-bigtcp/src/socket/bound/udp.rs
+++ b/kernel/libs/aster-bigtcp/src/socket/bound/udp.rs
@@ -153,10 +153,7 @@ impl<E: Ext> UdpSocket<E> {
             return Err(SendError::TooLarge);
         }

-        let buffer = match socket.send(size, meta) {
-            Ok(data) => data,
-            Err(err) => return Err(err.into()),
-        };
+        let buffer = socket.send(size, meta)?;
         let result = f(buffer);

         self.0
diff --git a/kernel/libs/aster-util/src/safe_ptr.rs b/kernel/libs/aster-util/src/safe_ptr.rs
index 889ae3d20..11626ae47 100644
--- a/kernel/libs/aster-util/src/safe_ptr.rs
+++ b/kernel/libs/aster-util/src/safe_ptr.rs
@@ -247,7 +247,7 @@ impl SafePtr> {

 // =============== Address-related methods ==============
 impl SafePtr {
     pub const fn is_aligned(&self) -> bool {
-        self.offset % core::mem::align_of::<T>() == 0
+        self.offset.is_multiple_of(core::mem::align_of::<T>())
     }

     /// Increase the address in units of bytes occupied by the generic T.
diff --git a/kernel/libs/cpio-decoder/src/lib.rs b/kernel/libs/cpio-decoder/src/lib.rs
index f958a3db7..628431204 100644
--- a/kernel/libs/cpio-decoder/src/lib.rs
+++ b/kernel/libs/cpio-decoder/src/lib.rs
@@ -281,7 +281,7 @@ impl FileMetadata {

 /// The type of the file.
 #[repr(u32)]
-#[derive(Copy, Clone, Debug, Eq, PartialEq, TryFromInt)]
+#[derive(Copy, Clone, Debug, Eq, PartialEq, TryFromInt, Default)]
 pub enum FileType {
     /// FIFO special file
     FiFo = 0o010000,
@@ -292,6 +292,7 @@ pub enum FileType {
     /// Block device
     Block = 0o060000,
     /// Regular file
+    #[default]
     File = 0o100000,
     /// Symbolic link
     Link = 0o120000,
@@ -299,12 +300,6 @@ pub enum FileType {
     Socket = 0o140000,
 }

-impl Default for FileType {
-    fn default() -> Self {
-        Self::File
-    }
-}
-
 const MAGIC: &[u8] = b"070701";
 const TRAILER_NAME: &str = "TRAILER!!!";
diff --git a/kernel/libs/typeflags/src/type_flag.rs b/kernel/libs/typeflags/src/type_flag.rs
index 400c660de..89114559c 100644
--- a/kernel/libs/typeflags/src/type_flag.rs
+++ b/kernel/libs/typeflags/src/type_flag.rs
@@ -96,7 +96,7 @@ impl TypeFlagDef {
     }

     /// return the items iter
-    pub fn items_iter(&self) -> syn::punctuated::Iter<TypeFlagItem> {
+    pub fn items_iter(&self) -> syn::punctuated::Iter<'_, TypeFlagItem> {
         self.items.iter()
     }
diff --git a/kernel/libs/xarray/src/cursor.rs b/kernel/libs/xarray/src/cursor.rs
index 4112b3445..523304a14 100644
--- a/kernel/libs/xarray/src/cursor.rs
+++ b/kernel/libs/xarray/src/cursor.rs
@@ -26,10 +26,12 @@ use crate::{
 /// A cursor never ends up on an interior node. In other words, when methods
 /// of `Cursor` or `CursorMut` finish, the cursor will either not positioned on any node
 /// or positioned on some leaf node.
+#[derive(Default)]
 enum CursorState<'a, P>
 where
     P: NonNullPtr + Send + Sync,
 {
+    #[default]
     Inactive,
     AtNode {
         node: NodeEntryRef<'a, P>,
@@ -37,12 +39,6 @@ where
     },
 }

-impl<P: NonNullPtr + Send + Sync> Default for CursorState<'_, P> {
-    fn default() -> Self {
-        Self::Inactive
-    }
-}
-
 impl<'a, P: NonNullPtr + Send + Sync> CursorState<'a, P> {
     fn move_to(&mut self, node: NodeEntryRef<'a, P>, index: u64) {
         let operation_offset = node.entry_offset(index);
@@ -271,7 +267,7 @@ impl<'a, P: NonNullPtr + Send + Sync, M> CursorMut<'a, P, M> {
     }

     /// Returns an `XLockGuard` that marks the `XArray` is locked.
-    fn lock_guard(&self) -> XLockGuard {
+    fn lock_guard(&self) -> XLockGuard<'_> {
         // Having a `CursorMut` means that the `XArray` is locked.
         XLockGuard(self.guard)
     }
diff --git a/kernel/libs/xarray/src/lib.rs b/kernel/libs/xarray/src/lib.rs
index 9f39c6dad..f62d48d51 100644
--- a/kernel/libs/xarray/src/lib.rs
+++ b/kernel/libs/xarray/src/lib.rs
@@ -133,7 +133,7 @@ impl<P: NonNullPtr + Send + Sync, M> XArray<P, M> {
     }

     /// Acquires the lock to perform mutable operations.
-    pub fn lock(&self) -> LockedXArray<P, M> {
+    pub fn lock(&self) -> LockedXArray<'_, P, M> {
         LockedXArray {
             xa: self,
             guard: self.xlock.lock(),
@@ -142,7 +142,7 @@ impl<P: NonNullPtr + Send + Sync, M> XArray<P, M> {
     }

     /// Acquires the lock with local IRQs disabled to perform mutable operations.
-    pub fn lock_irq_disabled(&self) -> LockedXArray<P, M, LocalIrqDisabled> {
+    pub fn lock_irq_disabled(&self) -> LockedXArray<'_, P, M, LocalIrqDisabled> {
         LockedXArray {
             xa: self,
             guard: self.xlock.disable_irq().lock(),
diff --git a/kernel/src/context.rs b/kernel/src/context.rs
index 50abe1c8b..3a21d1a2f 100644
--- a/kernel/src/context.rs
+++ b/kernel/src/context.rs
@@ -33,7 +33,7 @@ pub struct Context<'a> {
 impl Context<'_> {
     /// Gets the userspace of the current task.
-    pub fn user_space(&self) -> CurrentUserSpace {
+    pub fn user_space(&self) -> CurrentUserSpace<'_> {
         CurrentUserSpace(self.thread_local.root_vmar().borrow())
     }
 }
diff --git a/kernel/src/fs/epoll/entry.rs b/kernel/src/fs/epoll/entry.rs
index 15e56b682..088ef87cc 100644
--- a/kernel/src/fs/epoll/entry.rs
+++ b/kernel/src/fs/epoll/entry.rs
@@ -318,7 +318,7 @@ impl ReadySet {
         self.pollee.notify(IoEvents::IN);
     }

-    pub(super) fn lock_pop(&self) -> ReadySetPopIter {
+    pub(super) fn lock_pop(&self) -> ReadySetPopIter<'_> {
         ReadySetPopIter {
             ready_set: self,
             _pop_guard: self.pop_guard.lock(),
diff --git a/kernel/src/fs/exfat/dentry.rs b/kernel/src/fs/exfat/dentry.rs
index 71f9bd4de..0ccef55e1 100644
--- a/kernel/src/fs/exfat/dentry.rs
+++ b/kernel/src/fs/exfat/dentry.rs
@@ -450,11 +450,11 @@ pub(super) struct ExfatDentryIterator {
 impl ExfatDentryIterator {
     pub fn new(page_cache: Vmo<Full>, offset: usize, size: Option<usize>) -> Result<Self> {
-        if size.is_some() && size.unwrap() % DENTRY_SIZE != 0 {
+        if size.is_some() && !size.unwrap().is_multiple_of(DENTRY_SIZE) {
             return_errno_with_message!(Errno::EINVAL, "remaining size unaligned to dentry size")
         }

-        if offset % DENTRY_SIZE != 0 {
+        if !offset.is_multiple_of(DENTRY_SIZE) {
             return_errno_with_message!(Errno::EINVAL, "dentry offset unaligned to dentry size")
         }
diff --git a/kernel/src/fs/exfat/fs.rs b/kernel/src/fs/exfat/fs.rs
index 79ec6207c..1b970b4ae 100644
--- a/kernel/src/fs/exfat/fs.rs
+++ b/kernel/src/fs/exfat/fs.rs
@@ -343,7 +343,7 @@ impl ExfatFS {
         self.super_block.cluster_size as usize * self.super_block.num_clusters as usize
     }

-    pub(super) fn lock(&self) -> MutexGuard<()> {
+    pub(super) fn lock(&self) -> MutexGuard<'_, ()> {
         self.mutex.lock()
     }
diff --git a/kernel/src/fs/exfat/inode.rs b/kernel/src/fs/exfat/inode.rs
index 2c29132e5..dad113132 100644
--- a/kernel/src/fs/exfat/inode.rs
+++ b/kernel/src/fs/exfat/inode.rs
@@ -1172,7 +1172,7 @@ impl DirentVisitor for EmptyVisitor {
     }
 }

 fn is_block_aligned(off: usize) -> bool {
-    off % PAGE_SIZE == 0
+    off.is_multiple_of(PAGE_SIZE)
 }

 fn check_corner_cases_for_rename(
@@ -1589,7 +1589,7 @@ impl Inode for ExfatInode {
         }

         // Skip . and ..
-        let dir_to_skip = if dir_cnt >= 2 { dir_cnt - 2 } else { 0 };
+        let dir_to_skip = dir_cnt.saturating_sub(2);

         // Skip previous directories.
         let (off, _) = inner.visit_sub_inodes(0, dir_to_skip, &mut empty_visitor, &fs_guard)?;
diff --git a/kernel/src/fs/ext2/fs.rs b/kernel/src/fs/ext2/fs.rs
index e1c2ae7b8..6eb995c58 100644
--- a/kernel/src/fs/ext2/fs.rs
+++ b/kernel/src/fs/ext2/fs.rs
@@ -127,7 +127,7 @@ impl Ext2 {
     }

     /// Returns the super block.
-    pub fn super_block(&self) -> RwMutexReadGuard<Dirty<SuperBlock>> {
+    pub fn super_block(&self) -> RwMutexReadGuard<'_, Dirty<SuperBlock>> {
         self.super_block.read()
     }
diff --git a/kernel/src/fs/ext2/inode.rs b/kernel/src/fs/ext2/inode.rs
index 2c40c9f51..5c95145a4 100644
--- a/kernel/src/fs/ext2/inode.rs
+++ b/kernel/src/fs/ext2/inode.rs
@@ -2502,5 +2502,5 @@ pub(super) struct Osd2 {
 }

 fn is_block_aligned(offset: usize) -> bool {
-    offset % BLOCK_SIZE == 0
+    offset.is_multiple_of(BLOCK_SIZE)
 }
diff --git a/kernel/src/fs/ext2/super_block.rs b/kernel/src/fs/ext2/super_block.rs
index d6859aff2..40fb6213d 100644
--- a/kernel/src/fs/ext2/super_block.rs
+++ b/kernel/src/fs/ext2/super_block.rs
@@ -382,9 +382,10 @@ pub enum FsState {
 }

 #[repr(u16)]
-#[derive(Copy, Clone, Debug, Eq, PartialEq, TryFromInt)]
+#[derive(Copy, Clone, Debug, Eq, PartialEq, TryFromInt, Default)]
 pub enum ErrorsBehaviour {
     /// Continue execution
+    #[default]
     Continue = 1,
     // Remount fs read-only
     RemountReadonly = 2,
@@ -392,12 +393,6 @@ pub enum ErrorsBehaviour {
     Panic = 3,
 }

-impl Default for ErrorsBehaviour {
-    fn default() -> Self {
-        Self::Continue
-    }
-}
-
 #[repr(u32)]
 #[derive(Copy, Clone, Debug, Eq, PartialEq, TryFromInt)]
 pub enum OsId {
diff --git a/kernel/src/fs/file_table.rs b/kernel/src/fs/file_table.rs
index 4b6c05d99..87af2931a 100644
--- a/kernel/src/fs/file_table.rs
+++ b/kernel/src/fs/file_table.rs
@@ -112,10 +112,10 @@ impl FileTable {
     ) -> Option<Arc<dyn FileLike>> {
         let entry = FileTableEntry::new(item, flags);
         let entry = self.table.put_at(fd as usize, entry);
-        if entry.is_some() {
+        if let Some(entry) = &entry {
             let events = FdEvents::Close(fd);
             self.notify_fd_events(&events);
-            entry.as_ref().unwrap().notify_fd_events(&events);
+            entry.notify_fd_events(&events);
         }
         entry.map(|e| e.file)
     }
diff --git a/kernel/src/fs/fs_resolver.rs b/kernel/src/fs/fs_resolver.rs
index 2fa35fe5c..06c480eef 100644
--- a/kernel/src/fs/fs_resolver.rs
+++ b/kernel/src/fs/fs_resolver.rs
@@ -509,8 +509,7 @@ impl<'a> TryFrom<&'a str> for FsPath<'a> {
 pub fn split_path(path: &str) -> (&str, &str) {
     let file_name = path
         .split_inclusive('/')
-        .filter(|&x| x != "/")
-        .next_back()
+        .rfind(|&x| x != "/")
         .unwrap_or(".");

     let mut split = path.trim_end_matches('/').rsplitn(2, '/');
diff --git a/kernel/src/fs/inode_handle/mod.rs b/kernel/src/fs/inode_handle/mod.rs
index 90e535f11..29a507b0d 100644
--- a/kernel/src/fs/inode_handle/mod.rs
+++ b/kernel/src/fs/inode_handle/mod.rs
@@ -269,10 +269,10 @@ impl InodeHandle_ {
     }

     fn unlock_range_lock(&self, lock: &RangeLockItem) {
-        if let Some(extension) = self.dentry.inode().extension() {
-            if let Some(range_lock_list) = extension.get::<RangeLockList>() {
-                range_lock_list.unlock(lock);
-            }
+        if let Some(extension) = self.dentry.inode().extension()
+            && let Some(range_lock_list) = extension.get::<RangeLockList>()
+        {
+            range_lock_list.unlock(lock);
         }
     }

@@ -310,10 +310,10 @@ impl InodeHandle_ {
     }

     fn unlock_flock(&self, req_owner: &InodeHandle) {
-        if let Some(extension) = self.dentry.inode().extension() {
-            if let Some(flock_list) = extension.get::<FlockList>() {
-                flock_list.unlock(req_owner);
-            }
+        if let Some(extension) = self.dentry.inode().extension()
+            && let Some(flock_list) = extension.get::<FlockList>()
+        {
+            flock_list.unlock(req_owner);
         }
     }
 }
diff --git a/kernel/src/fs/path/dentry.rs b/kernel/src/fs/path/dentry.rs
index aa5a80e98..58687f9d9 100644
--- a/kernel/src/fs/path/dentry.rs
+++ b/kernel/src/fs/path/dentry.rs
@@ -473,10 +473,10 @@ impl DentryChildren {
     /// Checks whether the dentry is a mount point. Returns an error if it is.
     pub fn check_mountpoint(&self, name: &str) -> Result<()> {
-        if let Some(Some(dentry)) = self.dentries.get(name) {
-            if dentry.is_mountpoint() {
-                return_errno_with_message!(Errno::EBUSY, "dentry is mountpint");
-            }
+        if let Some(Some(dentry)) = self.dentries.get(name)
+            && dentry.is_mountpoint()
+        {
+            return_errno_with_message!(Errno::EBUSY, "dentry is mountpint");
         }
         Ok(())
     }
diff --git a/kernel/src/fs/pipe.rs b/kernel/src/fs/pipe.rs
index ef7359407..dc087ad8f 100644
--- a/kernel/src/fs/pipe.rs
+++ b/kernel/src/fs/pipe.rs
@@ -415,11 +415,11 @@ mod test {
         );
     }

-    fn reader_from(buf: &[u8]) -> VmReader {
+    fn reader_from(buf: &'_ [u8]) -> VmReader<'_> {
         VmReader::from(buf).to_fallible()
     }

-    fn writer_from(buf: &mut [u8]) -> VmWriter {
+    fn writer_from(buf: &'_ mut [u8]) -> VmWriter<'_> {
         VmWriter::from(buf).to_fallible()
     }
 }
diff --git a/kernel/src/fs/procfs/template/builder.rs b/kernel/src/fs/procfs/template/builder.rs
index 9bda8241d..c71cfaee6 100644
--- a/kernel/src/fs/procfs/template/builder.rs
+++ b/kernel/src/fs/procfs/template/builder.rs
@@ -185,10 +185,10 @@ impl OptionalBuilder {
         // The volatile property is inherited from parent.
         let is_volatile = {
             let mut is_volatile = self.is_volatile;
-            if let Some(parent) = self.parent.as_ref() {
-                if !parent.upgrade().unwrap().is_dentry_cacheable() {
-                    is_volatile = true;
-                }
+            if let Some(parent) = self.parent.as_ref()
+                && !parent.upgrade().unwrap().is_dentry_cacheable()
+            {
+                is_volatile = true;
             }
             is_volatile
         };
diff --git a/kernel/src/fs/rootfs.rs b/kernel/src/fs/rootfs.rs
index 3c0fa41d8..995d25984 100644
--- a/kernel/src/fs/rootfs.rs
+++ b/kernel/src/fs/rootfs.rs
@@ -58,11 +58,7 @@ pub fn init(initramfs_buf: &[u8]) -> Result<()> {
     let mut decoder = CpioDecoder::new(reader);
     let fs = FsResolver::new();

-    loop {
-        let Some(entry_result) = decoder.next() else {
-            break;
-        };
-
+    while let Some(entry_result) = decoder.next() {
         let mut entry = entry_result?;

         // Make sure the name is a relative path, and is not end with "/".
diff --git a/kernel/src/fs/utils/inode.rs b/kernel/src/fs/utils/inode.rs
index 2a2ede448..5e71a5a52 100644
--- a/kernel/src/fs/utils/inode.rs
+++ b/kernel/src/fs/utils/inode.rs
@@ -569,7 +569,7 @@ impl dyn Inode {
         (self as &dyn Any).downcast_ref::<T>()
     }

-    pub fn writer(&self, from_offset: usize) -> InodeWriter {
+    pub fn writer(&self, from_offset: usize) -> InodeWriter<'_> {
         InodeWriter {
             inner: self,
             offset: from_offset,
diff --git a/kernel/src/fs/utils/page_cache.rs b/kernel/src/fs/utils/page_cache.rs
index 0cf46e3d0..5c46a523c 100644
--- a/kernel/src/fs/utils/page_cache.rs
+++ b/kernel/src/fs/utils/page_cache.rs
@@ -161,7 +161,7 @@ impl PageCache {
         // first zero the gap between the new size and the
         // next page boundary (or the old size), if such a gap exists.
         let old_size = self.pages.size();
-        if old_size > new_size && new_size % PAGE_SIZE != 0 {
+        if old_size > new_size && !new_size.is_multiple_of(PAGE_SIZE) {
             let gap_size = old_size.min(new_size.align_up(PAGE_SIZE)) - new_size;
             if gap_size > 0 {
                 self.fill_zeros(new_size..new_size + gap_size)?;
@@ -602,18 +602,19 @@ impl PageCacheManager {
         let backend = self.backend()?;
         let backend_npages = backend.npages()?;
         for idx in page_idx_range.start..page_idx_range.end {
-            if let Some(page) = pages.peek(&idx) {
-                if page.load_state() == PageState::Dirty && idx < backend_npages {
-                    let (reply_handle, reply_consumer) = ReplyQueue::new_pair()?;
-                    backend.write_page_async(AsyncWriteRequest {
-                        handle: PageHandle {
-                            idx,
-                            frame: page.clone(),
-                        },
-                        reply_handle: Some(reply_handle),
-                    })?;
-                    consumers.push(reply_consumer);
-                }
+            if let Some(page) = pages.peek(&idx)
+                && page.load_state() == PageState::Dirty
+                && idx < backend_npages
+            {
+                let (reply_handle, reply_consumer) = ReplyQueue::new_pair()?;
+                backend.write_page_async(AsyncWriteRequest {
+                    handle: PageHandle {
+                        idx,
+                        frame: page.clone(),
+                    },
+                    reply_handle: Some(reply_handle),
+                })?;
+                consumers.push(reply_consumer);
             }
         }

@@ -753,14 +754,14 @@ impl Pager for PageCacheManager {
     fn decommit_page(&self, idx: usize) -> Result<()> {
         let page_result = self.inner.lock().pages.pop(&idx);

-        if let Some(page) = page_result {
-            if let PageState::Dirty = page.load_state() {
-                let Some(backend) = self.backend.upgrade() else {
-                    return Ok(());
-                };
-                if idx < backend.npages()? {
-                    backend.write_page(PageHandle { idx, frame: page })?;
-                }
+        if let Some(page) = page_result
+            && let PageState::Dirty = page.load_state()
+        {
+            let Some(backend) = self.backend.upgrade() else {
+                return Ok(());
+            };
+            if idx < backend.npages()? {
+                backend.write_page(PageHandle { idx, frame: page })?;
             }
         }
diff --git a/kernel/src/fs/utils/page_cache_baseline.rs b/kernel/src/fs/utils/page_cache_baseline.rs
index 09b3a84ad..c33a660ab 100644
--- a/kernel/src/fs/utils/page_cache_baseline.rs
+++ b/kernel/src/fs/utils/page_cache_baseline.rs
@@ -85,7 +85,7 @@ impl PageCache {
         // first zero the gap between the new size and the
         // next page boundary (or the old size), if such a gap exists.
         let old_size = self.pages.size();
-        if old_size > new_size && new_size % PAGE_SIZE != 0 {
+        if old_size > new_size && !new_size.is_multiple_of(PAGE_SIZE) {
             let gap_size = old_size.min(new_size.align_up(PAGE_SIZE)) - new_size;
             if gap_size > 0 {
                 self.fill_zeros(new_size..new_size + gap_size)?;
@@ -370,11 +370,12 @@ impl PageCacheManager {
         let backend = self.backend();
         let backend_npages = backend.npages();
         for idx in page_idx_range.start..page_idx_range.end {
-            if let Some(page) = pages.peek(&idx) {
-                if page.load_state() == PageState::Dirty && idx < backend_npages {
-                    let waiter = backend.write_page_async(idx, page)?;
-                    bio_waiter.concat(waiter);
-                }
+            if let Some(page) = pages.peek(&idx)
+                && page.load_state() == PageState::Dirty
+                && idx < backend_npages
+            {
+                let waiter = backend.write_page_async(idx, page)?;
+                bio_waiter.concat(waiter);
             }
         }

@@ -466,14 +467,14 @@ impl Pager for PageCacheManager {
     fn decommit_page(&self, idx: usize) -> Result<()> {
         let page_result = self.pages.lock().pop(&idx);

-        if let Some(page) = page_result {
-            if let PageState::Dirty = page.load_state() {
-                let Some(backend) = self.backend.upgrade() else {
-                    return Ok(());
-                };
-                if idx < backend.npages() {
-                    backend.write_page(idx, &page)?;
-                }
+        if let Some(page) = page_result
+            && let PageState::Dirty = page.load_state()
+        {
+            let Some(backend) = self.backend.upgrade() else {
+                return Ok(());
+            };
+            if idx < backend.npages() {
+                backend.write_page(idx, &page)?;
             }
         }
diff --git a/kernel/src/ipc/semaphore/system_v/sem.rs b/kernel/src/ipc/semaphore/system_v/sem.rs
index 686c83fc5..5f4b748a0 100644
--- a/kernel/src/ipc/semaphore/system_v/sem.rs
+++ b/kernel/src/ipc/semaphore/system_v/sem.rs
@@ -66,7 +66,7 @@ pub struct PendingOp {
 }

 impl PendingOp {
-    pub fn sops_iter(&self) -> Iter<SemBuf> {
+    pub fn sops_iter(&self) -> Iter<'_, SemBuf> {
         self.sops.iter()
     }
diff --git a/kernel/src/ipc/semaphore/system_v/sem_set.rs b/kernel/src/ipc/semaphore/system_v/sem_set.rs
index f03fe857b..42923238b 100644
--- a/kernel/src/ipc/semaphore/system_v/sem_set.rs
+++ b/kernel/src/ipc/semaphore/system_v/sem_set.rs
@@ -169,7 +169,7 @@ impl SemaphoreSet {
         );
     }

-    pub(super) fn inner(&self) -> SpinLockGuard<SemSetInner, PreemptDisabled> {
+    pub(super) fn inner(&self) -> SpinLockGuard<'_, SemSetInner, PreemptDisabled> {
         self.inner.lock()
     }
diff --git a/kernel/src/net/socket/ip/stream/mod.rs b/kernel/src/net/socket/ip/stream/mod.rs
index eaa6cbe04..4045e9f6c 100644
--- a/kernel/src/net/socket/ip/stream/mod.rs
+++ b/kernel/src/net/socket/ip/stream/mod.rs
@@ -137,7 +137,7 @@ impl StreamSocket {
     /// Ensures that the socket state is up to date and obtains a read lock on it.
     ///
     /// For a description of what "up-to-date" means, see [`Self::write_updated_state`].
-    fn read_updated_state(&self) -> RwLockReadGuard<Takeable<State>, PreemptDisabled> {
+    fn read_updated_state(&self) -> RwLockReadGuard<'_, Takeable<State>, PreemptDisabled> {
         loop {
             let state = self.state.read();
             match state.as_ref() {
@@ -159,7 +159,7 @@ impl StreamSocket {
     ///
     /// This method performs the delayed state transition to ensure that the state is up to date
     /// and returns the guard of the write-locked state.
-    fn write_updated_state(&self) -> RwLockWriteGuard<Takeable<State>, PreemptDisabled> {
+    fn write_updated_state(&self) -> RwLockWriteGuard<'_, Takeable<State>, PreemptDisabled> {
         let mut state = self.state.write();

         match state.as_ref() {
diff --git a/kernel/src/net/socket/vsock/stream/socket.rs b/kernel/src/net/socket/vsock/stream/socket.rs
index 4a379d68e..78ff999a4 100644
--- a/kernel/src/net/socket/vsock/stream/socket.rs
+++ b/kernel/src/net/socket/vsock/stream/socket.rs
@@ -105,10 +105,10 @@ impl VsockStreamSocket {
         let peer_addr = self.peer_addr()?;
         // If buffer is now empty and the peer requested shutdown, finish shutting down the
         // connection.
-        if connected.should_close() {
-            if let Err(e) = self.shutdown(SockShutdownCmd::SHUT_RDWR) {
-                debug!("The error is {:?}", e);
-            }
+        if connected.should_close()
+            && let Err(e) = self.shutdown(SockShutdownCmd::SHUT_RDWR)
+        {
+            debug!("The error is {:?}", e);
         }
         Ok((read_size, peer_addr))
     }
@@ -183,13 +183,12 @@ impl Socket for VsockStreamSocket {
         if !connecting
             .poll(IoEvents::IN, Some(poller.as_handle_mut()))
             .contains(IoEvents::IN)
+            && let Err(e) = poller.wait()
         {
-            if let Err(e) = poller.wait() {
-                vsockspace
-                    .remove_connecting_socket(&connecting.local_addr())
-                    .unwrap();
-                return Err(e);
-            }
+            vsockspace
+                .remove_connecting_socket(&connecting.local_addr())
+                .unwrap();
+            return Err(e);
         }

         vsockspace
diff --git a/kernel/src/process/credentials/credentials_.rs b/kernel/src/process/credentials/credentials_.rs
index 1109cf669..7d68ee296 100644
--- a/kernel/src/process/credentials/credentials_.rs
+++ b/kernel/src/process/credentials/credentials_.rs
@@ -416,11 +416,11 @@ impl Credentials_ {

     // ******* Supplementary groups methods *******

-    pub(super) fn groups(&self) -> RwLockReadGuard<BTreeSet<Gid>, PreemptDisabled> {
+    pub(super) fn groups(&self) -> RwLockReadGuard<'_, BTreeSet<Gid>, PreemptDisabled> {
         self.supplementary_gids.read()
     }

-    pub(super) fn groups_mut(&self) -> RwLockWriteGuard<BTreeSet<Gid>, PreemptDisabled> {
+    pub(super) fn groups_mut(&self) -> RwLockWriteGuard<'_, BTreeSet<Gid>, PreemptDisabled> {
         self.supplementary_gids.write()
     }
diff --git a/kernel/src/process/credentials/static_cap.rs b/kernel/src/process/credentials/static_cap.rs
index 461871586..0b7c99560 100644
--- a/kernel/src/process/credentials/static_cap.rs
+++ b/kernel/src/process/credentials/static_cap.rs
@@ -253,7 +253,7 @@ impl<R: TRights> Credentials<R> {
     ///
     /// This method requires the `Read` right.
     #[require(R > Read)]
-    pub fn groups(&self) -> RwLockReadGuard<BTreeSet<Gid>, PreemptDisabled> {
+    pub fn groups(&self) -> RwLockReadGuard<'_, BTreeSet<Gid>, PreemptDisabled> {
         self.0.groups()
     }
@@ -261,7 +261,7 @@ impl<R: TRights> Credentials<R> {
     ///
     /// This method requires the `Write` right.
     #[require(R > Write)]
-    pub fn groups_mut(&self) -> RwLockWriteGuard<BTreeSet<Gid>, PreemptDisabled> {
+    pub fn groups_mut(&self) -> RwLockWriteGuard<'_, BTreeSet<Gid>, PreemptDisabled> {
         self.0.groups_mut()
     }
diff --git a/kernel/src/process/posix_thread/thread_local.rs b/kernel/src/process/posix_thread/thread_local.rs
index 5d2ea69ea..db98aaf5d 100644
--- a/kernel/src/process/posix_thread/thread_local.rs
+++ b/kernel/src/process/posix_thread/thread_local.rs
@@ -68,11 +68,11 @@ impl ThreadLocal {
         &self.robust_list
     }

-    pub fn borrow_file_table(&self) -> FileTableRef {
+    pub fn borrow_file_table(&self) -> FileTableRef<'_> {
         FileTableRef(self.file_table.borrow())
     }

-    pub fn borrow_file_table_mut(&self) -> FileTableRefMut {
+    pub fn borrow_file_table_mut(&self) -> FileTableRefMut<'_> {
         FileTableRefMut(self.file_table.borrow_mut())
     }
diff --git a/kernel/src/process/process/mod.rs b/kernel/src/process/process/mod.rs
index 552fc764b..d4ec2cca9 100644
--- a/kernel/src/process/process/mod.rs
+++ b/kernel/src/process/process/mod.rs
@@ -291,11 +291,7 @@ impl Process {
     /// Get a snapshot of the current children attached to this process.
     pub fn current_children(&self) -> Vec<Arc<Process>> {
-        self.children
-            .lock()
-            .iter()
-            .map(|(_pid, proc)| proc.clone())
-            .collect()
+        self.children.lock().values().cloned().collect()
     }

     pub fn children_wait_queue(&self) -> &WaitQueue {
@@ -593,7 +589,7 @@ impl Process {
         &self.process_vm
     }

-    pub fn lock_root_vmar(&self) -> ProcessVmarGuard {
+    pub fn lock_root_vmar(&self) -> ProcessVmarGuard<'_> {
         self.process_vm.lock_root_vmar()
     }

@@ -601,7 +597,7 @@ impl Process {
         self.process_vm.heap()
     }

-    pub fn init_stack_reader(&self) -> InitStackReader {
+    pub fn init_stack_reader(&self) -> InitStackReader<'_> {
         self.process_vm.init_stack_reader()
     }
diff --git a/kernel/src/process/process/process_group.rs b/kernel/src/process/process/process_group.rs
index 429fcd796..438a86e7c 100644
--- a/kernel/src/process/process/process_group.rs
+++ b/kernel/src/process/process/process_group.rs
@@ -53,7 +53,7 @@ impl ProcessGroup {
     }

     /// Acquires a lock on the process group.
-    pub fn lock(&self) -> ProcessGroupGuard {
+    pub fn lock(&self) -> ProcessGroupGuard<'_> {
         ProcessGroupGuard {
             inner: self.inner.lock(),
         }
@@ -82,7 +82,7 @@ pub struct ProcessGroupGuard<'a> {

 impl ProcessGroupGuard<'_> {
     /// Returns an iterator over the processes in the process group.
-    pub fn iter(&self) -> ProcessGroupIter {
+    pub fn iter(&self) -> ProcessGroupIter<'_> {
         ProcessGroupIter {
             inner: self.inner.processes.values(),
         }
diff --git a/kernel/src/process/process/session.rs b/kernel/src/process/process/session.rs
index e6f6ba451..92d91b4e7 100644
--- a/kernel/src/process/process/session.rs
+++ b/kernel/src/process/process/session.rs
@@ -70,7 +70,7 @@ impl Session {
     }

     /// Acquires a lock on the session.
-    pub fn lock(&self) -> SessionGuard {
+    pub fn lock(&self) -> SessionGuard<'_> {
         SessionGuard {
             inner: self.inner.lock(),
         }
diff --git a/kernel/src/process/process_table.rs b/kernel/src/process/process_table.rs
index 6bf19efbe..4be3f540d 100644
--- a/kernel/src/process/process_table.rs
+++ b/kernel/src/process/process_table.rs
@@ -56,7 +56,7 @@ impl ProcessTable {
     }

     /// Returns an iterator over the processes in the table.
-    pub fn iter(&self) -> ProcessTableIter {
+    pub fn iter(&self) -> ProcessTableIter<'_> {
         ProcessTableIter {
             inner: self.inner.values(),
         }
diff --git a/kernel/src/process/process_vm/init_stack/mod.rs b/kernel/src/process/process_vm/init_stack/mod.rs
index ea5a1936c..8c1568c54 100644
--- a/kernel/src/process/process_vm/init_stack/mod.rs
+++ b/kernel/src/process/process_vm/init_stack/mod.rs
@@ -174,7 +174,7 @@ impl InitStack {
         let vmar_map_options = {
             let perms = VmPerms::READ | VmPerms::WRITE;
             let map_addr = self.initial_top - self.max_size;
-            debug_assert!(map_addr % PAGE_SIZE == 0);
+            debug_assert!(map_addr.is_multiple_of(PAGE_SIZE));
             root_vmar
                 .new_map(self.max_size, perms)?
                 .offset(map_addr)
@@ -291,7 +291,7 @@ impl InitStackWriter {
         let argv_pointers_size = (argv_pointers.len() + 1) * mem::size_of::<u64>();
         let argc_size = mem::size_of::<u64>();
         let to_write_size = auxvec_size + envp_pointers_size + argv_pointers_size + argc_size;
-        if (self.pos() - to_write_size) % 16 != 0 {
+        if !(self.pos() - to_write_size).is_multiple_of(16) {
             self.write_u64(0)?;
         }
         Ok(())
diff --git a/kernel/src/process/process_vm/mod.rs b/kernel/src/process/process_vm/mod.rs
index d69d97f9c..cbe853362 100644
--- a/kernel/src/process/process_vm/mod.rs
+++ b/kernel/src/process/process_vm/mod.rs
@@ -142,7 +142,7 @@ impl ProcessVm {
     }

     /// Locks the root VMAR and gets a guard to it.
-    pub fn lock_root_vmar(&self) -> ProcessVmarGuard {
+    pub fn lock_root_vmar(&self) -> ProcessVmarGuard<'_> {
         ProcessVmarGuard {
             inner: self.root_vmar.lock(),
         }
@@ -150,7 +150,7 @@ impl ProcessVm {

     /// Returns a reader for reading contents from
     /// the `InitStack`.
-    pub fn init_stack_reader(&self) -> InitStackReader {
+    pub fn init_stack_reader(&self) -> InitStackReader<'_> {
         self.init_stack.reader(self.lock_root_vmar())
     }
diff --git a/kernel/src/process/status.rs b/kernel/src/process/status.rs
index e5a8ed70d..3ce18e2af 100644
--- a/kernel/src/process/status.rs
+++ b/kernel/src/process/status.rs
@@ -147,16 +147,16 @@ impl StopStatus {
     pub(super) fn wait(&self, options: WaitOptions) -> Option<StopWaitStatus> {
         let mut wait_status = self.wait_status.lock();

-        if options.contains(WaitOptions::WSTOPPED) {
-            if let Some(StopWaitStatus::Stopped(_)) = wait_status.as_ref() {
-                return wait_status.take();
-            }
+        if options.contains(WaitOptions::WSTOPPED)
+            && let Some(StopWaitStatus::Stopped(_)) = wait_status.as_ref()
+        {
+            return wait_status.take();
         }

-        if options.contains(WaitOptions::WCONTINUED) {
-            if let Some(StopWaitStatus::Continue) = wait_status.as_ref() {
-                return wait_status.take();
-            }
+        if options.contains(WaitOptions::WCONTINUED)
+            && let Some(StopWaitStatus::Continue) = wait_status.as_ref()
+        {
+            return wait_status.take();
         }

         None
diff --git a/kernel/src/syscall/eventfd.rs b/kernel/src/syscall/eventfd.rs
index bd7d09a9e..5bb49e1b1 100644
--- a/kernel/src/syscall/eventfd.rs
+++ b/kernel/src/syscall/eventfd.rs
@@ -53,7 +53,8 @@ pub fn sys_eventfd2(init_val: u64, flags: u32, ctx: &Context) -> Result<SyscallReturn>
 fn do_sys_eventfd2(init_val: u64, flags: Flags, ctx: &Context) -> FileDesc {
     let event_file = EventFile::new(init_val, flags);
-    let fd = {
+
+    {
         let file_table = ctx.thread_local.borrow_file_table();
         let mut file_table_locked = file_table.unwrap().write();
         let fd_flags = if flags.contains(Flags::EFD_CLOEXEC) {
@@ -62,8 +63,7 @@ fn do_sys_eventfd2(init_val: u64, flags: Flags, ctx: &Context) -> FileDesc {
             FdFlags::empty()
         };
         file_table_locked.insert(Arc::new(event_file), fd_flags)
-    };
-    fd
+    }
 }

 bitflags! {
diff --git a/kernel/src/syscall/madvise.rs b/kernel/src/syscall/madvise.rs
index 20ae9cf51..826ce3e1f 100644
--- a/kernel/src/syscall/madvise.rs
+++ b/kernel/src/syscall/madvise.rs
@@ -27,7 +27,7 @@ pub fn sys_madvise(
         start, len, behavior
     );

-    if start % PAGE_SIZE != 0 {
+    if !start.is_multiple_of(PAGE_SIZE) {
         return_errno_with_message!(Errno::EINVAL, "the start address should be page aligned");
     }
     if len > isize::MAX as usize {
diff --git a/kernel/src/syscall/mmap.rs b/kernel/src/syscall/mmap.rs
index be56a5b55..5f5b4a9e3 100644
--- a/kernel/src/syscall/mmap.rs
+++ b/kernel/src/syscall/mmap.rs
@@ -67,7 +67,7 @@ fn do_sys_mmap(
     let len = len.align_up(PAGE_SIZE);

-    if offset % PAGE_SIZE != 0 {
+    if !offset.is_multiple_of(PAGE_SIZE) {
         return_errno_with_message!(Errno::EINVAL, "mmap only support page-aligned offset");
     }
     offset.checked_add(len).ok_or(Error::with_message(
diff --git a/kernel/src/syscall/mprotect.rs b/kernel/src/syscall/mprotect.rs
index 144c18b7f..6dace4a94 100644
--- a/kernel/src/syscall/mprotect.rs
+++ b/kernel/src/syscall/mprotect.rs
@@ -17,7 +17,7 @@ pub fn sys_mprotect(addr: Vaddr, len: usize, perms: u64, ctx: &Context) -> Result<SyscallReturn>
     // According to linux behavior,
     // ,
     // the addr is checked even if len is 0.
-    if addr % PAGE_SIZE != 0 {
+    if !addr.is_multiple_of(PAGE_SIZE) {
         return_errno_with_message!(Errno::EINVAL, "the start address should be page aligned");
     }
     if len == 0 {
diff --git a/kernel/src/syscall/mremap.rs b/kernel/src/syscall/mremap.rs
index d2214577b..240e2fc1f 100644
--- a/kernel/src/syscall/mremap.rs
+++ b/kernel/src/syscall/mremap.rs
@@ -31,7 +31,7 @@ fn do_sys_mremap(
         old_addr, old_size, new_size, flags, new_addr,
     );

-    if old_addr % PAGE_SIZE != 0 {
+    if !old_addr.is_multiple_of(PAGE_SIZE) {
         return_errno_with_message!(Errno::EINVAL, "mremap: `old_addr` must be page-aligned");
     }
     if new_size == 0 {
diff --git a/kernel/src/syscall/msync.rs b/kernel/src/syscall/msync.rs
index bd144011f..2c811cbb7 100644
--- a/kernel/src/syscall/msync.rs
+++ b/kernel/src/syscall/msync.rs
@@ -34,7 +34,9 @@ pub fn sys_msync(start: Vaddr, size: usize, flag: i32, ctx: &Context) -> Result<SyscallReturn>
     debug!("msync: start = {start:#x}, size = {size}, flags = {flags:?}");

-    if start % PAGE_SIZE != 0 || flags.contains(MsyncFlags::MS_ASYNC | MsyncFlags::MS_SYNC) {
+    if !start.is_multiple_of(PAGE_SIZE)
+        || flags.contains(MsyncFlags::MS_ASYNC | MsyncFlags::MS_SYNC)
+    {
         return_errno!(Errno::EINVAL);
     }
diff --git a/kernel/src/syscall/munmap.rs b/kernel/src/syscall/munmap.rs
index 7b96c3b10..724a3a4e6 100644
--- a/kernel/src/syscall/munmap.rs
+++ b/kernel/src/syscall/munmap.rs
@@ -8,7 +8,7 @@ use crate::prelude::*;
 pub fn sys_munmap(addr: Vaddr, len: usize, ctx: &Context) -> Result<SyscallReturn> {
     debug!("addr = 0x{:x}, len = {}", addr, len);

-    if addr % PAGE_SIZE != 0 {
+    if !addr.is_multiple_of(PAGE_SIZE) {
         return_errno_with_message!(Errno::EINVAL, "munmap addr must be page-aligned");
     }
     if len == 0 {
diff --git a/kernel/src/syscall/prctl.rs b/kernel/src/syscall/prctl.rs
index 7c85cb6ba..bcbe69bcf 100644
--- a/kernel/src/syscall/prctl.rs
+++ b/kernel/src/syscall/prctl.rs
@@ -62,13 +62,13 @@ pub fn sys_prctl(
         }
         PrctlCmd::PR_GET_NAME(write_to_addr) => {
             let thread_name = ctx.posix_thread.thread_name().lock();
-            if let Some(thread_name) = &*thread_name {
-                if let Some(thread_name) = thread_name.name()? {
-                    ctx.user_space().write_bytes(
-                        write_to_addr,
-                        &mut VmReader::from(thread_name.to_bytes_with_nul()),
-                    )?;
-                }
+            if let Some(thread_name) = &*thread_name
+                && let Some(thread_name) = thread_name.name()?
+            {
+                ctx.user_space().write_bytes(
+                    write_to_addr,
+                    &mut VmReader::from(thread_name.to_bytes_with_nul()),
+                )?;
             }
         }
         PrctlCmd::PR_SET_NAME(read_addr) => {
diff --git a/kernel/src/syscall/setxattr.rs b/kernel/src/syscall/setxattr.rs
index 8a1088ba1..a9605e8ef 100644
--- a/kernel/src/syscall/setxattr.rs
+++ b/kernel/src/syscall/setxattr.rs
@@ -165,7 +165,7 @@ pub(super) fn read_xattr_name_cstr_from_user(
     })
 }

-pub(super) fn parse_xattr_name(name_str: &str) -> Result<XattrName> {
+pub(super) fn parse_xattr_name(name_str: &'_ str) -> Result<XattrName<'_>> {
     if name_str.is_empty() || name_str.len() > XATTR_NAME_MAX_LEN {
         return_errno_with_message!(Errno::ERANGE, "xattr name empty or too long");
     }
diff --git a/kernel/src/util/ring_buffer.rs b/kernel/src/util/ring_buffer.rs
index 92c38b13a..47903ae39 100644
--- a/kernel/src/util/ring_buffer.rs
+++ b/kernel/src/util/ring_buffer.rs
@@ -565,11 +565,11 @@ mod test {
         assert!(prod.is_empty());
     }

-    fn reader_from(buf: &[u8]) -> VmReader {
+    fn reader_from(buf: &[u8]) -> VmReader<'_> {
         VmReader::from(buf).to_fallible()
     }

-    fn writer_from(buf: &mut [u8]) -> VmWriter {
+    fn writer_from(buf: &mut [u8]) -> VmWriter<'_> {
         VmWriter::from(buf).to_fallible()
     }
 }
diff --git a/kernel/src/vm/mod.rs b/kernel/src/vm/mod.rs
index fee6c9626..d2d8e2600 100644
--- a/kernel/src/vm/mod.rs
+++ b/kernel/src/vm/mod.rs
@@ -55,13 +55,12 @@ pub fn mem_total() -> usize {
     use ostd::boot::{boot_info, memory_region::MemoryRegionType};

     let regions = &boot_info().memory_regions;
-    let total = regions
+
+    regions
         .iter()
         .filter(|region| region.typ() == MemoryRegionType::Usable)
         .map(|region| region.len())
-        .sum::<usize>();
-
-    total
+        .sum::<usize>()
 }

 static PROMOTED_PAGE_SIZE: usize = page_size::<PagingConsts>(2);
diff --git a/kernel/src/vm/vmar/dyn_cap.rs b/kernel/src/vm/vmar/dyn_cap.rs
index 6bc619e77..b595f3178 100644
--- a/kernel/src/vm/vmar/dyn_cap.rs
+++ b/kernel/src/vm/vmar/dyn_cap.rs
@@ -57,7 +57,11 @@ impl Vmar<Rights> {
     /// Memory permissions may be changed through the `protect` method,
     /// which ensures that any updated memory permissions do not go beyond
     /// the access rights of the underlying VMOs.
-    pub fn new_map(&self, size: usize, perms: VmPerms) -> Result<VmarMapOptions<Rights, Rights>> {
+    pub fn new_map(
+        &self,
+        size: usize,
+        perms: VmPerms,
+    ) -> Result<VmarMapOptions<'_, Rights, Rights>> {
         Ok(VmarMapOptions::new(self, size, perms))
     }
diff --git a/kernel/src/vm/vmar/interval_set.rs b/kernel/src/vm/vmar/interval_set.rs
index 389237f71..7f6503fca 100644
--- a/kernel/src/vm/vmar/interval_set.rs
+++ b/kernel/src/vm/vmar/interval_set.rs
@@ -76,10 +76,10 @@ where
             if v.range().end > *point {
                 return Some(v);
             }
-        } else if let Some((_, v)) = cursor.peek_next() {
-            if v.range().start <= *point {
-                return Some(v);
-            }
+        } else if let Some((_, v)) = cursor.peek_next()
+            && v.range().start <= *point
+        {
+            return Some(v);
         }
         None
     }
@@ -131,10 +131,10 @@ where
             if v.range().end > *point {
                 return Some(cursor.remove_prev().unwrap().1);
             }
-        } else if let Some((_, v)) = cursor.peek_next() {
-            if v.range().start <= *point {
-                return Some(cursor.remove_next().unwrap().1);
-            }
+        } else if let Some((_, v)) = cursor.peek_next()
+            && v.range().start <= *point
+        {
+            return Some(cursor.remove_next().unwrap().1);
         }
         None
     }
@@ -184,10 +184,10 @@ where
         // There's one previous element that may intersect with the range.
         if !self.peeked_prev {
             self.peeked_prev = true;
-            if let Some((_, v)) = self.cursor.peek_prev() {
-                if v.range().end > self.range.start {
-                    return Some(v);
-                }
+            if let Some((_, v)) = self.cursor.peek_prev()
+                && v.range().end > self.range.start
+            {
+                return Some(v);
             }
         }
@@ -226,10 +226,10 @@ where
         // There's one previous element that may intersect with the range.
         if !self.drained_prev {
             self.drained_prev = true;
-            if let Some((_, v)) = self.cursor.peek_prev() {
-                if v.range().end > self.range.start {
-                    return Some(self.cursor.remove_prev().unwrap().1);
-                }
+            if let Some((_, v)) = self.cursor.peek_prev()
+                && v.range().end > self.range.start
+            {
+                return Some(self.cursor.remove_prev().unwrap().1);
             }
         }
diff --git a/kernel/src/vm/vmar/mod.rs b/kernel/src/vm/vmar/mod.rs
index fe440a4de..8a271b228 100644
--- a/kernel/src/vm/vmar/mod.rs
+++ b/kernel/src/vm/vmar/mod.rs
@@ -83,7 +83,10 @@ impl VmMappingPolicy for VmMappingPolicyGreedyHugeMapping {
         Ok(
             // Check if the address is aligned to a level 2 page. If it is not aligned, it cannot be
             // mapped at a level larger than 1.
-            if (req.page_aligned_addr % page_size::<PagingConsts>(2)) == 0 {
+            if req
+                .page_aligned_addr
+                .is_multiple_of(page_size::<PagingConsts>(2))
+            {
                 2
             } else {
                 1
@@ -437,10 +440,10 @@ impl VmarInner {
             .map_or(ROOT_VMAR_LOWEST_ADDR, |vm_mapping| vm_mapping.range().end);
         // FIXME: The up-align may overflow.
         let last_occupied_aligned = highest_occupied.align_up(align);
-        if let Some(last) = last_occupied_aligned.checked_add(size) {
-            if last <= ROOT_VMAR_CAP_ADDR {
-                return Ok(last_occupied_aligned..last);
-            }
+        if let Some(last) = last_occupied_aligned.checked_add(size)
+            && last <= ROOT_VMAR_CAP_ADDR
+        {
+            return Ok(last_occupied_aligned..last);
         }

         // Slow path that we need to search for a free region.
@@ -577,8 +580,8 @@ impl Vmar_ {
     }

     fn protect(&self, perms: VmPerms, range: Range<usize>) -> Result<()> {
-        assert!(range.start % PAGE_SIZE == 0);
-        assert!(range.end % PAGE_SIZE == 0);
+        assert!(range.start.is_multiple_of(PAGE_SIZE));
+        assert!(range.end.is_multiple_of(PAGE_SIZE));
         self.do_protect_inner(perms, range)?;
         Ok(())
     }
@@ -1207,17 +1210,17 @@ where
     /// Checks whether all options are valid.
     fn check_options(&self) -> Result<()> {
         // Check align.
-        debug_assert!(self.align % PAGE_SIZE == 0);
+        debug_assert!(self.align.is_multiple_of(PAGE_SIZE));
         debug_assert!(self.align.is_power_of_two());
-        if self.align % PAGE_SIZE != 0 || !self.align.is_power_of_two() {
+        if !self.align.is_multiple_of(PAGE_SIZE) || !self.align.is_power_of_two() {
             return_errno_with_message!(Errno::EINVAL, "invalid align");
         }
-        debug_assert!(self.size % self.align == 0);
-        if self.size % self.align != 0 {
+        debug_assert!(self.size.is_multiple_of(self.align));
+        if !self.size.is_multiple_of(self.align) {
             return_errno_with_message!(Errno::EINVAL, "invalid mapping size");
         }
-        debug_assert!(self.vmo_offset % self.align == 0);
-        if self.vmo_offset % self.align != 0 {
+        debug_assert!(self.vmo_offset.is_multiple_of(self.align));
+        if !self.vmo_offset.is_multiple_of(self.align) {
             return_errno_with_message!(Errno::EINVAL, "invalid vmo offset");
         }
         if let Some(offset) = self.offset {
diff --git a/kernel/src/vm/vmar/static_cap.rs b/kernel/src/vm/vmar/static_cap.rs
index 83ad07fd0..90a6523f9 100644
--- a/kernel/src/vm/vmar/static_cap.rs
+++ b/kernel/src/vm/vmar/static_cap.rs
@@ -67,7 +67,7 @@ impl<R: TRights> Vmar<TRightSet<R>> {
         &self,
         size: usize,
         perms: VmPerms,
-    ) -> Result<VmarMapOptions<TRightSet<R>, Rights>> {
+    ) -> Result<VmarMapOptions<'_, TRightSet<R>, Rights>> {
         Ok(VmarMapOptions::new(self, size, perms))
     }
diff --git a/kernel/src/vm/vmar/vm_mapping.rs b/kernel/src/vm/vmar/vm_mapping.rs
index 177f0ca03..ee010d677 100644
--- a/kernel/src/vm/vmar/vm_mapping.rs
+++ b/kernel/src/vm/vmar/vm_mapping.rs
@@ -444,7 +444,7 @@ impl VmMapping {
     /// must not be either the start or the end of the mapping.
     pub fn split(self, at: Vaddr) -> Result<(Self, Self)> {
         debug_assert!(self.map_to_addr < at && at < self.map_end());
-        debug_assert!(at % PAGE_SIZE == 0);
+        debug_assert!(at.is_multiple_of(PAGE_SIZE));

         let (mut l_vmo, mut r_vmo) = (None, None);
@@ -619,7 +619,7 @@ impl MappedVmo {
         page_offset: usize,
     ) -> core::result::Result<UFrame, VmoCommitError> {
         debug_assert!(page_offset < self.range.len());
-        debug_assert!(page_offset % PAGE_SIZE == 0);
+        debug_assert!(page_offset.is_multiple_of(PAGE_SIZE));
         self.vmo.try_commit_page(self.range.start + page_offset)
     }
diff --git a/osdk/Cargo.toml b/osdk/Cargo.toml
index 93bdb6744..d3369f6a4 100644
--- a/osdk/Cargo.toml
+++ b/osdk/Cargo.toml
@@ -8,11 +8,8 @@ readme = "README.md"
 repository = "https://github.com/ldos-project/asterinas"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
 [lints.clippy]
 collapsible_match = "allow"
-collapsible_if = "allow"
-unnecessary_unwrap = "allow"

 [dependencies]
 linux-bzimage-builder = { version = "0.15.2", path = "../ostd/libs/linux-bzimage/builder" }
diff --git a/osdk/deps/frame-allocator/src/chunk.rs b/osdk/deps/frame-allocator/src/chunk.rs
index 589205800..3effae5ed 100644
--- a/osdk/deps/frame-allocator/src/chunk.rs
+++ b/osdk/deps/frame-allocator/src/chunk.rs
@@ -46,8 +46,8 @@ pub(crate) fn split_to_chunks(
     addr: Paddr,
     size: usize,
 ) -> impl Iterator {
-    assert!(addr % PAGE_SIZE == 0);
-    assert!(size % PAGE_SIZE == 0);
+    assert!(addr.is_multiple_of(PAGE_SIZE));
+    assert!(size.is_multiple_of(PAGE_SIZE));

     struct SplitChunks {
         addr: Paddr,
diff --git a/osdk/deps/frame-allocator/src/pools/mod.rs b/osdk/deps/frame-allocator/src/pools/mod.rs
index c3342c19a..e6d3622d4 100644
--- a/osdk/deps/frame-allocator/src/pools/mod.rs
+++ b/osdk/deps/frame-allocator/src/pools/mod.rs
@@ -77,14 +77,14 @@ pub(super) fn alloc(guard: &DisabledLocalIrqGuard, layout: Layout) -> Option<Paddr> {
-    if allocated_size > layout.size() {
-        if let Some(chunk_addr) = chunk_addr {
-            do_dealloc(
-                &mut local_pool,
-                &mut global_pool,
-                [(chunk_addr + layout.size(), allocated_size - layout.size())].into_iter(),
-            );
-        }
+    if allocated_size > layout.size()
+        && let Some(chunk_addr) = chunk_addr
+    {
+        do_dealloc(
+            &mut local_pool,
+            &mut global_pool,
+            [(chunk_addr + layout.size(), allocated_size - layout.size())].into_iter(),
+        );
     }

     balancing::balance(local_pool.deref_mut(), &mut global_pool);
diff --git a/osdk/deps/test-kernel/src/lib.rs b/osdk/deps/test-kernel/src/lib.rs
index 348aae2b6..e3a28db06 100644
--- a/osdk/deps/test-kernel/src/lib.rs
+++ b/osdk/deps/test-kernel/src/lib.rs
@@ -105,11 +105,11 @@ where
     let crate_set = crate_whitelist.map(|crates| crates.iter().copied().collect::<BTreeSet<_>>());

     for crate_ in tree.iter() {
-        if let Some(crate_set) = &crate_set {
-            if !crate_set.contains(crate_.name()) {
-                early_print!("\n[ktest runner] skipping crate \"{}\".\n", crate_.name());
-                continue;
-            }
+        if let Some(crate_set) = &crate_set
+            && !crate_set.contains(crate_.name())
+        {
+            early_print!("\n[ktest runner] skipping crate \"{}\".\n", crate_.name());
+            continue;
         }
         match run_crate_ktests(crate_, &whitelist_trie) {
             KtestResult::Ok => {}
@@ -149,7 +149,7 @@ fn run_crate_ktests(crate_: &KtestCrate, whitelist: &Option<SuffixTrie>) -> KtestResult {
         debug_assert_eq!(test.info().package, crate_name);
         match test.run(
             &(ostd::panic::catch_unwind::<(), fn()>
-                as fn(fn()) -> Result<(), Box<(dyn Any + Send + 'static)>>),
+                as fn(fn()) -> Result<(), Box<dyn Any + Send + 'static>>),
         ) {
             Ok(()) => {
                 early_print!("  {}\n", "ok".green());
diff --git a/osdk/deps/test-kernel/src/path.rs b/osdk/deps/test-kernel/src/path.rs
index f8134884d..95e672133 100644
--- a/osdk/deps/test-kernel/src/path.rs
+++ b/osdk/deps/test-kernel/src/path.rs
@@ -88,7 +88,7 @@ impl KtestPath {
         true
     }

-    pub fn iter(&self) -> KtestPathIter {
+    pub fn iter(&self) -> KtestPathIter<'_> {
         self.path.iter()
     }
 }
diff --git a/osdk/src/bundle/mod.rs b/osdk/src/bundle/mod.rs
index 5a7f5683b..3d52c57c6 100644
--- a/osdk/src/bundle/mod.rs
+++ b/osdk/src/bundle/mod.rs
@@ -86,20 +86,20 @@ impl Bundle {
         let _dir_guard = DirGuard::change_dir(&path);

-        if let Some(aster_bin) = &manifest.aster_bin {
-            if !aster_bin.validate() {
-                return None;
-            }
+        if let Some(aster_bin) = &manifest.aster_bin
+            && !aster_bin.validate()
+        {
+            return None;
         }
-        if let Some(vm_image) = &manifest.vm_image {
-            if !vm_image.validate() {
-                return None;
-            }
+        if let Some(vm_image) = &manifest.vm_image
+            && !vm_image.validate()
+        {
+            return None;
         }
-        if let Some(initramfs) = &manifest.initramfs {
-            if !initramfs.validate() {
-                return None;
-            }
+        if let Some(initramfs) = &manifest.initramfs
+            && !initramfs.validate()
+        {
+            return None;
         }

         Some(Self {
@@ -266,10 +266,10 @@ impl Bundle {
         // Setting a QEMU log is required for source line stack trace because piping the output
         // is less desirable when running QEMU with serial redirected to standard I/O.
         let qemu_log_path = config.work_dir.join("qemu.log");
-        if let Ok(file) = std::fs::File::open(qemu_log_path) {
-            if let Some(aster_bin) = &self.manifest.aster_bin {
-                crate::util::trace_panic_from_log(file, self.path.join(aster_bin.path()));
-            }
+        if let Ok(file) = std::fs::File::open(qemu_log_path)
+            && let Some(aster_bin) = &self.manifest.aster_bin
+        {
+            crate::util::trace_panic_from_log(file, self.path.join(aster_bin.path()));
         }

         // FIXME: When panicking it sometimes returns success, why?
diff --git a/osdk/src/cli.rs b/osdk/src/cli.rs
index 1268afcfa..bd76763c4 100644
--- a/osdk/src/cli.rs
+++ b/osdk/src/cli.rs
@@ -283,8 +283,8 @@ impl DebugProfileOutArgs {
     /// the default format is flame graph.
pub fn format(&self) -> ProfileFormat { self.format.unwrap_or_else(|| { - if self.output.is_some() { - match self.output.as_ref().unwrap().extension() { + if let Some(output) = &self.output { + match output.extension() { Some(ext) if ext == "folded" => ProfileFormat::Folded, Some(ext) if ext == "json" => ProfileFormat::Json, Some(ext) if ext == "svg" => ProfileFormat::FlameGraph, diff --git a/osdk/src/commands/build/mod.rs b/osdk/src/commands/build/mod.rs index d38b51eb3..4eec63d17 100644 --- a/osdk/src/commands/build/mod.rs +++ b/osdk/src/commands/build/mod.rs @@ -148,11 +148,11 @@ pub fn do_cached_build( ); // Check the existing bundle's reusability - if let Some(existing_bundle) = get_reusable_existing_bundle(&bundle_path, config, action) { - if aster_elf.modified_time() < &existing_bundle.last_modified_time() { - info!("Reusing existing bundle: aster_elf is unchanged"); - return existing_bundle; - } + if let Some(existing_bundle) = get_reusable_existing_bundle(&bundle_path, config, action) + && aster_elf.modified_time() < &existing_bundle.last_modified_time() + { + info!("Reusing existing bundle: aster_elf is unchanged"); + return existing_bundle; } // Build a new bundle diff --git a/osdk/src/config/mod.rs b/osdk/src/config/mod.rs index f046f3212..354c7f87f 100644 --- a/osdk/src/config/mod.rs +++ b/osdk/src/config/mod.rs @@ -126,30 +126,30 @@ fn canonicalize_and_eval(action_scheme: &mut ActionScheme, workdir: &PathBuf) { canonicalize(initramfs); } - if let Some(ref mut qemu) = action_scheme.qemu { - if let Some(ref mut qemu_path) = qemu.path { - canonicalize(qemu_path); - } + if let Some(ref mut qemu) = action_scheme.qemu + && let Some(ref mut qemu_path) = qemu.path + { + canonicalize(qemu_path); } - if let Some(ref mut grub) = action_scheme.grub { - if let Some(ref mut grub_mkrescue_path) = grub.grub_mkrescue { - canonicalize(grub_mkrescue_path); - } + if let Some(ref mut grub) = action_scheme.grub + && let Some(ref mut grub_mkrescue_path) = grub.grub_mkrescue + { + canonicalize(grub_mkrescue_path); } } // Do evaluations on the need to be evaluated string field, namely, // QEMU arguments. - if let Some(ref mut qemu) = action_scheme.qemu { - if let Some(ref mut args) = qemu.args { - *args = match eval(workdir, args) { - Ok(v) => v, - Err(e) => { - error_msg!("Failed to evaluate qemu args: {:#?}", e); - process::exit(Errno::ParseMetadata as _); - } + if let Some(ref mut qemu) = action_scheme.qemu + && let Some(ref mut args) = qemu.args + { + *args = match eval(workdir, args) { + Ok(v) => v, + Err(e) => { + error_msg!("Failed to evaluate qemu args: {:#?}", e); + process::exit(Errno::ParseMetadata as _); } } } diff --git a/osdk/src/config/scheme/mod.rs b/osdk/src/config/scheme/mod.rs index b6ca1fe24..a9fe5dd00 100644 --- a/osdk/src/config/scheme/mod.rs +++ b/osdk/src/config/scheme/mod.rs @@ -52,12 +52,12 @@ pub struct Scheme { macro_rules! 
inherit_optional { ($from:ident, $to:ident, .$field:ident) => { - if $to.$field.is_none() { - $to.$field = $from.$field.clone(); - } else { + if let Some(to_field) = &mut $to.$field { if let Some($field) = &$from.$field { - $to.$field.as_mut().unwrap().inherit($field); + to_field.inherit($field); } + } else { + $to.$field = $from.$field.clone(); } }; } diff --git a/osdk/src/config/unix_args.rs b/osdk/src/config/unix_args.rs index bcc7918d5..841f37ddf 100644 --- a/osdk/src/config/unix_args.rs +++ b/osdk/src/config/unix_args.rs @@ -23,13 +23,14 @@ pub fn split_to_kv_array(args: &str) -> Vec { let mut joined = Vec::::new(); let mut last_has_value = false; for elem in target { - if !elem.starts_with('-') && !last_has_value { - if let Some(last) = joined.last_mut() { - last.push(' '); - last.push_str(&elem); - last_has_value = true; - continue; - } + if !elem.starts_with('-') + && !last_has_value + && let Some(last) = joined.last_mut() + { + last.push(' '); + last.push_str(&elem); + last_has_value = true; + continue; } joined.push(elem); last_has_value = false; diff --git a/osdk/src/util.rs b/osdk/src/util.rs index 792ac2c6c..83f091834 100644 --- a/osdk/src/util.rs +++ b/osdk/src/util.rs @@ -210,10 +210,10 @@ fn file_contains_ostd_main_macro(file: &syn::File) -> bool { } } } - syn::Item::ExternCrate(syn::ItemExternCrate { ident, .. }) => { - if ident.to_token_stream().to_string() == "osdk_test_kernel" { - return true; - } + syn::Item::ExternCrate(syn::ItemExternCrate { ident, .. }) + if ident.to_token_stream().to_string() == "osdk_test_kernel" => + { + return true; } _ => {} } @@ -301,18 +301,16 @@ pub fn trace_panic_from_log(qemu_log: File, bin_path: PathBuf) { println!("[OSDK] The kernel seems panicked. Parsing stack trace for source lines:"); trace_exists = true; } - if trace_exists { - if let Some(cap) = pc_matcher.captures(&line) { - let pc = cap.get(1).unwrap().as_str(); - let mut stdin = addr2line_proc.stdin.as_ref().unwrap(); - stdin.write_all(pc.as_bytes()).unwrap(); - stdin.write_all(b"\n").unwrap(); - let mut line = String::new(); - let mut stdout = BufReader::new(addr2line_proc.stdout.as_mut().unwrap()); - stdout.read_line(&mut line).unwrap(); - stack_num += 1; - println!("({: >3}) {}", stack_num, line.trim()); - } + if trace_exists && let Some(cap) = pc_matcher.captures(&line) { + let pc = cap.get(1).unwrap().as_str(); + let mut stdin = addr2line_proc.stdin.as_ref().unwrap(); + stdin.write_all(pc.as_bytes()).unwrap(); + stdin.write_all(b"\n").unwrap(); + let mut line = String::new(); + let mut stdout = BufReader::new(addr2line_proc.stdout.as_mut().unwrap()); + stdout.read_line(&mut line).unwrap(); + stack_num += 1; + println!("({: >3}) {}", stack_num, line.trim()); } } addr2line_proc.kill().unwrap(); diff --git a/ostd/libs/linux-bzimage/setup/src/sync.rs b/ostd/libs/linux-bzimage/setup/src/sync.rs index 5e372b754..d3612acc3 100644 --- a/ostd/libs/linux-bzimage/setup/src/sync.rs +++ b/ostd/libs/linux-bzimage/setup/src/sync.rs @@ -21,7 +21,7 @@ impl Mutex { } /// Locks the mutex. - pub fn lock(&self) -> MutexGuard { + pub fn lock(&self) -> MutexGuard<'_, T> { self.0.borrow_mut() } } diff --git a/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/efi.rs b/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/efi.rs index 6d26d5f1d..dfcc82122 100644 --- a/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/efi.rs +++ b/ostd/libs/linux-bzimage/setup/src/x86/amd64_efi/efi.rs @@ -63,22 +63,24 @@ fn efi_phase_boot(boot_params: &mut BootParams) { ); // Load the command line if it is not loaded. 
- if boot_params.hdr.cmd_line_ptr == 0 && boot_params.ext_cmd_line_ptr == 0 { - if let Some(cmdline) = load_cmdline() { - boot_params.hdr.cmd_line_ptr = cmdline.as_ptr().addr().try_into().unwrap(); - boot_params.ext_cmd_line_ptr = 0; - boot_params.hdr.cmdline_size = (cmdline.count_bytes() + 1).try_into().unwrap(); - } + if boot_params.hdr.cmd_line_ptr == 0 + && boot_params.ext_cmd_line_ptr == 0 + && let Some(cmdline) = load_cmdline() + { + boot_params.hdr.cmd_line_ptr = cmdline.as_ptr().addr().try_into().unwrap(); + boot_params.ext_cmd_line_ptr = 0; + boot_params.hdr.cmdline_size = (cmdline.count_bytes() + 1).try_into().unwrap(); } // Load the init ramdisk if it is not loaded. - if boot_params.hdr.ramdisk_image == 0 && boot_params.ext_ramdisk_image == 0 { - if let Some(initrd) = load_initrd() { - boot_params.hdr.ramdisk_image = initrd.as_ptr().addr().try_into().unwrap(); - boot_params.ext_ramdisk_image = 0; - boot_params.hdr.ramdisk_size = initrd.len().try_into().unwrap(); - boot_params.ext_ramdisk_size = 0; - } + if boot_params.hdr.ramdisk_image == 0 + && boot_params.ext_ramdisk_image == 0 + && let Some(initrd) = load_initrd() + { + boot_params.hdr.ramdisk_image = initrd.as_ptr().addr().try_into().unwrap(); + boot_params.ext_ramdisk_image = 0; + boot_params.hdr.ramdisk_size = initrd.len().try_into().unwrap(); + boot_params.ext_ramdisk_size = 0; } // Fill the boot params with the RSDP address if it is not provided. diff --git a/ostd/libs/linux-bzimage/setup/src/x86/mod.rs b/ostd/libs/linux-bzimage/setup/src/x86/mod.rs index df2ff1352..aa3287110 100644 --- a/ostd/libs/linux-bzimage/setup/src/x86/mod.rs +++ b/ostd/libs/linux-bzimage/setup/src/x86/mod.rs @@ -34,7 +34,7 @@ pub fn image_load_offset() -> isize { fn entry_legacy32(); } - (entry_legacy32 as usize as isize) - CODE32_START + (entry_legacy32 as *const () as usize as isize) - CODE32_START } global_asm!( @@ -54,7 +54,7 @@ fn payload() -> &'static [u8] { unsafe { core::slice::from_raw_parts( __payload_start as *const u8, - __payload_end as usize - __payload_start as usize, + __payload_end as *const () as usize - __payload_start as *const () as usize, ) } } diff --git a/ostd/libs/orpc-macros/Cargo.toml b/ostd/libs/orpc-macros/Cargo.toml index 5b67a383c..f361a4ae7 100644 --- a/ostd/libs/orpc-macros/Cargo.toml +++ b/ostd/libs/orpc-macros/Cargo.toml @@ -6,12 +6,6 @@ edition = "2024" [lib] proc-macro = true -[lints.rust] -mismatched_lifetime_syntaxes = "allow" - -[lints.clippy] -collapsible-if = "allow" - [dependencies] proc-macro2 = "1.0" quote = "1.0" diff --git a/ostd/libs/orpc-macros/src/parsing_utils.rs b/ostd/libs/orpc-macros/src/parsing_utils.rs index a9190c2fa..e8fe5d647 100644 --- a/ostd/libs/orpc-macros/src/parsing_utils.rs +++ b/ostd/libs/orpc-macros/src/parsing_utils.rs @@ -13,18 +13,18 @@ pub(crate) enum ORPCMethodKind<'a> { impl ORPCMethodKind<'_> { /// Extract all the required information from a signature. 
- pub(crate) fn of(sig: &syn::Signature) -> Option { + pub(crate) fn of(sig: &'_ syn::Signature) -> Option> { let ret = &sig.output; - if let syn::ReturnType::Type(_, typ) = ret { - if let syn::Type::Path(syn::TypePath { qself: None, path }) = typ.as_ref() { - let path_segment = &path.segments.last()?; - let name = path_segment.ident.to_string(); - return match name.as_str() { - "Result" => Some(ORPCMethodKind::Orpc { return_type: typ }), - "OQueueRef" => Some(ORPCMethodKind::OQueue { return_type: typ }), - _ => None, - }; - } + if let syn::ReturnType::Type(_, typ) = ret + && let syn::Type::Path(syn::TypePath { qself: None, path }) = typ.as_ref() + { + let path_segment = &path.segments.last()?; + let name = path_segment.ident.to_string(); + return match name.as_str() { + "Result" => Some(ORPCMethodKind::Orpc { return_type: typ }), + "OQueueRef" => Some(ORPCMethodKind::OQueue { return_type: typ }), + _ => None, + }; } None } diff --git a/ostd/libs/ostd-test/src/lib.rs b/ostd/libs/ostd-test/src/lib.rs index 47204c88f..a3705ce8f 100644 --- a/ostd/libs/ostd-test/src/lib.rs +++ b/ostd/libs/ostd-test/src/lib.rs @@ -112,13 +112,20 @@ pub struct KtestItemInfo { pub col: usize, } -#[derive(Clone, PartialEq, Debug)] +#[derive(Clone, Debug)] pub struct KtestItem { fn_: fn() -> (), should_panic: (bool, Option<&'static str>), info: KtestItemInfo, } +impl PartialEq for KtestItem { + #[expect(unpredictable_function_pointer_comparisons)] + fn eq(&self, other: &Self) -> bool { + self.fn_ == other.fn_ && self.should_panic == other.should_panic && self.info == other.info + } +} + type CatchUnwindImpl = fn(f: fn() -> ()) -> Result<(), Box>; impl KtestItem { @@ -186,7 +193,8 @@ macro_rules! ktest_array { fn __ktest_array_end(); } let item_size = core::mem::size_of::(); - let l = (__ktest_array_end as usize - __ktest_array as usize) / item_size; + let l = (__ktest_array_end as *const () as usize - __ktest_array as *const () as usize) + / item_size; // SAFETY: __ktest_array is a static section consisting of KtestItem. unsafe { core::slice::from_raw_parts(__ktest_array as *const KtestItem, l) } }}; diff --git a/ostd/src/arch/x86/boot/smp.rs b/ostd/src/arch/x86/boot/smp.rs index 5ca6d304f..d8920cbbf 100644 --- a/ostd/src/arch/x86/boot/smp.rs +++ b/ostd/src/arch/x86/boot/smp.rs @@ -137,7 +137,7 @@ const AP_BOOT_START_PA: usize = 0x8000; /// The size of the AP boot code (the `.ap_boot` section). fn ap_boot_code_size() -> usize { - __ap_boot_end as usize - __ap_boot_start as usize + __ap_boot_end as *const () as usize - __ap_boot_start as *const () as usize } pub(super) fn reclaimable_memory_region() -> MemoryRegion { @@ -152,8 +152,8 @@ pub(super) fn reclaimable_memory_region() -> MemoryRegion { /// /// The caller must ensure the memory region to be filled with AP boot code is valid to write. unsafe fn copy_ap_boot_code() { - let ap_boot_start = __ap_boot_start as usize as *const u8; - let len = __ap_boot_end as usize - __ap_boot_start as usize; + let ap_boot_start = __ap_boot_start as *const () as usize as *const u8; + let len = __ap_boot_end as *const () as usize - __ap_boot_start as *const () as usize; // SAFETY: // 1. The source memory region is valid for reading because it's inside the kernel text. 
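The AP-boot hunks above are instances of the patch-wide rewrite of function-item-to-integer casts: with `function_casts_as_integer = "allow"` removed from the workspace lints, taking the address of a linker symbol now goes through a raw pointer first. Below is a minimal sketch of the idiom; `marker_start`/`marker_end` are hypothetical stand-ins for the `extern "C"` symbols such as `__ap_boot_start`/`__ap_boot_end` used in the real code.

```rust
// Sketch only: ordinary functions standing in for linker-provided symbols,
// which the real code declares in an `extern "C" { ... }` block instead.
fn marker_start() {}
fn marker_end() {}

fn main() {
    // Before: `marker_start as usize` (flagged, since an address-of cast is
    // easy to confuse with an intent to call or compare the function).
    // After: make the intent explicit by casting to a raw pointer first.
    let start = marker_start as *const () as usize;
    let end = marker_end as *const () as usize;

    // With real section markers, `end - start` would be the section size;
    // these two functions are unrelated, so just show the addresses.
    println!("start = {start:#x}, end = {end:#x}");
}
```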
@@ -223,7 +223,8 @@ unsafe fn wake_up_aps_via_mailbox(num_cpus: u32) { fn ap_boot_from_long_mode(); } - let offset = ap_boot_from_long_mode as usize - ap_boot_from_real_mode as usize; + let offset = + ap_boot_from_long_mode as *const () as usize - ap_boot_from_real_mode as *const () as usize; let acpi_tables = get_acpi_tables().unwrap(); for ap_num in 1..num_cpus { diff --git a/ostd/src/arch/x86/ex_table.rs b/ostd/src/arch/x86/ex_table.rs index 7623cfc4f..0c1375be4 100644 --- a/ostd/src/arch/x86/ex_table.rs +++ b/ostd/src/arch/x86/ex_table.rs @@ -62,8 +62,8 @@ impl ExTable { /// if the exception handling fails and there is a predefined recovery action, /// then the found recovery action will be taken. pub fn find_recovery_inst_addr(inst_addr: Vaddr) -> Option { - let table_size = - (__ex_table_end as usize - __ex_table as usize) / core::mem::size_of::(); + let table_size = (__ex_table_end as *const () as usize - __ex_table as *const () as usize) + / core::mem::size_of::(); // SAFETY: `__ex_table` is a static section consisting of `ExTableItem`. let ex_table = unsafe { core::slice::from_raw_parts(__ex_table as *const ExTableItem, table_size) }; diff --git a/ostd/src/arch/x86/kernel/apic/xapic.rs b/ostd/src/arch/x86/kernel/apic/xapic.rs index f45dcec97..f0fbd437d 100644 --- a/ostd/src/arch/x86/kernel/apic/xapic.rs +++ b/ostd/src/arch/x86/kernel/apic/xapic.rs @@ -36,7 +36,7 @@ impl XApic { /// Reads a register from the MMIO region. fn read(&self, offset: u32) -> u32 { - assert!(offset as usize % 4 == 0); + assert!((offset as usize).is_multiple_of(4)); let index = offset as usize / 4; debug_assert!(index < 256); unsafe { core::ptr::read_volatile(self.mmio_start.add(index)) } @@ -44,7 +44,7 @@ impl XApic { /// Writes a register in the MMIO region. fn write(&self, offset: u32, val: u32) { - assert!(offset as usize % 4 == 0); + assert!((offset as usize).is_multiple_of(4)); let index = offset as usize / 4; debug_assert!(index < 256); unsafe { core::ptr::write_volatile(self.mmio_start.add(index), val) } diff --git a/ostd/src/arch/x86/kernel/tsc.rs b/ostd/src/arch/x86/kernel/tsc.rs index 23a6bf43b..1fed2b46e 100644 --- a/ostd/src/arch/x86/kernel/tsc.rs +++ b/ostd/src/arch/x86/kernel/tsc.rs @@ -19,8 +19,7 @@ use crate::{ pub(in crate::arch) static TSC_FREQ: AtomicU64 = AtomicU64::new(0); pub fn init_tsc_freq() { - let tsc_freq = - determine_tsc_freq_via_cpuid().map_or_else(determine_tsc_freq_via_pit, |freq| freq); + let tsc_freq = determine_tsc_freq_via_cpuid().unwrap_or_else(determine_tsc_freq_via_pit); TSC_FREQ.store(tsc_freq, Ordering::Relaxed); info!("TSC frequency:{:?} Hz", tsc_freq); } diff --git a/ostd/src/arch/x86/trap/syscall.rs b/ostd/src/arch/x86/trap/syscall.rs index 3cea97f2e..dee39859c 100644 --- a/ostd/src/arch/x86/trap/syscall.rs +++ b/ostd/src/arch/x86/trap/syscall.rs @@ -61,7 +61,7 @@ pub(super) unsafe fn init() { // entry point and flags to clear are also correctly set, so enabling the `syscall` and // `sysret` instructions is safe. unsafe { - LStar::write(VirtAddr::new(syscall_entry as usize as u64)); + LStar::write(VirtAddr::new(syscall_entry as *const () as usize as u64)); SFMask::write(RFlags::from_bits(RFLAGS_MASK).unwrap()); // Enable the `syscall` and `sysret` instructions. 
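The `XApic::read`/`XApic::write` hunks above show the other mechanical rewrite running through this patch: open-coded `x % n == 0` checks become `usize::is_multiple_of`, per Clippy's `manual_is_multiple_of`. A minimal before/after sketch; `PAGE_SIZE` here is just an illustrative constant, not the kernel's definition.

```rust
// Sketch of the `manual_is_multiple_of` rewrite.
const PAGE_SIZE: usize = 4096; // illustrative stand-in

fn is_page_aligned(addr: usize) -> bool {
    // Before: `addr % PAGE_SIZE == 0`
    addr.is_multiple_of(PAGE_SIZE)
}

fn main() {
    assert!(is_page_aligned(0x8000));
    assert!(!is_page_aligned(0x8001));

    // One behavioral nicety: a zero divisor is defined (only zero is a
    // multiple of zero) instead of panicking the way `%` would.
    assert!(0usize.is_multiple_of(0));
    assert!(!1usize.is_multiple_of(0));
}
```

Note that most hunks above differ only in the negated form (`!x.is_multiple_of(n)` replacing `x % n != 0`), which is easy to misread in review.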
diff --git a/ostd/src/boot/memory_region.rs b/ostd/src/boot/memory_region.rs index 19568fd90..a7726441c 100644 --- a/ostd/src/boot/memory_region.rs +++ b/ostd/src/boot/memory_region.rs @@ -67,8 +67,8 @@ impl MemoryRegion { fn __kernel_end(); } MemoryRegion { - base: __kernel_start as usize - kernel_loaded_offset(), - len: __kernel_end as usize - __kernel_start as usize, + base: __kernel_start as *const () as usize - kernel_loaded_offset(), + len: __kernel_end as *const () as usize - __kernel_start as *const () as usize, typ: MemoryRegionType::Kernel, } } diff --git a/ostd/src/bus/pci/capability/msix.rs b/ostd/src/bus/pci/capability/msix.rs index b0e32687e..cabd29b73 100644 --- a/ostd/src/bus/pci/capability/msix.rs +++ b/ostd/src/bus/pci/capability/msix.rs @@ -168,7 +168,7 @@ impl CapabilityMsixData { .unwrap(); } - let _old_irq = core::mem::replace(&mut self.irqs[index as usize], Some(irq)); + let _old_irq = self.irqs[index as usize].replace(irq); // Enable this msix vector self.table_bar .io_mem() diff --git a/ostd/src/bus/pci/cfg_space.rs b/ostd/src/bus/pci/cfg_space.rs index 2a5e394ad..95016dbd0 100644 --- a/ostd/src/bus/pci/cfg_space.rs +++ b/ostd/src/bus/pci/cfg_space.rs @@ -306,7 +306,7 @@ impl IoBar { /// Reads from port pub fn read(&self, offset: u32) -> Result { // Check alignment - if (self.base + offset) % size_of::() as u32 != 0 { + if !(self.base + offset).is_multiple_of(size_of::() as u32) { return InvalidArgsSnafu.fail(); } // Check overflow @@ -321,7 +321,7 @@ impl IoBar { /// Writes to port pub fn write(&self, offset: u32, value: T) -> Result<()> { // Check alignment - if (self.base + offset) % size_of::() as u32 != 0 { + if !(self.base + offset).is_multiple_of(size_of::() as u32) { return InvalidArgsSnafu.fail(); } // Check overflow diff --git a/ostd/src/cpu/local/cell.rs b/ostd/src/cpu/local/cell.rs index c05b2b075..619c20bdf 100644 --- a/ostd/src/cpu/local/cell.rs +++ b/ostd/src/cpu/local/cell.rs @@ -114,11 +114,13 @@ impl CpuLocalCell { let offset = { let bsp_va = self as *const _ as usize; - let bsp_base = __cpu_local_start as usize; + let bsp_base = __cpu_local_start as *const () as usize; // The implementation should ensure that the CPU-local object resides in the `.cpu_local`. - debug_assert!(bsp_va + core::mem::size_of::() <= __cpu_local_end as usize); + debug_assert!( + bsp_va + core::mem::size_of::() <= __cpu_local_end as *const () as usize + ); - bsp_va - bsp_base as usize + bsp_va - bsp_base }; let local_base = arch::cpu::local::get_base() as usize; @@ -155,7 +157,7 @@ impl> CpuLocalCell { /// Note that this memory operation will not be elided or reordered by the /// compiler since it is a black-box. pub fn add_assign(&'static self, rhs: T) { - let offset = self as *const _ as usize - __cpu_local_start as usize; + let offset = self as *const _ as usize - __cpu_local_start as *const () as usize; // SAFETY: The CPU-local object is defined in the `.cpu_local` section, // so the pointer to the object is valid. And the reference is never shared. unsafe { @@ -172,7 +174,7 @@ impl> CpuLocalCell { /// Note that this memory operation will not be elided or reordered by the /// compiler since it is a black-box. pub fn sub_assign(&'static self, rhs: T) { - let offset = self as *const _ as usize - __cpu_local_start as usize; + let offset = self as *const _ as usize - __cpu_local_start as *const () as usize; // SAFETY: The CPU-local object is defined in the `.cpu_local` section, // so the pointer to the object is valid. And the reference is never shared. 
unsafe { @@ -187,7 +189,7 @@ impl> CpuLocalCell { /// Note that this memory operation will not be elided or reordered by the /// compiler since it is a black-box. pub fn bitand_assign(&'static self, rhs: T) { - let offset = self as *const _ as usize - __cpu_local_start as usize; + let offset = self as *const _ as usize - __cpu_local_start as *const () as usize; // SAFETY: The CPU-local object is defined in the `.cpu_local` section, // so the pointer to the object is valid. And the reference is never shared. unsafe { @@ -202,7 +204,7 @@ impl> CpuLocalCell { /// Note that this memory operation will not be elided or reordered by the /// compiler since it is a black-box. pub fn bitor_assign(&'static self, rhs: T) { - let offset = self as *const _ as usize - __cpu_local_start as usize; + let offset = self as *const _ as usize - __cpu_local_start as *const () as usize; // SAFETY: The CPU-local object is defined in the `.cpu_local` section, // so the pointer to the object is valid. And the reference is never shared. unsafe { @@ -217,7 +219,7 @@ impl> CpuLocalCell { /// Note that this memory operation will not be elided or reordered by the /// compiler since it is a black-box. pub fn bitxor_assign(&'static self, rhs: T) { - let offset = self as *const _ as usize - __cpu_local_start as usize; + let offset = self as *const _ as usize - __cpu_local_start as *const () as usize; // SAFETY: The CPU-local object is defined in the `.cpu_local` section, // so the pointer to the object is valid. And the reference is never shared. unsafe { @@ -232,7 +234,7 @@ impl CpuLocalCell { /// Note that this memory operation will not be elided or reordered by the /// compiler since it is a black-box. pub fn load(&'static self) -> T { - let offset = self as *const _ as usize - __cpu_local_start as usize; + let offset = self as *const _ as usize - __cpu_local_start as *const () as usize; // SAFETY: The CPU-local object is defined in the `.cpu_local` section, // so the pointer to the object is valid. unsafe { T::load(offset as *const T) } @@ -245,7 +247,7 @@ impl CpuLocalCell { /// Note that this memory operation will not be elided or reordered by the /// compiler since it is a black-box. pub fn store(&'static self, val: T) { - let offset = self as *const _ as usize - __cpu_local_start as usize; + let offset = self as *const _ as usize - __cpu_local_start as *const () as usize; // SAFETY: The CPU-local object is defined in the `.cpu_local` section, // so the pointer to the object is valid. And the reference is never shared. 
unsafe { diff --git a/ostd/src/cpu/local/dyn_cpu_local.rs b/ostd/src/cpu/local/dyn_cpu_local.rs index 9ed69aec2..2754cc972 100644 --- a/ostd/src/cpu/local/dyn_cpu_local.rs +++ b/ostd/src/cpu/local/dyn_cpu_local.rs @@ -130,7 +130,7 @@ impl DynCpuLocalChunk { .alloc_segment_with(total_chunk_size.div_ceil(PAGE_SIZE), |_| DynCpuLocalMeta)?; let num_items = CHUNK_SIZE / ITEM_SIZE; - const { assert!(CHUNK_SIZE % ITEM_SIZE == 0) }; + const { assert!(CHUNK_SIZE.is_multiple_of(ITEM_SIZE)) }; Ok(Self { segment: ManuallyDrop::new(segment), diff --git a/ostd/src/cpu/local/mod.rs b/ostd/src/cpu/local/mod.rs index e7611c962..68dfd0d12 100644 --- a/ostd/src/cpu/local/mod.rs +++ b/ostd/src/cpu/local/mod.rs @@ -227,8 +227,8 @@ pub(crate) unsafe fn copy_bsp_for_ap(num_cpus: usize) { unsafe { core::slice::from_raw_parts_mut(ptr, num_aps) } }; - let bsp_base_va = __cpu_local_start as usize; - let bsp_end_va = __cpu_local_end as usize; + let bsp_base_va = __cpu_local_start as *const () as usize; + let bsp_end_va = __cpu_local_end as *const () as usize; // Allocate the CPU-local storage segments for APs. for res_addr_mut in res.iter_mut() { diff --git a/ostd/src/cpu/local/static_cpu_local.rs b/ostd/src/cpu/local/static_cpu_local.rs index 428b18140..742338f13 100644 --- a/ostd/src/cpu/local/static_cpu_local.rs +++ b/ostd/src/cpu/local/static_cpu_local.rs @@ -83,9 +83,9 @@ impl StaticStorage { /// Gets the offset of the CPU-local object in the CPU-local area. fn get_offset(&self) -> usize { let bsp_va = self as *const _ as usize; - let bsp_base = __cpu_local_start as usize; + let bsp_base = __cpu_local_start as *const () as usize; // The implementation should ensure that the CPU-local object resides in the `.cpu_local`. - debug_assert!(bsp_va + core::mem::size_of::() <= __cpu_local_end as usize); + debug_assert!(bsp_va + core::mem::size_of::() <= __cpu_local_end as *const () as usize); bsp_va - bsp_base } diff --git a/ostd/src/cpu/set.rs b/ostd/src/cpu/set.rs index c7fceb68b..37583a909 100644 --- a/ostd/src/cpu/set.rs +++ b/ostd/src/cpu/set.rs @@ -89,7 +89,7 @@ impl CpuSet { pub fn is_full(&self) -> bool { let num_cpus = num_cpus(); self.bits.iter().enumerate().all(|(idx, part)| { - if idx == self.bits.len() - 1 && num_cpus % BITS_PER_PART != 0 { + if idx == self.bits.len() - 1 && !num_cpus.is_multiple_of(BITS_PER_PART) { *part == (1 << (num_cpus % BITS_PER_PART)) - 1 } else { *part == !0 @@ -134,7 +134,7 @@ impl CpuSet { fn clear_nonexistent_cpu_bits(&mut self) { let num_cpus = num_cpus(); - if num_cpus % BITS_PER_PART != 0 { + if !num_cpus.is_multiple_of(BITS_PER_PART) { let num_parts = parts_for_cpus(num_cpus); self.bits[num_parts - 1] &= (1 << (num_cpus % BITS_PER_PART)) - 1; } diff --git a/ostd/src/io/io_port/allocator.rs b/ostd/src/io/io_port/allocator.rs index d316ef4da..307490272 100644 --- a/ostd/src/io/io_port/allocator.rs +++ b/ostd/src/io/io_port/allocator.rs @@ -74,14 +74,15 @@ pub(crate) unsafe fn init() { fn __sensitive_io_ports_start(); fn __sensitive_io_ports_end(); } - let start = __sensitive_io_ports_start as usize; - let end = __sensitive_io_ports_end as usize; - assert!((end - start) % size_of::() == 0); + let start = __sensitive_io_ports_start as *const () as usize; + let end = __sensitive_io_ports_end as *const () as usize; + assert!((end - start).is_multiple_of(size_of::())); // Iterate through the sensitive I/O port ranges and remove them from the allocator. 
let io_port_range_count = (end - start) / size_of::(); for i in 0..io_port_range_count { - let range_base_addr = __sensitive_io_ports_start as usize + i * size_of::(); + let range_base_addr = + __sensitive_io_ports_start as *const () as usize + i * size_of::(); // SAFETY: The range is guaranteed to be valid as it is defined in the `.sensitive_io_ports` section. let port_range = unsafe { *(range_base_addr as *const RawIoPortRange) }; diff --git a/ostd/src/lib.rs b/ostd/src/lib.rs index 8cfc75221..6229944e3 100644 --- a/ostd/src/lib.rs +++ b/ostd/src/lib.rs @@ -165,10 +165,10 @@ fn invoke_ffi_init_funcs() { fn __sinit_array(); fn __einit_array(); } - let call_len = (__einit_array as usize - __sinit_array as usize) / 8; + let call_len = (__einit_array as *const () as usize - __sinit_array as *const () as usize) / 8; for i in 0..call_len { unsafe { - let function = (__sinit_array as usize + 8 * i) as *const fn(); + let function = (__sinit_array as *const () as usize + 8 * i) as *const fn(); (*function)(); } } diff --git a/ostd/src/mm/dma/dma_stream.rs b/ostd/src/mm/dma/dma_stream.rs index 458a833b2..00b77e9d6 100644 --- a/ostd/src/mm/dma/dma_stream.rs +++ b/ostd/src/mm/dma/dma_stream.rs @@ -302,14 +302,14 @@ impl> DmaStreamSlice { } /// Returns a reader to read data from it. - pub fn reader(&self) -> Result, Error> { + pub fn reader(&self) -> Result, Error> { let mut stream_reader = self.stream.as_ref().reader()?; stream_reader.skip(self.offset).limit(self.len); Ok(stream_reader) } /// Returns a writer to write data into it. - pub fn writer(&self) -> Result, Error> { + pub fn writer(&self) -> Result, Error> { let mut stream_writer = self.stream.as_ref().writer()?; stream_writer.skip(self.offset).limit(self.len); Ok(stream_writer) diff --git a/ostd/src/mm/frame/meta.rs b/ostd/src/mm/frame/meta.rs index c5ae13a67..e4ca78684 100644 --- a/ostd/src/mm/frame/meta.rs +++ b/ostd/src/mm/frame/meta.rs @@ -132,7 +132,7 @@ pub(super) const REF_COUNT_MAX: u64 = i64::MAX as u64; type FrameMetaVtablePtr = core::ptr::DynMetadata; -const_assert!(PAGE_SIZE % META_SLOT_SIZE == 0); +const_assert!(PAGE_SIZE.is_multiple_of(META_SLOT_SIZE)); const_assert!(size_of::() == META_SLOT_SIZE); /// All frame metadata types must implement this trait. @@ -206,7 +206,7 @@ pub enum GetFrameError { /// Gets the reference to a metadata slot. pub(super) fn get_slot(paddr: Paddr) -> Result<&'static MetaSlot, GetFrameError> { - if paddr % PAGE_SIZE != 0 { + if !paddr.is_multiple_of(PAGE_SIZE) { return Err(GetFrameError::NotAligned); } if paddr >= super::max_paddr() { diff --git a/ostd/src/mm/frame/mod.rs b/ostd/src/mm/frame/mod.rs index 44714d980..7e1f475d7 100644 --- a/ostd/src/mm/frame/mod.rs +++ b/ostd/src/mm/frame/mod.rs @@ -344,7 +344,7 @@ impl TryFrom> for UFrame { /// 1. The physical address must represent a valid frame; /// 2. The caller must have already held a reference to the frame. 
pub(in crate::mm) unsafe fn inc_frame_ref_count(paddr: Paddr) { - debug_assert!(paddr % PAGE_SIZE == 0); + debug_assert!(paddr.is_multiple_of(PAGE_SIZE)); debug_assert!(paddr < max_paddr()); let vaddr: Vaddr = mapping::frame_to_meta(paddr); diff --git a/ostd/src/mm/frame/segment.rs b/ostd/src/mm/frame/segment.rs index 64993d9a1..05cd91697 100644 --- a/ostd/src/mm/frame/segment.rs +++ b/ostd/src/mm/frame/segment.rs @@ -86,7 +86,7 @@ impl Segment { where F: FnMut(Paddr) -> M, { - if range.start % PAGE_SIZE != 0 || range.end % PAGE_SIZE != 0 { + if !range.start.is_multiple_of(PAGE_SIZE) || !range.end.is_multiple_of(PAGE_SIZE) { return Err(GetFrameError::NotAligned); } if range.end > super::max_paddr() { @@ -150,7 +150,7 @@ impl Segment { /// The function panics if the offset is out of bounds, at either ends, or /// not base-page-aligned. pub fn split(self, offset: usize) -> (Self, Self) { - assert!(offset % PAGE_SIZE == 0); + assert!(offset.is_multiple_of(PAGE_SIZE)); assert!(0 < offset && offset < self.size()); let old = ManuallyDrop::new(self); @@ -178,7 +178,7 @@ impl Segment { /// The function panics if the byte offset range is out of bounds, or if /// any of the ends of the byte offset range is not base-page aligned. pub fn slice(&self, range: &Range) -> Self { - assert!(range.start % PAGE_SIZE == 0 && range.end % PAGE_SIZE == 0); + assert!(range.start.is_multiple_of(PAGE_SIZE) && range.end.is_multiple_of(PAGE_SIZE)); let start = self.range.start + range.start; let end = self.range.start + range.end; assert!(start <= end && end <= self.range.end); diff --git a/ostd/src/mm/heap/mod.rs b/ostd/src/mm/heap/mod.rs index 0afbeedd0..2b317b824 100644 --- a/ostd/src/mm/heap/mod.rs +++ b/ostd/src/mm/heap/mod.rs @@ -110,7 +110,7 @@ unsafe impl GlobalAlloc for AllocDispatch { if required_slot.size() != slot.size() || slot.size() < layout.size() - || slot.as_ptr() as Vaddr % layout.align() != 0 + || !(slot.as_ptr() as Vaddr).is_multiple_of(layout.align()) { abort_with_message!( "Heap allocation mismatch: slot ptr = {:p}, size = {:x}; layout = {:#x?}; required_slot = {:#x?}", diff --git a/ostd/src/mm/io.rs b/ostd/src/mm/io.rs index 6b8bf65ca..dc5f12cf2 100644 --- a/ostd/src/mm/io.rs +++ b/ostd/src/mm/io.rs @@ -788,7 +788,7 @@ impl<'a> VmWriter<'a, Infallible> { assert!(cursor.is_aligned()); let avail = self.avail(); - assert!(avail % core::mem::size_of::() == 0); + assert!(avail.is_multiple_of(core::mem::size_of::())); let written_num = avail / core::mem::size_of::(); for i in 0..written_num { diff --git a/ostd/src/mm/kspace/kvirt_area.rs b/ostd/src/mm/kspace/kvirt_area.rs index 6a38674ac..340e708c6 100644 --- a/ostd/src/mm/kspace/kvirt_area.rs +++ b/ostd/src/mm/kspace/kvirt_area.rs @@ -85,8 +85,8 @@ impl KVirtArea { frames: impl Iterator>, prop: PageProperty, ) -> Self { - assert!(area_size % PAGE_SIZE == 0); - assert!(map_offset % PAGE_SIZE == 0); + assert!(area_size.is_multiple_of(PAGE_SIZE)); + assert!(map_offset.is_multiple_of(PAGE_SIZE)); let range = KVIRT_AREA_ALLOCATOR.alloc(area_size).unwrap(); let cursor_range = range.start + map_offset..range.end; @@ -131,10 +131,10 @@ impl KVirtArea { pa_range: Range, prop: PageProperty, ) -> Self { - assert!(pa_range.start % PAGE_SIZE == 0); - assert!(pa_range.end % PAGE_SIZE == 0); - assert!(area_size % PAGE_SIZE == 0); - assert!(map_offset % PAGE_SIZE == 0); + assert!(pa_range.start.is_multiple_of(PAGE_SIZE)); + assert!(pa_range.end.is_multiple_of(PAGE_SIZE)); + assert!(area_size.is_multiple_of(PAGE_SIZE)); + 
assert!(map_offset.is_multiple_of(PAGE_SIZE)); assert!(map_offset + pa_range.len() <= area_size); let range = KVIRT_AREA_ALLOCATOR.alloc(area_size).unwrap(); diff --git a/ostd/src/mm/page_table/cursor/mod.rs b/ostd/src/mm/page_table/cursor/mod.rs index 1d870c546..6308ada12 100644 --- a/ostd/src/mm/page_table/cursor/mod.rs +++ b/ostd/src/mm/page_table/cursor/mod.rs @@ -125,7 +125,8 @@ impl<'rcu, C: PageTableConfig> Cursor<'rcu, C> { if !is_valid_range::(va) || va.is_empty() { return Err(PageTableError::InvalidVaddrRange(va.start, va.end)); } - if va.start % C::BASE_PAGE_SIZE != 0 || va.end % C::BASE_PAGE_SIZE != 0 { + if !va.start.is_multiple_of(C::BASE_PAGE_SIZE) || !va.end.is_multiple_of(C::BASE_PAGE_SIZE) + { return Err(PageTableError::UnalignedVaddr); } @@ -193,10 +194,10 @@ impl<'rcu, C: PageTableConfig> Cursor<'rcu, C> { // TODO(aneesh): handle levels larger than two. let rcu_guard = self.rcu_guard; let mut cur_entry = self.cur_entry(); - if let ChildRef::Frame(_, _, _) = cur_entry.to_ref() { - if let Some(split_child) = cur_entry.split_if_mapped_huge(rcu_guard) { - self.push_level(split_child); - } + if let ChildRef::Frame(_, _, _) = cur_entry.to_ref() + && let Some(split_child) = cur_entry.split_if_mapped_huge(rcu_guard) + { + self.push_level(split_child); } self.jump(va).unwrap(); } @@ -299,7 +300,7 @@ impl<'rcu, C: PageTableConfig> Cursor<'rcu, C> { /// /// This method panics if the address has bad alignment. pub fn jump(&mut self, va: Vaddr) -> Result<(), PageTableError> { - assert!(va % C::BASE_PAGE_SIZE == 0); + assert!(va.is_multiple_of(C::BASE_PAGE_SIZE)); if !self.barrier_va.contains(&va) { return Err(PageTableError::InvalidVaddr(va)); } diff --git a/ostd/src/mm/page_table/mod.rs b/ostd/src/mm/page_table/mod.rs index 7beeae47f..d91537c09 100644 --- a/ostd/src/mm/page_table/mod.rs +++ b/ostd/src/mm/page_table/mod.rs @@ -188,8 +188,8 @@ pub(crate) fn largest_pages( let mut level = C::HIGHEST_TRANSLATION_LEVEL; while page_size::(level) > len - || va % page_size::(level) != 0 - || pa % page_size::(level) != 0 + || !va.is_multiple_of(page_size::(level)) + || !pa.is_multiple_of(page_size::(level)) { level -= 1; } diff --git a/ostd/src/mm/vm_space.rs b/ostd/src/mm/vm_space.rs index ba0c43f29..0a3bb9340 100644 --- a/ostd/src/mm/vm_space.rs +++ b/ostd/src/mm/vm_space.rs @@ -225,7 +225,7 @@ impl VmSpace { return AccessDeniedSnafu.fail(); } - if vaddr.checked_add(len).unwrap_or(usize::MAX) > MAX_USERSPACE_VADDR { + if vaddr.saturating_add(len) > MAX_USERSPACE_VADDR { return AccessDeniedSnafu.fail(); } @@ -245,7 +245,7 @@ impl VmSpace { return AccessDeniedSnafu.fail(); } - if vaddr.checked_add(len).unwrap_or(usize::MAX) > MAX_USERSPACE_VADDR { + if vaddr.saturating_add(len) > MAX_USERSPACE_VADDR { return AccessDeniedSnafu.fail(); } diff --git a/ostd/src/orpc/oqueue/mod.rs b/ostd/src/orpc/oqueue/mod.rs index 3bdff7a74..e21fc792a 100644 --- a/ostd/src/orpc/oqueue/mod.rs +++ b/ostd/src/orpc/oqueue/mod.rs @@ -811,7 +811,11 @@ mod test { let producer = queue.attach_value_producer().unwrap(); let observer = queue .attach_strong_observer(ObservationQuery::new_filter(|m: &Message| { - if m.id % 2 == 0 { Some(m.id) } else { None } + if m.id.is_multiple_of(2) { + Some(m.id) + } else { + None + } })) .unwrap(); @@ -841,11 +845,13 @@ mod test { let observer = queue .attach_weak_observer( 2, - ObservationQuery::new_filter( - |m: &Message| { - if m.id % 2 == 0 { Some(m.id) } else { None } - }, - ), + ObservationQuery::new_filter(|m: &Message| { + if m.id.is_multiple_of(2) { + Some(m.id) + } else { + 
None + } + }), ) .unwrap(); (producer, observer) diff --git a/ostd/src/orpc/path.rs b/ostd/src/orpc/path.rs index 28367c7fd..22df33614 100644 --- a/ostd/src/orpc/path.rs +++ b/ostd/src/orpc/path.rs @@ -69,7 +69,7 @@ impl Hash for PathComponent { impl PathComponent { /// Returns a ref (-like) value which provides uniform access to names as a `&str`. - pub fn borrow(&self) -> PathComponentRef { + pub fn borrow(&self) -> PathComponentRef<'_> { match self { PathComponent::Name(s) => PathComponentRef::Name(s), PathComponent::OwnedName(s) => PathComponentRef::Name(s), diff --git a/ostd/src/sync/mutex.rs b/ostd/src/sync/mutex.rs index 98d5fee11..797e9b271 100644 --- a/ostd/src/sync/mutex.rs +++ b/ostd/src/sync/mutex.rs @@ -107,7 +107,7 @@ impl Mutex { /// /// This method runs in a block way until the mutex can be acquired. #[track_caller] - pub fn lock(&self) -> MutexGuard { + pub fn lock(&self) -> MutexGuard<'_, T> { #[cfg(feature = "track_mutex")] if let Some(r) = self.try_lock() { return r; @@ -136,7 +136,7 @@ impl Mutex { /// Tries Acquire the mutex immedidately. #[track_caller] - pub fn try_lock(&self) -> Option> { + pub fn try_lock(&self) -> Option> { // Cannot be reduced to `then_some`, or the possible dropping of the temporary // guard will cause an unexpected unlock. // SAFETY: The lock is successfully acquired when creating the guard. diff --git a/ostd/src/sync/rwarc.rs b/ostd/src/sync/rwarc.rs index f6448da46..36334360a 100644 --- a/ostd/src/sync/rwarc.rs +++ b/ostd/src/sync/rwarc.rs @@ -41,12 +41,12 @@ impl RwArc { } /// Acquires the read lock for immutable access. - pub fn read(&self) -> RwLockReadGuard { + pub fn read(&self) -> RwLockReadGuard<'_, T, PreemptDisabled> { self.0.data.read() } /// Acquires the write lock for mutable access. - pub fn write(&self) -> RwLockWriteGuard { + pub fn write(&self) -> RwLockWriteGuard<'_, T, PreemptDisabled> { self.0.data.write() } @@ -108,7 +108,7 @@ impl RwArc { impl RoArc { /// Acquires the read lock for immutable access. - pub fn read(&self) -> RwLockReadGuard { + pub fn read(&self) -> RwLockReadGuard<'_, T, PreemptDisabled> { self.0.data.read() } } diff --git a/ostd/src/sync/rwlock.rs b/ostd/src/sync/rwlock.rs index 22da44010..2a6d4cc2a 100644 --- a/ostd/src/sync/rwlock.rs +++ b/ostd/src/sync/rwlock.rs @@ -131,7 +131,7 @@ impl RwLock { /// upgrading upreaders present. There is no guarantee for the order /// in which other readers or writers waiting simultaneously will /// obtain the lock. - pub fn read(&self) -> RwLockReadGuard { + pub fn read(&self) -> RwLockReadGuard<'_, T, G> { loop { if let Some(readguard) = self.try_read() { return readguard; @@ -163,7 +163,7 @@ impl RwLock { /// upreaders or readers present. There is no guarantee for the order /// in which other readers or writers waiting simultaneously will /// obtain the lock. - pub fn write(&self) -> RwLockWriteGuard { + pub fn write(&self) -> RwLockWriteGuard<'_, T, G> { loop { if let Some(writeguard) = self.try_write() { return writeguard; @@ -199,7 +199,7 @@ impl RwLock { /// and reader do not differ before invoking the upgread method. However, /// only one upreader can exist at any time to avoid deadlock in the /// upgread method. - pub fn upread(&self) -> RwLockUpgradeableGuard { + pub fn upread(&self) -> RwLockUpgradeableGuard<'_, T, G> { loop { if let Some(guard) = self.try_upread() { return guard; @@ -228,7 +228,7 @@ impl RwLock { /// Attempts to acquire a read lock. /// /// This function will never spin-wait and will return immediately. 
- pub fn try_read(&self) -> Option> { + pub fn try_read(&self) -> Option> { let guard = G::read_guard(); let lock = self.lock.fetch_add(READER, Acquire); if lock & (WRITER | MAX_READER | BEING_UPGRADED) == 0 { @@ -262,7 +262,7 @@ impl RwLock { /// Attempts to acquire a write lock. /// /// This function will never spin-wait and will return immediately. - pub fn try_write(&self) -> Option> { + pub fn try_write(&self) -> Option> { let guard = G::guard(); if self .lock @@ -300,7 +300,7 @@ impl RwLock { /// Attempts to acquire an upread lock. /// /// This function will never spin-wait and will return immediately. - pub fn try_upread(&self) -> Option> { + pub fn try_upread(&self) -> Option> { let guard = G::guard(); let lock = self.lock.fetch_or(UPGRADEABLE_READER, Acquire) & (WRITER | UPGRADEABLE_READER); if lock == 0 { diff --git a/ostd/src/sync/rwmutex.rs b/ostd/src/sync/rwmutex.rs index 5c2397f7f..87d17d521 100644 --- a/ostd/src/sync/rwmutex.rs +++ b/ostd/src/sync/rwmutex.rs @@ -122,7 +122,7 @@ impl RwMutex { /// order in which other concurrent readers or writers waiting simultaneously /// will acquire the mutex. #[track_caller] - pub fn read(&self) -> RwMutexReadGuard { + pub fn read(&self) -> RwMutexReadGuard<'_, T> { self.queue.wait_until(|| self.try_read()) } @@ -133,7 +133,7 @@ impl RwMutex { /// order in which other concurrent readers or writers waiting simultaneously /// will acquire the mutex. #[track_caller] - pub fn write(&self) -> RwMutexWriteGuard { + pub fn write(&self) -> RwMutexWriteGuard<'_, T> { self.queue.wait_until(|| self.try_write()) } @@ -148,14 +148,14 @@ impl RwMutex { /// only one upreader can exist at any time to avoid deadlock in the /// upgread method. #[track_caller] - pub fn upread(&self) -> RwMutexUpgradeableGuard { + pub fn upread(&self) -> RwMutexUpgradeableGuard<'_, T> { self.queue.wait_until(|| self.try_upread()) } /// Attempts to acquire a read mutex. /// /// This function will never sleep and will return immediately. - pub fn try_read(&self) -> Option> { + pub fn try_read(&self) -> Option> { let lock = self.lock.fetch_add(READER, Acquire); if lock & (WRITER | BEING_UPGRADED | MAX_READER) == 0 { Some(RwMutexReadGuard { inner: self }) @@ -168,7 +168,7 @@ impl RwMutex { /// Attempts to acquire a write mutex. /// /// This function will never sleep and will return immediately. - pub fn try_write(&self) -> Option> { + pub fn try_write(&self) -> Option> { if self .lock .compare_exchange(0, WRITER, Acquire, Relaxed) @@ -183,7 +183,7 @@ impl RwMutex { /// Attempts to acquire a upread mutex. /// /// This function will never sleep and will return immediately. - pub fn try_upread(&self) -> Option> { + pub fn try_upread(&self) -> Option> { let lock = self.lock.fetch_or(UPGRADEABLE_READER, Acquire) & (WRITER | UPGRADEABLE_READER); if lock == 0 { return Some(RwMutexUpgradeableGuard { inner: self }); diff --git a/ostd/src/sync/spin.rs b/ostd/src/sync/spin.rs index b8132c418..23f80c41f 100644 --- a/ostd/src/sync/spin.rs +++ b/ostd/src/sync/spin.rs @@ -74,7 +74,7 @@ impl SpinLock { impl SpinLock { /// Acquires the spin lock. - pub fn lock(&self) -> SpinLockGuard { + pub fn lock(&self) -> SpinLockGuard<'_, T, G> { // Notice the guard must be created before acquiring the lock. let inner_guard = G::guard(); self.acquire_lock(); @@ -100,7 +100,7 @@ impl SpinLock { } /// Tries acquiring the spin lock immedidately. 
- pub fn try_lock(&self) -> Option> { + pub fn try_lock(&self) -> Option> { let inner_guard = G::guard(); if self.try_acquire_lock() { let lock_guard = SpinLockGuard_ { diff --git a/ostd/src/task/mod.rs b/ostd/src/task/mod.rs index f0a41a475..cb35fdb08 100644 --- a/ostd/src/task/mod.rs +++ b/ostd/src/task/mod.rs @@ -161,11 +161,7 @@ impl Task { /// Returns the user context of this task, if it has. pub fn user_ctx(&self) -> Option<&Arc> { - if self.user_ctx.is_some() { - Some(self.user_ctx.as_ref().unwrap()) - } else { - None - } + self.user_ctx.as_ref() } /// Saves the FPU state for user task. @@ -285,7 +281,7 @@ impl TaskOptions { ctx.get_mut().set_tls_pointer(user_ctx.tls_pointer()); }; ctx.get_mut() - .set_instruction_pointer(kernel_task_entry as usize); + .set_instruction_pointer(kernel_task_entry as *const () as usize); // We should reserve space for the return address in the stack, otherwise // we will write across the page boundary due to the implementation of // the context switch. diff --git a/ostd/src/util/range_alloc.rs b/ostd/src/util/range_alloc.rs index 7bc9b7f72..91d6c300d 100644 --- a/ostd/src/util/range_alloc.rs +++ b/ostd/src/util/range_alloc.rs @@ -84,13 +84,13 @@ impl RangeAllocator { } } - if let Some(key) = to_remove { - if let Some(freenode) = freelist.get_mut(&key) { - if freenode.block.end - size == freenode.block.start { - freelist.remove(&key); - } else { - freenode.block.end -= size; - } + if let Some(key) = to_remove + && let Some(freenode) = freelist.get_mut(&key) + { + if freenode.block.end - size == freenode.block.start { + freelist.remove(&key); + } else { + freenode.block.end -= size; } } @@ -115,12 +115,11 @@ impl RangeAllocator { if let Some((prev_va, prev_node)) = freelist .upper_bound_mut(core::ops::Bound::Excluded(&free_range.start)) .peek_prev() + && prev_node.block.end == free_range.start { - if prev_node.block.end == free_range.start { - let prev_va = *prev_va; - free_range.start = prev_node.block.start; - freelist.remove(&prev_va); - } + let prev_va = *prev_va; + free_range.start = prev_node.block.start; + freelist.remove(&prev_va); } freelist.insert(free_range.start, FreeRange::new(free_range.clone())); @@ -128,19 +127,18 @@ impl RangeAllocator { if let Some((next_va, next_node)) = freelist .lower_bound_mut(core::ops::Bound::Excluded(&free_range.start)) .peek_next() + && free_range.end == next_node.block.start { - if free_range.end == next_node.block.start { - let next_va = *next_va; - free_range.end = next_node.block.end; - freelist.remove(&next_va); - freelist.get_mut(&free_range.start).unwrap().block.end = free_range.end; - } + let next_va = *next_va; + free_range.end = next_node.block.end; + freelist.remove(&next_va); + freelist.get_mut(&free_range.start).unwrap().block.end = free_range.end; } } fn get_freelist_guard( &self, - ) -> SpinLockGuard>, PreemptDisabled> { + ) -> SpinLockGuard<'_, Option>, PreemptDisabled> { let mut lock_guard = self.freelist.lock(); if lock_guard.is_none() { let mut freelist: BTreeMap = BTreeMap::new();
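Most of the remaining hunks (for example in `range_alloc.rs` just above, and in `bundle/mod.rs` earlier) collapse a nested `if let` + `if` pair into a single let-chain, addressing `collapsible_if`; let-chains are accepted by stable Rust on the 2024 edition this workspace uses. A minimal sketch with hypothetical names:

```rust
// Sketch only: `lookup` and `threshold` are hypothetical; the control-flow
// shape mirrors the real hunks.
fn describe(lookup: Option<u32>, threshold: u32) -> &'static str {
    // Before:
    //     if let Some(v) = lookup {
    //         if v > threshold {
    //             return "hit";
    //         }
    //     }
    // After: one condition, one level of nesting, same short-circuiting.
    if let Some(v) = lookup
        && v > threshold
    {
        return "hit";
    }
    "miss"
}

fn main() {
    assert_eq!(describe(Some(7), 5), "hit");
    assert_eq!(describe(Some(3), 5), "miss");
    assert_eq!(describe(None, 5), "miss");
}
```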
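Finally, a few hunks swap hand-rolled `Option` plumbing for the standard combinators: `Task::user_ctx` above becomes a plain `as_ref()`, `init_tsc_freq` uses `unwrap_or_else` instead of `map_or_else(..., |freq| freq)`, and the MSI-X code uses `Option::replace` rather than `mem::replace(..., Some(irq))`. A small sketch of two of those shapes, with hypothetical types:

```rust
// Sketch only: hypothetical struct mirroring the `Task::user_ctx` cleanup.
struct Task {
    user_ctx: Option<u32>,
}

impl Task {
    fn user_ctx(&self) -> Option<&u32> {
        // Before:
        //     if self.user_ctx.is_some() {
        //         Some(self.user_ctx.as_ref().unwrap())
        //     } else {
        //         None
        //     }
        self.user_ctx.as_ref()
    }
}

fn main() {
    let t = Task { user_ctx: Some(7) };
    assert_eq!(t.user_ctx(), Some(&7));

    // `Option::replace` supersedes `core::mem::replace(&mut opt, Some(v))`:
    // it stores the new value and hands back the old one.
    let mut slot: Option<u32> = None;
    let old = slot.replace(42);
    assert_eq!((old, slot), (None, Some(42)));
}
```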