Skip to content

vfs abstractions and tarfs #1037

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 29 commits into
base: rust-next
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
a3fe8d8
xattr: make the xattr array itself const
wedsonaf Sep 29, 2023
484ec70
rust: introduce `InPlaceModule`
wedsonaf Sep 29, 2023
da1a2b6
samples: rust: add in-place initialisation sample
wedsonaf Sep 29, 2023
883e433
rust: add the `container_of` macro
wedsonaf Sep 29, 2023
14513c0
rust: init: introduce `Opaque::try_ffi_init`
wedsonaf Sep 29, 2023
c7d0fb2
rust: time: introduce `time` module
wedsonaf Sep 29, 2023
ca4a93c
rust: types: add little-endian type
wedsonaf Sep 29, 2023
a44bdcc
rust: types: introduce `FromBytes` trait
wedsonaf Sep 29, 2023
caf9b29
rust: mem_cache: introduce `MemCache`
wedsonaf Sep 29, 2023
b0bc357
kbuild: rust: allow modules to allocate memory
wedsonaf Sep 29, 2023
528babd
rust: fs: add registration/unregistration of file systems
wedsonaf Sep 29, 2023
e909f43
rust: fs: introduce the `module_fs` macro
wedsonaf Sep 29, 2023
ad07f4b
samples: rust: add initial ro file system sample
wedsonaf Sep 29, 2023
626056a
rust: fs: introduce `FileSystem::super_params`
wedsonaf Sep 29, 2023
a448dc5
rust: fs: introduce `INode<T>`
wedsonaf Sep 29, 2023
b26f77a
rust: fs: introduce `FileSystem::init_root`
wedsonaf Sep 29, 2023
ac0f637
rust: fs: introduce `FileSystem::read_dir`
wedsonaf Sep 29, 2023
14b32d0
rust: fs: introduce `FileSystem::lookup`
wedsonaf Sep 29, 2023
5e601b9
rust: folio: introduce basic support for folios
wedsonaf Sep 29, 2023
c02d2b9
rust: fs: introduce `FileSystem::read_folio`
wedsonaf Sep 29, 2023
ce0acb6
rust: fs: introduce `FileSystem::read_xattr`
wedsonaf Sep 29, 2023
3f94966
rust: fs: introduce `FileSystem::statfs`
wedsonaf Sep 29, 2023
6032d93
rust: fs: introduce more inode types
wedsonaf Sep 29, 2023
1cf6e5e
rust: fs: add per-superblock data
wedsonaf Sep 29, 2023
516d0e4
rust: fs: add basic support for fs buffer heads
wedsonaf Sep 29, 2023
0605dba
rust: fs: allow file systems backed by a block device
wedsonaf Sep 29, 2023
b40e37b
rust: fs: allow per-inode data
wedsonaf Sep 29, 2023
80fda66
rust: fs: export file type from mode constants
wedsonaf Sep 29, 2023
7189177
tarfs: introduce tar fs
wedsonaf Sep 29, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
rust: folio: introduce basic support for folios
Allow Rust file systems to handle ref-counted folios.

Provide the minimum needed to implement `read_folio` (part of `struct
address_space_operations`) in read-only file systems and to read
uncached blocks.

Signed-off-by: Wedson Almeida Filho <[email protected]>
  • Loading branch information
wedsonaf committed Oct 18, 2023
commit 5e601b9e469fc41d910efe6609f5806cc05cb63b
3 changes: 3 additions & 0 deletions rust/bindings/bindings_helper.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/refcount.h>
#include <linux/wait.h>
#include <linux/sched.h>
Expand All @@ -27,3 +28,5 @@ const slab_flags_t BINDINGS_SLAB_ACCOUNT = SLAB_ACCOUNT;
const unsigned long BINDINGS_SB_RDONLY = SB_RDONLY;

const loff_t BINDINGS_MAX_LFS_FILESIZE = MAX_LFS_FILESIZE;

const size_t BINDINGS_PAGE_SIZE = PAGE_SIZE;
2 changes: 2 additions & 0 deletions rust/bindings/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -59,3 +59,5 @@ pub const SLAB_ACCOUNT: slab_flags_t = BINDINGS_SLAB_ACCOUNT;
pub const SB_RDONLY: core::ffi::c_ulong = BINDINGS_SB_RDONLY;

pub const MAX_LFS_FILESIZE: loff_t = BINDINGS_MAX_LFS_FILESIZE;

pub const PAGE_SIZE: usize = BINDINGS_PAGE_SIZE;
81 changes: 81 additions & 0 deletions rust/helpers.c
Original file line number Diff line number Diff line change
Expand Up @@ -23,10 +23,14 @@
#include <kunit/test-bug.h>
#include <linux/bug.h>
#include <linux/build_bug.h>
#include <linux/cacheflush.h>
#include <linux/err.h>
#include <linux/errname.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/refcount.h>
#include <linux/sched/signal.h>
#include <linux/spinlock.h>
Expand Down Expand Up @@ -145,6 +149,77 @@ struct kunit *rust_helper_kunit_get_current_test(void)
}
EXPORT_SYMBOL_GPL(rust_helper_kunit_get_current_test);

/* Out-of-line wrapper so Rust can call the C `kmap()` (a macro/inline on some configs). */
void *rust_helper_kmap(struct page *page)
{
	return kmap(page);
}
EXPORT_SYMBOL_GPL(rust_helper_kmap);

/* Out-of-line wrapper for `kunmap()`; pairs with rust_helper_kmap() above. */
void rust_helper_kunmap(struct page *page)
{
	kunmap(page);
}
EXPORT_SYMBOL_GPL(rust_helper_kunmap);

/* Out-of-line wrapper so Rust can take a folio reference via `folio_get()`. */
void rust_helper_folio_get(struct folio *folio)
{
	folio_get(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_get);

/* Out-of-line wrapper so Rust can drop a folio reference via `folio_put()`. */
void rust_helper_folio_put(struct folio *folio)
{
	folio_put(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_put);

/* Out-of-line wrapper so Rust can get the n-th page of a folio via `folio_page()`. */
struct page *rust_helper_folio_page(struct folio *folio, size_t n)
{
	return folio_page(folio, n);
}
/*
 * Fix: the export was missing here, unlike every other helper in this file.
 * Without it, Rust code built as a module cannot resolve this symbol at load
 * time.
 */
EXPORT_SYMBOL_GPL(rust_helper_folio_page);

/* Out-of-line wrapper for `folio_pos()`: byte offset of the folio in its file. */
loff_t rust_helper_folio_pos(struct folio *folio)
{
	return folio_pos(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_pos);

/* Out-of-line wrapper for `folio_size()`: size of the folio in bytes. */
size_t rust_helper_folio_size(struct folio *folio)
{
	return folio_size(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_size);

/* Out-of-line wrapper for `folio_mark_uptodate()`. */
void rust_helper_folio_mark_uptodate(struct folio *folio)
{
	folio_mark_uptodate(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_mark_uptodate);

/* Out-of-line wrapper for `folio_set_error()`. */
void rust_helper_folio_set_error(struct folio *folio)
{
	folio_set_error(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_set_error);

/* Out-of-line wrapper for `flush_dcache_folio()` (no-op on some architectures). */
void rust_helper_flush_dcache_folio(struct folio *folio)
{
	flush_dcache_folio(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_flush_dcache_folio);

/*
 * Out-of-line wrapper for `kmap_local_folio()`: maps the page containing
 * byte `offset` of the folio and returns the kernel address of that byte.
 */
void *rust_helper_kmap_local_folio(struct folio *folio, size_t offset)
{
	return kmap_local_folio(folio, offset);
}
EXPORT_SYMBOL_GPL(rust_helper_kmap_local_folio);

/* Out-of-line wrapper for `kunmap_local()`; pairs with rust_helper_kmap_local_folio(). */
void rust_helper_kunmap_local(const void *vaddr)
{
	kunmap_local(vaddr);
}
EXPORT_SYMBOL_GPL(rust_helper_kunmap_local);

void rust_helper_i_uid_write(struct inode *inode, uid_t uid)
{
i_uid_write(inode, uid);
Expand All @@ -163,6 +238,12 @@ off_t rust_helper_i_size_read(const struct inode *inode)
}
EXPORT_SYMBOL_GPL(rust_helper_i_size_read);

/* Out-of-line wrapper for `mapping_set_large_folios()` on an address_space. */
void rust_helper_mapping_set_large_folios(struct address_space *mapping)
{
	mapping_set_large_folios(mapping);
}
EXPORT_SYMBOL_GPL(rust_helper_mapping_set_large_folios);

/*
* `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
* use it in contexts where Rust expects a `usize` like slice (array) indices.
Expand Down
215 changes: 215 additions & 0 deletions rust/kernel/folio.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,215 @@
// SPDX-License-Identifier: GPL-2.0

//! Groups of contiguous pages, folios.
//!
//! C headers: [`include/linux/mm.h`](../../include/linux/mm.h)

use crate::error::{code::*, Result};
use crate::types::{ARef, AlwaysRefCounted, Opaque, ScopeGuard};
use core::{cmp::min, ptr};

/// Wraps the kernel's `struct folio`.
///
/// # Invariants
///
/// Instances of this type are always ref-counted, that is, a call to `folio_get` ensures that the
/// allocation remains valid at least until the matching call to `folio_put`.
// `repr(transparent)` allows casting between `*mut bindings::folio` and `*mut Folio`
// (relied upon by `AlwaysRefCounted::dec_ref` and `LockedFolio::from_raw` below).
#[repr(transparent)]
pub struct Folio(pub(crate) Opaque<bindings::folio>);

// SAFETY: The type invariants guarantee that `Folio` is always ref-counted.
unsafe impl AlwaysRefCounted for Folio {
    fn inc_ref(&self) {
        // SAFETY: The existence of a shared reference means that the refcount is nonzero.
        unsafe { bindings::folio_get(self.0.get()) };
    }

    unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
        // SAFETY: The safety requirements guarantee that the refcount is nonzero. The cast is
        // sound because `Folio` is `#[repr(transparent)]` over `Opaque<bindings::folio>`.
        unsafe { bindings::folio_put(obj.cast().as_ptr()) }
    }
}

impl Folio {
    /// Tries to allocate a new folio.
    ///
    /// On success, returns a folio made up of 2^order pages.
    ///
    /// # Errors
    ///
    /// Returns `EDOM` if `order` exceeds `MAX_ORDER`, and `ENOMEM` if the allocation fails.
    pub fn try_new(order: u32) -> Result<UniqueFolio> {
        if order > bindings::MAX_ORDER {
            return Err(EDOM);
        }

        // SAFETY: We checked that `order` is within the max allowed value.
        let f = ptr::NonNull::new(unsafe { bindings::folio_alloc(bindings::GFP_KERNEL, order) })
            .ok_or(ENOMEM)?;

        // SAFETY: The folio returned by `folio_alloc` is referenced. The ownership of the
        // reference is transferred to the `ARef` instance.
        Ok(UniqueFolio(unsafe { ARef::from_raw(f.cast()) }))
    }

    /// Returns the byte position of this folio in its file.
    pub fn pos(&self) -> i64 {
        // SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
        unsafe { bindings::folio_pos(self.0.get()) }
    }

    /// Returns the byte size of this folio.
    pub fn size(&self) -> usize {
        // SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
        unsafe { bindings::folio_size(self.0.get()) }
    }

    /// Flushes the data cache for the pages that make up the folio.
    pub fn flush_dcache(&self) {
        // SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
        unsafe { bindings::flush_dcache_folio(self.0.get()) }
    }
}

/// A [`Folio`] that has a single reference to it.
///
/// # Invariants
///
/// The wrapped [`ARef`] holds the only reference to the underlying folio.
pub struct UniqueFolio(pub(crate) ARef<Folio>);

impl UniqueFolio {
    /// Maps the contents of a folio page into a slice.
    ///
    /// The mapping lives as long as the returned [`MapGuard`]; the page is unmapped when the
    /// guard is dropped.
    ///
    /// # Errors
    ///
    /// Returns `EDOM` if `page_index` is beyond the number of pages in the folio.
    pub fn map_page(&self, page_index: usize) -> Result<MapGuard<'_>> {
        if page_index >= self.0.size() / bindings::PAGE_SIZE {
            return Err(EDOM);
        }

        // SAFETY: We just checked that the index is within bounds of the folio.
        let page = unsafe { bindings::folio_page(self.0 .0.get(), page_index) };

        // SAFETY: `page` is valid because it was returned by `folio_page` above.
        let ptr = unsafe { bindings::kmap(page) };

        // SAFETY: We just mapped `ptr`, so it's valid for read.
        let data = unsafe { core::slice::from_raw_parts(ptr.cast::<u8>(), bindings::PAGE_SIZE) };

        Ok(MapGuard { data, page })
    }
}

/// A mapped [`UniqueFolio`].
///
/// Derefs to the mapped bytes; the page is unmapped when the guard is dropped.
pub struct MapGuard<'a> {
    // The mapped contents of the page, one `PAGE_SIZE` slice.
    data: &'a [u8],
    // The page backing `data`; unmapped via `kunmap` in `Drop`.
    page: *mut bindings::page,
}

// Allow the mapped page to be used directly as a byte slice.
impl core::ops::Deref for MapGuard<'_> {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.data
    }
}

impl Drop for MapGuard<'_> {
    fn drop(&mut self) {
        // SAFETY: A `MapGuard` instance is only created when `kmap` succeeds, so it's ok to unmap
        // it when the guard is dropped.
        unsafe { bindings::kunmap(self.page) };
    }
}

/// A locked [`Folio`].
///
/// See the C documentation of `folio_lock` in `include/linux/pagemap.h` for what it means for a
/// folio to be locked. The folio is unlocked when this instance is dropped.
pub struct LockedFolio<'a>(&'a Folio);
Comment on lines +115 to +116
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What does it mean for a folio to be locked?

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Here's a full description: https://elixir.bootlin.com/linux/latest/source/include/linux/pagemap.h#L934

Since this series concerns read-only file systems, we are only concerned with reading a folio: on success we have to mark it up to date, then unlock it. (The caller of read_folio will wait for it to be unlocked, then check if it's up to date; this means the unlock may be asynchronous, but since none of the FS we are implementing require this, we don't support it. Eventually we may need to express the fact that we're holding a reference to a locked folio.)

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I would suggest to link/mention that lock function from the C side.


impl LockedFolio<'_> {
    /// Creates a new locked folio from a raw pointer.
    ///
    /// # Safety
    ///
    /// Callers must ensure that the folio is valid and locked. Additionally, that the
    /// responsibility of unlocking is transferred to the new instance of [`LockedFolio`]. Lastly,
    /// that the returned [`LockedFolio`] doesn't outlive the refcount that keeps it alive.
    #[allow(dead_code)]
    pub(crate) unsafe fn from_raw(folio: *const bindings::folio) -> Self {
        let ptr = folio.cast();
        // SAFETY: The safety requirements ensure that `folio` (from which `ptr` is derived) is
        // valid and will remain valid while the `LockedFolio` instance lives.
        Self(unsafe { &*ptr })
    }

    /// Marks the folio as being up to date.
    pub fn mark_uptodate(&mut self) {
        // SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
        unsafe { bindings::folio_mark_uptodate(self.0 .0.get()) }
    }

    /// Sets the error flag on the folio.
    pub fn set_error(&mut self) {
        // SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
        unsafe { bindings::folio_set_error(self.0 .0.get()) }
    }

    // Invokes `cb` once per page overlapping the byte range `[offset, offset + len)`, passing a
    // mutable slice of the mapped bytes of that page. Fails with `EDOM` on offset+len overflow
    // and `EINVAL` if the range extends past the folio.
    fn for_each_page(
        &mut self,
        offset: usize,
        len: usize,
        mut cb: impl FnMut(&mut [u8]) -> Result,
    ) -> Result {
        let mut remaining = len;
        let mut next_offset = offset;

        // Check that we don't overflow the folio.
        let end = offset.checked_add(len).ok_or(EDOM)?;
        if end > self.size() {
            return Err(EINVAL);
        }

        while remaining > 0 {
            // Offset within the current page; `PAGE_SIZE` is a power of two, so masking works.
            let page_offset = next_offset & (bindings::PAGE_SIZE - 1);
            let usable = min(remaining, bindings::PAGE_SIZE - page_offset);
            // SAFETY: The folio is valid because the shared reference implies a non-zero refcount;
            // `next_offset` is also guaranteed to be less than the folio size.
            let ptr = unsafe { bindings::kmap_local_folio(self.0 .0.get(), next_offset) };

            // SAFETY: `ptr` was just returned by the `kmap_local_folio` above.
            let _guard = ScopeGuard::new(|| unsafe { bindings::kunmap_local(ptr) });

            // SAFETY: `kmap_local_folio` maps whole page so we know it's mapped for at least
            // `usable` bytes.
            let s = unsafe { core::slice::from_raw_parts_mut(ptr.cast::<u8>(), usable) };
            cb(s)?;

            next_offset += usable;
            remaining -= usable;
        }

        Ok(())
    }

    /// Writes the given slice into the folio.
    pub fn write(&mut self, offset: usize, data: &[u8]) -> Result {
        let mut remaining = data;

        self.for_each_page(offset, data.len(), |s| {
            s.copy_from_slice(&remaining[..s.len()]);
            remaining = &remaining[s.len()..];
            Ok(())
        })
    }

    /// Writes zeroes into the folio.
    pub fn zero_out(&mut self, offset: usize, len: usize) -> Result {
        self.for_each_page(offset, len, |s| {
            s.fill(0);
            Ok(())
        })
    }
}

impl core::ops::Deref for LockedFolio<'_> {
type Target = Folio;
fn deref(&self) -> &Self::Target {
self.0
}
}

impl Drop for LockedFolio<'_> {
    // Unlocks the folio; a `LockedFolio` owns the responsibility of unlocking (see `from_raw`).
    fn drop(&mut self) {
        // SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
        unsafe { bindings::folio_unlock(self.0 .0.get()) }
    }
}
1 change: 1 addition & 0 deletions rust/kernel/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ extern crate self as kernel;
mod allocator;
mod build_assert;
pub mod error;
pub mod folio;
pub mod fs;
pub mod init;
pub mod ioctl;
Expand Down