memory management

parent ec9264620b
commit ab874a2102

src/allocator.rs (new file, 86 lines)
@@ -0,0 +1,86 @@
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr::null_mut;
use x86_64::{
    structures::paging::{
        mapper::MapToError, FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB,
    },
    VirtAddr,
};
use linked_list_allocator::LockedHeap;

pub fn init_heap(
    mapper: &mut impl Mapper<Size4KiB>,
    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
) -> Result<(), MapToError<Size4KiB>> {
    let page_range = {
        let heap_start = VirtAddr::new(HEAP_START as u64);
        let heap_end = heap_start + HEAP_SIZE - 1u64;
        let heap_start_page = Page::containing_address(heap_start);
        let heap_end_page = Page::containing_address(heap_end);
        Page::range_inclusive(heap_start_page, heap_end_page)
    };

    for page in page_range {
        let frame = frame_allocator
            .allocate_frame()
            .ok_or(MapToError::FrameAllocationFailed)?;
        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
        unsafe {
            mapper.map_to(page, frame, flags, frame_allocator)?.flush()
        };
    }

    unsafe {
        ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE);
    }

    Ok(())
}

pub mod bump;
use bump::BumpAllocator;

pub mod linked_list;
use linked_list::LinkedListAllocator;

pub mod fixed_size_block;
use fixed_size_block::FixedSizeBlockAllocator;

#[global_allocator]
static ALLOCATOR: Locked<FixedSizeBlockAllocator> =
    Locked::new(FixedSizeBlockAllocator::new());

pub struct Dummy;

pub const HEAP_START: usize = 0x_4444_4444_0000;
pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB

unsafe impl GlobalAlloc for Dummy {
    unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
        null_mut()
    }

    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        panic!("dealloc should never be called")
    }
}

/// Align the given address `addr` upwards to alignment `align`.
///
/// Requires that `align` is a power of two.
fn align_up(addr: usize, align: usize) -> usize {
    (addr + align - 1) & !(align - 1)
}

pub struct Locked<A> {
    inner: spin::Mutex<A>,
}

impl<A> Locked<A> {
    pub const fn new(inner: A) -> Self {
        Locked {
            inner: spin::Mutex::new(inner),
        }
    }

    pub fn lock(&self) -> spin::MutexGuard<A> {
        self.inner.lock()
    }
}
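A note on `align_up`: the bitwise form only works because `align` is a power of two, which `Layout` guarantees for alignments. As an illustration of the rounding behavior (not part of this commit), a hypothetical test in this module could look like:

#[test_case]
fn align_up_rounds_to_next_multiple() {
    // 13 rounded up to the next multiple of 8 is 16
    assert_eq!(align_up(13, 8), 16);
    // already-aligned addresses are left unchanged
    assert_eq!(align_up(16, 8), 16);
}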
src/allocator/bump.rs (new file, 61 lines)
@@ -0,0 +1,61 @@
use super::{align_up, Locked};
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr;

unsafe impl GlobalAlloc for Locked<BumpAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut bump = self.lock(); // get a mutable reference

        let alloc_start = align_up(bump.next, layout.align());
        let alloc_end = match alloc_start.checked_add(layout.size()) {
            Some(end) => end,
            None => return ptr::null_mut(),
        };

        if alloc_end > bump.heap_end {
            ptr::null_mut() // out of memory
        } else {
            bump.next = alloc_end;
            bump.allocations += 1;
            alloc_start as *mut u8
        }
    }

    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        let mut bump = self.lock(); // get a mutable reference

        bump.allocations -= 1;
        if bump.allocations == 0 {
            bump.next = bump.heap_start;
        }
    }
}

pub struct BumpAllocator {
    heap_start: usize,
    heap_end: usize,
    next: usize,
    allocations: usize,
}

impl BumpAllocator {
    /// Creates a new empty bump allocator.
    pub const fn new() -> Self {
        BumpAllocator {
            heap_start: 0,
            heap_end: 0,
            next: 0,
            allocations: 0,
        }
    }

    /// Initializes the bump allocator with the given heap bounds.
    ///
    /// This method is unsafe because the caller must ensure that the given
    /// memory range is unused. Also, this method must be called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        self.heap_start = heap_start;
        self.heap_end = heap_start + heap_size;
        self.next = heap_start;
    }
}
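Worth noting about the bump design (illustration only, not part of this commit): `dealloc` never reuses an individual freed block; it only decrements the `allocations` counter and resets `next` once the counter reaches zero. Assuming `Locked<BumpAllocator>` were installed as the global allocator, the consequence looks like this:

// hypothetical usage sketch
let first = Box::new(1u32);
let second = Box::new(2u32);
drop(first);  // counter 2 -> 1: the memory of `first` is not reusable yet
drop(second); // counter 1 -> 0: `next` resets, the whole heap is free again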
src/allocator/fixed_size_block.rs (new file, 95 lines)
@@ -0,0 +1,95 @@
use alloc::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr, ptr::NonNull};
use super::Locked;

unsafe impl GlobalAlloc for Locked<FixedSizeBlockAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut allocator = self.lock();
        match list_index(&layout) {
            Some(index) => {
                match allocator.list_heads[index].take() {
                    Some(node) => {
                        allocator.list_heads[index] = node.next.take();
                        node as *mut ListNode as *mut u8
                    }
                    None => {
                        // no block exists in list => allocate new block
                        let block_size = BLOCK_SIZES[index];
                        // only works if all block sizes are a power of 2
                        let block_align = block_size;
                        let layout = Layout::from_size_align(block_size, block_align)
                            .unwrap();
                        allocator.fallback_alloc(layout)
                    }
                }
            }
            None => allocator.fallback_alloc(layout),
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let mut allocator = self.lock();
        match list_index(&layout) {
            Some(index) => {
                let new_node = ListNode {
                    next: allocator.list_heads[index].take(),
                };
                // verify that block has size and alignment required for storing node
                assert!(mem::size_of::<ListNode>() <= BLOCK_SIZES[index]);
                assert!(mem::align_of::<ListNode>() <= BLOCK_SIZES[index]);
                let new_node_ptr = ptr as *mut ListNode;
                new_node_ptr.write(new_node);
                allocator.list_heads[index] = Some(&mut *new_node_ptr);
            }
            None => {
                let ptr = NonNull::new(ptr).unwrap();
                allocator.fallback_allocator.deallocate(ptr, layout);
            }
        }
    }
}

struct ListNode {
    next: Option<&'static mut ListNode>,
}

/// The block sizes to use.
///
/// Each size must be a power of 2 because it is also used as the block
/// alignment (alignments must always be powers of 2).
const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];

pub struct FixedSizeBlockAllocator {
    list_heads: [Option<&'static mut ListNode>; BLOCK_SIZES.len()],
    fallback_allocator: linked_list_allocator::Heap,
}

impl FixedSizeBlockAllocator {
    /// Creates an empty FixedSizeBlockAllocator.
    pub const fn new() -> Self {
        const EMPTY: Option<&'static mut ListNode> = None;
        FixedSizeBlockAllocator {
            list_heads: [EMPTY; BLOCK_SIZES.len()],
            fallback_allocator: linked_list_allocator::Heap::empty(),
        }
    }

    /// Initialize the allocator with the given heap bounds.
    ///
    /// This function is unsafe because the caller must guarantee that the given
    /// heap bounds are valid and that the heap is unused. This method must be
    /// called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        self.fallback_allocator.init(heap_start, heap_size);
    }

    /// Allocates using the fallback allocator.
    fn fallback_alloc(&mut self, layout: Layout) -> *mut u8 {
        match self.fallback_allocator.allocate_first_fit(layout) {
            Ok(ptr) => ptr.as_ptr(),
            Err(_) => ptr::null_mut(),
        }
    }
}

/// Choose an appropriate block size for the given layout.
///
/// Returns an index into the `BLOCK_SIZES` array.
fn list_index(layout: &Layout) -> Option<usize> {
    let required_block_size = layout.size().max(layout.align());
    BLOCK_SIZES.iter().position(|&s| s >= required_block_size)
}
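The size-class selection in `list_index` picks the smallest entry of `BLOCK_SIZES` that can hold both the size and the alignment of the requested layout; anything larger than 2048 bytes is served by the fallback allocator. A few hypothetical examples (not part of this commit):

// size 24, align 8  -> needs >= 24  -> 32-byte class (index 2)
assert_eq!(list_index(&Layout::from_size_align(24, 8).unwrap()), Some(2));
// size 100, align 1 -> needs >= 100 -> 128-byte class (index 4)
assert_eq!(list_index(&Layout::from_size_align(100, 1).unwrap()), Some(4));
// size 4096 exceeds every block size -> fallback allocator
assert_eq!(list_index(&Layout::from_size_align(4096, 8).unwrap()), None);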
src/allocator/linked_list.rs (new file, 152 lines)
@@ -0,0 +1,152 @@
use super::{align_up, Locked};
use alloc::alloc::{GlobalAlloc, Layout};
use core::{mem, ptr};

unsafe impl GlobalAlloc for Locked<LinkedListAllocator> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // perform layout adjustments
        let (size, align) = LinkedListAllocator::size_align(layout);
        let mut allocator = self.lock();

        if let Some((region, alloc_start)) = allocator.find_region(size, align) {
            let alloc_end = alloc_start.checked_add(size).expect("overflow");
            let excess_size = region.end_addr() - alloc_end;
            if excess_size > 0 {
                allocator.add_free_region(alloc_end, excess_size);
            }
            alloc_start as *mut u8
        } else {
            ptr::null_mut()
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // perform layout adjustments
        let (size, _) = LinkedListAllocator::size_align(layout);

        self.lock().add_free_region(ptr as usize, size)
    }
}

struct ListNode {
    size: usize,
    next: Option<&'static mut ListNode>,
}

impl ListNode {
    const fn new(size: usize) -> Self {
        ListNode { size, next: None }
    }

    fn start_addr(&self) -> usize {
        self as *const Self as usize
    }

    fn end_addr(&self) -> usize {
        self.start_addr() + self.size
    }
}

pub struct LinkedListAllocator {
    head: ListNode,
}

impl LinkedListAllocator {
    /// Creates an empty LinkedListAllocator.
    pub const fn new() -> Self {
        Self {
            head: ListNode::new(0),
        }
    }

    /// Initialize the allocator with the given heap bounds.
    ///
    /// This function is unsafe because the caller must guarantee that the given
    /// heap bounds are valid and that the heap is unused. This method must be
    /// called only once.
    pub unsafe fn init(&mut self, heap_start: usize, heap_size: usize) {
        self.add_free_region(heap_start, heap_size);
    }

    /// Adds the given memory region to the front of the list.
    unsafe fn add_free_region(&mut self, addr: usize, size: usize) {
        // ensure that the freed region is capable of holding ListNode
        assert_eq!(align_up(addr, mem::align_of::<ListNode>()), addr);
        assert!(size >= mem::size_of::<ListNode>());

        // create a new list node and append it at the start of the list
        let mut node = ListNode::new(size);
        node.next = self.head.next.take();
        let node_ptr = addr as *mut ListNode;
        node_ptr.write(node);
        self.head.next = Some(&mut *node_ptr)
    }

    /// Looks for a free region with the given size and alignment and removes
    /// it from the list.
    ///
    /// Returns a tuple of the list node and the start address of the allocation.
    fn find_region(&mut self, size: usize, align: usize)
        -> Option<(&'static mut ListNode, usize)>
    {
        // reference to current list node, updated for each iteration
        let mut current = &mut self.head;
        // look for a large enough memory region in linked list
        while let Some(ref mut region) = current.next {
            if let Ok(alloc_start) = Self::alloc_from_region(&region, size, align) {
                // region suitable for allocation -> remove node from list
                let next = region.next.take();
                let ret = Some((current.next.take().unwrap(), alloc_start));
                current.next = next;
                return ret;
            } else {
                // region not suitable -> continue with next region
                current = current.next.as_mut().unwrap();
            }
        }

        // no suitable region found
        None
    }

    /// Try to use the given region for an allocation with given size and
    /// alignment.
    ///
    /// Returns the allocation start address on success.
    fn alloc_from_region(region: &ListNode, size: usize, align: usize)
        -> Result<usize, ()>
    {
        let alloc_start = align_up(region.start_addr(), align);
        let alloc_end = alloc_start.checked_add(size).ok_or(())?;

        if alloc_end > region.end_addr() {
            // region too small
            return Err(());
        }

        let excess_size = region.end_addr() - alloc_end;
        if excess_size > 0 && excess_size < mem::size_of::<ListNode>() {
            // rest of region too small to hold a ListNode (required because the
            // allocation splits the region in a used and a free part)
            return Err(());
        }

        // region suitable for allocation
        Ok(alloc_start)
    }

    /// Adjust the given layout so that the resulting allocated memory
    /// region is also capable of storing a `ListNode`.
    ///
    /// Returns the adjusted size and alignment as a (size, align) tuple.
    fn size_align(layout: Layout) -> (usize, usize) {
        let layout = layout
            .align_to(mem::align_of::<ListNode>())
            .expect("adjusting alignment failed")
            .pad_to_align();
        let size = layout.size().max(mem::size_of::<ListNode>());
        (size, layout.align())
    }
}
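A detail of `size_align` worth spelling out: every allocation is padded so that the block can later store a `ListNode` when it is freed. On x86_64 a `ListNode` is 16 bytes with 8-byte alignment, so even a 1-byte request ends up with a (16, 8) layout; a hypothetical check (not part of this commit):

// a tiny request is rounded up so the freed block can hold a ListNode
let layout = Layout::from_size_align(1, 1).unwrap();
assert_eq!(LinkedListAllocator::size_align(layout), (16, 8));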
@@ -5,6 +5,7 @@
 #![test_runner(crate::test_runner)]
 #![reexport_test_harness_main = "test_main"]
 #![feature(abi_x86_interrupt)]
+#![feature(const_mut_refs)]

 use core::panic::PanicInfo;
src/memory.rs (new file, 72 lines)
@@ -0,0 +1,72 @@
use x86_64::{
    structures::paging::{
        OffsetPageTable, Page, PhysFrame, Mapper, Size4KiB, PageTable, FrameAllocator,
    },
    PhysAddr, VirtAddr,
};
use bootloader::bootinfo::{MemoryMap, MemoryRegionType};

/// A FrameAllocator that returns usable frames from the bootloader's memory map.
pub struct BootInfoFrameAllocator {
    memory_map: &'static MemoryMap,
    next: usize,
}

impl BootInfoFrameAllocator {
    /// Create a FrameAllocator from the passed memory map.
    ///
    /// This function is unsafe because the caller must guarantee that the passed
    /// memory map is valid. The main requirement is that all frames that are marked
    /// as `USABLE` in it are really unused.
    pub unsafe fn init(memory_map: &'static MemoryMap) -> Self {
        BootInfoFrameAllocator {
            memory_map,
            next: 0,
        }
    }

    /// Returns an iterator over the usable frames specified in the memory map.
    fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
        // get usable regions from memory map
        let regions = self.memory_map.iter();
        let usable_regions = regions
            .filter(|r| r.region_type == MemoryRegionType::Usable);
        // map each region to its address range
        let addr_ranges = usable_regions
            .map(|r| r.range.start_addr()..r.range.end_addr());
        // transform to an iterator of frame start addresses
        let frame_addresses = addr_ranges.flat_map(|r| r.step_by(4096));
        // create `PhysFrame` types from the start addresses
        frame_addresses.map(|addr| PhysFrame::containing_address(PhysAddr::new(addr)))
    }
}

unsafe impl FrameAllocator<Size4KiB> for BootInfoFrameAllocator {
    fn allocate_frame(&mut self) -> Option<PhysFrame> {
        let frame = self.usable_frames().nth(self.next);
        self.next += 1;
        frame
    }
}

/// Initialize a new OffsetPageTable.
///
/// This function is unsafe because the caller must guarantee that the
/// complete physical memory is mapped to virtual memory at the passed
/// `physical_memory_offset`.
pub unsafe fn init(physical_memory_offset: VirtAddr) -> OffsetPageTable<'static> {
    let level_4_table = active_level_4_table(physical_memory_offset);
    OffsetPageTable::new(level_4_table, physical_memory_offset)
}

/// Returns a mutable reference to the active level 4 table.
unsafe fn active_level_4_table(physical_memory_offset: VirtAddr)
    -> &'static mut PageTable
{
    use x86_64::registers::control::Cr3;

    let (level_4_table_frame, _) = Cr3::read();

    let phys = level_4_table_frame.start_address();
    let virt = physical_memory_offset + phys.as_u64();
    let page_table_ptr: *mut PageTable = virt.as_mut_ptr();

    &mut *page_table_ptr // unsafe
}
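The page-table access here relies on the bootloader's complete physical-memory mapping: a physical address becomes accessible by adding `physical_memory_offset` to it. A minimal sketch of that translation, using a made-up offset value purely for illustration:

// hypothetical values, for illustration only
let physical_memory_offset = VirtAddr::new(0x1000_0000_0000);
let phys = PhysAddr::new(0x5000);
let virt = physical_memory_offset + phys.as_u64();
assert_eq!(virt.as_u64(), 0x1000_0000_5000);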
tests/heap_allocation.rs (new file, 74 lines)
@@ -0,0 +1,74 @@
#![no_std]
#![no_main]
#![feature(custom_test_frameworks)]
#![test_runner(rust_os::test_runner)]
#![reexport_test_harness_main = "test_main"]

extern crate alloc;

use bootloader::{entry_point, BootInfo};
use core::panic::PanicInfo;
use alloc::boxed::Box;
use alloc::vec::Vec;
use rust_os::allocator::HEAP_SIZE;

entry_point!(main);

fn main(boot_info: &'static BootInfo) -> ! {
    use rust_os::allocator;
    use rust_os::memory::{self, BootInfoFrameAllocator};
    use x86_64::VirtAddr;

    rust_os::init();
    let phys_mem_offset = VirtAddr::new(boot_info.physical_memory_offset);
    let mut mapper = unsafe { memory::init(phys_mem_offset) };
    let mut frame_allocator = unsafe {
        BootInfoFrameAllocator::init(&boot_info.memory_map)
    };
    allocator::init_heap(&mut mapper, &mut frame_allocator)
        .expect("heap initialization failed");

    test_main();
    loop {}
}

#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
    rust_os::test_panic_handler(info)
}

#[test_case]
fn simple_allocation() {
    let heap_value_1 = Box::new(41);
    let heap_value_2 = Box::new(13);
    assert_eq!(*heap_value_1, 41);
    assert_eq!(*heap_value_2, 13);
}

#[test_case]
fn large_vec() {
    let n = 1000;
    let mut vec = Vec::new();
    for i in 0..n {
        vec.push(i);
    }
    assert_eq!(vec.iter().sum::<u64>(), (n - 1) * n / 2);
}

#[test_case]
fn many_boxes() {
    for i in 0..HEAP_SIZE {
        let x = Box::new(i);
        assert_eq!(*x, i);
    }
}

#[test_case]
fn many_boxes_long_lived() {
    let long_lived = Box::new(1);
    for i in 0..HEAP_SIZE {
        let x = Box::new(i);
        assert_eq!(*x, i);
    }
    assert_eq!(*long_lived, 1);
}
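A note on the last two tests: `many_boxes` allocates and drops `HEAP_SIZE` short-lived boxes in sequence, so it passes only if freed memory is reused; `many_boxes_long_lived` additionally keeps one allocation alive the whole time, which the bump allocator cannot handle because its allocation counter never drops back to zero. With the fixed-size-block allocator installed above, both should pass. Assuming the project's usual `bootimage` test runner configuration, the suite runs with `cargo test --test heap_allocation`.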