Update kernel (mirror of https://github.com/Fennix-Project/Kernel.git)
@@ -38,39 +38,45 @@ namespace Memory
 	/* Check if the address is valid. */
 	if ((uintptr_t)Address < HeapStart)
 	{
 		debug("Address %#lx is less than HeapStart %#lx", Address, HeapStart);
 		return (void *)-ENOMEM;
 	}

-	Virtual vmm = Virtual(this->Table);
+	Virtual vmm(this->Table);

 	if ((uintptr_t)Address > Break)
 	{
 		/* Allocate more memory. */
-		size_t Pages = TO_PAGES(uintptr_t(Address) - Break);
+		ssize_t Pages = TO_PAGES(uintptr_t(Address) - Break);
 		void *Allocated = vma->RequestPages(Pages);
 		if (Allocated == nullptr)
 			return (void *)-ENOMEM;

 		/* Map the allocated pages. */
-		for (size_t i = 0; i < Pages; i++)
+		for (ssize_t i = 0; i < Pages; i++)
 		{
 			void *VirtAddr = (void *)(Break + (i * PAGE_SIZE));
 			void *PhysAddr = (void *)(uintptr_t(Allocated) + (i * PAGE_SIZE));
 			debug("Mapping %#lx to %#lx", VirtAddr, PhysAddr);
 			vmm.Map(VirtAddr, PhysAddr, RW | US);
 		}

-		Break = (uint64_t)Address;
-		return (void *)Break;
+		Break = ROUND_UP(uintptr_t(Address), PAGE_SIZE);
+		debug("Round up %#lx to %#lx", Address, Break);
+		return Address;
 	}

 	/* Free memory. */
-	size_t Pages = TO_PAGES(uintptr_t(Address) - Break);
+	ssize_t Pages = TO_PAGES(Break - uintptr_t(Address));
 	vma->FreePages((void *)Break, Pages);

 	/* Unmap the freed pages. */
-	for (size_t i = 0; i < Pages; i++)
+	for (ssize_t i = 0; i < Pages; i++)
 	{
 		uint64_t Page = Break - (i * 0x1000);
-		vmm.Remap((void *)Page, (void *)Page, PTFlag::P | PTFlag::RW);
+		vmm.Remap((void *)Page, (void *)Page, RW);
 		debug("Unmapping %#lx", Page);
 	}

 	Break = (uint64_t)Address;
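For reference, a short self-contained sketch of the page arithmetic the new brk path relies on. TO_PAGES and ROUND_UP are written out with assumed (typical) definitions here, since the real kernel macros are not part of this diff, and the values in main() are made up for illustration.

// Sketch only: assumed equivalents of the TO_PAGES / ROUND_UP macros used above.
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr std::uintptr_t PAGE_SIZE = 0x1000;

constexpr std::size_t TO_PAGES(std::uintptr_t bytes)
{
	return (bytes + PAGE_SIZE - 1) / PAGE_SIZE; // round byte count up to whole pages
}

constexpr std::uintptr_t ROUND_UP(std::uintptr_t value, std::uintptr_t align)
{
	return (value + align - 1) & ~(align - 1); // align must be a power of two
}

int main()
{
	std::uintptr_t Break = 0x400000;   // hypothetical current program break
	std::uintptr_t Address = 0x402345; // hypothetical brk() target above the break

	// Growing the break: map enough whole pages to cover the target, then
	// advance Break to the next page boundary, exactly as the new code does.
	std::size_t Pages = TO_PAGES(Address - Break);
	std::uintptr_t NewBreak = ROUND_UP(Address, PAGE_SIZE);

	std::printf("need %zu page(s), new break %#lx\n", Pages, (unsigned long)NewBreak);
	return 0; // prints: need 3 page(s), new break 0x403000
}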
@@ -82,12 +88,16 @@ namespace Memory
	assert(Table != nullptr);
	assert(vma != nullptr);

	debug("+ %#lx", this);

	this->Table = Table;
	this->vma = vma;
}

ProgramBreak::~ProgramBreak()
{
	debug("- %#lx", this);

	/* Do nothing because VirtualMemoryArea
	   will be destroyed later. */
}
@@ -1,4 +1,5 @@
#include "liballoc_1_1.h"
#include <convert.h>

#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wsign-conversion"
@@ -104,6 +105,7 @@ static long long l_possibleOverruns = 0; ///< Number of possible overruns

__no_sanitize("undefined") static void *liballoc_memset(void *s, int c, size_t n)
{
+	return memset(s, c, n);
	unsigned int i;
	for (i = 0; i < n; i++)
		((char *)s)[i] = c;
@@ -112,6 +114,7 @@ __no_sanitize("undefined") static void *liballoc_memset(void *s, int c, size_t n
}
__no_sanitize("undefined") static void *liballoc_memcpy(void *s1, const void *s2, size_t n)
{
+	return memcpy(s1, s2, n);
	char *cdest;
	char *csrc;
	unsigned int *ldest = (unsigned int *)s1;
@@ -24,46 +24,46 @@ extern "C"
{
#endif

	/** This function is supposed to lock the memory data structures. It
	 * could be as simple as disabling interrupts or acquiring a spinlock.
	 * It's up to you to decide.
	 *
	 * \return 0 if the lock was acquired successfully. Anything else is
	 * failure.
	 */
	extern int liballoc_lock();

	/** This function unlocks what was previously locked by the liballoc_lock
	 * function. If it disabled interrupts, it enables interrupts. If it
	 * had acquired a spinlock, it releases the spinlock. etc.
	 *
	 * \return 0 if the lock was successfully released.
	 */
	extern int liballoc_unlock();

	/** This is the hook into the local system which allocates pages. It
	 * accepts an integer parameter which is the number of pages
	 * required. The page size was set up in the liballoc_init function.
	 *
	 * \return NULL if the pages were not allocated.
	 * \return A pointer to the allocated memory.
	 */
	extern void *liballoc_alloc(size_t);

	/** This frees previously allocated memory. The void* parameter passed
	 * to the function is the exact same value returned from a previous
	 * liballoc_alloc call.
	 *
	 * The integer value is the number of pages to free.
	 *
	 * \return 0 if the memory was successfully freed.
	 */
	extern int liballoc_free(void *, size_t);

	extern void *PREFIX(malloc)(size_t);          ///< The standard function.
	extern void *PREFIX(realloc)(void *, size_t); ///< The standard function.
	extern void *PREFIX(calloc)(size_t, size_t);  ///< The standard function.
	extern void PREFIX(free)(void *);             ///< The standard function.

#ifdef __cplusplus
}
@@ -16,11 +16,14 @@ EXTERNC int liballoc_unlock()

EXTERNC void *liballoc_alloc(size_t Pages)
{
-	return KernelAllocator.RequestPages(Pages);
+	void *ret = KernelAllocator.RequestPages(Pages);
+	debug("(%d) = %#lx", Pages, ret);
+	return ret;
}

EXTERNC int liballoc_free(void *Address, size_t Pages)
{
+	debug("(%#lx, %d)", Address, Pages);
	KernelAllocator.FreePages(Address, Pages);
	return 0;
}
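The hunk above only touches the page hooks; for completeness, here is a minimal sketch of what the liballoc_lock / liballoc_unlock hooks declared in liballoc_1_1.h could look like. The spinlock and the EXTERNC spelling are assumptions for illustration, not the Fennix implementation.

/* Hypothetical lock hooks for liballoc built on a simple spinlock. */
#include <atomic>

#define EXTERNC extern "C" // assumed to match the kernel's EXTERNC macro

static std::atomic_flag liballoc_lock_flag = ATOMIC_FLAG_INIT;

EXTERNC int liballoc_lock()
{
	// Contract from liballoc_1_1.h: return 0 once the data structures are locked.
	while (liballoc_lock_flag.test_and_set(std::memory_order_acquire))
		; // spin
	return 0;
}

EXTERNC int liballoc_unlock()
{
	liballoc_lock_flag.clear(std::memory_order_release);
	return 0;
}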
3597  core/memory/heap_allocators/rpmalloc/rpmalloc.c  (new file; diff suppressed because it is too large)
371   core/memory/heap_allocators/rpmalloc/rpmalloc.h  (new file)
@@ -0,0 +1,371 @@
|
||||
/* rpmalloc.h - Memory allocator - Public Domain - 2016 Mattias Jansson
|
||||
*
|
||||
* This library provides a cross-platform lock free thread caching malloc implementation in C11.
|
||||
* The latest source code is always available at
|
||||
*
|
||||
* https://github.com/mjansson/rpmalloc
|
||||
*
|
||||
* This library is put in the public domain; you can redistribute it and/or modify it without any restrictions.
|
||||
*
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
#if defined(__clang__) || defined(__GNUC__)
|
||||
#define RPMALLOC_EXPORT __attribute__((visibility("default")))
|
||||
#define RPMALLOC_ALLOCATOR
|
||||
#if (defined(__clang_major__) && (__clang_major__ < 4)) || (defined(__GNUC__) && defined(ENABLE_PRELOAD) && ENABLE_PRELOAD)
|
||||
#define RPMALLOC_ATTRIB_MALLOC
|
||||
#define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
|
||||
#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size)
|
||||
#else
|
||||
#define RPMALLOC_ATTRIB_MALLOC __attribute__((__malloc__))
|
||||
#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) __attribute__((alloc_size(size)))
|
||||
#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) __attribute__((alloc_size(count, size)))
|
||||
#endif
|
||||
#define RPMALLOC_CDECL
|
||||
#elif defined(_MSC_VER)
|
||||
#define RPMALLOC_EXPORT
|
||||
#define RPMALLOC_ALLOCATOR __declspec(allocator) __declspec(restrict)
|
||||
#define RPMALLOC_ATTRIB_MALLOC
|
||||
#define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
|
||||
#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size)
|
||||
#define RPMALLOC_CDECL __cdecl
|
||||
#else
|
||||
#define RPMALLOC_EXPORT
|
||||
#define RPMALLOC_ALLOCATOR
|
||||
#define RPMALLOC_ATTRIB_MALLOC
|
||||
#define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
|
||||
#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size)
|
||||
#define RPMALLOC_CDECL
|
||||
#endif
|
||||
|
||||
//! Define RPMALLOC_CONFIGURABLE to enable configuring sizes. Will introduce
|
||||
// a very small overhead due to some size calculations not being compile time constants
|
||||
#ifndef RPMALLOC_CONFIGURABLE
|
||||
#define RPMALLOC_CONFIGURABLE 1
|
||||
#endif
|
||||
|
||||
//! Define RPMALLOC_FIRST_CLASS_HEAPS to enable heap based API (rpmalloc_heap_* functions).
|
||||
// Will introduce a very small overhead to track fully allocated spans in heaps
|
||||
#ifndef RPMALLOC_FIRST_CLASS_HEAPS
|
||||
#define RPMALLOC_FIRST_CLASS_HEAPS 0
|
||||
#endif
|
||||
|
||||
//! Flag to rpaligned_realloc to not preserve content in reallocation
|
||||
#define RPMALLOC_NO_PRESERVE 1
|
||||
//! Flag to rpaligned_realloc to fail and return null pointer if grow cannot be done in-place,
|
||||
// in which case the original pointer is still valid (just like a call to realloc which failes to allocate
|
||||
// a new block).
|
||||
#define RPMALLOC_GROW_OR_FAIL 2
|
||||
|
||||
typedef struct rpmalloc_global_statistics_t
|
||||
{
|
||||
//! Current amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
|
||||
size_t mapped;
|
||||
//! Peak amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
|
||||
size_t mapped_peak;
|
||||
//! Current amount of memory in global caches for small and medium sizes (<32KiB)
|
||||
size_t cached;
|
||||
//! Current amount of memory allocated in huge allocations, i.e larger than LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
|
||||
size_t huge_alloc;
|
||||
//! Peak amount of memory allocated in huge allocations, i.e larger than LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
|
||||
size_t huge_alloc_peak;
|
||||
//! Total amount of memory mapped since initialization (only if ENABLE_STATISTICS=1)
|
||||
size_t mapped_total;
|
||||
//! Total amount of memory unmapped since initialization (only if ENABLE_STATISTICS=1)
|
||||
size_t unmapped_total;
|
||||
} rpmalloc_global_statistics_t;
|
||||
|
||||
typedef struct rpmalloc_thread_statistics_t
|
||||
{
|
||||
//! Current number of bytes available in thread size class caches for small and medium sizes (<32KiB)
|
||||
size_t sizecache;
|
||||
//! Current number of bytes available in thread span caches for small and medium sizes (<32KiB)
|
||||
size_t spancache;
|
||||
//! Total number of bytes transitioned from thread cache to global cache (only if ENABLE_STATISTICS=1)
|
||||
size_t thread_to_global;
|
||||
//! Total number of bytes transitioned from global cache to thread cache (only if ENABLE_STATISTICS=1)
|
||||
size_t global_to_thread;
|
||||
//! Per span count statistics (only if ENABLE_STATISTICS=1)
|
||||
struct
|
||||
{
|
||||
//! Currently used number of spans
|
||||
size_t current;
|
||||
//! High water mark of spans used
|
||||
size_t peak;
|
||||
//! Number of spans transitioned to global cache
|
||||
size_t to_global;
|
||||
//! Number of spans transitioned from global cache
|
||||
size_t from_global;
|
||||
//! Number of spans transitioned to thread cache
|
||||
size_t to_cache;
|
||||
//! Number of spans transitioned from thread cache
|
||||
size_t from_cache;
|
||||
//! Number of spans transitioned to reserved state
|
||||
size_t to_reserved;
|
||||
//! Number of spans transitioned from reserved state
|
||||
size_t from_reserved;
|
||||
//! Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
|
||||
size_t map_calls;
|
||||
} span_use[64];
|
||||
//! Per size class statistics (only if ENABLE_STATISTICS=1)
|
||||
struct
|
||||
{
|
||||
//! Current number of allocations
|
||||
size_t alloc_current;
|
||||
//! Peak number of allocations
|
||||
size_t alloc_peak;
|
||||
//! Total number of allocations
|
||||
size_t alloc_total;
|
||||
//! Total number of frees
|
||||
size_t free_total;
|
||||
//! Number of spans transitioned to cache
|
||||
size_t spans_to_cache;
|
||||
//! Number of spans transitioned from cache
|
||||
size_t spans_from_cache;
|
||||
//! Number of spans transitioned from reserved state
|
||||
size_t spans_from_reserved;
|
||||
//! Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
|
||||
size_t map_calls;
|
||||
} size_use[128];
|
||||
} rpmalloc_thread_statistics_t;
|
||||
|
||||
typedef struct rpmalloc_config_t
|
||||
{
|
||||
//! Map memory pages for the given number of bytes. The returned address MUST be
|
||||
// aligned to the rpmalloc span size, which will always be a power of two.
|
||||
// Optionally the function can store an alignment offset in the offset variable
|
||||
// in case it performs alignment and the returned pointer is offset from the
|
||||
// actual start of the memory region due to this alignment. The alignment offset
|
||||
// will be passed to the memory unmap function. The alignment offset MUST NOT be
|
||||
// larger than 65535 (storable in an uint16_t), if it is you must use natural
|
||||
// alignment to shift it into 16 bits. If you set a memory_map function, you
|
||||
// must also set a memory_unmap function or else the default implementation will
|
||||
// be used for both. This function must be thread safe, it can be called by
|
||||
// multiple threads simultaneously.
|
||||
void *(*memory_map)(size_t size, size_t *offset);
|
||||
//! Unmap the memory pages starting at address and spanning the given number of bytes.
|
||||
// If release is set to non-zero, the unmap is for an entire span range as returned by
|
||||
// a previous call to memory_map and that the entire range should be released. The
|
||||
// release argument holds the size of the entire span range. If release is set to 0,
|
||||
// the unmap is a partial decommit of a subset of the mapped memory range.
|
||||
// If you set a memory_unmap function, you must also set a memory_map function or
|
||||
// else the default implementation will be used for both. This function must be thread
|
||||
// safe, it can be called by multiple threads simultaneously.
|
||||
void (*memory_unmap)(void *address, size_t size, size_t offset, size_t release);
|
||||
//! Called when an assert fails, if asserts are enabled. Will use the standard assert()
|
||||
// if this is not set.
|
||||
void (*error_callback)(const char *message);
|
||||
//! Called when a call to map memory pages fails (out of memory). If this callback is
|
||||
// not set or returns zero the library will return a null pointer in the allocation
|
||||
// call. If this callback returns non-zero the map call will be retried. The argument
|
||||
// passed is the number of bytes that was requested in the map call. Only used if
|
||||
// the default system memory map function is used (memory_map callback is not set).
|
||||
int (*map_fail_callback)(size_t size);
|
||||
//! Size of memory pages. The page size MUST be a power of two. All memory mapping
|
||||
// requests to memory_map will be made with size set to a multiple of the page size.
|
||||
// Used if RPMALLOC_CONFIGURABLE is defined to 1, otherwise system page size is used.
|
||||
size_t page_size;
|
||||
//! Size of a span of memory blocks. MUST be a power of two, and in [4096,262144]
|
||||
// range (unless 0 - set to 0 to use the default span size). Used if RPMALLOC_CONFIGURABLE
|
||||
// is defined to 1.
|
||||
size_t span_size;
|
||||
//! Number of spans to map at each request to map new virtual memory blocks. This can
|
||||
// be used to minimize the system call overhead at the cost of virtual memory address
|
||||
// space. The extra mapped pages will not be written until actually used, so physical
|
||||
// committed memory should not be affected in the default implementation. Will be
|
||||
// aligned to a multiple of spans that match memory page size in case of huge pages.
|
||||
size_t span_map_count;
|
||||
//! Enable use of large/huge pages. If this flag is set to non-zero and page size is
|
||||
// zero, the allocator will try to enable huge pages and auto detect the configuration.
|
||||
// If this is set to non-zero and page_size is also non-zero, the allocator will
|
||||
// assume huge pages have been configured and enabled prior to initializing the
|
||||
// allocator.
|
||||
// For Windows, see https://docs.microsoft.com/en-us/windows/desktop/memory/large-page-support
|
||||
// For Linux, see https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
|
||||
int enable_huge_pages;
|
||||
//! Respectively allocated pages and huge allocated pages names for systems
|
||||
// supporting it to be able to distinguish among anonymous regions.
|
||||
const char *page_name;
|
||||
const char *huge_page_name;
|
||||
} rpmalloc_config_t;
|
||||
|
||||
//! Initialize allocator with default configuration
|
||||
RPMALLOC_EXPORT int
|
||||
rpmalloc_initialize(void);
|
||||
|
||||
//! Initialize allocator with given configuration
|
||||
RPMALLOC_EXPORT int
|
||||
rpmalloc_initialize_config(const rpmalloc_config_t *config);
|
||||
|
||||
//! Get allocator configuration
|
||||
RPMALLOC_EXPORT const rpmalloc_config_t *
|
||||
rpmalloc_config(void);
|
||||
|
||||
//! Finalize allocator
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_finalize(void);
|
||||
|
||||
//! Initialize allocator for calling thread
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_thread_initialize(void);
|
||||
|
||||
//! Finalize allocator for calling thread
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_thread_finalize(int release_caches);
|
||||
|
||||
//! Perform deferred deallocations pending for the calling thread heap
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_thread_collect(void);
|
||||
|
||||
//! Query if allocator is initialized for calling thread
|
||||
RPMALLOC_EXPORT int
|
||||
rpmalloc_is_thread_initialized(void);
|
||||
|
||||
//! Get per-thread statistics
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_thread_statistics(rpmalloc_thread_statistics_t *stats);
|
||||
|
||||
//! Get global statistics
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_global_statistics(rpmalloc_global_statistics_t *stats);
|
||||
|
||||
//! Dump all statistics in human readable format to file (should be a FILE*)
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_dump_statistics(void *file);
|
||||
|
||||
//! Allocate a memory block of at least the given size
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
|
||||
rpmalloc(size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1);
|
||||
|
||||
//! Free the given memory block
|
||||
RPMALLOC_EXPORT void
|
||||
rpfree(void *ptr);
|
||||
|
||||
//! Allocate a memory block of at least the given size and zero initialize it
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
|
||||
rpcalloc(size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(1, 2);
|
||||
|
||||
//! Reallocate the given block to at least the given size
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
|
||||
rprealloc(void *ptr, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
|
||||
|
||||
//! Reallocate the given block to at least the given size and alignment,
|
||||
// with optional control flags (see RPMALLOC_NO_PRESERVE).
|
||||
// Alignment must be a power of two and a multiple of sizeof(void*),
|
||||
// and should ideally be less than memory page size. A caveat of rpmalloc
|
||||
// internals is that this must also be strictly less than the span size (default 64KiB)
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
|
||||
rpaligned_realloc(void *ptr, size_t alignment, size_t size, size_t oldsize, unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(3);
|
||||
|
||||
//! Allocate a memory block of at least the given size and alignment.
|
||||
// Alignment must be a power of two and a multiple of sizeof(void*),
|
||||
// and should ideally be less than memory page size. A caveat of rpmalloc
|
||||
// internals is that this must also be strictly less than the span size (default 64KiB)
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
|
||||
rpaligned_alloc(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
|
||||
|
||||
//! Allocate a memory block of at least the given size and alignment, and zero initialize it.
|
||||
// Alignment must be a power of two and a multiple of sizeof(void*),
|
||||
// and should ideally be less than memory page size. A caveat of rpmalloc
|
||||
// internals is that this must also be strictly less than the span size (default 64KiB)
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
|
||||
rpaligned_calloc(size_t alignment, size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);
|
||||
|
||||
//! Allocate a memory block of at least the given size and alignment.
|
||||
// Alignment must be a power of two and a multiple of sizeof(void*),
|
||||
// and should ideally be less than memory page size. A caveat of rpmalloc
|
||||
// internals is that this must also be strictly less than the span size (default 64KiB)
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
|
||||
rpmemalign(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
|
||||
|
||||
//! Allocate a memory block of at least the given size and alignment.
|
||||
// Alignment must be a power of two and a multiple of sizeof(void*),
|
||||
// and should ideally be less than memory page size. A caveat of rpmalloc
|
||||
// internals is that this must also be strictly less than the span size (default 64KiB)
|
||||
RPMALLOC_EXPORT int
|
||||
rpposix_memalign(void **memptr, size_t alignment, size_t size);
|
||||
|
||||
//! Query the usable size of the given memory block (from given pointer to the end of block)
|
||||
RPMALLOC_EXPORT size_t
|
||||
rpmalloc_usable_size(void *ptr);
|
||||
|
||||
#if RPMALLOC_FIRST_CLASS_HEAPS
|
||||
|
||||
//! Heap type
|
||||
typedef struct heap_t rpmalloc_heap_t;
|
||||
|
||||
//! Acquire a new heap. Will reuse existing released heaps or allocate memory for a new heap
|
||||
// if none available. Heap API is implemented with the strict assumption that only one single
|
||||
// thread will call heap functions for a given heap at any given time, no functions are thread safe.
|
||||
RPMALLOC_EXPORT rpmalloc_heap_t *
|
||||
rpmalloc_heap_acquire(void);
|
||||
|
||||
//! Release a heap (does NOT free the memory allocated by the heap, use rpmalloc_heap_free_all before destroying the heap).
|
||||
// Releasing a heap will enable it to be reused by other threads. Safe to pass a null pointer.
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_heap_release(rpmalloc_heap_t *heap);
|
||||
|
||||
//! Allocate a memory block of at least the given size using the given heap.
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
|
||||
rpmalloc_heap_alloc(rpmalloc_heap_t *heap, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
|
||||
|
||||
//! Allocate a memory block of at least the given size using the given heap. The returned
|
||||
// block will have the requested alignment. Alignment must be a power of two and a multiple of sizeof(void*),
|
||||
// and should ideally be less than memory page size. A caveat of rpmalloc
|
||||
// internals is that this must also be strictly less than the span size (default 64KiB).
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
|
||||
rpmalloc_heap_aligned_alloc(rpmalloc_heap_t *heap, size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(3);
|
||||
|
||||
//! Allocate a memory block of at least the given size using the given heap and zero initialize it.
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
|
||||
rpmalloc_heap_calloc(rpmalloc_heap_t *heap, size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);
|
||||
|
||||
//! Allocate a memory block of at least the given size using the given heap and zero initialize it. The returned
|
||||
// block will have the requested alignment. Alignment must either be zero, or a power of two and a multiple of sizeof(void*),
|
||||
// and should ideally be less than memory page size. A caveat of rpmalloc
|
||||
// internals is that this must also be strictly less than the span size (default 64KiB).
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
|
||||
rpmalloc_heap_aligned_calloc(rpmalloc_heap_t *heap, size_t alignment, size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);
|
||||
|
||||
//! Reallocate the given block to at least the given size. The memory block MUST be allocated
|
||||
// by the same heap given to this function.
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
|
||||
rpmalloc_heap_realloc(rpmalloc_heap_t *heap, void *ptr, size_t size, unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(3);
|
||||
|
||||
//! Reallocate the given block to at least the given size. The memory block MUST be allocated
|
||||
// by the same heap given to this function. The returned block will have the requested alignment.
|
||||
// Alignment must be either zero, or a power of two and a multiple of sizeof(void*), and should ideally be
|
||||
// less than memory page size. A caveat of rpmalloc internals is that this must also be strictly less than
|
||||
// the span size (default 64KiB).
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
|
||||
rpmalloc_heap_aligned_realloc(rpmalloc_heap_t *heap, void *ptr, size_t alignment, size_t size, unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(4);
|
||||
|
||||
//! Free the given memory block from the given heap. The memory block MUST be allocated
|
||||
// by the same heap given to this function.
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_heap_free(rpmalloc_heap_t *heap, void *ptr);
|
||||
|
||||
//! Free all memory allocated by the heap
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_heap_free_all(rpmalloc_heap_t *heap);
|
||||
|
||||
//! Set the given heap as the current heap for the calling thread. A heap MUST only be current heap
|
||||
// for a single thread, a heap can never be shared between multiple threads. The previous
|
||||
// current heap for the calling thread is released to be reused by other threads.
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_heap_thread_set_current(rpmalloc_heap_t *heap);
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
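The memory_map / memory_unmap contract documented above has a few easy-to-miss details (span alignment, the 16-bit offset, the release size). Below is a hedged sketch of a callback pair built over a hypothetical page allocator; RequestPages / FreePages and the fixed 64 KiB span size are assumptions for illustration, not the Fennix wiring (the kernel's own initialization later in this commit passes null callbacks and relies on rpmalloc's defaults).

/* Illustrative rpmalloc callbacks over a hypothetical page allocator.
   Alignment handling is simplified: we over-allocate by one span and
   report the alignment shift through *offset, as the contract requires
   (the offset stays well below the 65535 limit for page-aligned raw memory). */
#include <cstddef>
#include <cstdint>

// Assumed kernel primitives; not part of rpmalloc.
extern void *RequestPages(std::size_t pages);
extern void FreePages(void *address, std::size_t pages);

static constexpr std::size_t kPageSize = 0x1000;
static constexpr std::size_t kSpanSize = 0x10000; // 64 KiB default span

static void *example_memory_map(std::size_t size, std::size_t *offset)
{
	// Over-allocate so the returned pointer can be aligned to the span size.
	std::size_t total = size + kSpanSize;
	std::uintptr_t raw = (std::uintptr_t)RequestPages(total / kPageSize);
	if (!raw)
		return nullptr;

	std::uintptr_t aligned = (raw + kSpanSize - 1) & ~(std::uintptr_t)(kSpanSize - 1);
	*offset = aligned - raw; // rpmalloc hands this back to memory_unmap
	return (void *)aligned;
}

static void example_memory_unmap(void *address, std::size_t size, std::size_t offset, std::size_t release)
{
	(void)size;
	if (!release)
		return; // partial decommit: nothing to do for a plain page allocator

	// Undo the alignment shift, then return the whole over-allocated range
	// (release covers the entire span range handed out by memory_map).
	std::uintptr_t raw = (std::uintptr_t)address - offset;
	FreePages((void *)raw, (release + kSpanSize) / kPageSize);
}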
91  core/memory/heap_allocators/rpmalloc/rpmalloc_compat.cpp  (new file)
@@ -0,0 +1,91 @@
|
||||
/*
|
||||
This file is part of Fennix Kernel.
|
||||
|
||||
Fennix Kernel is free software: you can redistribute it and/or
|
||||
modify it under the terms of the GNU General Public License as
|
||||
published by the Free Software Foundation, either version 3 of
|
||||
the License, or (at your option) any later version.
|
||||
|
||||
Fennix Kernel is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <memory/macro.hpp>
|
||||
#include <sys/mman.h>
|
||||
#include <memory.hpp>
|
||||
#include <assert.h>
|
||||
#include <unistd.h>
|
||||
|
||||
// #include "rpmalloc.c"
|
||||
#include "../../../../kernel.h"
|
||||
|
||||
struct heap_t
|
||||
{
|
||||
char pad[56408];
|
||||
};
|
||||
|
||||
static heap_t *__rpmalloc_tls_heap = nullptr;
|
||||
EXTERNC heap_t **__memory_thread_heap(void)
|
||||
{
|
||||
if (unlikely(!TaskManager || !thisThread))
|
||||
{
|
||||
if (unlikely(!__rpmalloc_tls_heap))
|
||||
{
|
||||
__rpmalloc_tls_heap = (heap_t *)KernelAllocator.RequestPages(TO_PAGES(sizeof(heap_t)));
|
||||
debug("rpmalloc TLS heap: %#lx", __rpmalloc_tls_heap);
|
||||
memset(__rpmalloc_tls_heap, 0, sizeof(heap_t));
|
||||
assert(__rpmalloc_tls_heap);
|
||||
}
|
||||
return &__rpmalloc_tls_heap;
|
||||
}
|
||||
return &__rpmalloc_tls_heap;
|
||||
heap_t *heap = (heap_t *)thisThread->TLS.pBase;
|
||||
return (heap_t **)heap;
|
||||
}
|
||||
|
||||
EXTERNC uintptr_t __get_tid(void)
|
||||
{
|
||||
if (unlikely(!TaskManager || !thisThread))
|
||||
return (uintptr_t)-1;
|
||||
return thisThread->ID;
|
||||
}
|
||||
|
||||
EXTERNC long __rpmalloc_sysconf(int name)
|
||||
{
|
||||
switch (name)
|
||||
{
|
||||
case _SC_PAGESIZE:
|
||||
return PAGE_SIZE;
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
EXTERNC void *__rpmalloc_mmap(void *addr, size_t length, int, int, int fd, off_t offset)
|
||||
{
|
||||
assert(addr == 0 && fd == -1 && offset == 0);
|
||||
|
||||
void *ptr = KernelAllocator.RequestPages(TO_PAGES(length));
|
||||
debug("Requested %d pages, got %p", TO_PAGES(length), ptr);
|
||||
if (ptr == nullptr)
|
||||
return MAP_FAILED;
|
||||
return ptr;
|
||||
}
|
||||
|
||||
EXTERNC int __rpmalloc_munmap(void *addr, size_t length)
|
||||
{
|
||||
KernelAllocator.FreePages(addr, TO_PAGES(length));
|
||||
debug("Freed %d pages at %p", TO_PAGES(length), addr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXTERNC int __rpmalloc_posix_madvise(void *addr, size_t length, int advice)
|
||||
{
|
||||
function("%#lx %d %d", addr, length, advice);
|
||||
return 0;
|
||||
}
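The __memory_thread_heap() shim above hands rpmalloc a pointer to a per-thread heap slot. A condensed sketch of how such a slot is typically consumed follows; get_thread_heap and create_heap are assumptions for illustration, not rpmalloc's real internal symbols.

#include <cstddef>

struct heap_t; // opaque, as in the compat layer above

// Provided by the compat layer (see rpmalloc_compat.cpp above).
extern "C" heap_t **__memory_thread_heap(void);

// Hypothetical consumer: fetch the calling thread's heap, initializing the
// slot on first use. This only demonstrates the double-pointer indirection
// the shim is built for.
static heap_t *get_thread_heap(heap_t *(*create_heap)(void))
{
	heap_t **slot = __memory_thread_heap();
	if (*slot == nullptr)
		*slot = create_heap();
	return *slot;
}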
|
@@ -26,6 +26,7 @@

#include "heap_allocators/Xalloc/Xalloc.hpp"
#include "heap_allocators/liballoc_1_1/liballoc_1_1.h"
+#include "heap_allocators/rpmalloc/rpmalloc.h"
#include "../../kernel.h"

// #define DEBUG_ALLOCATIONS 1
@ -72,7 +73,7 @@ NIF void tracepagetable(PageTable *pt)
|
||||
NIF void MapFromZero(PageTable *PT)
|
||||
{
|
||||
debug("Mapping from 0x0 to %#llx", bInfo.Memory.Size);
|
||||
Virtual va = Virtual(PT);
|
||||
Virtual vmm = Virtual(PT);
|
||||
size_t MemSize = bInfo.Memory.Size;
|
||||
|
||||
if (Page1GBSupport && PSESupport)
|
||||
@ -80,29 +81,29 @@ NIF void MapFromZero(PageTable *PT)
|
||||
/* Map the first 100MB of memory as 4KB pages */
|
||||
|
||||
// uintptr_t Physical4KBSectionStart = 0x10000000;
|
||||
// va.Map((void *)0,
|
||||
// vmm.Map((void *)0,
|
||||
// (void *)0,
|
||||
// Physical4KBSectionStart,
|
||||
// PTFlag::RW);
|
||||
// RW);
|
||||
|
||||
// va.Map((void *)Physical4KBSectionStart,
|
||||
// vmm.Map((void *)Physical4KBSectionStart,
|
||||
// (void *)Physical4KBSectionStart,
|
||||
// MemSize - Physical4KBSectionStart,
|
||||
// PTFlag::RW,
|
||||
// RW,
|
||||
// Virtual::MapType::OneGiB);
|
||||
|
||||
va.Map((void *)0, (void *)0, MemSize, PTFlag::RW);
|
||||
vmm.Map((void *)0, (void *)0, MemSize, RW);
|
||||
}
|
||||
else
|
||||
va.Map((void *)0, (void *)0, MemSize, PTFlag::RW);
|
||||
vmm.Map((void *)0, (void *)0, MemSize, RW);
|
||||
|
||||
va.Unmap((void *)0);
|
||||
vmm.Unmap((void *)0);
|
||||
}
|
||||
|
||||
NIF void MapFramebuffer(PageTable *PT, bool PSE, bool OneGB)
|
||||
NIF void MapFramebuffer(PageTable *PT)
|
||||
{
|
||||
debug("Mapping Framebuffer");
|
||||
Virtual va = Virtual(PT);
|
||||
Virtual vmm = Virtual(PT);
|
||||
int itrfb = 0;
|
||||
while (1)
|
||||
{
|
||||
@ -111,17 +112,17 @@ NIF void MapFramebuffer(PageTable *PT, bool PSE, bool OneGB)
|
||||
|
||||
size_t fbSize = bInfo.Framebuffer[itrfb].Pitch * bInfo.Framebuffer[itrfb].Height;
|
||||
|
||||
if (PSE && OneGB)
|
||||
if (PSESupport && Page1GBSupport)
|
||||
{
|
||||
va.OptimizedMap(bInfo.Framebuffer[itrfb].BaseAddress,
|
||||
bInfo.Framebuffer[itrfb].BaseAddress,
|
||||
fbSize, PTFlag::RW | PTFlag::US | PTFlag::G);
|
||||
vmm.OptimizedMap(bInfo.Framebuffer[itrfb].BaseAddress,
|
||||
bInfo.Framebuffer[itrfb].BaseAddress,
|
||||
fbSize, RW | US | G);
|
||||
}
|
||||
else
|
||||
{
|
||||
va.Map(bInfo.Framebuffer[itrfb].BaseAddress,
|
||||
bInfo.Framebuffer[itrfb].BaseAddress,
|
||||
fbSize, PTFlag::RW | PTFlag::US | PTFlag::G);
|
||||
vmm.Map(bInfo.Framebuffer[itrfb].BaseAddress,
|
||||
bInfo.Framebuffer[itrfb].BaseAddress,
|
||||
fbSize, RW | US | G);
|
||||
}
|
||||
itrfb++;
|
||||
}
|
||||
@ -176,14 +177,14 @@ NIF void MapKernel(PageTable *PT)
|
||||
uintptr_t BaseKernelMapAddress = (uintptr_t)bInfo.Kernel.PhysicalBase;
|
||||
debug("Base kernel map address: %#lx", BaseKernelMapAddress);
|
||||
uintptr_t k;
|
||||
Virtual va = Virtual(PT);
|
||||
Virtual vmm = Virtual(PT);
|
||||
|
||||
/* Bootstrap section */
|
||||
if (BaseKernelMapAddress == BootstrapStart)
|
||||
{
|
||||
for (k = BootstrapStart; k < BootstrapEnd; k += PAGE_SIZE)
|
||||
{
|
||||
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
|
||||
vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G);
|
||||
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
|
||||
BaseKernelMapAddress += PAGE_SIZE;
|
||||
}
|
||||
@ -197,7 +198,7 @@ NIF void MapKernel(PageTable *PT)
|
||||
/* Text section */
|
||||
for (k = KernelTextStart; k < KernelTextEnd; k += PAGE_SIZE)
|
||||
{
|
||||
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
|
||||
vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G);
|
||||
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
|
||||
BaseKernelMapAddress += PAGE_SIZE;
|
||||
}
|
||||
@ -205,7 +206,7 @@ NIF void MapKernel(PageTable *PT)
|
||||
/* Data section */
|
||||
for (k = KernelDataStart; k < KernelDataEnd; k += PAGE_SIZE)
|
||||
{
|
||||
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
|
||||
vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G);
|
||||
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
|
||||
BaseKernelMapAddress += PAGE_SIZE;
|
||||
}
|
||||
@ -213,7 +214,7 @@ NIF void MapKernel(PageTable *PT)
|
||||
/* Read only data section */
|
||||
for (k = KernelRoDataStart; k < KernelRoDataEnd; k += PAGE_SIZE)
|
||||
{
|
||||
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::G);
|
||||
vmm.Map((void *)k, (void *)BaseKernelMapAddress, G);
|
||||
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
|
||||
BaseKernelMapAddress += PAGE_SIZE;
|
||||
}
|
||||
@ -221,7 +222,7 @@ NIF void MapKernel(PageTable *PT)
|
||||
/* Block starting symbol section */
|
||||
for (k = KernelBssStart; k < KernelBssEnd; k += PAGE_SIZE)
|
||||
{
|
||||
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
|
||||
vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G);
|
||||
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
|
||||
BaseKernelMapAddress += PAGE_SIZE;
|
||||
}
|
||||
@ -233,10 +234,63 @@ NIF void MapKernel(PageTable *PT)
|
||||
{
|
||||
for (k = KernelFileStart; k < KernelFileEnd; k += PAGE_SIZE)
|
||||
{
|
||||
va.Map((void *)k, (void *)k, PTFlag::G);
|
||||
vmm.Map((void *)k, (void *)k, G);
|
||||
KernelAllocator.ReservePage((void *)k);
|
||||
}
|
||||
}
|
||||
else
|
||||
info("Cannot determine kernel file address. Ignoring.");
|
||||
}
|
||||
|
||||
NIF void CreatePageTable(PageTable *pt)
|
||||
{
|
||||
static int check_cpuid = 0;
|
||||
|
||||
if (!check_cpuid++)
|
||||
{
|
||||
if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_AMD) == 0)
|
||||
{
|
||||
CPU::x86::AMD::CPUID0x80000001 cpuid;
|
||||
cpuid.Get();
|
||||
PSESupport = cpuid.EDX.PSE;
|
||||
Page1GBSupport = cpuid.EDX.Page1GB;
|
||||
}
|
||||
else if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_INTEL) == 0)
|
||||
{
|
||||
CPU::x86::Intel::CPUID0x00000001 cpuid;
|
||||
cpuid.Get();
|
||||
PSESupport = cpuid.EDX.PSE;
|
||||
}
|
||||
|
||||
if (PSESupport)
|
||||
{
|
||||
#if defined(a64)
|
||||
CPU::x64::CR4 cr4 = CPU::x64::readcr4();
|
||||
cr4.PSE = 1;
|
||||
CPU::x64::writecr4(cr4);
|
||||
#elif defined(a32)
|
||||
CPU::x32::CR4 cr4 = CPU::x32::readcr4();
|
||||
cr4.PSE = 1;
|
||||
CPU::x32::writecr4(cr4);
|
||||
#elif defined(aa64)
|
||||
#endif
|
||||
trace("PSE Support Enabled");
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
if (Page1GBSupport)
|
||||
debug("1GB Page Support Enabled");
|
||||
#endif
|
||||
}
|
||||
|
||||
/* TODO: Map faster */
|
||||
MapFromZero(pt);
|
||||
MapFramebuffer(pt);
|
||||
MapKernel(pt);
|
||||
|
||||
#ifdef DEBUG
|
||||
tracepagetable(pt);
|
||||
#endif
|
||||
}
|
||||
|
||||
NIF void InitializeMemoryManagement()
|
||||
@ -312,58 +366,59 @@ NIF void InitializeMemoryManagement()
|
||||
KernelPageTable = (PageTable *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE + 1));
|
||||
memset(KernelPageTable, 0, PAGE_SIZE);
|
||||
|
||||
if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_AMD) == 0)
|
||||
{
|
||||
CPU::x86::AMD::CPUID0x80000001 cpuid;
|
||||
cpuid.Get();
|
||||
PSESupport = cpuid.EDX.PSE;
|
||||
Page1GBSupport = cpuid.EDX.Page1GB;
|
||||
}
|
||||
else if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_INTEL) == 0)
|
||||
{
|
||||
CPU::x86::Intel::CPUID0x00000001 cpuid;
|
||||
cpuid.Get();
|
||||
PSESupport = cpuid.EDX.PSE;
|
||||
}
|
||||
CreatePageTable(KernelPageTable);
|
||||
|
||||
if (PSESupport)
|
||||
{
|
||||
#if defined(a64)
|
||||
CPU::x64::CR4 cr4 = CPU::x64::readcr4();
|
||||
cr4.PSE = 1;
|
||||
CPU::x64::writecr4(cr4);
|
||||
#elif defined(a32)
|
||||
CPU::x32::CR4 cr4 = CPU::x32::readcr4();
|
||||
cr4.PSE = 1;
|
||||
CPU::x32::writecr4(cr4);
|
||||
#elif defined(aa64)
|
||||
#endif
|
||||
trace("PSE Support Enabled");
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
if (Page1GBSupport)
|
||||
debug("1GB Page Support Enabled");
|
||||
#endif
|
||||
|
||||
MapFromZero(KernelPageTable);
|
||||
MapFramebuffer(KernelPageTable, PSESupport, Page1GBSupport);
|
||||
MapKernel(KernelPageTable);
|
||||
|
||||
trace("Applying new page table from address %#lx", KernelPageTable);
|
||||
#ifdef DEBUG
|
||||
tracepagetable(KernelPageTable);
|
||||
#endif
|
||||
trace("Applying new page table from address %#lx",
|
||||
KernelPageTable);
|
||||
CPU::PageTable(KernelPageTable);
|
||||
debug("Page table updated.");
|
||||
|
||||
XallocV1Allocator = new Xalloc::V1((void *)KERNEL_HEAP_BASE, false, false);
|
||||
XallocV2Allocator = new Xalloc::V2((void *)KERNEL_HEAP_BASE);
|
||||
trace("XallocV1 Allocator initialized at %#lx", XallocV1Allocator);
|
||||
trace("XallocV2 Allocator initialized at %#lx", XallocV2Allocator);
|
||||
/* FIXME: Read kernel params */
|
||||
AllocatorType = Config.AllocatorType;
|
||||
|
||||
/* FIXME: Read kernel config */
|
||||
AllocatorType = MemoryAllocatorType::liballoc11;
|
||||
switch (AllocatorType)
|
||||
{
|
||||
case MemoryAllocatorType::Pages:
|
||||
break;
|
||||
case MemoryAllocatorType::XallocV1:
|
||||
{
|
||||
XallocV1Allocator = new Xalloc::V1((void *)KERNEL_HEAP_BASE, false, false);
|
||||
trace("XallocV1 Allocator initialized at %#lx", XallocV1Allocator);
|
||||
break;
|
||||
}
|
||||
case MemoryAllocatorType::XallocV2:
|
||||
{
|
||||
XallocV2Allocator = new Xalloc::V2((void *)KERNEL_HEAP_BASE);
|
||||
trace("XallocV2 Allocator initialized at %#lx", XallocV2Allocator);
|
||||
break;
|
||||
}
|
||||
case MemoryAllocatorType::liballoc11:
|
||||
break;
|
||||
case MemoryAllocatorType::rpmalloc_:
|
||||
{
|
||||
trace("Using rpmalloc allocator");
|
||||
rpmalloc_initialize();
|
||||
break;
|
||||
rpmalloc_config_t config = {
|
||||
.memory_map = nullptr,
|
||||
.memory_unmap = nullptr,
|
||||
.error_callback = nullptr,
|
||||
.map_fail_callback = nullptr,
|
||||
.page_size = PAGE_SIZE,
|
||||
.span_size = 4 * 1024, /* 4 KiB */
|
||||
.span_map_count = 1,
|
||||
.enable_huge_pages = 0,
|
||||
.page_name = nullptr,
|
||||
.huge_page_name = nullptr};
|
||||
rpmalloc_initialize_config(&config);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
error("Unknown allocator type %d", AllocatorType);
|
||||
CPU::Stop();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void *malloc(size_t Size)
|
||||
@ -371,7 +426,7 @@ void *malloc(size_t Size)
|
||||
assert(Size > 0);
|
||||
|
||||
memdbg("malloc(%d)->[%s]", Size,
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
|
||||
: "Unknown");
|
||||
|
||||
void *ret = nullptr;
|
||||
@ -397,6 +452,11 @@ void *malloc(size_t Size)
|
||||
ret = PREFIX(malloc)(Size);
|
||||
break;
|
||||
}
|
||||
case MemoryAllocatorType::rpmalloc_:
|
||||
{
|
||||
ret = rpmalloc(Size);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
error("Unknown allocator type %d", AllocatorType);
|
||||
@ -413,7 +473,7 @@ void *calloc(size_t n, size_t Size)
|
||||
assert(Size > 0);
|
||||
|
||||
memdbg("calloc(%d, %d)->[%s]", n, Size,
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
|
||||
: "Unknown");
|
||||
|
||||
void *ret = nullptr;
|
||||
@ -439,6 +499,11 @@ void *calloc(size_t n, size_t Size)
|
||||
void *ret = PREFIX(calloc)(n, Size);
|
||||
return ret;
|
||||
}
|
||||
case MemoryAllocatorType::rpmalloc_:
|
||||
{
|
||||
ret = rpcalloc(n, Size);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
error("Unknown allocator type %d", AllocatorType);
|
||||
@ -455,7 +520,7 @@ void *realloc(void *Address, size_t Size)
|
||||
assert(Size > 0);
|
||||
|
||||
memdbg("realloc(%#lx, %d)->[%s]", Address, Size,
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
|
||||
: "Unknown");
|
||||
|
||||
void *ret = nullptr;
|
||||
@ -481,6 +546,11 @@ void *realloc(void *Address, size_t Size)
|
||||
void *ret = PREFIX(realloc)(Address, Size);
|
||||
return ret;
|
||||
}
|
||||
case MemoryAllocatorType::rpmalloc_:
|
||||
{
|
||||
ret = rprealloc(Address, Size);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
error("Unknown allocator type %d", AllocatorType);
|
||||
@ -497,7 +567,7 @@ void free(void *Address)
|
||||
assert(Address != nullptr);
|
||||
|
||||
memdbg("free(%#lx)->[%s]", Address,
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
|
||||
: "Unknown");
|
||||
|
||||
switch (AllocatorType)
|
||||
@ -523,6 +593,11 @@ void free(void *Address)
|
||||
(Address);
|
||||
break;
|
||||
}
|
||||
case MemoryAllocatorType::rpmalloc_:
|
||||
{
|
||||
rpfree(Address);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
error("Unknown allocator type %d", AllocatorType);
|
||||
@ -536,7 +611,7 @@ void *operator new(std::size_t Size)
|
||||
assert(Size > 0);
|
||||
|
||||
memdbg("new(%d)->[%s]", Size,
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
|
||||
: "Unknown");
|
||||
|
||||
void *ret = malloc(Size);
|
||||
@ -548,7 +623,7 @@ void *operator new[](std::size_t Size)
|
||||
assert(Size > 0);
|
||||
|
||||
memdbg("new[](%d)->[%s]", Size,
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
|
||||
: "Unknown");
|
||||
|
||||
void *ret = malloc(Size);
|
||||
@ -560,7 +635,7 @@ void *operator new(std::size_t Size, std::align_val_t Alignment)
|
||||
assert(Size > 0);
|
||||
|
||||
memdbg("new(%d, %d)->[%s]", Size, Alignment,
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
|
||||
: "Unknown");
|
||||
|
||||
fixme("operator new with alignment(%#lx) is not implemented",
|
||||
@ -575,7 +650,7 @@ void operator delete(void *Pointer)
|
||||
assert(Pointer != nullptr);
|
||||
|
||||
memdbg("delete(%#lx)->[%s]", Pointer,
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
|
||||
: "Unknown");
|
||||
|
||||
free(Pointer);
|
||||
@ -586,7 +661,7 @@ void operator delete[](void *Pointer)
|
||||
assert(Pointer != nullptr);
|
||||
|
||||
memdbg("delete[](%#lx)->[%s]", Pointer,
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
|
||||
: "Unknown");
|
||||
|
||||
free(Pointer);
|
||||
@ -599,7 +674,7 @@ void operator delete(void *Pointer, long unsigned int Size)
|
||||
|
||||
memdbg("delete(%#lx, %d)->[%s]",
|
||||
Pointer, Size,
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
|
||||
: "Unknown");
|
||||
|
||||
free(Pointer);
|
||||
@ -612,7 +687,7 @@ void operator delete[](void *Pointer, long unsigned int Size)
|
||||
|
||||
memdbg("delete[](%#lx, %d)->[%s]",
|
||||
Pointer, Size,
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0))
|
||||
KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
|
||||
: "Unknown");
|
||||
|
||||
free(Pointer);
|
||||
|
@ -1,6 +1,9 @@
|
||||
#include <memory.hpp>
|
||||
|
||||
#include <filesystem.hpp>
|
||||
#include <signal.hpp>
|
||||
#include <utsname.h>
|
||||
#include <time.h>
|
||||
|
||||
namespace Memory
|
||||
{
|
||||
@ -13,29 +16,75 @@ namespace Memory
|
||||
#endif
|
||||
}
|
||||
|
||||
PageTable PageTable::Fork()
|
||||
PageTable *PageTable::Fork()
|
||||
{
|
||||
PageTable NewTable;
|
||||
memcpy(&NewTable, this, sizeof(PageTable));
|
||||
PageTable *NewTable = (PageTable *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageTable)));
|
||||
// memset(NewTable, 0, sizeof(PageTable));
|
||||
// CreatePageTable(NewTable);
|
||||
memcpy(NewTable, this, sizeof(PageTable));
|
||||
|
||||
debug("Forking page table %#lx to %#lx", this, NewTable);
|
||||
#if defined(a64)
|
||||
for (size_t i = 0; i < sizeof(Entries) / sizeof(Entries[0]); i++)
|
||||
{
|
||||
PageMapLevel4 *PML4 = &Entries[i];
|
||||
PageMapLevel4 *NewPML4 = &NewTable->Entries[i];
|
||||
if (!PML4->Present)
|
||||
continue;
|
||||
|
||||
PageDirectoryPointerTableEntryPtr *ptrPDPT = (PageDirectoryPointerTableEntryPtr *)(PML4->GetAddress() << 12);
|
||||
PageDirectoryPointerTableEntryPtr *ptrNewPDPT = (PageDirectoryPointerTableEntryPtr *)KernelAllocator.RequestPage();
|
||||
NewPML4->SetAddress((uintptr_t)ptrNewPDPT >> 12);
|
||||
for (size_t j = 0; j < sizeof(ptrPDPT->Entries) / sizeof(ptrPDPT->Entries[0]); j++)
|
||||
{
|
||||
PageDirectoryPointerTableEntry *PDPT = &ptrPDPT->Entries[j];
|
||||
PageDirectoryPointerTableEntry *NewPDPT = &ptrNewPDPT->Entries[j];
|
||||
*NewPDPT = *PDPT;
|
||||
|
||||
if (!PDPT->Present)
|
||||
continue;
|
||||
if (PDPT->PageSize)
|
||||
continue;
|
||||
|
||||
PageDirectoryEntryPtr *ptrPDE = (PageDirectoryEntryPtr *)(PDPT->GetAddress() << 12);
|
||||
PageDirectoryEntryPtr *ptrNewPDE = (PageDirectoryEntryPtr *)KernelAllocator.RequestPage();
|
||||
NewPDPT->SetAddress((uintptr_t)ptrNewPDE >> 12);
|
||||
for (size_t k = 0; k < sizeof(ptrPDE->Entries) / sizeof(ptrPDE->Entries[0]); k++)
|
||||
{
|
||||
PageDirectoryEntry *PDE = &ptrPDE->Entries[k];
|
||||
PageDirectoryEntry *NewPDE = &ptrNewPDE->Entries[k];
|
||||
*NewPDE = *PDE;
|
||||
|
||||
if (!PDE->Present)
|
||||
continue;
|
||||
if (PDE->PageSize)
|
||||
continue;
|
||||
|
||||
PageTableEntryPtr *ptrPTE = (PageTableEntryPtr *)(PDE->GetAddress() << 12);
|
||||
PageTableEntryPtr *ptrNewPTE = (PageTableEntryPtr *)KernelAllocator.RequestPage();
|
||||
NewPDE->SetAddress((uintptr_t)ptrNewPTE >> 12);
|
||||
for (size_t l = 0; l < sizeof(ptrPTE->Entries) / sizeof(ptrPTE->Entries[0]); l++)
|
||||
{
|
||||
PageTableEntry *PTE = &ptrPTE->Entries[l];
|
||||
PageTableEntry *NewPTE = &ptrNewPTE->Entries[l];
|
||||
*NewPTE = *PTE;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
#else
|
||||
#error "PageTable::Fork() not implemented for other architectures"
|
||||
#endif
|
||||
|
||||
debug("Forked page table %#lx to %#lx", this, NewTable);
|
||||
return NewTable;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
T PageTable::Get(T Address)
|
||||
/* We can't have Memory::Virtual in the header */
|
||||
void *PageTable::__getPhysical(void *Address)
|
||||
{
|
||||
Virtual vmm = Virtual(this);
|
||||
Virtual vmm(this);
|
||||
void *PhysAddr = vmm.GetPhysical((void *)Address);
|
||||
uintptr_t Diff = uintptr_t(Address);
|
||||
Diff &= 0xFFF;
|
||||
Diff = uintptr_t(PhysAddr) + Diff;
|
||||
return (T)Diff;
|
||||
return PhysAddr;
|
||||
}
|
||||
|
||||
/* Templates */
|
||||
template struct stat *PageTable::Get<struct stat *>(struct stat *);
|
||||
template const char *PageTable::Get<const char *>(const char *);
|
||||
template const void *PageTable::Get<const void *>(const void *);
|
||||
template uintptr_t PageTable::Get<uintptr_t>(uintptr_t);
|
||||
template void *PageTable::Get<void *>(void *);
|
||||
/* ... */
|
||||
}
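A small aside on the << 12 / >> 12 shifts in Fork() above: x86-64 paging structures store a 4 KiB-aligned physical frame number rather than a raw pointer, so SetAddress() divides by the page size and GetAddress() multiplies back. A minimal illustration, with a made-up address:

#include <cassert>
#include <cstdint>

int main()
{
	std::uintptr_t physical = 0x1234000;   // page-aligned physical address
	std::uintptr_t field = physical >> 12; // what SetAddress() stores in the entry
	assert((field << 12) == physical);     // what (GetAddress() << 12) recovers
	return 0;
}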
|
||||
|
@ -107,9 +107,11 @@ namespace Memory
|
||||
}
|
||||
|
||||
error("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
|
||||
TO_MiB(FreeMemory), TO_MiB(UsedMemory), TO_MiB(ReservedMemory));
|
||||
TO_MiB(FreeMemory.load()), TO_MiB(UsedMemory.load()), TO_MiB(ReservedMemory.load()));
|
||||
KPrint("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
|
||||
TO_MiB(FreeMemory), TO_MiB(UsedMemory), TO_MiB(ReservedMemory));
|
||||
TO_MiB(FreeMemory.load()), TO_MiB(UsedMemory.load()), TO_MiB(ReservedMemory.load()));
|
||||
debug("Raw values: free %#lx used %#lx reserved %#lx",
|
||||
FreeMemory.load(), UsedMemory.load(), ReservedMemory.load());
|
||||
CPU::Stop();
|
||||
__builtin_unreachable();
|
||||
}
|
||||
@ -157,9 +159,11 @@ namespace Memory
|
||||
}
|
||||
|
||||
error("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
|
||||
TO_MiB(FreeMemory), TO_MiB(UsedMemory), TO_MiB(ReservedMemory));
|
||||
TO_MiB(FreeMemory.load()), TO_MiB(UsedMemory.load()), TO_MiB(ReservedMemory.load()));
|
||||
KPrint("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
|
||||
TO_MiB(FreeMemory), TO_MiB(UsedMemory), TO_MiB(ReservedMemory));
|
||||
TO_MiB(FreeMemory.load()), TO_MiB(UsedMemory.load()), TO_MiB(ReservedMemory.load()));
|
||||
debug("Raw values: free %#lx used %#lx reserved %#lx",
|
||||
FreeMemory.load(), UsedMemory.load(), ReservedMemory.load());
|
||||
CPU::Halt(true);
|
||||
__builtin_unreachable();
|
||||
}
|
||||
@ -185,8 +189,8 @@ namespace Memory
|
||||
|
||||
if (PageBitmap.Set(Index, false))
|
||||
{
|
||||
FreeMemory += PAGE_SIZE;
|
||||
UsedMemory -= PAGE_SIZE;
|
||||
FreeMemory.fetch_add(PAGE_SIZE);
|
||||
UsedMemory.fetch_sub(PAGE_SIZE);
|
||||
if (PageBitmapIndex > Index)
|
||||
PageBitmapIndex = Index;
|
||||
}
|
||||
@ -215,8 +219,8 @@ namespace Memory
|
||||
|
||||
if (PageBitmap.Set(Index, true))
|
||||
{
|
||||
FreeMemory -= PAGE_SIZE;
|
||||
UsedMemory += PAGE_SIZE;
|
||||
FreeMemory.fetch_sub(PAGE_SIZE);
|
||||
UsedMemory.fetch_add(PAGE_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
@ -243,8 +247,8 @@ namespace Memory
|
||||
|
||||
if (PageBitmap.Set(Index, true))
|
||||
{
|
||||
FreeMemory -= PAGE_SIZE;
|
||||
ReservedMemory += PAGE_SIZE;
|
||||
FreeMemory.fetch_sub(PAGE_SIZE);
|
||||
ReservedMemory.fetch_add(PAGE_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
@ -264,8 +268,8 @@ namespace Memory
|
||||
|
||||
if (PageBitmap.Set(Index, true))
|
||||
{
|
||||
FreeMemory -= PAGE_SIZE;
|
||||
ReservedMemory += PAGE_SIZE;
|
||||
FreeMemory.fetch_sub(PAGE_SIZE);
|
||||
ReservedMemory.fetch_add(PAGE_SIZE);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -282,8 +286,8 @@ namespace Memory
|
||||
|
||||
if (PageBitmap.Set(Index, false))
|
||||
{
|
||||
FreeMemory += PAGE_SIZE;
|
||||
ReservedMemory -= PAGE_SIZE;
|
||||
FreeMemory.fetch_add(PAGE_SIZE);
|
||||
ReservedMemory.fetch_sub(PAGE_SIZE);
|
||||
if (PageBitmapIndex > Index)
|
||||
PageBitmapIndex = Index;
|
||||
}
|
||||
@ -305,8 +309,8 @@ namespace Memory
|
||||
|
||||
if (PageBitmap.Set(Index, false))
|
||||
{
|
||||
FreeMemory += PAGE_SIZE;
|
||||
ReservedMemory -= PAGE_SIZE;
|
||||
FreeMemory.fetch_add(PAGE_SIZE);
|
||||
ReservedMemory.fetch_sub(PAGE_SIZE);
|
||||
if (PageBitmapIndex > Index)
|
||||
PageBitmapIndex = Index;
|
||||
}
|
||||
@ -320,8 +324,8 @@ namespace Memory
|
||||
uint64_t MemorySize = bInfo.Memory.Size;
|
||||
debug("Memory size: %lld bytes (%ld pages)",
|
||||
MemorySize, TO_PAGES(MemorySize));
|
||||
TotalMemory = MemorySize;
|
||||
FreeMemory = MemorySize;
|
||||
TotalMemory.store(MemorySize);
|
||||
FreeMemory.store(MemorySize);
|
||||
|
||||
size_t BitmapSize = (size_t)(MemorySize / PAGE_SIZE) / 8 + 1;
|
||||
uintptr_t BitmapAddress = 0x0;
|
||||
|
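The hunks above switch the page counters to std::atomic and update them with fetch_add / fetch_sub. As a minimal illustration of the pattern, with a hypothetical wrapper class (only the field names mirror the diff):

#include <atomic>
#include <cstdint>

constexpr std::uint64_t PAGE_SIZE = 0x1000;

struct PageCounters
{
	std::atomic<std::uint64_t> FreeMemory{0};
	std::atomic<std::uint64_t> UsedMemory{0};

	void OnAllocate()
	{
		// fetch_sub/fetch_add keep the two counters consistent under
		// concurrent allocations without a lock around the bitmap scan.
		FreeMemory.fetch_sub(PAGE_SIZE);
		UsedMemory.fetch_add(PAGE_SIZE);
	}

	void OnFree()
	{
		FreeMemory.fetch_add(PAGE_SIZE);
		UsedMemory.fetch_sub(PAGE_SIZE);
	}
};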
@ -80,7 +80,7 @@ namespace Memory
|
||||
if (this->UserMode)
|
||||
{
|
||||
std::vector<AllocatedPages> ParentAllocatedPages = Parent->GetAllocatedPages();
|
||||
Virtual vma = Virtual(this->vma->GetTable());
|
||||
Virtual vma(this->vma->GetTable());
|
||||
foreach (auto Page in ParentAllocatedPages)
|
||||
{
|
||||
void *NewPhysical = this->vma->RequestPages(1);
|
||||
@ -162,6 +162,7 @@ namespace Memory
|
||||
}
|
||||
|
||||
debug("Allocated stack at %#lx", this->StackBottom);
|
||||
debug("Stack Range: %#lx - %#lx", this->StackBottom, this->StackTop);
|
||||
}
|
||||
|
||||
StackGuard::~StackGuard()
|
||||
|
@ -74,6 +74,8 @@ namespace Memory
|
||||
bool VirtualMemoryArea::Add(void *Address, size_t Count)
|
||||
{
|
||||
SmartLock(MgrLock);
|
||||
function("%#lx, %lld", Address, Count);
|
||||
|
||||
if (Address == nullptr)
|
||||
{
|
||||
error("Address is null!");
|
||||
@ -118,7 +120,10 @@ namespace Memory
|
||||
void *VirtualMemoryArea::RequestPages(size_t Count, bool User)
|
||||
{
|
||||
SmartLock(MgrLock);
|
||||
function("%lld, %s", Count, User ? "true" : "false");
|
||||
|
||||
void *Address = KernelAllocator.RequestPages(Count);
|
||||
memset(Address, 0, Count * PAGE_SIZE);
|
||||
for (size_t i = 0; i < Count; i++)
|
||||
{
|
||||
int Flags = Memory::PTFlag::RW;
|
||||
@ -127,23 +132,19 @@ namespace Memory
|
||||
|
||||
void *AddressToMap = (void *)((uintptr_t)Address + (i * PAGE_SIZE));
|
||||
|
||||
Memory::Virtual vmm = Memory::Virtual(this->Table);
|
||||
vmm.Remap(AddressToMap, AddressToMap, Flags);
|
||||
Memory::Virtual vmm(this->Table);
|
||||
vmm.Map(AddressToMap, AddressToMap, Flags);
|
||||
}
|
||||
|
||||
AllocatedPagesList.push_back({Address, Count});
|
||||
|
||||
/* For security reasons, we clear the allocated page
|
||||
if it's a user page. */
|
||||
if (User)
|
||||
memset(Address, 0, Count * PAGE_SIZE);
|
||||
|
||||
return Address;
|
||||
}
|
||||
|
||||
void VirtualMemoryArea::FreePages(void *Address, size_t Count)
|
||||
{
|
||||
SmartLock(MgrLock);
|
||||
function("%#lx, %lld", Address, Count);
|
||||
|
||||
forItr(itr, AllocatedPagesList)
|
||||
{
|
||||
if (itr->Address == Address)
|
||||
@ -162,7 +163,7 @@ namespace Memory

		KernelAllocator.FreePages(Address, Count);

		Memory::Virtual vmm = Memory::Virtual(this->Table);
		Memory::Virtual vmm(this->Table);
		for (size_t i = 0; i < Count; i++)
		{
			void *AddressToMap = (void *)((uintptr_t)Address + (i * PAGE_SIZE));
@ -178,6 +179,8 @@ namespace Memory
	void VirtualMemoryArea::DetachAddress(void *Address)
	{
		SmartLock(MgrLock);
		function("%#lx", Address);

		forItr(itr, AllocatedPagesList)
		{
			if (itr->Address == Address)
@ -193,7 +196,14 @@ namespace Memory
											  bool Read, bool Write, bool Exec,
											  bool Fixed, bool Shared)
	{
		Memory::Virtual vmm = Memory::Virtual(this->Table);
		function("%#lx, %lld, %s, %s, %s, %s, %s", Address, Length,
				 Read ? "true" : "false",
				 Write ? "true" : "false",
				 Exec ? "true" : "false",
				 Fixed ? "true" : "false",
				 Shared ? "true" : "false");

		Memory::Virtual vmm(this->Table);

		// FIXME
		// for (uintptr_t j = uintptr_t(Address);
@ -209,20 +219,21 @@ namespace Memory
		// }

		bool AnyAddress = Address == nullptr;
		debug("AnyAddress: %s", AnyAddress ? "true" : "false");

		if (AnyAddress)
		{
			Address = this->RequestPages(1);
			if (Address == nullptr)
				return nullptr;
			memset(Address, 0, PAGE_SIZE);
			Address = this->RequestPages(TO_PAGES(Length), true);
			debug("Allocated %#lx-%#lx for pt %#lx",
				  Address, (uintptr_t)Address + Length, this->Table);
			return Address;
		}

		SmartLock(MgrLock);
		vmm.Unmap(Address, Length);
		vmm.Map(Address, nullptr, Length, PTFlag::CoW);

		if (AnyAddress)
			vmm.Remap(Address, Address, PTFlag::RW | PTFlag::US);
		debug("CoW region created at range %#lx-%#lx for pt %#lx",
			  Address, (uintptr_t)Address + Length, this->Table);

		SharedRegion sr{
			.Address = Address,
@ -235,13 +246,15 @@ namespace Memory
			.ReferenceCount = 0,
		};
		SharedRegions.push_back(sr);
		debug("CoW region created at %#lx for pt %#lx",
			  Address, this->Table);
		return Address;
	}

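As the hunks read, CreateCoWRegion now hands anonymous requests real pages immediately and returns, while fixed-address regions are unmapped and re-mapped with only the CoW flag so the first touch faults into HandleCoW. A toy illustration of the write-protect-and-tag step; ToyEntry and its fields are invented for the sketch and do not match the kernel's PageTableEntry layout:

#include <cstddef>
#include <cstdint>

// Toy page-table entry used only for this sketch.
struct ToyEntry
{
	bool Writable = false;
	bool CopyOnWrite = false;
	std::uintptr_t Frame = 0;
};

// Write-protect a range and tag it copy-on-write: the first write faults,
// and the fault handler is the one that supplies a private frame later.
void MarkRegionCoW(ToyEntry *Entries, std::size_t PageCount)
{
	for (std::size_t i = 0; i < PageCount; i++)
	{
		Entries[i].Writable = false;   // first write traps
		Entries[i].CopyOnWrite = true; // tells the fault handler why
	}
}
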
	bool VirtualMemoryArea::HandleCoW(uintptr_t PFA)
	{
		function("%#lx", PFA);
		Memory::Virtual vmm = Memory::Virtual(this->Table);
		Memory::Virtual vmm(this->Table);
		Memory::PageTableEntry *pte = vmm.GetPTE((void *)PFA);

		if (!pte)
@ -260,6 +273,9 @@ namespace Memory

				if (PFA >= Start && PFA < End)
				{
					debug("Start: %#lx, End: %#lx (PFA: %#lx)",
						  Start, End, PFA);

					if (sr.Shared)
					{
						fixme("Shared CoW");
@ -272,30 +288,117 @@ namespace Memory
							return false;
						memset(pAddr, 0, PAGE_SIZE);

						uint64_t Flags = 0;
						if (sr.Read)
							Flags |= PTFlag::US;
						if (sr.Write)
							Flags |= PTFlag::RW;
						// if (sr.Exec)
						// 	Flags |= PTFlag::XD;
						assert(pte->Present == true);
						pte->ReadWrite = sr.Write;
						pte->UserSupervisor = sr.Read;
						pte->ExecuteDisable = sr.Exec;

						vmm.Remap((void *)PFA, pAddr, Flags);
						pte->CopyOnWrite = false;
						debug("PFA %#lx is CoW (pt %#lx, flags %#lx)",
							  PFA, this->Table, pte->raw);
#if defined(a64)
						CPU::x64::invlpg((void *)PFA);
#elif defined(a32)
						CPU::x32::invlpg((void *)PFA);
#endif
						return true;
					}
				}
			}
		}

		debug("PFA %#lx is not CoW", PFA);
		debug("PFA %#lx is not CoW (pt %#lx)",
			  PFA, this->Table);
		return false;
	}

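HandleCoW above resolves that fault: it finds the PTE for the faulting address, locates the owning shared region, allocates and zeroes a fresh frame, restores the region's read/write/execute permissions, remaps the page, clears the CoW marker, and flushes the TLB entry with invlpg. A compact, self-contained toy version of the same sequence, assuming a simplified entry structure rather than the real architecture-specific PTE:

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>

static constexpr std::size_t PAGE_SIZE = 0x1000;

// Toy page-table entry; the real layout is architecture specific.
struct ToyPTE
{
	bool Present = true;
	bool Writable = false;
	bool User = false;
	bool CopyOnWrite = true;
	void *Frame = nullptr;
};

// Resolve a CoW fault on `pte` for a region with the given permissions.
bool ResolveCoWFault(ToyPTE &pte, bool RegionWritable, bool RegionUser)
{
	if (!pte.CopyOnWrite)
		return false; // not a CoW page; let the normal fault path handle it

	void *NewFrame = std::aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	if (NewFrame == nullptr)
		return false;
	std::memset(NewFrame, 0, PAGE_SIZE); // anonymous CoW pages start zeroed

	pte.Frame = NewFrame;          // point the entry at the private copy
	pte.Writable = RegionWritable; // restore the region's real permissions
	pte.User = RegionUser;
	pte.CopyOnWrite = false;       // the next write must not fault again
	// A real kernel would also invalidate the TLB entry here (e.g. invlpg).
	return true;
}
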
	void VirtualMemoryArea::FreeAllPages()
	{
		SmartLock(MgrLock);
		foreach (auto ap in AllocatedPagesList)
		{
			KernelAllocator.FreePages(ap.Address, ap.PageCount);
			Memory::Virtual vmm(this->Table);
			for (size_t i = 0; i < ap.PageCount; i++)
				vmm.Remap((void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
						  (void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
						  Memory::PTFlag::RW);
		}
		AllocatedPagesList.clear();
	}

	void VirtualMemoryArea::Fork(VirtualMemoryArea *Parent)
	{
		function("%#lx", Parent);

		if (Parent == nullptr)
		{
			error("Parent is null!");
			return;
		}

		if (Parent->Table == nullptr)
		{
			error("Parent's table is null!");
			return;
		}

		Memory::Virtual vmm(this->Table);
		SmartLock(MgrLock);
		foreach (auto ap in Parent->GetAllocatedPagesList())
		{
			MgrLock.Unlock();
			void *Address = this->RequestPages(ap.PageCount);
			MgrLock.Lock(__FUNCTION__);
			if (Address == nullptr)
				return;

			memcpy(Address, ap.Address, ap.PageCount * PAGE_SIZE);

			// map these new allocated pages to be the same as the parent
			for (size_t i = 0; i < ap.PageCount; i++)
			{
				void *AddressToMap = (void *)((uintptr_t)ap.Address + (i * PAGE_SIZE));
				void *RealAddress = (void *)((uintptr_t)Address + (i * PAGE_SIZE));

#if defined(a86)
				Memory::PageTableEntry *pte = vmm.GetPTE(AddressToMap);
				uintptr_t Flags = 0;
				Flags |= pte->Present ? 1UL : 0;
				Flags |= pte->ReadWrite ? 2UL : 0;
				Flags |= pte->UserSupervisor ? 4UL : 0;
				Flags |= pte->CopyOnWrite ? 512UL : 0;

				debug("Mapping %#lx to %#lx (flags %s/%s/%s/%s)",
					  RealAddress, AddressToMap,
					  Flags & PTFlag::P ? "P" : "-",
					  Flags & PTFlag::RW ? "RW" : "-",
					  Flags & PTFlag::US ? "US" : "-",
					  Flags & PTFlag::CoW ? "CoW" : "-");
				vmm.Map(AddressToMap, RealAddress, Flags);
#else
#warning "Not implemented"
#endif
			}
		}

		foreach (auto sr in Parent->GetSharedRegions())
		{
			MgrLock.Unlock();
			void *Address = this->CreateCoWRegion(sr.Address, sr.Length,
												  sr.Read, sr.Write, sr.Exec,
												  sr.Fixed, sr.Shared);
			MgrLock.Lock(__FUNCTION__);
			if (Address == nullptr)
				return;
			memcpy(Address, sr.Address, sr.Length);
		}
	}

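In Fork above, the child's mapping flags are rebuilt from the parent's PTE by OR-ing raw bit positions: Present is bit 0 (1), Read/Write bit 1 (2), User/Supervisor bit 2 (4), and the software-defined CoW marker bit 9 (512). A short sketch of that flag reconstruction; the enum values follow standard x86 paging bits plus this kernel's CoW convention, and ParentPTEView is an invented stand-in for the real PageTableEntry:

#include <cstdint>

// Mirrors the bits composed in Fork(); values follow x86 PTE bit positions,
// with CoW placed in one of the software-available (ignored) bits.
enum PTFlagBits : std::uintptr_t
{
	P   = 1UL << 0, // Present
	RW  = 1UL << 1, // Read/Write
	US  = 1UL << 2, // User/Supervisor
	CoW = 1UL << 9, // software-defined copy-on-write marker
};

struct ParentPTEView
{
	bool Present, ReadWrite, UserSupervisor, CopyOnWrite;
};

std::uintptr_t RebuildChildFlags(const ParentPTEView &pte)
{
	std::uintptr_t Flags = 0;
	Flags |= pte.Present ? P : 0;
	Flags |= pte.ReadWrite ? RW : 0;
	Flags |= pte.UserSupervisor ? US : 0;
	Flags |= pte.CopyOnWrite ? CoW : 0;
	return Flags;
}
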
	VirtualMemoryArea::VirtualMemoryArea(PageTable *Table)
	{
		debug("+ %#lx %s", this,
			  KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "");
			  KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0)) : "");

		SmartLock(MgrLock);
		if (Table)
@ -316,12 +419,22 @@ namespace Memory
	VirtualMemoryArea::~VirtualMemoryArea()
	{
		debug("- %#lx %s", this,
			  KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "");
			  KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0)) : "");

#ifdef DEBUG
		if (this->Table == KernelPageTable)
			debug("Not remapping kernel page table allocated pages.");
#endif

		SmartLock(MgrLock);
		Memory::Virtual vmm(this->Table);
		foreach (auto ap in AllocatedPagesList)
		{
			KernelAllocator.FreePages(ap.Address, ap.PageCount);
			Memory::Virtual vmm = Memory::Virtual(this->Table);

			if (this->Table == KernelPageTable)
				continue;

			for (size_t i = 0; i < ap.PageCount; i++)
				vmm.Remap((void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
						  (void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
@ -20,15 +20,25 @@
#include <convert.h>
#include <debug.h>

#include "../../kernel.h"

namespace Memory
{
	Virtual::Virtual(PageTable *Table)
	{
		if (Table)
			this->Table = Table;
		else
			this->Table = (PageTable *)CPU::PageTable();
	}
	Virtual::Virtual(PageTable *Table)
	{
		if (Table)
			this->pTable = Table;
		else
			this->pTable = thisPageTable;

	Virtual::~Virtual() {}
		// debug("+ %#lx (PT: %#lx) %s", this, this->pTable,
		// 	  KernelSymbolTable
		// 	  ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
		// 	  : "Unknown");
	}

	Virtual::~Virtual()
	{
		// debug("- %#lx", this);
	}
}
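
The rewritten Virtual constructor keeps the old fallback: when no table is passed it uses the one currently active on the CPU (previously CPU::PageTable(), now the thisPageTable shorthand), which is what lets call sites such as Memory::Virtual vmm(this->Table) elsewhere in this commit work with either an explicit table or the current one. A minimal illustration of that constructor pattern, with PageTable and CurrentPageTable as stand-ins rather than the kernel's types:

struct PageTable { /* opaque in this sketch */ };

// Stand-in for reading the active page table (e.g. CR3 on x86).
inline PageTable *CurrentPageTable()
{
	static PageTable active;
	return &active;
}

class VirtualSketch
{
	PageTable *pTable;

public:
	// Fall back to the CPU's active table when the caller passes nullptr,
	// mirroring the behaviour of Memory::Virtual in the hunk above.
	explicit VirtualSketch(PageTable *Table = nullptr)
	    : pTable(Table ? Table : CurrentPageTable())
	{
	}

	PageTable *Table() const { return pTable; }
};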