Merge remote-tracking branch 'Kernel/multiboot2_64' into Kernel-multiboot2_64

EnderIce2
2024-11-20 05:17:14 +02:00
291 changed files with 54773 additions and 0 deletions

View File

@@ -0,0 +1,137 @@
# Xalloc
Xalloc is a custom memory allocator designed for hobby operating systems. It is written in C++ and provides a simple, efficient way to manage heap memory in your OS.
#### ❗ This project is still in development and is not ready for use in production environments. ❗
---
## Features
- **Simple API** - Xalloc provides a simple API for allocating and freeing memory. It is designed to be easy to use and understand.
- [ ] TODO: complete this
---
## Getting Started
### Implementing missing functions
You will need to implement the following functions in your OS:
##### Wrapper.cpp
```cpp
extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages)
{
// ...
}
extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages)
{
// ...
}
extern "C" void Xalloc_MAP_MEMORY(void *VirtualAddress, void *PhysicalAddress, Xsize_t Flags)
{
// ...
}
extern "C" void Xalloc_UNMAP_MEMORY(void *VirtualAddress)
{
// ...
}
```
##### Xalloc.hpp
```cpp
#define Xalloc_PAGE_SIZE <page size> /* <-- Replace with your page size */
#define Xalloc_trace(m, ...) <trace function>
#define Xalloc_warn(m, ...) <warning function>
#define Xalloc_err(m, ...) <error function>
#define Xalloc_def <define a lock> /* e.g. std::mutex Xalloc_lock; */
#define Xalloc_lock <lock function>
#define Xalloc_unlock <unlock function>
```
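For illustration, here is a minimal sketch of these definitions for a hosted (userspace) test build. The 4096-byte page size, `printf`-based logging, and `std::mutex` lock are assumptions for illustration only; a kernel would wire these to its own page allocator and lock, as the Wrapper.cpp and Xalloc.hpp files in this commit do:
```cpp
#include <cstdio>
#include <mutex>

#define Xalloc_PAGE_SIZE 4096 /* assumed page size */
#define Xalloc_trace(m, ...) std::printf("[trace] " m "\n", ##__VA_ARGS__)
#define Xalloc_warn(m, ...) std::printf("[warn] " m "\n", ##__VA_ARGS__)
#define Xalloc_err(m, ...) std::printf("[err] " m "\n", ##__VA_ARGS__)
#define Xalloc_def std::mutex Xalloc_mtx
#define Xalloc_lock Xalloc_mtx.lock()
#define Xalloc_unlock Xalloc_mtx.unlock()
```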
### Typical usage
```cpp
#include "Xalloc.hpp"
Xalloc::V1 *XallocV1Allocator = nullptr;
int main()
{
/* Virtual Base User SMAP */
XallocV1Allocator = new Xalloc::V1((void *)0xFFFFA00000000000, false, false);
void *p = XallocV1Allocator->malloc(1234);
/* ... */
XallocV1Allocator->free(p);
delete XallocV1Allocator;
return 0;
}
```
or
```cpp
#include "Xalloc.hpp"
int main()
{
/* Virtual Base User SMAP */
Xalloc::V1 XallocV1Allocator((void *)0xFFFFA00000000000, false, false);
void *p = XallocV1Allocator.malloc(1234);
/* ... */
XallocV1Allocator.free(p);
return 0;
}
```
---
## API
### Xalloc::V1
```cpp
void *malloc(Xsize_t Size);
```
Allocates a block of memory of size `Size` bytes.
If `Size` is 0, then `nullptr` is returned.
- `Size` - The size of the block to allocate in bytes.
<br><br>
```cpp
void free(void *Address);
```
Frees the memory block pointed to by `Address`.
If `Address` is `nullptr`, then no operation is performed.
- `Address` - The address of the memory block to free.
<br><br>
```cpp
void *calloc(Xsize_t NumberOfBlocks, Xsize_t Size);
```
Allocates a block of memory for an array of `NumberOfBlocks` elements, each of them `Size` bytes long.
If `NumberOfBlocks` or `Size` is 0, then `nullptr` is returned.
- `NumberOfBlocks` - The number of elements to allocate.
- `Size` - The size of each element in bytes.
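For example (a usage sketch; `XallocV1Allocator` is the instance created in the examples above):
```cpp
/* 32 elements of 8 bytes each: one 256-byte, zero-initialized block */
void *arr = XallocV1Allocator->calloc(32, 8);
```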
<br><br>
```cpp
void *realloc(void *Address, Xsize_t Size);
```
Changes the size of the memory block pointed to by `Address` to `Size` bytes.
If `Address` is `nullptr`, then the call is equivalent to `malloc(Size)`.
If `Size` is equal to zero, and `Address` is not `nullptr`, then the call is equivalent to `free(Address)`.
- `Address` - The address of the memory block to resize.
- `Size` - The new size of the memory block in bytes.
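A short usage sketch; note that in this commit `V1::realloc` is implemented as free-then-malloc (see the TODO in Xalloc.cpp), so the old contents are not copied to the new block yet:
```cpp
void *p = XallocV1Allocator->malloc(16);
p = XallocV1Allocator->realloc(p, 64); /* currently free + malloc(64); old contents are NOT copied */
XallocV1Allocator->realloc(p, 0);      /* equivalent to free(p); returns nullptr */
```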
---

View File

@@ -0,0 +1,23 @@
#include "Xalloc.hpp"
#include <memory.hpp>
extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages)
{
return KernelAllocator.RequestPages(Pages);
}
extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages)
{
KernelAllocator.FreePages(Address, Pages);
}
extern "C" void Xalloc_MAP_MEMORY(void *VirtualAddress, void *PhysicalAddress, Xsize_t Flags)
{
Memory::Virtual(KernelPageTable).Map(VirtualAddress, PhysicalAddress, Flags);
}
extern "C" void Xalloc_UNMAP_MEMORY(void *VirtualAddress)
{
Memory::Virtual(KernelPageTable).Unmap(VirtualAddress);
}

View File

@@ -0,0 +1,102 @@
#ifndef __FENNIX_KERNEL_Xalloc_H__
#define __FENNIX_KERNEL_Xalloc_H__
#include <memory.hpp>
#include <lock.hpp>
#include <debug.h>
typedef long unsigned Xuint64_t;
typedef long unsigned Xsize_t;
#define Xalloc_StopOnFail true
#define Xalloc_PAGE_SIZE PAGE_SIZE
#define Xalloc_trace(m, ...) trace(m, ##__VA_ARGS__)
#define Xalloc_warn(m, ...) warn(m, ##__VA_ARGS__)
#define Xalloc_err(m, ...) error(m, ##__VA_ARGS__)
#define Xalloc_def NewLock(XallocLock)
#define Xalloc_lock XallocLock.Lock(__FUNCTION__)
#define Xalloc_unlock XallocLock.Unlock()
namespace Xalloc
{
class V1
{
private:
void *BaseVirtualAddress = nullptr;
void *FirstBlock = nullptr;
void *LastBlock = nullptr;
bool UserMapping = false;
bool SMAPUsed = false;
public:
/** @brief Execute "stac" instruction if the kernel has SMAP enabled */
void Xstac();
/** @brief Execute "clac" instruction if the kernel has SMAP enabled */
void Xclac();
/**
* @brief Arrange the blocks to optimize the memory usage
* The allocator is not arranged by default
* to avoid performance issues.
* This function will defragment the memory
* and free the unused blocks.
*
* You should call this function when the
* kernel is idle or when it is not using
* the allocator.
*/
void Arrange();
/**
* @brief Allocate a new memory block
*
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated block.
*/
void *malloc(Xsize_t Size);
/**
* @brief Free a previously allocated block
*
* @param Address Address of the block to free.
*/
void free(void *Address);
/**
* @brief Allocate a new memory block
*
* @param NumberOfBlocks Number of blocks to allocate.
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated block.
*/
void *calloc(Xsize_t NumberOfBlocks, Xsize_t Size);
/**
* @brief Reallocate a previously allocated block
*
* @param Address Address of the block to reallocate.
* @param Size New size of the block.
* @return void* Pointer to the reallocated block.
*/
void *realloc(void *Address, Xsize_t Size);
/**
* @brief Construct a new Allocator object
*
* @param BaseVirtualAddress Virtual address to map the pages.
* @param UserMode Whether to map new pages with the USER flag.
* @param SMAPEnabled Whether the kernel has Supervisor Mode Access Prevention (SMAP) enabled.
*/
V1(void *BaseVirtualAddress, bool UserMode, bool SMAPEnabled);
/**
* @brief Destroy the Allocator object
*
*/
~V1();
};
}
#endif // !__FENNIX_KERNEL_Xalloc_H__

View File

@@ -0,0 +1,271 @@
#include "Xalloc.hpp"
Xalloc_def;
#define XALLOC_CONCAT(x, y) x##y
/* Bytes -> pages (always rounds up by one full page). */
#define XStoP(x) (((x) / Xalloc_PAGE_SIZE) + 1)
/* Pages -> bytes. */
#define XPtoS(x) ((x) * Xalloc_PAGE_SIZE)
#define Xalloc_BlockChecksum 0xA110C
extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages);
extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages);
extern "C" void Xalloc_MAP_MEMORY(void *VirtualAddress, void *PhysicalAddress, Xsize_t Flags);
extern "C" void Xalloc_UNMAP_MEMORY(void *VirtualAddress);
// TODO: Change memcpy with an optimized version
void *Xmemcpy(void *__restrict__ Destination, const void *__restrict__ Source, Xuint64_t Length)
{
unsigned char *dst = (unsigned char *)Destination;
const unsigned char *src = (const unsigned char *)Source;
for (Xuint64_t i = 0; i < Length; i++)
dst[i] = src[i];
return Destination;
}
// TODO: Change memset with an optimized version
void *Xmemset(void *__restrict__ Destination, int Data, Xuint64_t Length)
{
unsigned char *Buffer = (unsigned char *)Destination;
for (Xuint64_t i = 0; i < Length; i++)
Buffer[i] = (unsigned char)Data;
return Destination;
}
namespace Xalloc
{
class Block
{
public:
void *Address = nullptr;
int Checksum = Xalloc_BlockChecksum;
Xsize_t Size = 0;
Block *Next = nullptr;
Block *Last = nullptr;
bool IsFree = true;
bool Check()
{
if (this->Checksum != Xalloc_BlockChecksum)
return false;
return true;
}
Block(Xsize_t Size)
{
this->Address = Xalloc_REQUEST_PAGES(XStoP(Size));
this->Size = Size;
Xmemset(this->Address, 0, Size);
}
~Block()
{
Xalloc_FREE_PAGES(this->Address, XStoP(this->Size));
}
/**
* @brief Overload new operator to allocate memory from the heap
* @param Size Unused
* @return void* Pointer to the allocated memory
*/
void *operator new(Xsize_t Size)
{
void *ptr = Xalloc_REQUEST_PAGES(XStoP(sizeof(Block)));
return ptr;
}
/**
* @brief Overload delete operator to free memory from the heap
* @param Address Pointer to the memory to free
*/
void operator delete(void *Address)
{
Xalloc_FREE_PAGES(Address, XStoP(sizeof(Block)));
}
} __attribute__((packed, aligned(16)));
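/* RAII guard: runs Xstac() on construction and Xclac() on destruction,
so SMAP is suppressed exactly for the scope of an allocator call. */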
class SmartSMAPClass
{
private:
V1 *allocator = nullptr;
public:
SmartSMAPClass(V1 *allocator)
{
this->allocator = allocator;
this->allocator->Xstac();
}
~SmartSMAPClass() { this->allocator->Xclac(); }
};
#define SmartSMAP SmartSMAPClass XALLOC_CONCAT(SmartSMAP##_, __COUNTER__)(this)
void V1::Xstac()
{
if (this->SMAPUsed)
{
#if defined(a64) || defined(a32)
asm volatile("stac" ::
: "cc");
#endif
}
}
void V1::Xclac()
{
if (this->SMAPUsed)
{
#if defined(a64) || defined(a32)
asm volatile("clac" ::
: "cc");
#endif
}
}
void V1::Arrange()
{
Xalloc_err("Arrange() is not implemented yet!");
}
void *V1::malloc(Xsize_t Size)
{
if (Size == 0)
{
Xalloc_warn("Attempted to allocate 0 bytes!");
return nullptr;
}
SmartSMAP;
Xalloc_lock;
if (this->FirstBlock == nullptr)
{
this->FirstBlock = new Block(Size);
((Block *)this->FirstBlock)->IsFree = false;
Xalloc_unlock;
return ((Block *)this->FirstBlock)->Address;
}
Block *CurrentBlock = ((Block *)this->FirstBlock);
while (CurrentBlock != nullptr)
{
if (!CurrentBlock->Check())
{
Xalloc_err("Block %#lx has an invalid checksum! (%#x != %#x)",
(Xuint64_t)CurrentBlock, CurrentBlock->Checksum, Xalloc_BlockChecksum);
while (Xalloc_StopOnFail)
;
}
else if (CurrentBlock->IsFree && CurrentBlock->Size >= Size)
{
CurrentBlock->IsFree = false;
Xmemset(CurrentBlock->Address, 0, Size);
Xalloc_unlock;
return CurrentBlock->Address;
}
CurrentBlock = CurrentBlock->Next;
}
CurrentBlock = ((Block *)this->FirstBlock);
while (CurrentBlock->Next != nullptr)
CurrentBlock = CurrentBlock->Next;
CurrentBlock->Next = new Block(Size);
((Block *)CurrentBlock->Next)->Last = CurrentBlock;
((Block *)CurrentBlock->Next)->IsFree = false;
Xalloc_unlock;
return ((Block *)CurrentBlock->Next)->Address;
}
void V1::free(void *Address)
{
if (Address == nullptr)
{
Xalloc_warn("Attempted to free a null pointer!");
return;
}
SmartSMAP;
Xalloc_lock;
Block *CurrentBlock = ((Block *)this->FirstBlock);
while (CurrentBlock != nullptr)
{
if (!CurrentBlock->Check())
{
Xalloc_err("Block %#lx checksum failed!", (Xuint64_t)CurrentBlock);
while (Xalloc_StopOnFail)
;
}
else if (CurrentBlock->Address == Address)
{
if (CurrentBlock->IsFree)
{
Xalloc_warn("Attempted to free an already freed pointer!");
Xalloc_unlock;
return;
}
CurrentBlock->IsFree = true;
Xalloc_unlock;
return;
}
CurrentBlock = CurrentBlock->Next;
}
Xalloc_err("Invalid address.");
Xalloc_unlock;
}
void *V1::calloc(Xsize_t NumberOfBlocks, Xsize_t Size)
{
if (NumberOfBlocks == 0 || Size == 0)
{
Xalloc_warn("The %s%s%s is 0!",
NumberOfBlocks == 0 ? "NumberOfBlocks" : "",
NumberOfBlocks == 0 && Size == 0 ? " and " : "",
Size == 0 ? "Size" : "");
return nullptr;
}
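/* NOTE: NumberOfBlocks * Size can overflow Xsize_t; no overflow check is performed here. */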
return this->malloc(NumberOfBlocks * Size);
}
void *V1::realloc(void *Address, Xsize_t Size)
{
if (Address == nullptr)
return this->malloc(Size);
if (Size == 0)
{
this->free(Address);
return nullptr;
}
// SmartSMAP;
// Xalloc_lock;
// ...
// Xalloc_unlock;
// TODO: Implement realloc
this->free(Address);
return this->malloc(Size);
}
V1::V1(void *BaseVirtualAddress, bool UserMode, bool SMAPEnabled)
{
SmartSMAP;
Xalloc_lock;
this->SMAPUsed = SMAPEnabled;
this->UserMapping = UserMode;
this->BaseVirtualAddress = BaseVirtualAddress;
Xalloc_unlock;
}
V1::~V1()
{
SmartSMAP;
Xalloc_lock;
Xalloc_trace("Destructor not implemented yet.");
Xalloc_unlock;
}
}

View File

@@ -0,0 +1,679 @@
#include <memory.hpp>
#include <convert.h>
#include <lock.hpp>
#include <debug.h>
#ifdef DEBUG
#include <uart.hpp>
#endif
#include "HeapAllocators/Xalloc/Xalloc.hpp"
#include "../Library/liballoc_1_1.h"
#include "../../kernel.h"
// #define DEBUG_ALLOCATIONS_SL 1
// #define DEBUG_ALLOCATIONS 1
#ifdef DEBUG_ALLOCATIONS
#define memdbg(m, ...) \
debug(m, ##__VA_ARGS__); \
__sync
#else
#define memdbg(m, ...)
#endif
using namespace Memory;
#ifdef DEBUG_ALLOCATIONS_SL
NewLock(AllocatorLock);
NewLock(OperatorAllocatorLock);
#endif
Physical KernelAllocator;
PageTable4 *KernelPageTable = nullptr;
PageTable4 *UserspaceKernelOnlyPageTable = nullptr;
void *KPT = nullptr;
static MemoryAllocatorType AllocatorType = MemoryAllocatorType::None;
Xalloc::V1 *XallocV1Allocator = nullptr;
#ifdef DEBUG
NIF void tracepagetable(PageTable4 *pt)
{
for (int i = 0; i < 512; i++)
{
#if defined(a64)
if (pt->Entries[i].Present)
debug("Entry %03d: %x %x %x %x %x %x %x %p-%#llx", i,
pt->Entries[i].Present, pt->Entries[i].ReadWrite,
pt->Entries[i].UserSupervisor, pt->Entries[i].WriteThrough,
pt->Entries[i].CacheDisable, pt->Entries[i].Accessed,
pt->Entries[i].ExecuteDisable, pt->Entries[i].Address << 12,
pt->Entries[i]);
#elif defined(a32)
#elif defined(aa64)
#endif
}
}
#endif
NIF void MapFromZero(PageTable4 *PT, BootInfo *Info)
{
static int once = 0;
if (!once++)
{
Virtual va = Virtual(PT);
void *NullAddress = KernelAllocator.RequestPage();
memset(NullAddress, 0, PAGE_SIZE); // TODO: If the CPU instruction pointer hits this page, there should be a function to handle it. (memcpy assembly code?)
va.Map((void *)0, (void *)NullAddress, PTFlag::RW | PTFlag::US);
uintptr_t VirtualOffsetNormalVMA = NORMAL_VMA_OFFSET;
size_t MemSize = Info->Memory.Size;
for (size_t t = 0; t < MemSize; t += PAGE_SIZE)
{
va.Map((void *)t, (void *)t, PTFlag::RW /* | PTFlag::US */);
// va.Map((void *)VirtualOffsetNormalVMA, (void *)t, PTFlag::RW /* | PTFlag::US */);
// VirtualOffsetNormalVMA += PAGE_SIZE;
}
}
else
{
error("MapFromZero() called more than once!");
CPU::Stop();
}
}
NIF void MapFramebuffer(PageTable4 *PT, BootInfo *Info)
{
Virtual va = Virtual(PT);
int itrfb = 0;
while (1)
{
if (!Info->Framebuffer[itrfb].BaseAddress)
break;
for (uintptr_t fb_base = (uintptr_t)Info->Framebuffer[itrfb].BaseAddress;
fb_base < ((uintptr_t)Info->Framebuffer[itrfb].BaseAddress + ((Info->Framebuffer[itrfb].Pitch * Info->Framebuffer[itrfb].Height) + PAGE_SIZE));
fb_base += PAGE_SIZE)
va.Map((void *)fb_base, (void *)fb_base, PTFlag::RW | PTFlag::US | PTFlag::G);
itrfb++;
}
}
NIF void MapKernel(PageTable4 *PT, BootInfo *Info)
{
/* Kernel segment layout, in the order mapped below:
KernelStart -> KernelTextEnd (text) -> KernelDataEnd (data)
-> KernelRoDataEnd (rodata) -> KernelEnd (remaining data)
*/
Virtual va = Virtual(PT);
uintptr_t KernelStart = (uintptr_t)&_kernel_start;
uintptr_t KernelTextEnd = (uintptr_t)&_kernel_text_end;
uintptr_t KernelDataEnd = (uintptr_t)&_kernel_data_end;
uintptr_t KernelRoDataEnd = (uintptr_t)&_kernel_rodata_end;
uintptr_t KernelEnd = (uintptr_t)&_kernel_end;
uintptr_t BaseKernelMapAddress = (uintptr_t)Info->Kernel.PhysicalBase;
uintptr_t k;
for (k = KernelStart; k < KernelTextEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
for (k = KernelTextEnd; k < KernelDataEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
for (k = KernelDataEnd; k < KernelRoDataEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::P | PTFlag::G);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
for (k = KernelRoDataEnd; k < KernelEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
debug("\nStart: %#llx - Text End: %#llx - RoEnd: %#llx - End: %#llx\nStart Physical: %#llx - End Physical: %#llx",
KernelStart, KernelTextEnd, KernelRoDataEnd, KernelEnd, Info->Kernel.PhysicalBase, BaseKernelMapAddress - PAGE_SIZE);
}
NIF void InitializeMemoryManagement(BootInfo *Info)
{
#ifdef DEBUG
for (uint64_t i = 0; i < Info->Memory.Entries; i++)
{
uintptr_t Base = reinterpret_cast<uintptr_t>(Info->Memory.Entry[i].BaseAddress);
uintptr_t Length = Info->Memory.Entry[i].Length;
uintptr_t End = Base + Length;
const char *Type = "Unknown";
switch (Info->Memory.Entry[i].Type)
{
case likely(Usable):
Type = "Usable";
break;
case Reserved:
Type = "Reserved";
break;
case ACPIReclaimable:
Type = "ACPI Reclaimable";
break;
case ACPINVS:
Type = "ACPI NVS";
break;
case BadMemory:
Type = "Bad Memory";
break;
case BootloaderReclaimable:
Type = "Bootloader Reclaimable";
break;
case KernelAndModules:
Type = "Kernel and Modules";
break;
case Framebuffer:
Type = "Framebuffer";
break;
default:
break;
}
debug("%lld: %#016llx-%#016llx %s",
i,
Base,
End,
Type);
}
#endif
trace("Initializing Physical Memory Manager");
// KernelAllocator = Physical(); <- Already called in the constructor
KernelAllocator.Init(Info);
debug("Memory Info: %lldMB / %lldMB (%lldMB reserved)",
TO_MB(KernelAllocator.GetUsedMemory()),
TO_MB(KernelAllocator.GetTotalMemory()),
TO_MB(KernelAllocator.GetReservedMemory()));
AllocatorType = MemoryAllocatorType::Pages;
trace("Initializing Virtual Memory Manager");
KernelPageTable = (PageTable4 *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE));
memset(KernelPageTable, 0, PAGE_SIZE);
UserspaceKernelOnlyPageTable = (PageTable4 *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE));
memset(UserspaceKernelOnlyPageTable, 0, PAGE_SIZE);
debug("Mapping from 0x0 to %#llx", Info->Memory.Size);
MapFromZero(KernelPageTable, Info);
debug("Mapping from 0x0 %#llx for Userspace Page Table", Info->Memory.Size);
UserspaceKernelOnlyPageTable[0] = KernelPageTable[0];
/* Mapping Framebuffer address */
debug("Mapping Framebuffer");
MapFramebuffer(KernelPageTable, Info);
debug("Mapping Framebuffer for Userspace Page Table");
MapFramebuffer(UserspaceKernelOnlyPageTable, Info);
/* Kernel mapping */
debug("Mapping Kernel");
MapKernel(KernelPageTable, Info);
debug("Mapping Kernel for Userspace Page Table");
MapKernel(UserspaceKernelOnlyPageTable, Info);
trace("Applying new page table from address %p", KernelPageTable);
#ifdef DEBUG
debug("Kernel:");
tracepagetable(KernelPageTable);
debug("Userspace:");
tracepagetable(UserspaceKernelOnlyPageTable);
#endif
KPT = KernelPageTable;
#if defined(a64) || defined(a32)
asmv("mov %0, %%cr3" ::"r"(KPT));
#elif defined(aa64)
asmv("msr ttbr0_el1, %0" ::"r"(KPT));
#endif
debug("Page table updated.");
if (strstr(Info->Kernel.CommandLine, "xallocv1"))
{
XallocV1Allocator = new Xalloc::V1((void *)KERNEL_HEAP_BASE, false, false);
AllocatorType = MemoryAllocatorType::XallocV1;
trace("XallocV1 Allocator initialized (%p)", XallocV1Allocator);
}
else if (strstr(Info->Kernel.CommandLine, "liballoc11"))
{
AllocatorType = MemoryAllocatorType::liballoc11;
}
}
void *HeapMalloc(size_t Size)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(AllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("malloc(%d)->[%s]", Size, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
void *ret = nullptr;
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
{
ret = KernelAllocator.RequestPages(TO_PAGES(Size));
memset(ret, 0, Size);
break;
}
case MemoryAllocatorType::XallocV1:
{
ret = XallocV1Allocator->malloc(Size);
break;
}
case MemoryAllocatorType::liballoc11:
{
ret = PREFIX(malloc)(Size);
memset(ret, 0, Size);
break;
}
default:
throw;
}
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "malloc( %ld )=%p~%p\n\r",
Size,
ret, __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return ret;
}
void *HeapCalloc(size_t n, size_t Size)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(AllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("calloc(%d, %d)->[%s]", n, Size, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
void *ret = nullptr;
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
{
ret = KernelAllocator.RequestPages(TO_PAGES(n * Size));
memset(ret, 0, n * Size);
break;
}
case MemoryAllocatorType::XallocV1:
{
ret = XallocV1Allocator->calloc(n, Size);
break;
}
case MemoryAllocatorType::liballoc11:
{
/* calloc already returns zeroed memory; assign to the outer ret so the tracer below still runs. */
ret = PREFIX(calloc)(n, Size);
break;
}
default:
throw;
}
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "calloc( %ld %ld )=%p~%p\n\r",
n, Size,
ret, __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return ret;
}
void *HeapRealloc(void *Address, size_t Size)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(AllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("realloc(%#lx, %d)->[%s]", Address, Size, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
void *ret = nullptr;
switch (AllocatorType)
{
case unlikely(MemoryAllocatorType::Pages):
{
ret = KernelAllocator.RequestPages(TO_PAGES(Size)); // WARNING: Potential memory leak
memset(ret, 0, Size);
break;
}
case MemoryAllocatorType::XallocV1:
{
ret = XallocV1Allocator->realloc(Address, Size);
break;
}
case MemoryAllocatorType::liballoc11:
{
/* realloc preserves the old contents; zeroing the result here would corrupt them. */
ret = PREFIX(realloc)(Address, Size);
break;
}
default:
throw;
}
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "realloc( %p %ld )=%p~%p\n\r",
Address, Size,
ret, __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return ret;
}
void HeapFree(void *Address)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(AllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("free(%#lx)->[%s]", Address, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
switch (AllocatorType)
{
case unlikely(MemoryAllocatorType::Pages):
{
KernelAllocator.FreePage(Address); // WARNING: Potential memory leak
break;
}
case MemoryAllocatorType::XallocV1:
{
XallocV1Allocator->free(Address);
break;
}
case MemoryAllocatorType::liballoc11:
{
PREFIX(free)(Address);
break;
}
default:
throw;
}
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "free( %p )~%p\n\r",
Address,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
}
void *operator new(size_t Size)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("new(%d)->[%s]", Size, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
void *ret = HeapMalloc(Size);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "new( %ld )=%p~%p\n\r",
Size,
ret, __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return ret;
}
void *operator new[](size_t Size)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("new[](%d)->[%s]", Size, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
void *ret = HeapMalloc(Size);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "new[]( %ld )=%p~%p\n\r",
Size,
ret, __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return ret;
}
void *operator new(unsigned long Size, std::align_val_t Alignment)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("new(%d, %d)->[%s]", Size, Alignment, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
fixme("operator new with alignment(%#lx) is not implemented", Alignment);
void *ret = HeapMalloc(Size);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "new( %ld %#lx )=%p~%p\n\r",
Size, (uintptr_t)Alignment,
ret, __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return ret;
}
void operator delete(void *Pointer)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("delete(%#lx)->[%s]", Pointer, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
HeapFree(Pointer);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "delete( %p )~%p\n\r",
Pointer,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
}
void operator delete[](void *Pointer)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("delete[](%#lx)->[%s]", Pointer, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
HeapFree(Pointer);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "delete[]( %p )~%p\n\r",
Pointer,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
}
void operator delete(void *Pointer, long unsigned int Size)
{
UNUSED(Size);
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("delete(%#lx, %d)->[%s]", Pointer, Size, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
HeapFree(Pointer);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "delete( %p %ld )~%p\n\r",
Pointer, Size,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
}
void operator delete[](void *Pointer, long unsigned int Size)
{
UNUSED(Size);
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("delete[](%#lx, %d)->[%s]", Pointer, Size, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
HeapFree(Pointer);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "delete[]( %p %ld )~%p\n\r",
Pointer, Size,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
}

View File

@@ -0,0 +1,220 @@
#include <memory.hpp>
#include <debug.h>
#include "../../kernel.h"
namespace Memory
{
ReadFSFunction(MEM_Read)
{
if (!Size)
Size = node->Length;
if (Offset > node->Length)
return 0;
if (Offset + Size > node->Length)
Size = node->Length - Offset;
memcpy(Buffer, (uint8_t *)(node->Address + Offset), Size);
return Size;
}
WriteFSFunction(MEM_Write)
{
if (!Size)
Size = node->Length;
if (Offset > node->Length)
return 0;
if (Offset + Size > node->Length)
Size = node->Length - Offset;
memcpy((uint8_t *)(node->Address + Offset), Buffer, Size);
return Size;
}
VirtualFileSystem::FileSystemOperations mem_op = {
.Name = "mem",
.Read = MEM_Read,
.Write = MEM_Write,
};
uint64_t MemMgr::GetAllocatedMemorySize()
{
uint64_t Size = 0;
foreach (auto ap in AllocatedPagesList)
Size += ap.PageCount;
return FROM_PAGES(Size);
}
bool MemMgr::Add(void *Address, size_t Count)
{
if (Address == nullptr)
{
error("Address is null!");
return false;
}
if (Count == 0)
{
error("Count is 0!");
return false;
}
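/* Reject the new range if it duplicates or overlaps any tracked allocation. */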
for (size_t i = 0; i < AllocatedPagesList.size(); i++)
{
if (AllocatedPagesList[i].Address == Address)
{
error("Address already exists!");
return false;
}
else if ((uintptr_t)Address < (uintptr_t)AllocatedPagesList[i].Address)
{
if ((uintptr_t)Address + (Count * PAGE_SIZE) > (uintptr_t)AllocatedPagesList[i].Address)
{
error("Address intersects with an allocated page!");
return false;
}
}
else
{
if ((uintptr_t)AllocatedPagesList[i].Address + (AllocatedPagesList[i].PageCount * PAGE_SIZE) > (uintptr_t)Address)
{
error("Address intersects with an allocated page!");
return false;
}
}
}
if (this->Directory)
{
char FileName[64];
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, Count);
VirtualFileSystem::Node *n = vfs->Create(FileName, VirtualFileSystem::NodeFlags::FILE, this->Directory);
if (n)
{
n->Address = (uintptr_t)Address;
n->Length = Count * PAGE_SIZE;
n->Operator = &mem_op;
}
}
AllocatedPagesList.push_back({Address, Count});
return true;
}
void *MemMgr::RequestPages(size_t Count, bool User)
{
void *Address = KernelAllocator.RequestPages(Count);
for (size_t i = 0; i < Count; i++)
{
int Flags = Memory::PTFlag::RW;
if (User)
Flags |= Memory::PTFlag::US;
Memory::Virtual(this->PageTable).Remap((void *)((uintptr_t)Address + (i * PAGE_SIZE)), (void *)((uint64_t)Address + (i * PAGE_SIZE)), Flags);
}
if (this->Directory)
{
char FileName[64];
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, Count);
VirtualFileSystem::Node *n = vfs->Create(FileName, VirtualFileSystem::NodeFlags::FILE, this->Directory);
if (n) // If null, error or file already exists
{
n->Address = (uintptr_t)Address;
n->Length = Count * PAGE_SIZE;
n->Operator = &mem_op;
}
}
AllocatedPagesList.push_back({Address, Count});
/* For security reasons, we clear the memory
if the page is user accessible. */
if (User)
memset(Address, 0, Count * PAGE_SIZE);
return Address;
}
void MemMgr::FreePages(void *Address, size_t Count)
{
for (size_t i = 0; i < AllocatedPagesList.size(); i++)
if (AllocatedPagesList[i].Address == Address)
{
/** TODO: Advanced checks. Allow if the page count is less than the requested one.
* This will allow the user to free only a part of the allocated pages.
*
* But this will be in a separate function because we need to specify if we
* want to free from the start or from the end and return the new address.
*/
if (AllocatedPagesList[i].PageCount != Count)
{
error("Page count mismatch! (Allocated: %lld, Requested: %lld)", AllocatedPagesList[i].PageCount, Count);
return;
}
KernelAllocator.FreePages(Address, Count);
for (size_t i = 0; i < Count; i++)
Memory::Virtual(this->PageTable).Remap((void *)((uintptr_t)Address + (i * PAGE_SIZE)), (void *)((uint64_t)Address + (i * PAGE_SIZE)), Memory::PTFlag::RW);
// Memory::Virtual(this->PageTable).Unmap((void *)((uintptr_t)Address + (i * PAGE_SIZE)));
if (this->Directory)
{
char FileName[64];
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, Count);
VirtualFileSystem::FileStatus s = vfs->Delete(FileName, false, this->Directory);
if (s != VirtualFileSystem::FileStatus::OK)
error("Failed to delete file %s", FileName);
}
AllocatedPagesList.remove(i);
return;
}
}
void MemMgr::DetachAddress(void *Address)
{
for (size_t i = 0; i < AllocatedPagesList.size(); i++)
if (AllocatedPagesList[i].Address == Address)
{
if (this->Directory)
{
char FileName[64];
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, AllocatedPagesList[i].PageCount);
VirtualFileSystem::FileStatus s = vfs->Delete(FileName, false, this->Directory);
if (s != VirtualFileSystem::FileStatus::OK)
error("Failed to delete file %s", FileName);
}
AllocatedPagesList.remove(i);
return;
}
}
MemMgr::MemMgr(PageTable4 *PageTable, VirtualFileSystem::Node *Directory)
{
if (PageTable)
this->PageTable = PageTable;
else
#if defined(a64)
this->PageTable = (PageTable4 *)CPU::x64::readcr3().raw;
#elif defined(a32)
this->PageTable = (PageTable4 *)CPU::x32::readcr3().raw;
#endif
this->Directory = Directory;
debug("+ %#lx", this);
}
MemMgr::~MemMgr()
{
foreach (auto ap in AllocatedPagesList)
{
KernelAllocator.FreePages(ap.Address, ap.PageCount);
for (size_t i = 0; i < ap.PageCount; i++)
Memory::Virtual(this->PageTable).Remap((void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)), (void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)), Memory::PTFlag::RW);
}
if (this->Directory)
foreach (auto Child in this->Directory->Children)
vfs->Delete(Child, true);
debug("- %#lx", this);
}
}

View File

@@ -0,0 +1,28 @@
#include <memory.hpp>
namespace Memory
{
Virtual::PageMapIndexer::PageMapIndexer(uintptr_t VirtualAddress)
{
#if defined(a64)
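/* x86-64 4-level paging: bits 12-20 select the PTE, 21-29 the PDE,
30-38 the PDPTE and 39-47 the PML4 entry (9 bits each, 512 entries per table).
e.g. 0xFFFFA00000000000 -> PML4 320, PDPTE 0, PDE 0, PTE 0. */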
uintptr_t Address = VirtualAddress;
Address >>= 12;
this->PTEIndex = Address & 0x1FF;
Address >>= 9;
this->PDEIndex = Address & 0x1FF;
Address >>= 9;
this->PDPTEIndex = Address & 0x1FF;
Address >>= 9;
this->PMLIndex = Address & 0x1FF;
#elif defined(a32)
uintptr_t Address = VirtualAddress;
Address >>= 12;
this->PTEIndex = Address & 0x3FF;
Address >>= 10;
this->PDEIndex = Address & 0x3FF;
Address >>= 10;
this->PDPTEIndex = Address & 0x3FF;
#elif defined(aa64)
#endif
}
}

View File

@@ -0,0 +1,385 @@
#include <memory.hpp>
#include <debug.h>
#ifdef DEBUG
#include <uart.hpp>
#endif
#include "../../kernel.h"
extern "C" char BootPageTable[]; // 0x10000 in length
namespace Memory
{
uint64_t Physical::GetTotalMemory()
{
SmartLock(this->MemoryLock);
return this->TotalMemory;
}
uint64_t Physical::GetFreeMemory()
{
SmartLock(this->MemoryLock);
return this->FreeMemory;
}
uint64_t Physical::GetReservedMemory()
{
SmartLock(this->MemoryLock);
return this->ReservedMemory;
}
uint64_t Physical::GetUsedMemory()
{
SmartLock(this->MemoryLock);
return this->UsedMemory;
}
bool Physical::SwapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::SwapPages(void *Address, size_t PageCount)
{
for (size_t i = 0; i < PageCount; i++)
if (!this->SwapPage((void *)((uintptr_t)Address + (i * PAGE_SIZE))))
return false;
return false;
}
bool Physical::UnswapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::UnswapPages(void *Address, size_t PageCount)
{
for (size_t i = 0; i < PageCount; i++)
if (!this->UnswapPage((void *)((uintptr_t)Address + (i * PAGE_SIZE))))
return false;
return false;
}
void *Physical::RequestPage()
{
SmartLock(this->MemoryLock);
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
if (this->SwapPage((void *)(PageBitmapIndex * PAGE_SIZE)))
{
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "RequestPage( )=%p~%p\n\r",
(void *)(PageBitmapIndex * PAGE_SIZE), __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
error("Out of memory! (Free: %ldMB; Used: %ldMB; Reserved: %ldMB)", TO_MB(FreeMemory), TO_MB(UsedMemory), TO_MB(ReservedMemory));
CPU::Stop();
__builtin_unreachable();
}
void *Physical::RequestPages(size_t Count)
{
SmartLock(this->MemoryLock);
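/* First-fit scan: look for Count consecutive clear bits in the page bitmap. */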
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
for (uint64_t Index = PageBitmapIndex; Index < PageBitmap.Size * 8; Index++)
{
if (PageBitmap[Index] == true)
continue;
for (size_t i = 0; i < Count; i++)
if (PageBitmap[Index + i] == true)
goto NextPage;
this->LockPages((void *)(Index * PAGE_SIZE), Count);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "RequestPages( %ld )=%p~%p\n\r",
Count,
(void *)(Index * PAGE_SIZE), __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return (void *)(Index * PAGE_SIZE);
NextPage:
Index += Count;
continue;
}
}
if (this->SwapPages((void *)(PageBitmapIndex * PAGE_SIZE), Count))
{
this->LockPages((void *)(PageBitmapIndex * PAGE_SIZE), Count);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "RequestPages( %ld )=%p~%p\n\r",
Count,
(void *)(PageBitmapIndex * PAGE_SIZE), __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
error("Out of memory! (Free: %ldMB; Used: %ldMB; Reserved: %ldMB)", TO_MB(FreeMemory), TO_MB(UsedMemory), TO_MB(ReservedMemory));
CPU::Halt(true);
__builtin_unreachable();
}
void Physical::FreePage(void *Address)
{
SmartLock(this->MemoryLock);
if (unlikely(Address == nullptr))
{
warn("Null pointer passed to FreePage.");
return;
}
size_t Index = (size_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
return;
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
UsedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "FreePage( %p )~%p\n\r",
Address,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
}
void Physical::FreePages(void *Address, size_t Count)
{
if (unlikely(Address == nullptr || Count == 0))
{
warn("%s%s%s passed to FreePages.", Address == nullptr ? "Null pointer " : "", Address == nullptr && Count == 0 ? "and " : "", Count == 0 ? "Zero count" : "");
return;
}
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "!FreePages( %p %ld )~%p\n\r",
Address, Count,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
for (size_t t = 0; t < Count; t++)
this->FreePage((void *)((uintptr_t)Address + (t * PAGE_SIZE)));
}
void Physical::LockPage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to lock null address.");
uintptr_t Index = (uintptr_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
UsedMemory += PAGE_SIZE;
}
}
void Physical::LockPages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to lock %s%s.", Address == nullptr ? "null address" : "", PageCount == 0 ? "0 pages" : "");
for (size_t i = 0; i < PageCount; i++)
this->LockPage((void *)((uintptr_t)Address + (i * PAGE_SIZE)));
}
void Physical::ReservePage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to reserve null address.");
uintptr_t Index = (uintptr_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
ReservedMemory += PAGE_SIZE;
}
}
void Physical::ReservePages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to reserve %s%s.", Address == nullptr ? "null address" : "", PageCount == 0 ? "0 pages" : "");
for (size_t t = 0; t < PageCount; t++)
this->ReservePage((void *)((uintptr_t)Address + (t * PAGE_SIZE)));
}
void Physical::UnreservePage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to unreserve null address.");
uintptr_t Index = (uintptr_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
return;
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
ReservedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
void Physical::UnreservePages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to unreserve %s%s.", Address == nullptr ? "null address" : "", PageCount == 0 ? "0 pages" : "");
for (size_t t = 0; t < PageCount; t++)
this->UnreservePage((void *)((uintptr_t)Address + (t * PAGE_SIZE)));
}
void Physical::Init(BootInfo *Info)
{
SmartLock(this->MemoryLock);
void *LargestFreeMemorySegment = nullptr;
uint64_t LargestFreeMemorySegmentSize = 0;
uint64_t MemorySize = Info->Memory.Size;
for (uint64_t i = 0; i < Info->Memory.Entries; i++)
if (Info->Memory.Entry[i].Type == Usable)
if (Info->Memory.Entry[i].Length > LargestFreeMemorySegmentSize)
{
// We don't want to use 0 as a memory address.
if (Info->Memory.Entry[i].BaseAddress == nullptr)
continue;
LargestFreeMemorySegment = (void *)Info->Memory.Entry[i].BaseAddress;
LargestFreeMemorySegmentSize = Info->Memory.Entry[i].Length;
debug("Largest free memory segment: %llp (%lldMB)",
(void *)Info->Memory.Entry[i].BaseAddress,
TO_MB(Info->Memory.Entry[i].Length));
}
TotalMemory = MemorySize;
FreeMemory = MemorySize;
if (LargestFreeMemorySegment == nullptr)
{
error("No free memory found!");
CPU::Stop();
}
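/* One bit per page: e.g. 4 GiB of RAM -> 1048576 pages -> 131073 bytes of bitmap. */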
size_t BitmapSize = (MemorySize / PAGE_SIZE) / 8 + 1;
trace("Initializing Bitmap at %llp-%llp (%lld Bytes)",
LargestFreeMemorySegment,
(void *)((uintptr_t)LargestFreeMemorySegment + BitmapSize),
BitmapSize);
PageBitmap.Size = BitmapSize;
PageBitmap.Buffer = (uint8_t *)LargestFreeMemorySegment;
for (size_t i = 0; i < BitmapSize; i++)
*(uint8_t *)(PageBitmap.Buffer + i) = 0;
trace("Reserving pages...");
for (uint64_t i = 0; i < Info->Memory.Entries; i++)
if (Info->Memory.Entry[i].Type != Usable)
this->ReservePages((void *)Info->Memory.Entry[i].BaseAddress, Info->Memory.Entry[i].Length / PAGE_SIZE + 1);
this->ReservePages(0, 0x100);
this->ReservePages(BootPageTable, TO_PAGES(0x10000));
trace("Locking bitmap pages...");
this->LockPages(PageBitmap.Buffer, PageBitmap.Size / PAGE_SIZE + 1);
}
Physical::Physical() {}
Physical::~Physical() {}
}

View File

@@ -0,0 +1,83 @@
#include <memory.hpp>
#include <debug.h>
namespace Memory
{
StackGuard::StackGuard(bool User, PageTable4 *Table)
{
this->UserMode = User;
this->Table = Table;
if (this->UserMode)
{
void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
debug("AllocatedStack: %p", AllocatedStack);
memset(AllocatedStack, 0, USER_STACK_SIZE);
for (size_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
{
Virtual(Table).Map((void *)(USER_STACK_BASE + (i * PAGE_SIZE)),
(void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)),
PTFlag::RW | PTFlag::US);
debug("Mapped %p to %p", (void *)(USER_STACK_BASE + (i * PAGE_SIZE)),
(void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)));
}
this->StackBottom = (void *)USER_STACK_BASE;
this->StackTop = (void *)(USER_STACK_BASE + USER_STACK_SIZE);
this->StackPhyiscalBottom = AllocatedStack;
this->StackPhyiscalTop = (void *)((uintptr_t)AllocatedStack + USER_STACK_SIZE);
this->Size = USER_STACK_SIZE;
}
else
{
this->StackBottom = KernelAllocator.RequestPages(TO_PAGES(STACK_SIZE));
this->StackPhyiscalBottom = this->StackBottom;
debug("StackBottom: %p", this->StackBottom);
memset(this->StackBottom, 0, STACK_SIZE);
this->StackTop = (void *)((uintptr_t)this->StackBottom + STACK_SIZE);
this->StackPhyiscalTop = this->StackTop;
this->Size = STACK_SIZE;
}
trace("Allocated stack at %p", this->StackBottom);
}
StackGuard::~StackGuard()
{
fixme("Temporarily disabled stack guard deallocation");
// KernelAllocator.FreePages(this->StackBottom, TO_PAGES(this->Size));
// debug("Freed stack at %p", this->StackBottom);
}
bool StackGuard::Expand(uintptr_t FaultAddress)
{
if (this->UserMode)
{
if (FaultAddress < (uintptr_t)this->StackBottom - USER_STACK_SIZE ||
FaultAddress > (uintptr_t)this->StackTop)
{
return false; // It's not about the stack.
}
else
{
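/* Grow downward: allocate another USER_STACK_SIZE region and map it below the current stack bottom. */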
void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
debug("AllocatedStack: %p", AllocatedStack);
memset(AllocatedStack, 0, USER_STACK_SIZE);
for (uintptr_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
{
Virtual(this->Table).Map((void *)((uintptr_t)this->StackBottom - (i * PAGE_SIZE)), (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)), PTFlag::RW | PTFlag::US);
debug("Mapped %p to %p", (void *)((uintptr_t)this->StackBottom - (i * PAGE_SIZE)), (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)));
}
this->StackBottom = (void *)((uintptr_t)this->StackBottom - USER_STACK_SIZE);
this->Size += USER_STACK_SIZE;
info("Stack expanded to %p", this->StackBottom);
return true;
}
}
else
{
fixme("Not implemented and probably not needed");
return false;
}
error("Reached end of function! How?");
return false;
}
}

View File

@@ -0,0 +1,219 @@
#include <memory.hpp>
#include <convert.h>
#include <debug.h>
namespace Memory
{
bool Virtual::Check(void *VirtualAddress, PTFlag Flag)
{
/* Align the address down to a 4 KiB (0x1000) page boundary. */
uintptr_t Address = (uintptr_t)VirtualAddress;
Address &= 0xFFFFFFFFFFFFF000;
PageMapIndexer Index = PageMapIndexer(Address);
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTE = nullptr;
PageDirectoryEntryPtr *PDE = nullptr;
PageTableEntryPtr *PTE = nullptr;
if ((PML4.raw & Flag) > 0)
{
PDPTE = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
if (PDPTE)
if ((PDPTE->Entries[Index.PDPTEIndex].Present))
{
PDE = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
if (PDE)
if ((PDE->Entries[Index.PDEIndex].Present))
{
PTE = (PageTableEntryPtr *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
if (PTE)
if ((PTE->Entries[Index.PTEIndex].Present))
{
return true;
}
}
}
}
return false;
}
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
{
SmartLock(this->MemoryLock);
if (unlikely(!this->Table))
{
error("No page table");
return;
}
PageMapIndexer Index = PageMapIndexer((uintptr_t)VirtualAddress);
// Keep only bits 0 (Present) through 5 (Accessed); the remaining flags apply to page table entries only.
uint64_t DirectoryFlags = Flags & 0x3F;
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTEPtr = nullptr;
if (!PML4.Present)
{
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)KernelAllocator.RequestPage();
memset(PDPTEPtr, 0, PAGE_SIZE);
PML4.Present = true;
PML4.SetAddress((uintptr_t)PDPTEPtr >> 12);
}
else
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
PML4.raw |= DirectoryFlags;
this->Table->Entries[Index.PMLIndex] = PML4;
PageDirectoryPointerTableEntry PDPTE = PDPTEPtr->Entries[Index.PDPTEIndex];
PageDirectoryEntryPtr *PDEPtr = nullptr;
if (!PDPTE.Present)
{
PDEPtr = (PageDirectoryEntryPtr *)KernelAllocator.RequestPage();
memset(PDEPtr, 0, PAGE_SIZE);
PDPTE.Present = true;
PDPTE.SetAddress((uintptr_t)PDEPtr >> 12);
}
else
PDEPtr = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE.GetAddress() << 12);
PDPTE.raw |= DirectoryFlags;
PDPTEPtr->Entries[Index.PDPTEIndex] = PDPTE;
PageDirectoryEntry PDE = PDEPtr->Entries[Index.PDEIndex];
PageTableEntryPtr *PTEPtr = nullptr;
if (!PDE.Present)
{
PTEPtr = (PageTableEntryPtr *)KernelAllocator.RequestPage();
memset(PTEPtr, 0, PAGE_SIZE);
PDE.Present = true;
PDE.SetAddress((uintptr_t)PTEPtr >> 12);
}
else
PTEPtr = (PageTableEntryPtr *)((uintptr_t)PDE.GetAddress() << 12);
PDE.raw |= DirectoryFlags;
PDEPtr->Entries[Index.PDEIndex] = PDE;
PageTableEntry PTE = PTEPtr->Entries[Index.PTEIndex];
PTE.Present = true;
PTE.raw |= Flags;
PTE.SetAddress((uintptr_t)PhysicalAddress >> 12);
PTEPtr->Entries[Index.PTEIndex] = PTE;
#if defined(a64)
CPU::x64::invlpg(VirtualAddress);
#elif defined(a32)
CPU::x32::invlpg(VirtualAddress);
#elif defined(aa64)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
#ifdef DEBUG
/* https://stackoverflow.com/a/3208376/9352057 */
#define BYTE_TO_BINARY_PATTERN "%c%c%c%c%c%c%c%c"
#define BYTE_TO_BINARY(byte) \
(byte & 0x80 ? '1' : '0'), \
(byte & 0x40 ? '1' : '0'), \
(byte & 0x20 ? '1' : '0'), \
(byte & 0x10 ? '1' : '0'), \
(byte & 0x08 ? '1' : '0'), \
(byte & 0x04 ? '1' : '0'), \
(byte & 0x02 ? '1' : '0'), \
(byte & 0x01 ? '1' : '0')
if (!this->Check(VirtualAddress, (PTFlag)Flags)) // quick workaround just to see where it fails
warn("Failed to map %#lx - %#lx with flags: " BYTE_TO_BINARY_PATTERN, VirtualAddress, PhysicalAddress, BYTE_TO_BINARY(Flags));
#endif
}
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, size_t PageCount, uint64_t Flags)
{
for (size_t i = 0; i < PageCount; i++)
this->Map((void *)((uintptr_t)VirtualAddress + (i * PAGE_SIZE)), (void *)((uintptr_t)PhysicalAddress + (i * PAGE_SIZE)), Flags);
}
void Virtual::Unmap(void *VirtualAddress)
{
SmartLock(this->MemoryLock);
if (!this->Table)
{
error("No page table");
return;
}
PageMapIndexer Index = PageMapIndexer((uintptr_t)VirtualAddress);
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
if (!PML4.Present)
{
error("Page not present");
return;
}
PageDirectoryPointerTableEntryPtr *PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.Address << 12);
PageDirectoryPointerTableEntry PDPTE = PDPTEPtr->Entries[Index.PDPTEIndex];
if (!PDPTE.Present)
{
error("Page not present");
return;
}
PageDirectoryEntryPtr *PDEPtr = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE.Address << 12);
PageDirectoryEntry PDE = PDEPtr->Entries[Index.PDEIndex];
if (!PDE.Present)
{
error("Page not present");
return;
}
PageTableEntryPtr *PTEPtr = (PageTableEntryPtr *)((uintptr_t)PDE.Address << 12);
PageTableEntry PTE = PTEPtr->Entries[Index.PTEIndex];
if (!PTE.Present)
{
error("Page not present");
return;
}
PTE.Present = false;
PTEPtr->Entries[Index.PTEIndex] = PTE;
#if defined(a64)
CPU::x64::invlpg(VirtualAddress);
#elif defined(a32)
CPU::x32::invlpg(VirtualAddress);
#elif defined(aa64)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
}
void Virtual::Unmap(void *VirtualAddress, size_t PageCount)
{
for (size_t i = 0; i < PageCount; i++)
this->Unmap((void *)((uintptr_t)VirtualAddress + (i * PAGE_SIZE)));
}
void Virtual::Remap(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
{
this->Unmap(VirtualAddress);
this->Map(VirtualAddress, PhysicalAddress, Flags);
}
Virtual::Virtual(PageTable4 *Table)
{
if (Table)
this->Table = Table;
else
this->Table = (PageTable4 *)CPU::PageTable();
}
Virtual::~Virtual() {}
}