Kernel/multiboot2_64

This commit is contained in:
EnderIce2
2024-11-20 05:16:57 +02:00
parent b4c0a78c6c
commit c4e458fcc6
189 changed files with 0 additions and 37283 deletions


@@ -1,285 +0,0 @@
#include "Xalloc.hpp"
namespace Xalloc
{
class XLockClass
{
struct SpinLockData
{
uint64_t LockData = 0x0;
const char *CurrentHolder = "(nul)";
const char *AttemptingToGet = "(nul)";
uint64_t Count = 0;
};
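/* Reports a lock that has been spinning for too long, naming the current holder and the waiter. */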
void DeadLock(SpinLockData Lock)
{
Xalloc_warn("Potential deadlock in lock '%s' held by '%s'! %ld locks in queue.", Lock.AttemptingToGet, Lock.CurrentHolder, Lock.Count);
}
private:
SpinLockData LockData;
bool IsLocked = false;
public:
int Lock(const char *FunctionName)
{
LockData.AttemptingToGet = FunctionName;
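/* Bounded spin: if the lock is still held after ~2^28 iterations, report a potential deadlock and retry. */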
Retry:
unsigned int i = 0;
while (__atomic_exchange_n(&IsLocked, true, __ATOMIC_ACQUIRE) && ++i < 0x10000000)
;
if (i >= 0x10000000)
{
DeadLock(LockData);
goto Retry;
}
LockData.Count++;
LockData.CurrentHolder = FunctionName;
__sync_synchronize();
return 0;
}
int Unlock()
{
__sync_synchronize();
LockData.Count--; /* update the bookkeeping before the lock becomes visible as free */
__atomic_store_n(&IsLocked, false, __ATOMIC_RELEASE);
return 0;
}
};
class XSmartLock
{
private:
XLockClass *LockPointer = nullptr;
public:
XSmartLock(XLockClass &Lock, const char *FunctionName)
{
this->LockPointer = &Lock;
this->LockPointer->Lock(FunctionName);
}
~XSmartLock() { this->LockPointer->Unlock(); }
};
XLockClass XLock;
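/* Declares a scoped lock with a unique name; it is held until the end of the enclosing scope. */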
#define XSL XSmartLock XALLOC_CONCAT(lock_, __COUNTER__)(XLock, __FUNCTION__)
class SmartSMAPClass
{
private:
AllocatorV1 *allocator = nullptr;
public:
SmartSMAPClass(AllocatorV1 *allocator)
{
this->allocator = allocator;
this->allocator->Xstac();
}
~SmartSMAPClass() { this->allocator->Xclac(); }
};
#define SmartSMAP SmartSMAPClass XALLOC_CONCAT(SmartSMAP_, __COUNTER__)(this)
AllocatorV1::AllocatorV1(void *Address, bool UserMode, bool SMAPEnabled)
{
SmartSMAP;
XSL;
void *Position = Address;
UserMapping = UserMode;
SMAPUsed = SMAPEnabled;
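/* Pre-map a window of pages so the heap can hand out memory before its first expansion. */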
for (Xuint64_t i = 0; i < 0x20; i++)
{
void *Page = Xalloc_REQUEST_PAGE();
if (UserMapping)
Xalloc_MAP_MEMORY(Position, Page, Xalloc_MAP_MEMORY_READ_WRITE | Xalloc_MAP_MEMORY_USER);
else
Xalloc_MAP_MEMORY(Position, Page, Xalloc_MAP_MEMORY_READ_WRITE);
Xalloc_trace("Preallocate Heap Memory (%#llx-%#llx [%#llx])...", Position, (Xuint64_t)Position + Xalloc_PAGE_SIZE, Page);
Position = (void *)((Xuint64_t)Position + Xalloc_PAGE_SIZE);
}
Xuint64_t HeapLength = 0x20 * Xalloc_PAGE_SIZE; /* cover all of the pages mapped above */
this->HeapStart = Address;
this->HeapEnd = (void *)((Xuint64_t)this->HeapStart + HeapLength);
HeapSegment *StartSegment = (HeapSegment *)Address;
StartSegment->Length = HeapLength - sizeof(HeapSegment);
StartSegment->Next = nullptr;
StartSegment->Last = nullptr;
StartSegment->IsFree = true;
this->LastSegment = StartSegment;
}
AllocatorV1::~AllocatorV1()
{
SmartSMAP;
XSL;
Xalloc_trace("Destructor not implemented yet.");
}
void AllocatorV1::ExpandHeap(Xuint64_t Length)
{
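/* Round the requested length up to a whole number of pages. */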
if (Length % Xalloc_PAGE_SIZE)
{
Length -= Length % Xalloc_PAGE_SIZE;
Length += Xalloc_PAGE_SIZE;
}
Xuint64_t PageCount = Length / Xalloc_PAGE_SIZE;
HeapSegment *NewSegment = (HeapSegment *)this->HeapEnd;
for (Xuint64_t i = 0; i < PageCount; i++)
{
void *Page = Xalloc_REQUEST_PAGE();
if (UserMapping)
Xalloc_MAP_MEMORY(this->HeapEnd, Page, Xalloc_MAP_MEMORY_READ_WRITE | Xalloc_MAP_MEMORY_USER);
else
Xalloc_MAP_MEMORY(this->HeapEnd, Page, Xalloc_MAP_MEMORY_READ_WRITE);
// Xalloc_trace("Expanding Heap Memory (%#llx-%#llx [%#llx])...", this->HeapEnd, (Xuint64_t)this->HeapEnd + Xalloc_PAGE_SIZE, Page);
this->HeapEnd = (void *)((Xuint64_t)this->HeapEnd + Xalloc_PAGE_SIZE);
}
NewSegment->IsFree = true;
NewSegment->Last = this->LastSegment;
this->LastSegment->Next = NewSegment;
this->LastSegment = NewSegment;
NewSegment->Next = nullptr;
NewSegment->Length = Length - sizeof(HeapSegment);
NewSegment->CombineBackward(this->LastSegment);
}
void *AllocatorV1::Malloc(Xuint64_t Size)
{
SmartSMAP;
XSL;
if (this->HeapStart == nullptr)
{
Xalloc_err("Memory allocation not initialized yet!");
return 0;
}
if (Size < 0x10)
{
// Xalloc_warn("Allocation size is too small, using 0x10 instead!");
Size = 0x10;
}
// #ifdef DEBUG
// if (Size < 1024)
// debug("Allocating %dB", Size);
// else if (TO_KB(Size) < 1024)
// debug("Allocating %dKB", TO_KB(Size));
// else if (TO_MB(Size) < 1024)
// debug("Allocating %dMB", TO_MB(Size));
// else if (TO_GB(Size) < 1024)
// debug("Allocating %dGB", TO_GB(Size));
// #endif
if (Size % 0x10 > 0) // it is not a multiple of 0x10
{
Size -= (Size % 0x10);
Size += 0x10;
}
if (Size == 0)
{
return nullptr;
}
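/* First-fit walk over the segment list. */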
HeapSegment *CurrentSegment = (HeapSegment *)this->HeapStart;
while (true)
{
if (CurrentSegment->IsFree)
{
if (CurrentSegment->Length > Size)
{
CurrentSegment->Split(Size, this->LastSegment);
CurrentSegment->IsFree = false;
return (void *)((Xuint64_t)CurrentSegment + sizeof(HeapSegment));
}
if (CurrentSegment->Length == Size)
{
CurrentSegment->IsFree = false;
return (void *)((Xuint64_t)CurrentSegment + sizeof(HeapSegment));
}
}
if (CurrentSegment->Next == nullptr)
break;
CurrentSegment = CurrentSegment->Next;
}
/* The heap grew; retry with the lock released so the nested call can take it. */
ExpandHeap(Size);
XLock.Unlock();
void *Result = this->Malloc(Size);
XLock.Lock(__FUNCTION__);
return Result;
}
void AllocatorV1::Free(void *Address)
{
SmartSMAP;
XSL;
if (this->HeapStart == nullptr)
{
Xalloc_err("Memory allocation not initialized yet!");
return;
}
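/* The segment header lives immediately before the address handed to the caller. */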
HeapSegment *Segment = (HeapSegment *)Address - 1;
Segment->IsFree = true;
Segment->CombineForward(this->LastSegment);
Segment->CombineBackward(this->LastSegment);
}
void *AllocatorV1::Calloc(Xuint64_t NumberOfBlocks, Xuint64_t Size)
{
SmartSMAP;
XSL;
if (this->HeapStart == nullptr)
{
Xalloc_err("Memory allocation not initialized yet!");
return 0;
}
if (Size < 0x10)
{
// Xalloc_warn("Allocation size is too small, using 0x10 instead!");
Size = 0x10;
}
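/* XLock is not recursive, so drop it around the nested Malloc call. */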
XLock.Unlock();
void *Block = this->Malloc(NumberOfBlocks * Size);
XLock.Lock(__FUNCTION__);
if (Block)
Xmemset(Block, 0, NumberOfBlocks * Size);
return Block;
}
void *AllocatorV1::Realloc(void *Address, Xuint64_t Size)
{
SmartSMAP;
XSL;
if (this->HeapStart == nullptr)
{
Xalloc_err("Memory allocation not initialized yet!");
return 0;
}
if (Address && Size == 0)
{
XLock.Unlock();
this->Free(Address);
XLock.Lock(__FUNCTION__);
return nullptr;
}
else if (!Address)
{
XLock.Unlock();
void *ret = this->Calloc(Size, sizeof(char));
XLock.Lock(__FUNCTION__);
return ret;
}
if (Size < 0x10)
{
// Xalloc_warn("Allocation size is too small, using 0x10 instead!");
Size = 0x10;
}
XLock.Unlock();
void *newAddress = this->Calloc(Size, sizeof(char));
XLock.Lock(__FUNCTION__);
if (newAddress)
Xmemcpy(newAddress, Address, Size); /* NOTE: reads Size bytes even if the old block was smaller */
XLock.Unlock();
this->Free(Address); /* release the old block so the reallocation does not leak it */
XLock.Lock(__FUNCTION__);
return newAddress;
}
}


@@ -1,180 +0,0 @@
#pragma once
#include <memory.hpp>
#include <debug.h>
// Functions defines
// Page allocation functions
#define Xalloc_REQUEST_PAGE() KernelAllocator.RequestPage()
#define Xalloc_REQUEST_PAGES(Pages) KernelAllocator.RequestPages(Pages)
#define Xalloc_FREE_PAGE(Address) KernelAllocator.FreePage(Address)
#define Xalloc_FREE_PAGES(Address, Pages) KernelAllocator.FreePages(Address, Pages)
#define Xalloc_MAP_MEMORY(VirtualAddress, PhysicalAddress, Flags) Memory::Virtual(KernelPageTable).Map(VirtualAddress, PhysicalAddress, Flags)
#define Xalloc_UNMAP_MEMORY(VirtualAddress) Memory::Virtual(KernelPageTable).Unmap(VirtualAddress)
#define Xalloc_MAP_MEMORY_READ_WRITE Memory::PTFlag::RW
#define Xalloc_MAP_MEMORY_USER Memory::PTFlag::US
#define Xalloc_PAGE_SIZE PAGE_SIZE
#define Xalloc_trace(m, ...) trace(m, ##__VA_ARGS__)
#define Xalloc_warn(m, ...) warn(m, ##__VA_ARGS__)
#define Xalloc_err(m, ...) error(m, ##__VA_ARGS__)
#define XALLOC_CONCAT_(x, y) x##y
#define XALLOC_CONCAT(x, y) XALLOC_CONCAT_(x, y) /* extra level so __COUNTER__ expands before pasting */
typedef long unsigned Xuint64_t;
namespace Xalloc
{
class AllocatorV1
{
private:
struct HeapSegment
{
Xuint64_t Length;
HeapSegment *Next;
HeapSegment *Last;
bool IsFree;
HeapSegment *Split(Xuint64_t SplitLength, HeapSegment *&LastSegment) /* by reference so tail updates reach the allocator */
{
if (SplitLength < 0x10)
return nullptr;
int64_t SplitSegmentLength = Length - SplitLength - (sizeof(HeapSegment));
if (SplitSegmentLength < 0x10)
return nullptr;
HeapSegment *NewSplitHdr = (HeapSegment *)((Xuint64_t)this + SplitLength + sizeof(HeapSegment));
if (Next != nullptr)
Next->Last = NewSplitHdr;
NewSplitHdr->Next = Next;
Next = NewSplitHdr;
NewSplitHdr->Last = this;
NewSplitHdr->Length = SplitSegmentLength;
NewSplitHdr->IsFree = IsFree;
Length = SplitLength;
if (LastSegment == this)
LastSegment = NewSplitHdr;
return NewSplitHdr;
}
void CombineForward(HeapSegment *&LastSegment)
{
if (Next == nullptr)
return;
if (Next->IsFree == false)
return;
if (Next == LastSegment)
LastSegment = this;
if (Next->Next != nullptr)
Next->Next->Last = this;
Length = Length + Next->Length + sizeof(HeapSegment);
Next = Next->Next;
}
void CombineBackward(HeapSegment *&LastSegment)
{
if (Last != nullptr && Last->IsFree)
Last->CombineForward(LastSegment);
}
} __attribute__((aligned(16)));
void *HeapStart = nullptr;
void *HeapEnd = nullptr;
HeapSegment *LastSegment = nullptr;
bool UserMapping = false;
bool SMAPUsed = false;
void ExpandHeap(Xuint64_t Length);
// TODO: Change memcpy with an optimized version
static inline void *Xmemcpy(void *__restrict__ Destination, const void *__restrict__ Source, Xuint64_t Length)
{
unsigned char *dst = (unsigned char *)Destination;
const unsigned char *src = (const unsigned char *)Source;
for (Xuint64_t i = 0; i < Length; i++)
dst[i] = src[i];
return Destination;
}
// TODO: Change memset with an optimized version
static inline void *Xmemset(void *__restrict__ Destination, int Data, Xuint64_t Length)
{
unsigned char *Buffer = (unsigned char *)Destination;
for (Xuint64_t i = 0; i < Length; i++)
Buffer[i] = (unsigned char)Data;
return Destination;
}
public:
inline void Xstac()
{
if (this->SMAPUsed)
{
#if defined(__amd64__) || defined(__i386__)
asm volatile("stac" ::
: "cc");
#endif
}
}
inline void Xclac()
{
if (this->SMAPUsed)
{
#if defined(__amd64__) || defined(__i386__)
asm volatile("clac" ::
: "cc");
#endif
}
}
/**
* @brief Construct a new Allocator V1 object
*
* @param Address Virtual address to allocate.
* @param UserMode Map the new pages with USER flag?
* @param SMAPEnabled Does the kernel have Supervisor Mode Access Prevention (SMAP) enabled?
*/
AllocatorV1(void *Address, bool UserMode, bool SMAPEnabled);
/**
* @brief Destroy the AllocatorV1 object
*
*/
~AllocatorV1();
/**
* @brief Allocate a new memory block
*
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated block.
*/
void *Malloc(Xuint64_t Size);
/**
* @brief Free a previously allocated block
*
* @param Address Address of the block to free.
*/
void Free(void *Address);
/**
* @brief Allocate a new memory block
*
* @param NumberOfBlocks Number of blocks to allocate.
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated block.
*/
void *Calloc(Xuint64_t NumberOfBlocks, Xuint64_t Size);
/**
* @brief Reallocate a previously allocated block
*
* @param Address Address of the block to reallocate.
* @param Size New size of the block.
* @return void* Pointer to the reallocated block.
*/
void *Realloc(void *Address, Xuint64_t Size);
};
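/* Usage sketch (hypothetical heap base; assumes the macros above are wired to a
   working physical allocator and page table):
     Xalloc::AllocatorV1 heap((void *)0xFFFFA00000000000, false, false);
     void *p = heap.Malloc(128);   // returns a 16-byte-aligned block
     p = heap.Realloc(p, 256);     // grows the block, preserving its contents
     heap.Free(p);                 // returns it to the free list
*/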
}


@@ -1,340 +0,0 @@
#include <memory.hpp>
#include <convert.h>
#include <debug.h>
#include "HeapAllocators/Xalloc.hpp"
#include "../Library/liballoc_1_1.h"
using namespace Memory;
Physical KernelAllocator;
PageTable4 *KernelPageTable = nullptr;
PageTable4 *UserspaceKernelOnlyPageTable = nullptr;
static MemoryAllocatorType AllocatorType = MemoryAllocatorType::None;
Xalloc::AllocatorV1 *XallocV1Allocator = nullptr;
#ifdef DEBUG
__no_instrument_function void tracepagetable(PageTable4 *pt)
{
for (int i = 0; i < 512; i++)
{
#if defined(__amd64__)
if (pt->Entries[i].Present)
debug("Entry %03d: %x %x %x %x %x %x %x %p-%#llx", i,
pt->Entries[i].Present, pt->Entries[i].ReadWrite,
pt->Entries[i].UserSupervisor, pt->Entries[i].WriteThrough,
pt->Entries[i].CacheDisable, pt->Entries[i].Accessed,
pt->Entries[i].ExecuteDisable, pt->Entries[i].Address << 12,
pt->Entries[i].raw);
#elif defined(__i386__)
#elif defined(__aarch64__)
#endif
}
}
#endif
__no_instrument_function void MapFromZero(PageTable4 *PT, BootInfo *Info)
{
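/* Identity-map all detected physical memory and mirror it at NORMAL_VMA_OFFSET. */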
Virtual va = Virtual(PT);
uint64_t VirtualOffsetNormalVMA = NORMAL_VMA_OFFSET;
uint64_t MemSize = Info->Memory.Size;
for (uint64_t t = 0; t < MemSize; t += PAGE_SIZE)
{
va.Map((void *)t, (void *)t, PTFlag::RW /* | PTFlag::US */);
va.Map((void *)VirtualOffsetNormalVMA, (void *)t, PTFlag::RW /* | PTFlag::US */);
VirtualOffsetNormalVMA += PAGE_SIZE;
}
}
__no_instrument_function void MapFramebuffer(PageTable4 *PT, BootInfo *Info)
{
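/* Map every framebuffer reported by the bootloader into the higher half, plus one extra page of slack. */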
Virtual va = Virtual(PT);
int itrfb = 0;
while (1)
{
if (!Info->Framebuffer[itrfb].BaseAddress)
break;
for (uint64_t fb_base = (uint64_t)Info->Framebuffer[itrfb].BaseAddress;
fb_base < ((uint64_t)Info->Framebuffer[itrfb].BaseAddress + ((Info->Framebuffer[itrfb].Pitch * Info->Framebuffer[itrfb].Height) + PAGE_SIZE));
fb_base += PAGE_SIZE)
va.Map((void *)(fb_base + NORMAL_VMA_OFFSET), (void *)fb_base, PTFlag::RW | PTFlag::US | PTFlag::G);
itrfb++;
}
}
__no_instrument_function void MapKernel(PageTable4 *PT, BootInfo *Info)
{
/* Maps the kernel image section by section, matching the loop bounds below:
[KernelStart, KernelTextEnd) text, [KernelTextEnd, KernelDataEnd) data,
[KernelDataEnd, KernelRoDataEnd) read-only data, [KernelRoDataEnd, KernelEnd) the rest. */
Virtual va = Virtual(PT);
uint64_t KernelStart = (uint64_t)&_kernel_start;
uint64_t KernelTextEnd = (uint64_t)&_kernel_text_end;
uint64_t KernelDataEnd = (uint64_t)&_kernel_data_end;
uint64_t KernelRoDataEnd = (uint64_t)&_kernel_rodata_end;
uint64_t KernelEnd = (uint64_t)&_kernel_end;
uint64_t BaseKernelMapAddress = (uint64_t)Info->Kernel.PhysicalBase;
uint64_t k;
for (k = KernelStart; k < KernelTextEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
for (k = KernelTextEnd; k < KernelDataEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
for (k = KernelDataEnd; k < KernelRoDataEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::P | PTFlag::G);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
for (k = KernelRoDataEnd; k < KernelEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
debug("\nStart: %#llx - Text End: %#llx - RoEnd: %#llx - End: %#llx\nStart Physical: %#llx - End Physical: %#llx",
KernelStart, KernelTextEnd, KernelRoDataEnd, KernelEnd, Info->Kernel.PhysicalBase, BaseKernelMapAddress - PAGE_SIZE);
}
__no_instrument_function void InitializeMemoryManagement(BootInfo *Info)
{
#ifdef DEBUG
for (uint64_t i = 0; i < Info->Memory.Entries; i++)
{
uint64_t Base = reinterpret_cast<uint64_t>(Info->Memory.Entry[i].BaseAddress);
uint64_t Length = Info->Memory.Entry[i].Length;
uint64_t End = Base + Length;
const char *Type = "Unknown";
switch (Info->Memory.Entry[i].Type)
{
case Usable:
Type = "Usable";
break;
case Reserved:
Type = "Reserved";
break;
case ACPIReclaimable:
Type = "ACPI Reclaimable";
break;
case ACPINVS:
Type = "ACPI NVS";
break;
case BadMemory:
Type = "Bad Memory";
break;
case BootloaderReclaimable:
Type = "Bootloader Reclaimable";
break;
case KernelAndModules:
Type = "Kernel and Modules";
break;
case Framebuffer:
Type = "Framebuffer";
break;
default:
break;
}
debug("%lld: %#016llx-%#016llx %s",
i,
Base,
End,
Type);
}
#endif
trace("Initializing Physical Memory Manager");
KernelAllocator = Physical();
KernelAllocator.Init(Info);
debug("Memory Info: %lldMB / %lldMB (%lldMB reserved)",
TO_MB(KernelAllocator.GetUsedMemory()),
TO_MB(KernelAllocator.GetTotalMemory()),
TO_MB(KernelAllocator.GetReservedMemory()));
AllocatorType = MemoryAllocatorType::Pages;
trace("Initializing Virtual Memory Manager");
KernelPageTable = (PageTable4 *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE));
memset(KernelPageTable, 0, PAGE_SIZE);
UserspaceKernelOnlyPageTable = (PageTable4 *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE));
memset(UserspaceKernelOnlyPageTable, 0, PAGE_SIZE);
debug("Mapping from 0x0 to %#llx", Info->Memory.Size);
MapFromZero(KernelPageTable, Info);
debug("Mapping from 0x0 %#llx for Userspace Page Table", Info->Memory.Size);
UserspaceKernelOnlyPageTable[0] = KernelPageTable[0]; // TODO: This is a hack to speed up the boot process
// MapFromZero(UserspaceKernelOnlyPageTable, Info);
/* Mapping Framebuffer address */
debug("Mapping Framebuffer");
MapFramebuffer(KernelPageTable, Info);
debug("Mapping Framebuffer for Userspace Page Table");
MapFramebuffer(UserspaceKernelOnlyPageTable, Info);
/* Kernel mapping */
debug("Mapping Kernel");
MapKernel(KernelPageTable, Info);
debug("Mapping Kernel for Userspace Page Table");
MapKernel(UserspaceKernelOnlyPageTable, Info);
trace("Applying new page table from address %p", KernelPageTable);
#ifdef DEBUG
debug("Kernel:");
tracepagetable(KernelPageTable);
debug("Userspace:");
tracepagetable(UserspaceKernelOnlyPageTable);
#endif
#if defined(__amd64__) || defined(__i386__)
asmv("mov %0, %%cr3" ::"r"(KernelPageTable));
#elif defined(__aarch64__)
asmv("msr ttbr0_el1, %0" ::"r"(KernelPageTable));
#endif
debug("Page table updated.");
if (strstr(Info->Kernel.CommandLine, "xallocv1"))
{
XallocV1Allocator = new Xalloc::AllocatorV1((void *)KERNEL_HEAP_BASE, false, false);
AllocatorType = MemoryAllocatorType::XallocV1;
trace("XallocV1 Allocator initialized (%p)", XallocV1Allocator);
}
else if (strstr(Info->Kernel.CommandLine, "liballoc11"))
{
AllocatorType = MemoryAllocatorType::liballoc11;
}
}
void *HeapMalloc(uint64_t Size)
{
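/* Dispatch to the allocator selected at boot; Pages falls back to raw page requests. */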
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
return KernelAllocator.RequestPages(TO_PAGES(Size));
case MemoryAllocatorType::XallocV1:
{
void *ret = XallocV1Allocator->Malloc(Size);
memset(ret, 0, Size);
return ret;
}
case MemoryAllocatorType::liballoc11:
{
void *ret = PREFIX(malloc)(Size);
memset(ret, 0, Size);
return ret;
}
default:
throw;
}
}
void *HeapCalloc(uint64_t n, uint64_t Size)
{
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
{
void *ret = KernelAllocator.RequestPages(TO_PAGES(n * Size));
memset(ret, 0, n * Size); /* calloc must return zeroed memory */
return ret;
}
case MemoryAllocatorType::XallocV1:
{
void *ret = XallocV1Allocator->Calloc(n, Size);
memset(ret, 0, n * Size);
return ret;
}
case MemoryAllocatorType::liballoc11:
{
void *ret = PREFIX(calloc)(n, Size);
/* calloc already returns zeroed memory */
return ret;
}
default:
throw;
}
}
void *HeapRealloc(void *Address, uint64_t Size)
{
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
return KernelAllocator.RequestPages(TO_PAGES(Size)); // WARNING: Potential memory leak
case MemoryAllocatorType::XallocV1:
{
void *ret = XallocV1Allocator->Realloc(Address, Size);
/* do not zero here: realloc must preserve the existing contents */
return ret;
}
case MemoryAllocatorType::liballoc11:
{
void *ret = PREFIX(realloc)(Address, Size);
return ret;
}
default:
throw;
}
}
void HeapFree(void *Address)
{
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
KernelAllocator.FreePage(Address); // WARNING: Potential memory leak
break;
case MemoryAllocatorType::XallocV1:
if (XallocV1Allocator)
XallocV1Allocator->Free(Address);
break;
case MemoryAllocatorType::liballoc11:
PREFIX(free)(Address);
break;
default:
throw;
}
}
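/* C++ allocation operators route through the kernel heap. */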
void *operator new(size_t Size)
{
return HeapMalloc(Size);
}
void *operator new[](size_t Size)
{
return HeapMalloc(Size);
}
void *operator new(unsigned long Size, std::align_val_t Alignment)
{
fixme("operator new with alignment(%#lx) is not implemented", Alignment);
return HeapMalloc(Size);
}
void operator delete(void *Pointer)
{
HeapFree(Pointer);
}
void operator delete[](void *Pointer)
{
HeapFree(Pointer);
}
void operator delete(void *Pointer, long unsigned int Size)
{
HeapFree(Pointer);
}
void operator delete[](void *Pointer, long unsigned int Size)
{
HeapFree(Pointer);
}


@@ -1,28 +0,0 @@
#include <memory.hpp>
namespace Memory
{
Virtual::PageMapIndexer::PageMapIndexer(uint64_t VirtualAddress)
{
#if defined(__amd64__)
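/* 4-level paging: 9-bit indexes; bits 12-20 -> PTE, 21-29 -> PDE, 30-38 -> PDPTE, 39-47 -> PML4. */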
uint64_t Address = VirtualAddress;
Address >>= 12;
this->PTEIndex = Address & 0x1FF;
Address >>= 9;
this->PDEIndex = Address & 0x1FF;
Address >>= 9;
this->PDPTEIndex = Address & 0x1FF;
Address >>= 9;
this->PMLIndex = Address & 0x1FF;
#elif defined(__i386__)
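/* NOTE: classic 32-bit paging has only two levels; the PDPTE value computed below is effectively unused. */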
uint64_t Address = VirtualAddress;
Address >>= 12;
this->PTEIndex = Address & 0x3FF;
Address >>= 10;
this->PDEIndex = Address & 0x3FF;
Address >>= 10;
this->PDPTEIndex = Address & 0x3FF;
#elif defined(__aarch64__)
#endif
}
}


@@ -1,277 +0,0 @@
#include <memory.hpp>
#include <debug.h>
namespace Memory
{
uint64_t Physical::GetTotalMemory()
{
SmartLock(this->MemoryLock);
return this->TotalMemory;
}
uint64_t Physical::GetFreeMemory()
{
SmartLock(this->MemoryLock);
return this->FreeMemory;
}
uint64_t Physical::GetReservedMemory()
{
SmartLock(this->MemoryLock);
return this->ReservedMemory;
}
uint64_t Physical::GetUsedMemory()
{
SmartLock(this->MemoryLock);
return this->UsedMemory;
}
bool Physical::SwapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::SwapPages(void *Address, uint64_t PageCount)
{
for (uint64_t i = 0; i < PageCount; i++)
if (!this->SwapPage((void *)((uint64_t)Address + (i * PAGE_SIZE))))
return false;
return true;
}
bool Physical::UnswapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::UnswapPages(void *Address, uint64_t PageCount)
{
for (uint64_t i = 0; i < PageCount; i++)
if (!this->UnswapPage((void *)((uint64_t)Address + (i * PAGE_SIZE))))
return false;
return true;
}
void *Physical::RequestPage()
{
SmartLock(this->MemoryLock);
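/* First-fit scan of the page bitmap, resuming from the last known position. */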
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
if (this->SwapPage((void *)(PageBitmapIndex * PAGE_SIZE)))
{
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
error("Out of memory! (Free: %ldMB; Used: %ldMB; Reserved: %ldMB)", TO_MB(FreeMemory), TO_MB(UsedMemory), TO_MB(ReservedMemory));
CPU::Halt(true);
return nullptr;
}
void *Physical::RequestPages(uint64_t Count)
{
SmartLock(this->MemoryLock);
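/* Look for a run of Count consecutive free pages. */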
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
for (uint64_t Index = PageBitmapIndex; Index < PageBitmap.Size * 8; Index++)
{
if (PageBitmap[Index] == true)
continue;
for (uint64_t i = 0; i < Count; i++)
if (PageBitmap[Index + i] == true)
goto NextPage;
this->LockPages((void *)(Index * PAGE_SIZE), Count);
return (void *)(Index * PAGE_SIZE);
NextPage:
Index += Count;
continue;
}
}
if (this->SwapPages((void *)(PageBitmapIndex * PAGE_SIZE), Count))
{
this->LockPages((void *)(PageBitmapIndex * PAGE_SIZE), Count);
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
error("Out of memory! (Free: %ldMB; Used: %ldMB; Reserved: %ldMB)", TO_MB(FreeMemory), TO_MB(UsedMemory), TO_MB(ReservedMemory));
CPU::Halt(true);
return nullptr;
}
void Physical::FreePage(void *Address)
{
SmartLock(this->MemoryLock);
if (unlikely(Address == nullptr))
{
warn("Null pointer passed to FreePage.");
return;
}
uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
return;
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
UsedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
void Physical::FreePages(void *Address, uint64_t Count)
{
if (unlikely(Address == nullptr || Count == 0))
{
warn("%s%s passed to FreePages.", Address == nullptr ? "Null pointer" : "", Count == 0 ? "Zero count" : "");
return;
}
for (uint64_t t = 0; t < Count; t++)
this->FreePage((void *)((uint64_t)Address + (t * PAGE_SIZE)));
}
void Physical::LockPage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to lock null address.");
uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
UsedMemory += PAGE_SIZE;
}
}
void Physical::LockPages(void *Address, uint64_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to lock %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
for (uint64_t i = 0; i < PageCount; i++)
this->LockPage((void *)((uint64_t)Address + (i * PAGE_SIZE)));
}
void Physical::ReservePage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to reserve null address.");
uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
ReservedMemory += PAGE_SIZE;
}
}
void Physical::ReservePages(void *Address, uint64_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to reserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
for (uint64_t t = 0; t < PageCount; t++)
this->ReservePage((void *)((uint64_t)Address + (t * PAGE_SIZE)));
}
void Physical::UnreservePage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to unreserve null address.");
uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
return;
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
ReservedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
void Physical::UnreservePages(void *Address, uint64_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to unreserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
for (uint64_t t = 0; t < PageCount; t++)
this->UnreservePage((void *)((uint64_t)Address + (t * PAGE_SIZE)));
}
void Physical::Init(BootInfo *Info)
{
SmartLock(this->MemoryLock);
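/* The page bitmap itself is placed at the start of the largest usable memory segment. */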
void *LargestFreeMemorySegment = nullptr;
uint64_t LargestFreeMemorySegmentSize = 0;
uint64_t MemorySize = Info->Memory.Size;
for (uint64_t i = 0; i < Info->Memory.Entries; i++)
if (Info->Memory.Entry[i].Type == Usable)
if (Info->Memory.Entry[i].Length > LargestFreeMemorySegmentSize)
{
// We don't want to use 0 as a memory address.
if (Info->Memory.Entry[i].BaseAddress == nullptr)
continue;
LargestFreeMemorySegment = (void *)Info->Memory.Entry[i].BaseAddress;
LargestFreeMemorySegmentSize = Info->Memory.Entry[i].Length;
debug("Largest free memory segment: %llp (%lldMB)",
(void *)Info->Memory.Entry[i].BaseAddress,
TO_MB(Info->Memory.Entry[i].Length));
}
TotalMemory = MemorySize;
FreeMemory = MemorySize;
if (LargestFreeMemorySegment == nullptr)
{
error("No free memory found!");
CPU::Stop();
}
uint64_t BitmapSize = (MemorySize / PAGE_SIZE) / 8 + 1;
trace("Initializing Bitmap at %llp-%llp (%lld Bytes)",
LargestFreeMemorySegment,
(void *)((uint64_t)LargestFreeMemorySegment + BitmapSize),
BitmapSize);
PageBitmap.Size = BitmapSize;
PageBitmap.Buffer = (uint8_t *)LargestFreeMemorySegment;
for (uint64_t i = 0; i < BitmapSize; i++)
*(uint8_t *)(PageBitmap.Buffer + i) = 0;
trace("Reserving pages...");
for (uint64_t i = 0; i < Info->Memory.Entries; i++)
if (Info->Memory.Entry[i].Type != Usable)
this->ReservePages((void *)Info->Memory.Entry[i].BaseAddress, Info->Memory.Entry[i].Length / PAGE_SIZE + 1);
trace("Locking bitmap pages...");
this->ReservePages(0, 0x100);
this->LockPages(PageBitmap.Buffer, PageBitmap.Size / PAGE_SIZE + 1);
}
Physical::Physical() {}
Physical::~Physical() {}
}


@@ -1,74 +0,0 @@
#include <memory.hpp>
#include <debug.h>
namespace Memory
{
StackGuard::StackGuard(bool User, PageTable4 *Table)
{
this->UserMode = User;
this->Table = Table;
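/* User stacks are backed by freshly requested pages mapped at USER_STACK_BASE; kernel stacks use the pages directly. */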
if (this->UserMode)
{
void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
debug("AllocatedStack: %p", AllocatedStack);
memset(AllocatedStack, 0, USER_STACK_SIZE);
for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
{
Virtual(Table).Map((void *)(USER_STACK_BASE + (i * PAGE_SIZE)),
(void *)((uint64_t)AllocatedStack + (i * PAGE_SIZE)),
PTFlag::RW | PTFlag::US); /* virtual first, physical second, per Virtual::Map */
}
this->StackBottom = (void *)USER_STACK_BASE;
this->StackTop = (void *)(USER_STACK_BASE + USER_STACK_SIZE);
this->Size = USER_STACK_SIZE;
}
else
{
this->StackBottom = KernelAllocator.RequestPages(TO_PAGES(STACK_SIZE));
debug("StackBottom: %p", this->StackBottom);
memset(this->StackBottom, 0, STACK_SIZE);
this->StackTop = (void *)((uint64_t)this->StackBottom + STACK_SIZE);
this->Size = STACK_SIZE;
}
trace("Allocated stack at %p", this->StackBottom);
}
StackGuard::~StackGuard()
{
fixme("Temporarily disabled stack guard deallocation");
// KernelAllocator.FreePages(this->StackBottom, TO_PAGES(this->Size));
// debug("Freed stack at %p", this->StackBottom);
}
bool StackGuard::Expand(uint64_t FaultAddress)
{
if (this->UserMode)
{
if (FaultAddress < (uint64_t)this->StackBottom - USER_STACK_SIZE ||
FaultAddress > (uint64_t)this->StackTop)
{
return false; // It's not about the stack.
}
else
{
void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
debug("AllocatedStack: %p", AllocatedStack);
memset(AllocatedStack, 0, USER_STACK_SIZE);
for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
Virtual(this->Table).Map((void *)((uint64_t)this->StackBottom - ((i + 1) * PAGE_SIZE)), (void *)((uint64_t)AllocatedStack + (i * PAGE_SIZE)), PTFlag::RW | PTFlag::US);
this->StackBottom = (void *)((uint64_t)this->StackBottom - USER_STACK_SIZE);
this->Size += USER_STACK_SIZE;
info("Stack expanded to %p", this->StackBottom);
return true;
}
}
else
{
fixme("Not implemented and probably not needed");
return false;
}
error("Reached end of function! How?");
return false;
}
}


@@ -1,217 +0,0 @@
#include <memory.hpp>
#include <convert.h>
#include <debug.h>
namespace Memory
{
bool Virtual::Check(void *VirtualAddress, PTFlag Flag)
{
// 0x1000 aligned
uint64_t Address = (uint64_t)VirtualAddress;
Address &= 0xFFFFFFFFFFFFF000;
PageMapIndexer Index = PageMapIndexer((uint64_t)Address);
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTE = nullptr;
PageDirectoryEntryPtr *PDE = nullptr;
PageTableEntryPtr *PTE = nullptr;
if ((PML4.raw & Flag) > 0)
{
PDPTE = (PageDirectoryPointerTableEntryPtr *)((uint64_t)PML4.GetAddress() << 12);
if (PDPTE)
if ((PDPTE->Entries[Index.PDPTEIndex].Present))
{
PDE = (PageDirectoryEntryPtr *)((uint64_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
if (PDE)
if ((PDE->Entries[Index.PDEIndex].Present))
{
PTE = (PageTableEntryPtr *)((uint64_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
if (PTE)
if ((PTE->Entries[Index.PTEIndex].Present))
{
return true;
}
}
}
}
return false;
}
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
{
SmartLock(this->MemoryLock);
if (unlikely(!this->Table))
{
error("No page table");
return;
}
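/* Walk PML4 -> PDPT -> PD -> PT, allocating any missing intermediate table on the way down. */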
PageMapIndexer Index = PageMapIndexer((uint64_t)VirtualAddress);
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTEPtr = nullptr;
if (!PML4.Present)
{
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)KernelAllocator.RequestPage();
memset(PDPTEPtr, 0, PAGE_SIZE);
PML4.Present = true;
PML4.raw |= Flags;
PML4.SetAddress((uint64_t)PDPTEPtr >> 12);
this->Table->Entries[Index.PMLIndex] = PML4;
}
else
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uint64_t)PML4.GetAddress() << 12);
PageDirectoryPointerTableEntry PDPTE = PDPTEPtr->Entries[Index.PDPTEIndex];
PageDirectoryEntryPtr *PDEPtr = nullptr;
if (!PDPTE.Present)
{
PDEPtr = (PageDirectoryEntryPtr *)KernelAllocator.RequestPage();
memset(PDEPtr, 0, PAGE_SIZE);
PDPTE.Present = true;
PDPTE.raw |= Flags;
PDPTE.SetAddress((uint64_t)PDEPtr >> 12);
PDPTEPtr->Entries[Index.PDPTEIndex] = PDPTE;
}
else
PDEPtr = (PageDirectoryEntryPtr *)((uint64_t)PDPTE.GetAddress() << 12);
PageDirectoryEntry PDE = PDEPtr->Entries[Index.PDEIndex];
PageTableEntryPtr *PTEPtr = nullptr;
if (!PDE.Present)
{
PTEPtr = (PageTableEntryPtr *)KernelAllocator.RequestPage();
memset(PTEPtr, 0, PAGE_SIZE);
PDE.Present = true;
PDE.raw |= Flags;
PDE.SetAddress((uint64_t)PTEPtr >> 12);
PDEPtr->Entries[Index.PDEIndex] = PDE;
}
else
PTEPtr = (PageTableEntryPtr *)((uint64_t)PDE.GetAddress() << 12);
PageTableEntry PTE = PTEPtr->Entries[Index.PTEIndex];
PTE.Present = true;
PTE.raw |= Flags;
PTE.SetAddress((uint64_t)PhysicalAddress >> 12);
PTEPtr->Entries[Index.PTEIndex] = PTE;
#if defined(__amd64__)
CPU::x64::invlpg(VirtualAddress);
#elif defined(__i386__)
CPU::x32::invlpg(VirtualAddress);
#elif defined(__aarch64__)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
#ifdef DEBUG
/* https://stackoverflow.com/a/3208376/9352057 */
#define BYTE_TO_BINARY_PATTERN "%c%c%c%c%c%c%c%c"
#define BYTE_TO_BINARY(byte) \
(byte & 0x80 ? '1' : '0'), \
(byte & 0x40 ? '1' : '0'), \
(byte & 0x20 ? '1' : '0'), \
(byte & 0x10 ? '1' : '0'), \
(byte & 0x08 ? '1' : '0'), \
(byte & 0x04 ? '1' : '0'), \
(byte & 0x02 ? '1' : '0'), \
(byte & 0x01 ? '1' : '0')
if (!this->Check(VirtualAddress, (PTFlag)Flags)) // quick workaround just to see where it fails
warn("Failed to map %#lx - %#lx with flags: " BYTE_TO_BINARY_PATTERN, VirtualAddress, PhysicalAddress, BYTE_TO_BINARY(Flags));
#endif
}
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t PageCount, uint64_t Flags)
{
for (uint64_t i = 0; i < PageCount; i++)
this->Map((void *)((uint64_t)VirtualAddress + (i * PAGE_SIZE)), (void *)((uint64_t)PhysicalAddress + (i * PAGE_SIZE)), Flags);
}
void Virtual::Unmap(void *VirtualAddress)
{
SmartLock(this->MemoryLock);
if (!this->Table)
{
error("No page table");
return;
}
PageMapIndexer Index = PageMapIndexer((uint64_t)VirtualAddress);
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
if (!PML4.Present)
{
error("Page not present");
return;
}
PageDirectoryPointerTableEntryPtr *PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uint64_t)PML4.Address << 12);
PageDirectoryPointerTableEntry PDPTE = PDPTEPtr->Entries[Index.PDPTEIndex];
if (!PDPTE.Present)
{
error("Page not present");
return;
}
PageDirectoryEntryPtr *PDEPtr = (PageDirectoryEntryPtr *)((uint64_t)PDPTE.Address << 12);
PageDirectoryEntry PDE = PDEPtr->Entries[Index.PDEIndex];
if (!PDE.Present)
{
error("Page not present");
return;
}
PageTableEntryPtr *PTEPtr = (PageTableEntryPtr *)((uint64_t)PDE.Address << 12);
PageTableEntry PTE = PTEPtr->Entries[Index.PTEIndex];
if (!PTE.Present)
{
error("Page not present");
return;
}
PTE.Present = false;
PTEPtr->Entries[Index.PTEIndex] = PTE;
#if defined(__amd64__)
CPU::x64::invlpg(VirtualAddress);
#elif defined(__i386__)
CPU::x32::invlpg(VirtualAddress);
#elif defined(__aarch64__)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
}
void Virtual::Unmap(void *VirtualAddress, uint64_t PageCount)
{
for (uint64_t i = 0; i < PageCount; i++)
this->Unmap((void *)((uint64_t)VirtualAddress + (i * PAGE_SIZE)));
}
void Virtual::Remap(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
{
this->Unmap(VirtualAddress);
this->Map(VirtualAddress, PhysicalAddress, Flags);
}
Virtual::Virtual(PageTable4 *Table)
{
if (Table)
this->Table = Table;
else
this->Table = (PageTable4 *)CPU::PageTable();
}
Virtual::~Virtual() {}
}