Update files

Alex
2022-10-08 04:33:53 +03:00
parent 6d5f7e9372
commit 8652d781ce
53 changed files with 7413 additions and 10 deletions

core/Debugger.cpp Normal file
@@ -0,0 +1,85 @@
#include <debug.h>
#include <uart.hpp>
#include <printf.h>
#include <stdarg.h> /* va_list / va_start / va_end */
using namespace UniversalAsynchronousReceiverTransmitter;
/* printf-style output callback: forwards every character to COM1 */
static inline void uart_wrapper(char c, void *unused)
{
UART(COM1).Write(c);
(void)unused;
}
/* Emit the "LEVEL|File->Function:Line: " prefix before each message */
static inline void WritePrefix(DebugLevel Level, const char *File, int Line, const char *Function)
{
const char *DbgLvlString;
switch (Level)
{
case DebugLevelError:
DbgLvlString = "ERROR";
break;
case DebugLevelWarning:
DbgLvlString = "WARN ";
break;
case DebugLevelInfo:
DbgLvlString = "INFO ";
break;
case DebugLevelDebug:
DbgLvlString = "DEBUG";
break;
case DebugLevelTrace:
DbgLvlString = "TRACE";
break;
case DebugLevelFixme:
DbgLvlString = "FIXME";
break;
default:
DbgLvlString = "UNKNW";
break;
}
fctprintf(uart_wrapper, nullptr, "%s|%s->%s:%d: ", DbgLvlString, File, Function, Line);
}
namespace SysDbg
{
void Write(DebugLevel Level, const char *File, int Line, const char *Function, const char *Format, ...)
{
WritePrefix(Level, File, Line, Function);
va_list args;
va_start(args, Format);
vfctprintf(uart_wrapper, nullptr, Format, args);
va_end(args);
}
void WriteLine(DebugLevel Level, const char *File, int Line, const char *Function, const char *Format, ...)
{
WritePrefix(Level, File, Line, Function);
va_list args;
va_start(args, Format);
vfctprintf(uart_wrapper, nullptr, Format, args);
va_end(args);
uart_wrapper('\n', nullptr);
}
}
// C compatibility
extern "C" void SysDbgWrite(enum DebugLevel Level, const char *File, int Line, const char *Function, const char *Format, ...)
{
WritePrefix(Level, File, Line, Function);
va_list args;
va_start(args, Format);
vfctprintf(uart_wrapper, nullptr, Format, args);
va_end(args);
}
// C compatibility
extern "C" void SysDbgWriteLine(enum DebugLevel Level, const char *File, int Line, const char *Function, const char *Format, ...)
{
WritePrefix(Level, File, Line, Function);
va_list args;
va_start(args, Format);
vfctprintf(uart_wrapper, nullptr, Format, args);
va_end(args);
uart_wrapper('\n', nullptr);
}
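The WriteLine variants differ from Write only by the trailing newline pushed through the same callback. A minimal usage sketch — the real call-site macros live in debug.h, which is not part of this diff, so the macro name below is illustrative only:

// Hypothetical convenience macro; debug.h presumably defines something similar.
#define EXAMPLE_DEBUG(fmt, ...) \
    SysDbgWriteLine(DebugLevelDebug, __FILE__, __LINE__, __FUNCTION__, fmt, ##__VA_ARGS__)

void Example()
{
    EXAMPLE_DEBUG("heap base is %p", (void *)0xFFFFA00000000000);
    // emits e.g. "DEBUG|Example.cpp->Example:7: heap base is 0xffffa00000000000" on COM1
}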

core/Memory/HeapAllocators/Xalloc.cpp Normal file
@@ -0,0 +1,205 @@
#include "Xalloc.hpp"
namespace Xalloc
{
class SmartSMAPClass
{
private:
AllocatorV1 *allocator = nullptr;
public:
SmartSMAPClass(AllocatorV1 *allocator)
{
this->allocator = allocator;
this->allocator->Xstac();
}
~SmartSMAPClass() { this->allocator->Xclac(); }
};
#define SmartSMAP SmartSMAPClass XALLOC_CONCAT(SmartSMAP##_, __COUNTER__)(this)
AllocatorV1::AllocatorV1(void *Address, bool UserMode, bool SMAPEnabled)
{
SmartSMAP;
void *Position = Address;
UserMapping = UserMode;
SMAPUsed = SMAPEnabled;
for (Xuint64_t i = 0; i < 0x20; i++)
{
void *Page = Xalloc_REQUEST_PAGE();
if (UserMapping)
Xalloc_MAP_MEMORY(Position, Page, Xalloc_MAP_MEMORY_READ_WRITE | Xalloc_MAP_MEMORY_USER);
else
Xalloc_MAP_MEMORY(Position, Page, Xalloc_MAP_MEMORY_READ_WRITE);
Xalloc_trace("Preallocate Heap Memory (%#llx-%#llx [%#llx])...", Position, (Xuint64_t)Position + Xalloc_PAGE_SIZE, Page);
Position = (void *)((Xuint64_t)Position + Xalloc_PAGE_SIZE);
}
Xuint64_t HeapLength = 16 * Xalloc_PAGE_SIZE; /* NOTE: 0x20 pages are premapped above, but only 16 are handed to the initial segment */
this->HeapStart = Address;
this->HeapEnd = (void *)((Xuint64_t)this->HeapStart + HeapLength);
HeapSegment *StartSegment = (HeapSegment *)Address;
StartSegment->Length = HeapLength - sizeof(HeapSegment);
StartSegment->Next = nullptr;
StartSegment->Last = nullptr;
StartSegment->IsFree = true;
this->LastSegment = StartSegment;
}
AllocatorV1::~AllocatorV1()
{
SmartSMAP;
Xalloc_trace("Destructor not implemented yet.");
}
void AllocatorV1::ExpandHeap(Xuint64_t Length)
{
if (Length % Xalloc_PAGE_SIZE)
{
Length -= Length % Xalloc_PAGE_SIZE;
Length += Xalloc_PAGE_SIZE;
}
Xuint64_t PageCount = Length / Xalloc_PAGE_SIZE;
HeapSegment *NewSegment = (HeapSegment *)this->HeapEnd;
for (Xuint64_t i = 0; i < PageCount; i++)
{
void *Page = Xalloc_REQUEST_PAGE();
if (UserMapping)
Xalloc_MAP_MEMORY(this->HeapEnd, Page, Xalloc_MAP_MEMORY_READ_WRITE | Xalloc_MAP_MEMORY_USER);
else
Xalloc_MAP_MEMORY(this->HeapEnd, Page, Xalloc_MAP_MEMORY_READ_WRITE);
// Xalloc_trace("Expanding Heap Memory (%#llx-%#llx [%#llx])...", this->HeapEnd, (Xuint64_t)this->HeapEnd + Xalloc_PAGE_SIZE, Page);
this->HeapEnd = (void *)((Xuint64_t)this->HeapEnd + Xalloc_PAGE_SIZE);
}
NewSegment->IsFree = true;
NewSegment->Last = this->LastSegment;
this->LastSegment->Next = NewSegment;
this->LastSegment = NewSegment;
NewSegment->Next = nullptr;
NewSegment->Length = Length - sizeof(HeapSegment);
NewSegment->CombineBackward(this->LastSegment);
}
void *AllocatorV1::Malloc(Xuint64_t Size)
{
SmartSMAP;
if (this->HeapStart == nullptr)
{
Xalloc_err("Memory allocation not initialized yet!");
return nullptr;
}
if (Size < 0x10)
{
// Xalloc_warn("Allocation size is too small, using 0x10 instead!");
Size = 0x10;
}
// #ifdef DEBUG
// if (Size < 1024)
// debug("Allocating %dB", Size);
// else if (TO_KB(Size) < 1024)
// debug("Allocating %dKB", TO_KB(Size));
// else if (TO_MB(Size) < 1024)
// debug("Allocating %dMB", TO_MB(Size));
// else if (TO_GB(Size) < 1024)
// debug("Allocating %dGB", TO_GB(Size));
// #endif
if (Size % 0x10 > 0) // it is not a multiple of 0x10
{
Size -= (Size % 0x10);
Size += 0x10;
}
if (Size == 0)
{
return nullptr;
}
HeapSegment *CurrentSegment = (HeapSegment *)this->HeapStart;
while (true)
{
if (CurrentSegment->IsFree)
{
if (CurrentSegment->Length > Size)
{
CurrentSegment->Split(Size, this->LastSegment);
CurrentSegment->IsFree = false;
return (void *)((Xuint64_t)CurrentSegment + sizeof(HeapSegment));
}
if (CurrentSegment->Length == Size)
{
CurrentSegment->IsFree = false;
return (void *)((Xuint64_t)CurrentSegment + sizeof(HeapSegment));
}
}
if (CurrentSegment->Next == nullptr)
break;
CurrentSegment = CurrentSegment->Next;
}
ExpandHeap(Size);
return this->Malloc(Size);
}
void AllocatorV1::Free(void *Address)
{
SmartSMAP;
if (this->HeapStart == nullptr)
{
Xalloc_err("Memory allocation not initialized yet!");
return;
}
if (Address == nullptr) /* freeing a null pointer is a no-op */
return;
HeapSegment *Segment = (HeapSegment *)Address - 1;
Segment->IsFree = true;
Segment->CombineForward(this->LastSegment);
Segment->CombineBackward(this->LastSegment);
}
void *AllocatorV1::Calloc(Xuint64_t NumberOfBlocks, Xuint64_t Size)
{
SmartSMAP;
if (this->HeapStart == nullptr)
{
Xalloc_err("Memory allocation not initialized yet!");
return nullptr;
}
if (Size < 0x10)
{
// Xalloc_warn("Allocation size is too small, using 0x10 instead!");
Size = 0x10;
}
void *Block = this->Malloc(NumberOfBlocks * Size);
if (Block)
Xmemset(Block, 0, NumberOfBlocks * Size);
return Block;
}
void *AllocatorV1::Realloc(void *Address, Xuint64_t Size)
{
SmartSMAP;
if (this->HeapStart == nullptr)
{
Xalloc_err("Memory allocation not initialized yet!");
return nullptr;
}
if (Address != nullptr && Size == 0) /* realloc(ptr, 0) frees the block */
{
this->Free(Address);
return nullptr;
}
else if (!Address) /* realloc(nullptr, n) behaves like an allocation */
{
return this->Calloc(Size, sizeof(char));
}
if (Size < 0x10)
{
// Xalloc_warn("Allocation size is too small, using 0x10 instead!");
Size = 0x10;
}
void *newAddress = this->Calloc(Size, sizeof(char));
Xmemcpy(newAddress, Address, Size); /* NOTE: copies Size bytes even when growing, so this can read past the old block */
this->Free(Address); /* release the old block once its contents are copied */
return newAddress;
}
}
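Malloc never hands out blocks smaller than 16 bytes and rounds every request up to a 16-byte multiple, which keeps payloads aligned with the 16-byte-aligned HeapSegment headers. A standalone sketch of that normalization:

// Sketch of the size rounding Malloc performs before walking the segment list.
static Xuint64_t RoundUpAllocSize(Xuint64_t Size)
{
    if (Size < 0x10)
        Size = 0x10;                  // clamp to the minimum block size
    if (Size % 0x10)
        Size += 0x10 - (Size % 0x10); // align up to the next multiple of 16
    return Size;
}
// RoundUpAllocSize(1) == 16, RoundUpAllocSize(17) == 32, RoundUpAllocSize(32) == 32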

core/Memory/HeapAllocators/Xalloc.hpp Normal file
@@ -0,0 +1,180 @@
#pragma once
#include <memory.hpp>
#include <debug.h>
// Functions defines
// Page allocation functions
#define Xalloc_REQUEST_PAGE() KernelAllocator.RequestPage()
#define Xalloc_REQUEST_PAGES(Pages) KernelAllocator.RequestPages(Pages)
#define Xalloc_FREE_PAGE(Address) KernelAllocator.FreePage(Address)
#define Xalloc_FREE_PAGES(Address, Pages) KernelAllocator.FreePages(Address, Pages)
#define Xalloc_MAP_MEMORY(VirtualAddress, PhysicalAddress, Flags) Memory::Virtual(KernelPageTable).Map(VirtualAddress, PhysicalAddress, Flags)
#define Xalloc_UNMAP_MEMORY(VirtualAddress) Memory::Virtual(KernelPageTable).Unmap(VirtualAddress)
#define Xalloc_MAP_MEMORY_READ_WRITE Memory::PTFlag::RW
#define Xalloc_MAP_MEMORY_USER Memory::PTFlag::US
#define Xalloc_PAGE_SIZE PAGE_SIZE
#define Xalloc_trace(m, ...) trace(m, ##__VA_ARGS__)
#define Xalloc_warn(m, ...) warn(m, ##__VA_ARGS__)
#define Xalloc_err(m, ...) error(m, ##__VA_ARGS__)
#define XALLOC_CONCAT(x, y) x##y
typedef long unsigned Xuint64_t;
namespace Xalloc
{
class AllocatorV1
{
private:
struct HeapSegment
{
Xuint64_t Length;
HeapSegment *Next;
HeapSegment *Last;
bool IsFree;
HeapSegment *Split(Xuint64_t SplitLength, HeapSegment *&LastSegment) /* LastSegment by reference so the allocator's tail pointer is actually updated */
{
if (SplitLength < 0x10)
return nullptr;
int64_t SplitSegmentLength = Length - SplitLength - (sizeof(HeapSegment));
if (SplitSegmentLength < 0x10)
return nullptr;
HeapSegment *NewSplitHdr = (HeapSegment *)((Xuint64_t)this + SplitLength + sizeof(HeapSegment));
if (Next != nullptr) /* guard: this may be the tail segment */
Next->Last = NewSplitHdr;
NewSplitHdr->Next = Next;
Next = NewSplitHdr;
NewSplitHdr->Last = this;
NewSplitHdr->Length = SplitSegmentLength;
NewSplitHdr->IsFree = IsFree;
Length = SplitLength;
if (LastSegment == this)
LastSegment = NewSplitHdr;
return NewSplitHdr;
}
void CombineForward(HeapSegment *&LastSegment) /* by reference, see Split */
{
if (Next == nullptr)
return;
if (Next->IsFree == false)
return;
if (Next == LastSegment)
LastSegment = this;
if (Next->Next != nullptr)
Next->Next->Last = this;
Length = Length + Next->Length + sizeof(HeapSegment);
Next = Next->Next;
}
void CombineBackward(HeapSegment *&LastSegment)
{
if (Last != nullptr && Last->IsFree)
Last->CombineForward(LastSegment);
}
} __attribute__((aligned(16)));
void *HeapStart = nullptr;
void *HeapEnd = nullptr;
HeapSegment *LastSegment = nullptr;
bool UserMapping = false;
bool SMAPUsed = false;
void ExpandHeap(Xuint64_t Length);
// TODO: Change memcpy with an optimized version
static inline void *Xmemcpy(void *__restrict__ Destination, const void *__restrict__ Source, Xuint64_t Length)
{
unsigned char *dst = (unsigned char *)Destination;
const unsigned char *src = (const unsigned char *)Source;
for (Xuint64_t i = 0; i < Length; i++)
dst[i] = src[i];
return Destination;
}
// TODO: Change memset with an optimized version
static inline void *Xmemset(void *__restrict__ Destination, int Data, Xuint64_t Length)
{
unsigned char *Buffer = (unsigned char *)Destination;
for (Xuint64_t i = 0; i < Length; i++)
Buffer[i] = (unsigned char)Data;
return Destination;
}
public:
inline void Xstac()
{
if (this->SMAPUsed)
{
#if defined(__amd64__) || defined(__i386__)
asm volatile("stac" ::
: "cc");
#endif
}
}
inline void Xclac()
{
if (this->SMAPUsed)
{
#if defined(__amd64__) || defined(__i386__)
asm volatile("clac" ::
: "cc");
#endif
}
}
/**
* @brief Construct a new Allocator V1 object
*
* @param Address Virtual address to allocate.
* @param UserMode Map the new pages with USER flag?
* @param SMAPEnabled Does the kernel have Supervisor Mode Access Prevention enabled?
*/
AllocatorV1(void *Address, bool UserMode, bool SMAPEnabled);
/**
* @brief Destroy the AllocatorV1 object
*
*/
~AllocatorV1();
/**
* @brief Allocate a new memory block
*
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated block.
*/
void *Malloc(Xuint64_t Size);
/**
* @brief Free a previously allocated block
*
* @param Address Address of the block to free.
*/
void Free(void *Address);
/**
* @brief Allocate a new memory block
*
* @param NumberOfBlocks Number of blocks to allocate.
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated block.
*/
void *Calloc(Xuint64_t NumberOfBlocks, Xuint64_t Size);
/**
* @brief Reallocate a previously allocated block
*
* @param Address Address of the block to reallocate.
* @param Size New size of the block.
* @return void* Pointer to the reallocated block.
*/
void *Realloc(void *Address, Xuint64_t Size);
};
}
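SmartSMAP (defined in Xalloc.cpp) is an instance of the classic scope-guard idiom: the macro declares a local object whose name is made unique by __COUNTER__, its constructor runs Xstac() and its destructor runs Xclac(), so SMAP is re-enabled on every exit path, including early returns. The generic shape of the trick, as a sketch with illustrative names:

// Generic scope guard; the names here are not from this repository.
class ScopeGuard
{
    void (*OnExit)();
public:
    ScopeGuard(void (*Enter)(), void (*Exit)()) : OnExit(Exit) { Enter(); }
    ~ScopeGuard() { OnExit(); } // runs on every scope exit
};
#define GUARD_CONCAT(x, y) x##y
#define SCOPE_GUARD(Enter, Exit) \
    ScopeGuard GUARD_CONCAT(__scope_guard_, __COUNTER__)(Enter, Exit)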

core/Memory/Memory.cpp Normal file
@@ -0,0 +1,215 @@
#include <memory.hpp>
#include <string.h>
#include <debug.h>
#include "HeapAllocators/Xalloc.hpp"
#include "../lib/liballoc_1_1.h"
using namespace Memory;
Physical KernelAllocator;
PageTable *KernelPageTable = nullptr;
enum MemoryAllocatorType
{
None,
Pages,
XallocV1,
liballoc11
};
static MemoryAllocatorType AllocatorType = MemoryAllocatorType::None;
Xalloc::AllocatorV1 *XallocV1Allocator = nullptr;
#ifdef DEBUG
void tracepagetable(PageTable *pt)
{
for (int i = 0; i < 512; i++)
{
if (pt->Entries[i].Value.Present)
debug("Entry %d: %x %x %x %x %x %x %x %x %x %x %x %p-%#lx", i,
pt->Entries[i].Value.Present, pt->Entries[i].Value.ReadWrite,
pt->Entries[i].Value.UserSupervisor, pt->Entries[i].Value.WriteThrough,
pt->Entries[i].Value.CacheDisable, pt->Entries[i].Value.Accessed,
pt->Entries[i].Value.Dirty, pt->Entries[i].Value.PageSize,
pt->Entries[i].Value.Global, pt->Entries[i].Value.PageAttributeTable,
pt->Entries[i].Value.ExecuteDisable, pt->Entries[i].GetAddress(),
pt->Entries[i].Value);
}
}
#endif
void InitializeMemoryManagement(BootInfo *Info)
{
trace("Initializing Physical Memory Manager");
KernelAllocator = Physical();
KernelAllocator.Init(Info);
debug("Memory Info: %dMB / %dMB (%dMB reserved)",
TO_MB(KernelAllocator.GetUsedMemory()),
TO_MB(KernelAllocator.GetTotalMemory()),
TO_MB(KernelAllocator.GetReservedMemory()));
AllocatorType = MemoryAllocatorType::Pages;
trace("Initializing Virtual Memory Manager");
KernelPageTable = (PageTable *)KernelAllocator.RequestPage();
memset(KernelPageTable, 0, PAGE_SIZE);
Virtual kva = Virtual(KernelPageTable);
uint64_t KernelStart = (uint64_t)&_kernel_start;
uint64_t KernelTextEnd = (uint64_t)&_kernel_text_end;
uint64_t KernelDataEnd = (uint64_t)&_kernel_data_end;
uint64_t KernelRoDataEnd = (uint64_t)&_kernel_rodata_end;
uint64_t KernelEnd = (uint64_t)&_kernel_end;
uint64_t VirtualOffsetNormalVMA = NORMAL_VMA_OFFSET;
uint64_t BaseKernelMapAddress = (uint64_t)Info->Kernel.PhysicalBase;
for (uint64_t t = 0; t < Info->Memory.Size; t += PAGE_SIZE)
{
kva.Map((void *)t, (void *)t, PTFlag::RW);
kva.Map((void *)VirtualOffsetNormalVMA, (void *)t, PTFlag::RW);
VirtualOffsetNormalVMA += PAGE_SIZE;
}
/* Mapping Framebuffer address */
int itrfb = 0;
while (1)
{
if (!Info->Framebuffer[itrfb].BaseAddress)
break;
for (uint64_t fb_base = (uint64_t)Info->Framebuffer[itrfb].BaseAddress;
fb_base < ((uint64_t)Info->Framebuffer[itrfb].BaseAddress + ((Info->Framebuffer[itrfb].Pitch * Info->Framebuffer[itrfb].Height) + PAGE_SIZE));
fb_base += PAGE_SIZE)
kva.Map((void *)(fb_base + NORMAL_VMA_OFFSET), (void *)fb_base, PTFlag::RW | PTFlag::US);
itrfb++;
}
/* Kernel mapping */
for (uint64_t k = KernelStart; k < KernelTextEnd; k += PAGE_SIZE)
{
kva.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
for (uint64_t k = KernelTextEnd; k < KernelDataEnd; k += PAGE_SIZE)
{
kva.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
for (uint64_t k = KernelDataEnd; k < KernelRoDataEnd; k += PAGE_SIZE)
{
kva.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::P);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
for (uint64_t k = KernelRoDataEnd; k < KernelEnd; k += PAGE_SIZE)
{
kva.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW);
KernelAllocator.LockPage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
debug("\nStart: %#llx - Text End: %#llx - RoEnd: %#llx - End: %#llx\nStart Physical: %#llx - End Physical: %#llx",
KernelStart, KernelTextEnd, KernelRoDataEnd, KernelEnd, Info->Kernel.PhysicalBase, BaseKernelMapAddress - PAGE_SIZE);
/* KernelStart KernelTextEnd KernelRoDataEnd KernelEnd
Kernel Start & Text Start ------ Text End ------ Kernel Rodata End ------ Kernel Data End & Kernel End
*/
trace("Applying new page table from address %p", KernelPageTable);
#ifdef DEBUG
tracepagetable(KernelPageTable);
#endif
#if defined(__amd64__) || defined(__i386__)
asm volatile("mov %0, %%cr3" ::"r"(KernelPageTable));
#elif defined(__aarch64__)
asm volatile("msr ttbr0_el1, %0" ::"r"(KernelPageTable));
#endif
if (strstr(Info->Kernel.CommandLine, "xallocv1"))
{
XallocV1Allocator = new Xalloc::AllocatorV1((void *)KERNEL_HEAP_BASE, false, false);
AllocatorType = MemoryAllocatorType::XallocV1;
trace("XallocV1 Allocator initialized (%p)", XallocV1Allocator);
}
else if (strstr(Info->Kernel.CommandLine, "liballoc11"))
{
AllocatorType = MemoryAllocatorType::liballoc11;
}
}
void *HeapMalloc(uint64_t Size)
{
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
return KernelAllocator.RequestPages(TO_PAGES(Size));
case MemoryAllocatorType::XallocV1:
return XallocV1Allocator->Malloc(Size);
case MemoryAllocatorType::liballoc11:
return PREFIX(malloc)(Size);
default:
throw;
}
}
void *HeapCalloc(uint64_t n, uint64_t Size)
{
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
return KernelAllocator.RequestPages(TO_PAGES(n * Size));
case MemoryAllocatorType::XallocV1:
return XallocV1Allocator->Calloc(n, Size);
case MemoryAllocatorType::liballoc11:
return PREFIX(calloc)(n, Size);
default:
throw;
}
}
void *HeapRealloc(void *Address, uint64_t Size)
{
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
return KernelAllocator.RequestPages(TO_PAGES(Size)); // WARNING: Potential memory leak
case MemoryAllocatorType::XallocV1:
return XallocV1Allocator->Realloc(Address, Size);
case MemoryAllocatorType::liballoc11:
return PREFIX(realloc)(Address, Size);
default:
throw;
}
}
void HeapFree(void *Address)
{
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
KernelAllocator.FreePage(Address); // WARNING: Potential memory leak
break;
case MemoryAllocatorType::XallocV1:
XallocV1Allocator->Free(Address);
break;
case MemoryAllocatorType::liballoc11:
PREFIX(free)(Address);
break;
default:
throw;
}
}
void *operator new(uint64_t Size) { return HeapMalloc(Size); }
void *operator new[](uint64_t Size) { return HeapMalloc(Size); }
void operator delete(void *Pointer) { HeapFree(Pointer); }
void operator delete[](void *Pointer) { HeapFree(Pointer); }
void operator delete(void *Pointer, long unsigned int Size) { HeapFree(Pointer); }
void operator delete[](void *Pointer, long unsigned int Size) { HeapFree(Pointer); }
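Because the global operator new/delete overloads route through these wrappers, ordinary C++ allocation anywhere in the kernel transparently uses whichever backend the command line selected. A short sketch of the effect:

// Sketch: after InitializeMemoryManagement() has run, plain C++ allocations
// are serviced by the active backend (pages, XallocV1, or liballoc 1.1).
struct Example { int a, b; };
void AllocatorUsage()
{
    Example *e = new Example();      // -> HeapMalloc(sizeof(Example))
    uint8_t *buf = new uint8_t[256]; // -> HeapMalloc(256)
    delete e;                        // -> HeapFree(e)
    delete[] buf;                    // -> HeapFree(buf)
}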

@@ -0,0 +1,266 @@
#include <memory.hpp>
#include <debug.h>
namespace Memory
{
uint64_t Physical::GetTotalMemory()
{
SMARTLOCK(this->MemoryLock);
return this->TotalMemory;
}
uint64_t Physical::GetFreeMemory()
{
SMARTLOCK(this->MemoryLock);
return this->FreeMemory;
}
uint64_t Physical::GetReservedMemory()
{
SMARTLOCK(this->MemoryLock);
return this->ReservedMemory;
}
uint64_t Physical::GetUsedMemory()
{
SMARTLOCK(this->MemoryLock);
return this->UsedMemory;
}
bool Physical::SwapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::SwapPages(void *Address, uint64_t PageCount)
{
for (uint64_t i = 0; i < PageCount; i++)
if (!this->SwapPage((void *)((uint64_t)Address + (i * PAGE_SIZE))))
return false;
return true; /* every page was swapped */
}
bool Physical::UnswapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::UnswapPages(void *Address, uint64_t PageCount)
{
for (uint64_t i = 0; i < PageCount; i++)
if (!this->UnswapPage((void *)((uint64_t)Address + (i * PAGE_SIZE))))
return false;
return true; /* every page was unswapped */
}
void *Physical::RequestPage()
{
SMARTLOCK(this->MemoryLock);
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
if (this->SwapPage((void *)(PageBitmapIndex * PAGE_SIZE)))
{
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
error("Out of memory! (Free: %ldMB; Used: %ldMB; Reserved: %ldMB)", TO_MB(FreeMemory), TO_MB(UsedMemory), TO_MB(ReservedMemory));
while (1)
CPU::Halt();
return nullptr;
}
void *Physical::RequestPages(uint64_t Count)
{
SMARTLOCK(this->MemoryLock);
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
for (uint64_t Index = PageBitmapIndex; Index < PageBitmap.Size * 8; Index++)
{
if (PageBitmap[Index] == true)
continue;
for (uint64_t i = 0; i < Count; i++)
if (PageBitmap[Index + i] == true)
goto NextPage;
this->LockPages((void *)(Index * PAGE_SIZE), Count);
return (void *)(Index * PAGE_SIZE);
NextPage:
Index += Count;
continue;
}
}
if (this->SwapPages((void *)(PageBitmapIndex * PAGE_SIZE), Count))
{
this->LockPages((void *)(PageBitmapIndex * PAGE_SIZE), Count);
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
error("Out of memory! (Free: %ldMB; Used: %ldMB; Reserved: %ldMB)", TO_MB(FreeMemory), TO_MB(UsedMemory), TO_MB(ReservedMemory));
while (1)
CPU::Halt();
return nullptr;
}
void Physical::FreePage(void *Address)
{
SMARTLOCK(this->MemoryLock);
if (Address == nullptr)
{
warn("Null pointer passed to FreePage.");
return;
}
uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (PageBitmap[Index] == false)
return;
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
UsedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
void Physical::FreePages(void *Address, uint64_t Count)
{
if (Address == nullptr || Count == 0)
{
warn("%s%s passed to FreePages.", Address == nullptr ? "Null pointer" : "", Count == 0 ? "Zero count" : "");
return;
}
for (uint64_t t = 0; t < Count; t++)
this->FreePage((void *)((uint64_t)Address + (t * PAGE_SIZE)));
}
void Physical::LockPage(void *Address)
{
if (Address == nullptr)
warn("Trying to lock null address.");
uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (PageBitmap[Index] == true)
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
UsedMemory += PAGE_SIZE;
}
}
void Physical::LockPages(void *Address, uint64_t PageCount)
{
if (Address == nullptr || PageCount == 0)
warn("Trying to lock %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
for (uint64_t i = 0; i < PageCount; i++)
this->LockPage((void *)((uint64_t)Address + (i * PAGE_SIZE)));
}
void Physical::ReservePage(void *Address)
{
if (Address == nullptr)
warn("Trying to reserve null address.");
uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (PageBitmap[Index] == true)
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
ReservedMemory += PAGE_SIZE;
}
}
void Physical::ReservePages(void *Address, uint64_t PageCount)
{
if (Address == nullptr || PageCount == 0)
warn("Trying to reserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
for (uint64_t t = 0; t < PageCount; t++)
this->ReservePage((void *)((uint64_t)Address + (t * PAGE_SIZE)));
}
void Physical::UnreservePage(void *Address)
{
if (Address == nullptr)
warn("Trying to unreserve null address.");
uint64_t Index = (uint64_t)Address / PAGE_SIZE;
if (PageBitmap[Index] == false)
return;
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
ReservedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
void Physical::UnreservePages(void *Address, uint64_t PageCount)
{
if (Address == nullptr || PageCount == 0)
warn("Trying to unreserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
for (uint64_t t = 0; t < PageCount; t++)
this->UnreservePage((void *)((uint64_t)Address + (t * PAGE_SIZE)));
}
void Physical::Init(BootInfo *Info)
{
SMARTLOCK(this->MemoryLock);
void *LargestFreeMemorySegment = nullptr;
uint64_t LargestFreeMemorySegmentSize = 0;
uint64_t MemorySize = Info->Memory.Size;
for (uint64_t i = 0; i < Info->Memory.Entries; i++)
if (Info->Memory.Entry[i].Type == Usable)
if (Info->Memory.Entry[i].Length > LargestFreeMemorySegmentSize)
{
LargestFreeMemorySegment = (void *)Info->Memory.Entry[i].BaseAddress;
LargestFreeMemorySegmentSize = Info->Memory.Entry[i].Length;
debug("Largest free memory segment: %p (%dKB)",
(void *)Info->Memory.Entry[i].BaseAddress,
TO_KB(Info->Memory.Entry[i].Length));
}
TotalMemory = MemorySize;
FreeMemory = MemorySize;
uint64_t BitmapSize = ALIGN_UP((MemorySize / 0x1000) / 8, 0x1000);
trace("Initializing Bitmap (%p %dKB)", LargestFreeMemorySegment, TO_KB(BitmapSize));
PageBitmap.Size = BitmapSize;
PageBitmap.Buffer = (uint8_t *)LargestFreeMemorySegment;
for (uint64_t i = 0; i < BitmapSize; i++)
*(uint8_t *)(PageBitmap.Buffer + i) = 0;
this->ReservePages(0, MemorySize / PAGE_SIZE + 1);
for (uint64_t i = 0; i < Info->Memory.Entries; i++)
if (Info->Memory.Entry[i].Type == Usable)
this->UnreservePages((void *)Info->Memory.Entry[i].BaseAddress, Info->Memory.Entry[i].Length / PAGE_SIZE + 1);
this->ReservePages(0, 0x100); // Reserve the first 0x100 pages (addresses 0x0 - 0x100000)
this->LockPages(PageBitmap.Buffer, PageBitmap.Size / PAGE_SIZE + 1);
}
Physical::Physical() {}
Physical::~Physical() {}
}
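The whole manager rests on a one-bit-per-page bitmap: physical page N corresponds to bit N, so Index = Address / PAGE_SIZE in both directions. The Bitmap class itself is not part of this diff; a sketch of the packing such a class would typically use:

// Assumed bit packing for a PageBitmap-style class (not from this diff):
// page Index lives at bit (Index % 8) of byte (Index / 8).
struct BitmapSketch
{
    uint8_t *Buffer;
    uint64_t Size; // buffer size in bytes

    bool Get(uint64_t Index) const
    {
        return Buffer[Index / 8] & (uint8_t)(1 << (Index % 8));
    }
    bool Set(uint64_t Index, bool Value) // returns false when out of range
    {
        if (Index / 8 >= Size)
            return false;
        if (Value)
            Buffer[Index / 8] |= (uint8_t)(1 << (Index % 8));
        else
            Buffer[Index / 8] &= (uint8_t)~(1 << (Index % 8));
        return true;
    }
};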

@@ -0,0 +1,123 @@
#include <memory.hpp>
#include <string.h>
#include <debug.h>
namespace Memory
{
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
{
SMARTLOCK(this->MemoryLock);
if (!this->Table)
{
error("No page table");
return;
}
PageMapIndexer Index = PageMapIndexer((uint64_t)VirtualAddress);
PageDirectoryEntry PDE = this->Table->Entries[Index.PDP_i];
PageTable *PDP;
if (!PDE.GetFlag(PTFlag::P))
{
PDP = (PageTable *)KernelAllocator.RequestPage();
memset(PDP, 0, PAGE_SIZE);
PDE.SetAddress((uint64_t)PDP >> 12);
PDE.SetFlag(PTFlag::P, true);
PDE.AddFlag(Flags);
this->Table->Entries[Index.PDP_i] = PDE;
}
else
PDP = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
PDE = PDP->Entries[Index.PD_i];
PageTable *PD;
if (!PDE.GetFlag(PTFlag::P))
{
PD = (PageTable *)KernelAllocator.RequestPage();
memset(PD, 0, PAGE_SIZE);
PDE.SetAddress((uint64_t)PD >> 12);
PDE.SetFlag(PTFlag::P, true);
PDE.AddFlag(Flags);
PDP->Entries[Index.PD_i] = PDE;
}
else
PD = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
PDE = PD->Entries[Index.PT_i];
PageTable *PT;
if (!PDE.GetFlag(PTFlag::P))
{
PT = (PageTable *)KernelAllocator.RequestPage();
memset(PT, 0, PAGE_SIZE);
PDE.SetAddress((uint64_t)PT >> 12);
PDE.SetFlag(PTFlag::P, true);
PDE.AddFlag(Flags);
PD->Entries[Index.PT_i] = PDE;
}
else
PT = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
PDE = PT->Entries[Index.P_i];
PDE.SetAddress((uint64_t)PhysicalAddress >> 12);
PDE.SetFlag(PTFlag::P, true);
PDE.AddFlag(Flags);
PT->Entries[Index.P_i] = PDE;
#if defined(__amd64__) || defined(__i386__)
asmv("invlpg (%0)"
:
: "r"(VirtualAddress)
: "memory");
#elif defined(__aarch64__)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
}
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t PageCount, uint64_t Flags)
{
for (uint64_t i = 0; i < PageCount; i++)
this->Map((void *)((uint64_t)VirtualAddress + (i * PAGE_SIZE)), (void *)((uint64_t)PhysicalAddress + (i * PAGE_SIZE)), Flags);
}
void Virtual::Unmap(void *VirtualAddress)
{
SMARTLOCK(this->MemoryLock);
if (!this->Table)
{
error("No page table");
return;
}
PageMapIndexer Index = PageMapIndexer((uint64_t)VirtualAddress);
/* Walk down to the page-table level, as Map does; if any level is absent, the address is not mapped. */
PageDirectoryEntry PDE = this->Table->Entries[Index.PDP_i];
if (!PDE.GetFlag(PTFlag::P))
return;
PageTable *PDP = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
PDE = PDP->Entries[Index.PD_i];
if (!PDE.GetFlag(PTFlag::P))
return;
PageTable *PD = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
PDE = PD->Entries[Index.PT_i];
if (!PDE.GetFlag(PTFlag::P))
return;
PageTable *PT = (PageTable *)((uint64_t)PDE.GetAddress() << 12);
/* Clear the leaf entry in place so the unmap actually reaches the page table in memory. */
PT->Entries[Index.P_i].ClearFlags();
#if defined(__amd64__) || defined(__i386__)
asmv("invlpg (%0)"
:
: "r"(VirtualAddress)
: "memory");
#elif defined(__aarch64__)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
}
void Virtual::Unmap(void *VirtualAddress, uint64_t PageCount)
{
for (uint64_t i = 0; i < PageCount; i++)
this->Unmap((void *)((uint64_t)VirtualAddress + (i * PAGE_SIZE)));
}
Virtual::Virtual(PageTable *Table) { this->Table = Table; }
Virtual::~Virtual() {}
}
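Map and Unmap rely on PageMapIndexer (declared in memory.hpp, not shown in this diff) to split a virtual address into the four 9-bit table indices of x86-64 4-level paging. A sketch of the index math, using this code's own field names (PDP_i is the top-level index):

// Assumed index computation for a PageMapIndexer-like helper (the class is
// not part of this diff). Each level consumes 9 bits above the 12-bit offset.
struct PageMapIndexerSketch
{
    uint64_t PDP_i, PD_i, PT_i, P_i;
    explicit PageMapIndexerSketch(uint64_t VirtualAddress)
    {
        PDP_i = (VirtualAddress >> 39) & 0x1FF; // level-4 (PML4) index
        PD_i = (VirtualAddress >> 30) & 0x1FF;  // level-3 index
        PT_i = (VirtualAddress >> 21) & 0x1FF;  // level-2 index
        P_i = (VirtualAddress >> 12) & 0x1FF;   // level-1 (leaf) index
    }
};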

core/StackGuard.c Normal file
@@ -0,0 +1,58 @@
#include <types.h>
#include <debug.h>
#ifndef STACK_CHK_GUARD_VALUE
#if UINTPTR_MAX == UINT32_MAX
#define STACK_CHK_GUARD_VALUE 0x25F6CC8D
#else
#define STACK_CHK_GUARD_VALUE 0xBADFE2EC255A8572
#endif
#endif
__attribute__((weak)) uintptr_t __stack_chk_guard = 0;
__attribute__((weak)) uintptr_t __stack_chk_guard_init(void)
{
return STACK_CHK_GUARD_VALUE;
}
static void __attribute__((constructor, no_stack_protector)) __construct_stk_chk_guard(void)
{
if (__stack_chk_guard == 0)
__stack_chk_guard = __stack_chk_guard_init();
}
// https://opensource.apple.com/source/xnu/xnu-1504.7.4/libkern/stack_protector.c.auto.html
// static void __guard_setup(void) __attribute__((constructor));
// static void __guard_setup(void)
// {
// read_random(__stack_chk_guard, sizeof(__stack_chk_guard));
// }
__attribute__((weak, noreturn, no_stack_protector)) void __stack_chk_fail(void)
{
error("Stack smashing detected!", false);
for (;;)
{
#if defined(__amd64__) || defined(__i386__)
asmv("hlt");
#elif defined(__aarch64__)
asmv("wfe");
#endif
}
}
// https://github.com/gcc-mirror/gcc/blob/master/libssp/ssp.c
__attribute__((weak, noreturn, no_stack_protector)) void __chk_fail(void)
{
error("Buffer overflow detected!", false);
for (;;)
{
#if defined(__amd64__) || defined(__i386__)
asmv("hlt");
#elif defined(__aarch64__)
asmv("wfe");
#endif
}
}
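The guard value itself does nothing until the compiler, under -fstack-protector, plants it between a function's buffers and its return address and verifies it in the epilogue. Conceptually, a protected function behaves like the following sketch (illustrative pseudo-code, not code the compiler literally emits):

// Conceptual view of a stack-protected function; real canary placement is
// performed by the compiler, not by source-level code.
void ProtectedExample(const char *Input)
{
    uintptr_t Canary = __stack_chk_guard; // prologue: stash the guard
    char Buffer[16];
    for (int i = 0; Input[i]; i++)        // an overflow of Buffer clobbers
        Buffer[i] = Input[i];             // the canary before the return address
    if (Canary != __stack_chk_guard)      // epilogue: verify the guard
        __stack_chk_fail();               // logs and halts, never returns
}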