Mirror of https://github.com/Fennix-Project/Kernel.git (synced 2025-07-11 07:19:20 +00:00)
Commit: Update files
@@ -1,85 +0,0 @@
#include <debug.h>

#include <uart.hpp>
#include <printf.h>

using namespace UniversalAsynchronousReceiverTransmitter;

static inline void uart_wrapper(char c, void *unused)
{
    UART(COM1).Write(c);
    (void)unused;
}

static inline void WritePrefix(DebugLevel Level, const char *File, int Line, const char *Function)
{
    const char *DbgLvlString;
    switch (Level)
    {
    case DebugLevelError:
        DbgLvlString = "ERROR";
        break;
    case DebugLevelWarning:
        DbgLvlString = "WARN ";
        break;
    case DebugLevelInfo:
        DbgLvlString = "INFO ";
        break;
    case DebugLevelDebug:
        DbgLvlString = "DEBUG";
        break;
    case DebugLevelTrace:
        DbgLvlString = "TRACE";
        break;
    case DebugLevelFixme:
        DbgLvlString = "FIXME";
        break;
    default:
        DbgLvlString = "UNKNW";
        break;
    }
    fctprintf(uart_wrapper, nullptr, "%s|%s->%s:%d: ", DbgLvlString, File, Function, Line);
}

namespace SysDbg
{
    void Write(DebugLevel Level, const char *File, int Line, const char *Function, const char *Format, ...)
    {
        WritePrefix(Level, File, Line, Function);
        va_list args;
        va_start(args, Format);
        vfctprintf(uart_wrapper, nullptr, Format, args);
        va_end(args);
    }

    void WriteLine(DebugLevel Level, const char *File, int Line, const char *Function, const char *Format, ...)
    {
        WritePrefix(Level, File, Line, Function);
        va_list args;
        va_start(args, Format);
        vfctprintf(uart_wrapper, nullptr, Format, args);
        va_end(args);
        uart_wrapper('\n', nullptr);
    }
}

// C compatibility
extern "C" void SysDbgWrite(enum DebugLevel Level, const char *File, int Line, const char *Function, const char *Format, ...)
{
    WritePrefix(Level, File, Line, Function);
    va_list args;
    va_start(args, Format);
    vfctprintf(uart_wrapper, nullptr, Format, args);
    va_end(args);
}

// C compatibility
extern "C" void SysDbgWriteLine(enum DebugLevel Level, const char *File, int Line, const char *Function, const char *Format, ...)
{
    WritePrefix(Level, File, Line, Function);
    va_list args;
    va_start(args, Format);
    vfctprintf(uart_wrapper, nullptr, Format, args);
    va_end(args);
    uart_wrapper('\n', nullptr);
}
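The two extern "C" entry points above exist so C translation units can log through the same UART path. The project's own debug.h macros are not part of this diff; purely as an illustration of how a caller might wrap SysDbgWriteLine, with the macro name below being hypothetical:

// Hypothetical convenience macro; only SysDbgWriteLine and DebugLevelDebug come from the code above.
#define dbg(fmt, ...) SysDbgWriteLine(DebugLevelDebug, __FILE__, __LINE__, __FUNCTION__, fmt, ##__VA_ARGS__)

// dbg("value = %d", 5); would emit something like
// "DEBUG|somefile.c->SomeFunction:42: value = 5" followed by a newline on COM1.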
@@ -1,23 +0,0 @@
#include <interrupts.hpp>

#if defined(__amd64__)
#include "../arch/amd64/cpu/gdt.hpp"
#include "../arch/amd64/cpu/idt.hpp"
#elif defined(__i386__)
#include "../arch/i686/cpu/gdt.hpp"
#include "../arch/i686/cpu/idt.hpp"
#elif defined(__aarch64__)
#endif

namespace Interrupts
{
    void Initialize()
    {
#if defined(__amd64__)
        GlobalDescriptorTable::Init(0);
        InterruptDescriptorTable::Init(0);
#elif defined(__i386__)
#elif defined(__aarch64__)
#endif
    }
}
@@ -1,205 +0,0 @@
#include "Xalloc.hpp"

namespace Xalloc
{
    class SmartSMAPClass
    {
    private:
        AllocatorV1 *allocator = nullptr;

    public:
        SmartSMAPClass(AllocatorV1 *allocator)
        {
            this->allocator = allocator;
            this->allocator->Xstac();
        }
        ~SmartSMAPClass() { this->allocator->Xclac(); }
    };
#define SmartSMAP SmartSMAPClass XALLOC_CONCAT(SmartSMAP##_, __COUNTER__)(this)

    AllocatorV1::AllocatorV1(void *Address, bool UserMode, bool SMAPEnabled)
    {
        SmartSMAP;
        void *Position = Address;
        UserMapping = UserMode;
        SMAPUsed = SMAPEnabled;
        for (Xuint64_t i = 0; i < 0x20; i++)
        {
            void *Page = Xalloc_REQUEST_PAGE();
            if (UserMapping)
                Xalloc_MAP_MEMORY(Position, Page, Xalloc_MAP_MEMORY_READ_WRITE | Xalloc_MAP_MEMORY_USER);
            else
                Xalloc_MAP_MEMORY(Position, Page, Xalloc_MAP_MEMORY_READ_WRITE);
            Xalloc_trace("Preallocate Heap Memory (%#llx-%#llx [%#llx])...", Position, (Xuint64_t)Position + Xalloc_PAGE_SIZE, Page);
            Position = (void *)((Xuint64_t)Position + Xalloc_PAGE_SIZE);
        }
        Xuint64_t HeapLength = 16 * Xalloc_PAGE_SIZE;
        this->HeapStart = Address;
        this->HeapEnd = (void *)((Xuint64_t)this->HeapStart + HeapLength);
        HeapSegment *StartSegment = (HeapSegment *)Address;
        StartSegment->Length = HeapLength - sizeof(HeapSegment);
        StartSegment->Next = nullptr;
        StartSegment->Last = nullptr;
        StartSegment->IsFree = true;
        this->LastSegment = StartSegment;
    }

    AllocatorV1::~AllocatorV1()
    {
        SmartSMAP;
        Xalloc_trace("Destructor not implemented yet.");
    }

    void AllocatorV1::ExpandHeap(Xuint64_t Length)
    {
        if (Length % Xalloc_PAGE_SIZE) // round the requested length up to a whole number of pages
        {
            Length -= Length % Xalloc_PAGE_SIZE;
            Length += Xalloc_PAGE_SIZE;
        }
        Xuint64_t PageCount = Length / Xalloc_PAGE_SIZE;
        HeapSegment *NewSegment = (HeapSegment *)this->HeapEnd;
        for (Xuint64_t i = 0; i < PageCount; i++)
        {
            void *Page = Xalloc_REQUEST_PAGE();
            if (UserMapping)
                Xalloc_MAP_MEMORY(this->HeapEnd, Page, Xalloc_MAP_MEMORY_READ_WRITE | Xalloc_MAP_MEMORY_USER);
            else
                Xalloc_MAP_MEMORY(this->HeapEnd, Page, Xalloc_MAP_MEMORY_READ_WRITE);
            // Xalloc_trace("Expanding Heap Memory (%#llx-%#llx [%#llx])...", this->HeapEnd, (Xuint64_t)this->HeapEnd + Xalloc_PAGE_SIZE, Page);
            this->HeapEnd = (void *)((Xuint64_t)this->HeapEnd + Xalloc_PAGE_SIZE);
        }
        NewSegment->IsFree = true;
        NewSegment->Last = this->LastSegment;
        this->LastSegment->Next = NewSegment;
        this->LastSegment = NewSegment;
        NewSegment->Next = nullptr;
        NewSegment->Length = Length - sizeof(HeapSegment);
        NewSegment->CombineBackward(this->LastSegment);
    }

    void *AllocatorV1::Malloc(Xuint64_t Size)
    {
        SmartSMAP;
        if (this->HeapStart == nullptr)
        {
            Xalloc_err("Memory allocation not initialized yet!");
            return nullptr;
        }

        if (Size < 0x10)
        {
            // Xalloc_warn("Allocation size is too small, using 0x10 instead!");
            Size = 0x10;
        }

        // #ifdef DEBUG
        //     if (Size < 1024)
        //         debug("Allocating %dB", Size);
        //     else if (TO_KB(Size) < 1024)
        //         debug("Allocating %dKB", TO_KB(Size));
        //     else if (TO_MB(Size) < 1024)
        //         debug("Allocating %dMB", TO_MB(Size));
        //     else if (TO_GB(Size) < 1024)
        //         debug("Allocating %dGB", TO_GB(Size));
        // #endif

        if (Size % 0x10 > 0) // not a multiple of 0x10, round up
        {
            Size -= (Size % 0x10);
            Size += 0x10;
        }
        if (Size == 0)
            return nullptr;

        HeapSegment *CurrentSegment = (HeapSegment *)this->HeapStart;
        while (true)
        {
            if (CurrentSegment->IsFree)
            {
                if (CurrentSegment->Length > Size)
                {
                    CurrentSegment->Split(Size, this->LastSegment);
                    CurrentSegment->IsFree = false;
                    return (void *)((Xuint64_t)CurrentSegment + sizeof(HeapSegment));
                }
                if (CurrentSegment->Length == Size)
                {
                    CurrentSegment->IsFree = false;
                    return (void *)((Xuint64_t)CurrentSegment + sizeof(HeapSegment));
                }
            }
            if (CurrentSegment->Next == nullptr)
                break;
            CurrentSegment = CurrentSegment->Next;
        }
        ExpandHeap(Size);
        return this->Malloc(Size);
    }

    void AllocatorV1::Free(void *Address)
    {
        SmartSMAP;
        if (this->HeapStart == nullptr)
        {
            Xalloc_err("Memory allocation not initialized yet!");
            return;
        }
        HeapSegment *Segment = (HeapSegment *)Address - 1;
        Segment->IsFree = true;
        Segment->CombineForward(this->LastSegment);
        Segment->CombineBackward(this->LastSegment);
    }

    void *AllocatorV1::Calloc(Xuint64_t NumberOfBlocks, Xuint64_t Size)
    {
        SmartSMAP;
        if (this->HeapStart == nullptr)
        {
            Xalloc_err("Memory allocation not initialized yet!");
            return nullptr;
        }

        if (Size < 0x10)
        {
            // Xalloc_warn("Allocation size is too small, using 0x10 instead!");
            Size = 0x10;
        }

        void *Block = this->Malloc(NumberOfBlocks * Size);
        if (Block)
            Xmemset(Block, 0, NumberOfBlocks * Size);
        return Block;
    }

    void *AllocatorV1::Realloc(void *Address, Xuint64_t Size)
    {
        SmartSMAP;
        if (this->HeapStart == nullptr)
        {
            Xalloc_err("Memory allocation not initialized yet!");
            return nullptr;
        }
        if (Address && Size == 0) // resizing an existing block to zero frees it
        {
            this->Free(Address);
            return nullptr;
        }
        else if (!Address)
        {
            return this->Calloc(Size, sizeof(char));
        }

        if (Size < 0x10)
        {
            // Xalloc_warn("Allocation size is too small, using 0x10 instead!");
            Size = 0x10;
        }

        void *newAddress = this->Calloc(Size, sizeof(char));
        // NOTE: copies Size bytes from the old block; the original length is not tracked.
        Xmemcpy(newAddress, Address, Size);
        this->Free(Address); // release the old block once its contents have been copied
        return newAddress;
    }
}
@@ -1,180 +0,0 @@
#pragma once
#include <memory.hpp>
#include <debug.h>

// Function defines

// Page allocation functions
#define Xalloc_REQUEST_PAGE() KernelAllocator.RequestPage()
#define Xalloc_REQUEST_PAGES(Pages) KernelAllocator.RequestPages(Pages)
#define Xalloc_FREE_PAGE(Address) KernelAllocator.FreePage(Address)
#define Xalloc_FREE_PAGES(Address, Pages) KernelAllocator.FreePages(Address, Pages)

#define Xalloc_MAP_MEMORY(VirtualAddress, PhysicalAddress, Flags) Memory::Virtual(KernelPageTable).Map(VirtualAddress, PhysicalAddress, Flags)
#define Xalloc_UNMAP_MEMORY(VirtualAddress) Memory::Virtual(KernelPageTable).Unmap(VirtualAddress)
#define Xalloc_MAP_MEMORY_READ_WRITE Memory::PTFlag::RW
#define Xalloc_MAP_MEMORY_USER Memory::PTFlag::US

#define Xalloc_PAGE_SIZE PAGE_SIZE

#define Xalloc_trace(m, ...) trace(m, ##__VA_ARGS__)
#define Xalloc_warn(m, ...) warn(m, ##__VA_ARGS__)
#define Xalloc_err(m, ...) error(m, ##__VA_ARGS__)

#define XALLOC_CONCAT(x, y) x##y

typedef long unsigned Xuint64_t;

namespace Xalloc
{
    class AllocatorV1
    {
    private:
        struct HeapSegment
        {
            Xuint64_t Length;
            HeapSegment *Next;
            HeapSegment *Last;
            bool IsFree;

            HeapSegment *Split(Xuint64_t SplitLength, HeapSegment *LastSegment)
            {
                if (SplitLength < 0x10)
                    return nullptr;
                int64_t SplitSegmentLength = Length - SplitLength - (sizeof(HeapSegment));
                if (SplitSegmentLength < 0x10)
                    return nullptr;
                HeapSegment *NewSplitHdr = (HeapSegment *)((Xuint64_t)this + SplitLength + sizeof(HeapSegment));
                Next->Last = NewSplitHdr;
                NewSplitHdr->Next = Next;
                Next = NewSplitHdr;
                NewSplitHdr->Last = this;
                NewSplitHdr->Length = SplitSegmentLength;
                NewSplitHdr->IsFree = IsFree;
                Length = SplitLength;
                if (LastSegment == this)
                    LastSegment = NewSplitHdr;
                return NewSplitHdr;
            }

            void CombineForward(HeapSegment *LastSegment)
            {
                if (Next == nullptr)
                    return;
                if (Next->IsFree == false)
                    return;
                if (Next == LastSegment)
                    LastSegment = this;
                if (Next->Next != nullptr)
                    Next->Next->Last = this;

                Length = Length + Next->Length + sizeof(HeapSegment);
                Next = Next->Next;
            }

            void CombineBackward(HeapSegment *LastSegment)
            {
                if (Last != nullptr && Last->IsFree)
                    Last->CombineForward(LastSegment);
            }
        } __attribute__((aligned(16)));

        void *HeapStart = nullptr;
        void *HeapEnd = nullptr;
        HeapSegment *LastSegment = nullptr;
        bool UserMapping = false;
        bool SMAPUsed = false;

        void ExpandHeap(Xuint64_t Length);

        // TODO: Replace memcpy with an optimized version
        static inline void *Xmemcpy(void *__restrict__ Destination, const void *__restrict__ Source, Xuint64_t Length)
        {
            unsigned char *dst = (unsigned char *)Destination;
            const unsigned char *src = (const unsigned char *)Source;
            for (Xuint64_t i = 0; i < Length; i++)
                dst[i] = src[i];
            return Destination;
        }

        // TODO: Replace memset with an optimized version
        static inline void *Xmemset(void *__restrict__ Destination, int Data, Xuint64_t Length)
        {
            unsigned char *Buffer = (unsigned char *)Destination;
            for (Xuint64_t i = 0; i < Length; i++)
                Buffer[i] = (unsigned char)Data;
            return Destination;
        }

    public:
        inline void Xstac()
        {
            if (this->SMAPUsed)
            {
#if defined(__amd64__) || defined(__i386__)
                asm volatile("stac" ::
                                 : "cc");
#endif
            }
        }

        inline void Xclac()
        {
            if (this->SMAPUsed)
            {
#if defined(__amd64__) || defined(__i386__)
                asm volatile("clac" ::
                                 : "cc");
#endif
            }
        }

        /**
         * @brief Construct a new AllocatorV1 object
         *
         * @param Address Virtual address at which to place the heap.
         * @param UserMode Map the new pages with the USER flag?
         * @param SMAPEnabled Does the kernel have Supervisor Mode Access Prevention enabled?
         */
        AllocatorV1(void *Address, bool UserMode, bool SMAPEnabled);

        /**
         * @brief Destroy the AllocatorV1 object
         */
        ~AllocatorV1();

        /**
         * @brief Allocate a new memory block
         *
         * @param Size Size of the block to allocate.
         * @return void* Pointer to the allocated block.
         */
        void *Malloc(Xuint64_t Size);

        /**
         * @brief Free a previously allocated block
         *
         * @param Address Address of the block to free.
         */
        void Free(void *Address);

        /**
         * @brief Allocate a new, zero-initialized memory block
         *
         * @param NumberOfBlocks Number of blocks to allocate.
         * @param Size Size of each block.
         * @return void* Pointer to the allocated block.
         */
        void *Calloc(Xuint64_t NumberOfBlocks, Xuint64_t Size);

        /**
         * @brief Reallocate a previously allocated block
         *
         * @param Address Address of the block to reallocate.
         * @param Size New size of the block.
         * @return void* Pointer to the reallocated block.
         */
        void *Realloc(void *Address, Xuint64_t Size);
    };
}
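The memory manager later in this commit constructs this allocator at KERNEL_HEAP_BASE; a minimal usage sketch based on that call site, with the block sizes chosen purely for illustration:

// Place the heap at a known virtual base; kernel-only mappings, SMAP disabled.
Xalloc::AllocatorV1 *Heap = new Xalloc::AllocatorV1((void *)KERNEL_HEAP_BASE, false, false);
void *Buffer = Heap->Malloc(256);    // 16-byte aligned block carved from the segment list
Buffer = Heap->Realloc(Buffer, 512); // grows by copy; the old block is freed
Heap->Free(Buffer);                  // segment is marked free and merged with its neighbors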
@@ -1,215 +0,0 @@
#include <memory.hpp>

#include <string.h>
#include <debug.h>

#include "HeapAllocators/Xalloc.hpp"
#include "../lib/liballoc_1_1.h"

using namespace Memory;

Physical KernelAllocator;
PageTable *KernelPageTable = nullptr;

enum MemoryAllocatorType
{
    None,
    Pages,
    XallocV1,
    liballoc11
};

static MemoryAllocatorType AllocatorType = MemoryAllocatorType::None;
Xalloc::AllocatorV1 *XallocV1Allocator = nullptr;

#ifdef DEBUG
void tracepagetable(PageTable *pt)
{
    for (int i = 0; i < 512; i++)
    {
        if (pt->Entries[i].Value.Present)
            debug("Entry %03d: %x %x %x %x %x %x %x %x %x %x %x %p-%#lx", i,
                  pt->Entries[i].Value.Present, pt->Entries[i].Value.ReadWrite,
                  pt->Entries[i].Value.UserSupervisor, pt->Entries[i].Value.WriteThrough,
                  pt->Entries[i].Value.CacheDisable, pt->Entries[i].Value.Accessed,
                  pt->Entries[i].Value.Dirty, pt->Entries[i].Value.PageSize,
                  pt->Entries[i].Value.Global, pt->Entries[i].Value.PageAttributeTable,
                  pt->Entries[i].Value.ExecuteDisable, pt->Entries[i].GetAddress(),
                  pt->Entries[i].Value);
    }
}
#endif

void InitializeMemoryManagement(BootInfo *Info)
{
    trace("Initializing Physical Memory Manager");
    KernelAllocator = Physical();
    KernelAllocator.Init(Info);
    debug("Memory Info: %dMB / %dMB (%dMB reserved)",
          TO_MB(KernelAllocator.GetUsedMemory()),
          TO_MB(KernelAllocator.GetTotalMemory()),
          TO_MB(KernelAllocator.GetReservedMemory()));

    AllocatorType = MemoryAllocatorType::Pages;

    trace("Initializing Virtual Memory Manager");
    KernelPageTable = (PageTable *)KernelAllocator.RequestPage();
    memset(KernelPageTable, 0, PAGE_SIZE);
    Virtual kva = Virtual(KernelPageTable);

    uint64_t KernelStart = (uint64_t)&_kernel_start;
    uint64_t KernelTextEnd = (uint64_t)&_kernel_text_end;
    uint64_t KernelDataEnd = (uint64_t)&_kernel_data_end;
    uint64_t KernelRoDataEnd = (uint64_t)&_kernel_rodata_end;
    uint64_t KernelEnd = (uint64_t)&_kernel_end;

    uint64_t VirtualOffsetNormalVMA = NORMAL_VMA_OFFSET;
    uint64_t BaseKernelMapAddress = (uint64_t)Info->Kernel.PhysicalBase;

    for (uint64_t t = 0; t < Info->Memory.Size; t += PAGE_SIZE)
    {
        kva.Map((void *)t, (void *)t, PTFlag::RW);
        kva.Map((void *)VirtualOffsetNormalVMA, (void *)t, PTFlag::RW);
        VirtualOffsetNormalVMA += PAGE_SIZE;
    }

    /* Mapping Framebuffer address */
    int itrfb = 0;
    while (1)
    {
        if (!Info->Framebuffer[itrfb].BaseAddress)
            break;

        for (uint64_t fb_base = (uint64_t)Info->Framebuffer[itrfb].BaseAddress;
             fb_base < ((uint64_t)Info->Framebuffer[itrfb].BaseAddress + ((Info->Framebuffer[itrfb].Pitch * Info->Framebuffer[itrfb].Height) + PAGE_SIZE));
             fb_base += PAGE_SIZE)
            kva.Map((void *)(fb_base + NORMAL_VMA_OFFSET), (void *)fb_base, PTFlag::RW | PTFlag::US);
        itrfb++;
    }

    /* Kernel mapping */
    for (uint64_t k = KernelStart; k < KernelTextEnd; k += PAGE_SIZE)
    {
        kva.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW);
        KernelAllocator.LockPage((void *)BaseKernelMapAddress);
        BaseKernelMapAddress += PAGE_SIZE;
    }

    for (uint64_t k = KernelTextEnd; k < KernelDataEnd; k += PAGE_SIZE)
    {
        kva.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW);
        KernelAllocator.LockPage((void *)BaseKernelMapAddress);
        BaseKernelMapAddress += PAGE_SIZE;
    }

    for (uint64_t k = KernelDataEnd; k < KernelRoDataEnd; k += PAGE_SIZE)
    {
        kva.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::P);
        KernelAllocator.LockPage((void *)BaseKernelMapAddress);
        BaseKernelMapAddress += PAGE_SIZE;
    }

    for (uint64_t k = KernelRoDataEnd; k < KernelEnd; k += PAGE_SIZE)
    {
        kva.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW);
        KernelAllocator.LockPage((void *)BaseKernelMapAddress);
        BaseKernelMapAddress += PAGE_SIZE;
    }

    debug("\nStart: %#llx - Text End: %#llx - RoEnd: %#llx - End: %#llx\nStart Physical: %#llx - End Physical: %#llx",
          KernelStart, KernelTextEnd, KernelRoDataEnd, KernelEnd, Info->Kernel.PhysicalBase, BaseKernelMapAddress - PAGE_SIZE);

    /* Layout: KernelStart & text start ------ KernelTextEnd ------ KernelRoDataEnd ------ KernelDataEnd & KernelEnd */
    trace("Applying new page table from address %p", KernelPageTable);
#ifdef DEBUG
    tracepagetable(KernelPageTable);
#endif
#if defined(__amd64__) || defined(__i386__)
    asmv("mov %0, %%cr3" ::"r"(KernelPageTable));
#elif defined(__aarch64__)
    asmv("msr ttbr0_el1, %0" ::"r"(KernelPageTable));
#endif
    if (strstr(Info->Kernel.CommandLine, "xallocv1"))
    {
        XallocV1Allocator = new Xalloc::AllocatorV1((void *)KERNEL_HEAP_BASE, false, false);
        AllocatorType = MemoryAllocatorType::XallocV1;
        trace("XallocV1 Allocator initialized (%p)", XallocV1Allocator);
    }
    else if (strstr(Info->Kernel.CommandLine, "liballoc11"))
    {
        AllocatorType = MemoryAllocatorType::liballoc11;
    }
}

void *HeapMalloc(uint64_t Size)
{
    switch (AllocatorType)
    {
    case MemoryAllocatorType::Pages:
        return KernelAllocator.RequestPages(TO_PAGES(Size));
    case MemoryAllocatorType::XallocV1:
        return XallocV1Allocator->Malloc(Size);
    case MemoryAllocatorType::liballoc11:
        return PREFIX(malloc)(Size);
    default:
        throw;
    }
}

void *HeapCalloc(uint64_t n, uint64_t Size)
{
    switch (AllocatorType)
    {
    case MemoryAllocatorType::Pages:
        return KernelAllocator.RequestPages(TO_PAGES(n * Size));
    case MemoryAllocatorType::XallocV1:
        return XallocV1Allocator->Calloc(n, Size);
    case MemoryAllocatorType::liballoc11:
        return PREFIX(calloc)(n, Size);
    default:
        throw;
    }
}

void *HeapRealloc(void *Address, uint64_t Size)
{
    switch (AllocatorType)
    {
    case MemoryAllocatorType::Pages:
        return KernelAllocator.RequestPages(TO_PAGES(Size)); // WARNING: Potential memory leak
    case MemoryAllocatorType::XallocV1:
        return XallocV1Allocator->Realloc(Address, Size);
    case MemoryAllocatorType::liballoc11:
        return PREFIX(realloc)(Address, Size);
    default:
        throw;
    }
}

void HeapFree(void *Address)
{
    switch (AllocatorType)
    {
    case MemoryAllocatorType::Pages:
        KernelAllocator.FreePage(Address); // WARNING: Potential memory leak
        break;
    case MemoryAllocatorType::XallocV1:
        XallocV1Allocator->Free(Address);
        break;
    case MemoryAllocatorType::liballoc11:
        PREFIX(free)(Address);
        break;
    default:
        throw;
    }
}

void *operator new(uint64_t Size) { return HeapMalloc(Size); }
void *operator new[](uint64_t Size) { return HeapMalloc(Size); }
void operator delete(void *Pointer) { HeapFree(Pointer); }
void operator delete[](void *Pointer) { HeapFree(Pointer); }
void operator delete(void *Pointer, long unsigned int Size) { HeapFree(Pointer); }
void operator delete[](void *Pointer, long unsigned int Size) { HeapFree(Pointer); }
@@ -1,266 +0,0 @@
#include <memory.hpp>

#include <debug.h>

namespace Memory
{
    uint64_t Physical::GetTotalMemory()
    {
        SMARTLOCK(this->MemoryLock);
        return this->TotalMemory;
    }

    uint64_t Physical::GetFreeMemory()
    {
        SMARTLOCK(this->MemoryLock);
        return this->FreeMemory;
    }

    uint64_t Physical::GetReservedMemory()
    {
        SMARTLOCK(this->MemoryLock);
        return this->ReservedMemory;
    }

    uint64_t Physical::GetUsedMemory()
    {
        SMARTLOCK(this->MemoryLock);
        return this->UsedMemory;
    }

    bool Physical::SwapPage(void *Address)
    {
        fixme("%p", Address);
        return false;
    }

    bool Physical::SwapPages(void *Address, uint64_t PageCount)
    {
        for (uint64_t i = 0; i < PageCount; i++)
            if (!this->SwapPage((void *)((uint64_t)Address + (i * PAGE_SIZE))))
                return false;
        return true; // every page was swapped successfully
    }

    bool Physical::UnswapPage(void *Address)
    {
        fixme("%p", Address);
        return false;
    }

    bool Physical::UnswapPages(void *Address, uint64_t PageCount)
    {
        for (uint64_t i = 0; i < PageCount; i++)
            if (!this->UnswapPage((void *)((uint64_t)Address + (i * PAGE_SIZE))))
                return false;
        return true; // every page was unswapped successfully
    }

    void *Physical::RequestPage()
    {
        SMARTLOCK(this->MemoryLock);
        for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
        {
            if (PageBitmap[PageBitmapIndex] == true)
                continue;
            this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
            return (void *)(PageBitmapIndex * PAGE_SIZE);
        }

        if (this->SwapPage((void *)(PageBitmapIndex * PAGE_SIZE)))
        {
            this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
            return (void *)(PageBitmapIndex * PAGE_SIZE);
        }

        error("Out of memory! (Free: %ldMB; Used: %ldMB; Reserved: %ldMB)", TO_MB(FreeMemory), TO_MB(UsedMemory), TO_MB(ReservedMemory));
        while (1)
            CPU::Halt();
        return nullptr;
    }

    void *Physical::RequestPages(uint64_t Count)
    {
        SMARTLOCK(this->MemoryLock);
        for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
        {
            if (PageBitmap[PageBitmapIndex] == true)
                continue;

            for (uint64_t Index = PageBitmapIndex; Index < PageBitmap.Size * 8; Index++)
            {
                if (PageBitmap[Index] == true)
                    continue;

                for (uint64_t i = 0; i < Count; i++)
                    if (PageBitmap[Index + i] == true)
                        goto NextPage;

                this->LockPages((void *)(Index * PAGE_SIZE), Count);
                return (void *)(Index * PAGE_SIZE);

            NextPage:
                Index += Count;
                continue;
            }
        }

        if (this->SwapPages((void *)(PageBitmapIndex * PAGE_SIZE), Count))
        {
            this->LockPages((void *)(PageBitmapIndex * PAGE_SIZE), Count);
            return (void *)(PageBitmapIndex * PAGE_SIZE);
        }

        error("Out of memory! (Free: %ldMB; Used: %ldMB; Reserved: %ldMB)", TO_MB(FreeMemory), TO_MB(UsedMemory), TO_MB(ReservedMemory));
        while (1)
            CPU::Halt();
        return nullptr;
    }

    void Physical::FreePage(void *Address)
    {
        SMARTLOCK(this->MemoryLock);
        if (Address == nullptr)
        {
            warn("Null pointer passed to FreePage.");
            return;
        }
        uint64_t Index = (uint64_t)Address / PAGE_SIZE;
        if (PageBitmap[Index] == false)
            return;

        if (PageBitmap.Set(Index, false))
        {
            FreeMemory += PAGE_SIZE;
            UsedMemory -= PAGE_SIZE;
            if (PageBitmapIndex > Index)
                PageBitmapIndex = Index;
        }
    }

    void Physical::FreePages(void *Address, uint64_t Count)
    {
        if (Address == nullptr || Count == 0)
        {
            warn("%s%s passed to FreePages.", Address == nullptr ? "Null pointer" : "", Count == 0 ? "Zero count" : "");
            return;
        }

        for (uint64_t t = 0; t < Count; t++)
            this->FreePage((void *)((uint64_t)Address + (t * PAGE_SIZE)));
    }

    void Physical::LockPage(void *Address)
    {
        if (Address == nullptr)
            warn("Trying to lock null address.");

        uint64_t Index = (uint64_t)Address / PAGE_SIZE;
        if (PageBitmap[Index] == true)
            return;
        if (PageBitmap.Set(Index, true))
        {
            FreeMemory -= PAGE_SIZE;
            UsedMemory += PAGE_SIZE;
        }
    }

    void Physical::LockPages(void *Address, uint64_t PageCount)
    {
        if (Address == nullptr || PageCount == 0)
            warn("Trying to lock %s%s.", Address == nullptr ? "null address" : "", PageCount == 0 ? "0 pages" : "");

        for (uint64_t i = 0; i < PageCount; i++)
            this->LockPage((void *)((uint64_t)Address + (i * PAGE_SIZE)));
    }

    void Physical::ReservePage(void *Address)
    {
        if (Address == nullptr)
            warn("Trying to reserve null address.");

        uint64_t Index = (uint64_t)Address / PAGE_SIZE;
        if (PageBitmap[Index] == true)
            return;

        if (PageBitmap.Set(Index, true))
        {
            FreeMemory -= PAGE_SIZE;
            ReservedMemory += PAGE_SIZE;
        }
    }

    void Physical::ReservePages(void *Address, uint64_t PageCount)
    {
        if (Address == nullptr || PageCount == 0)
            warn("Trying to reserve %s%s.", Address == nullptr ? "null address" : "", PageCount == 0 ? "0 pages" : "");

        for (uint64_t t = 0; t < PageCount; t++)
            this->ReservePage((void *)((uint64_t)Address + (t * PAGE_SIZE)));
    }

    void Physical::UnreservePage(void *Address)
    {
        if (Address == nullptr)
            warn("Trying to unreserve null address.");

        uint64_t Index = (uint64_t)Address / PAGE_SIZE;
        if (PageBitmap[Index] == false)
            return;

        if (PageBitmap.Set(Index, false))
        {
            FreeMemory += PAGE_SIZE;
            ReservedMemory -= PAGE_SIZE;
            if (PageBitmapIndex > Index)
                PageBitmapIndex = Index;
        }
    }

    void Physical::UnreservePages(void *Address, uint64_t PageCount)
    {
        if (Address == nullptr || PageCount == 0)
            warn("Trying to unreserve %s%s.", Address == nullptr ? "null address" : "", PageCount == 0 ? "0 pages" : "");

        for (uint64_t t = 0; t < PageCount; t++)
            this->UnreservePage((void *)((uint64_t)Address + (t * PAGE_SIZE)));
    }

    void Physical::Init(BootInfo *Info)
    {
        SMARTLOCK(this->MemoryLock);
        void *LargestFreeMemorySegment = nullptr;
        uint64_t LargestFreeMemorySegmentSize = 0;
        uint64_t MemorySize = Info->Memory.Size;

        for (uint64_t i = 0; i < Info->Memory.Entries; i++)
            if (Info->Memory.Entry[i].Type == Usable)
                if (Info->Memory.Entry[i].Length > LargestFreeMemorySegmentSize)
                {
                    LargestFreeMemorySegment = (void *)Info->Memory.Entry[i].BaseAddress;
                    LargestFreeMemorySegmentSize = Info->Memory.Entry[i].Length;
                    debug("Largest free memory segment: %p (%dKB)",
                          (void *)Info->Memory.Entry[i].BaseAddress,
                          TO_KB(Info->Memory.Entry[i].Length));
                }
        TotalMemory = MemorySize;
        FreeMemory = MemorySize;

        uint64_t BitmapSize = ALIGN_UP((MemorySize / 0x1000) / 8, 0x1000);
        trace("Initializing Bitmap (%p %dKB)", LargestFreeMemorySegment, TO_KB(BitmapSize));
        PageBitmap.Size = BitmapSize;
        PageBitmap.Buffer = (uint8_t *)LargestFreeMemorySegment;
        for (uint64_t i = 0; i < BitmapSize; i++)
            *(uint8_t *)(PageBitmap.Buffer + i) = 0;

        this->ReservePages(0, MemorySize / PAGE_SIZE + 1);
        for (uint64_t i = 0; i < Info->Memory.Entries; i++)
            if (Info->Memory.Entry[i].Type == Usable)
                this->UnreservePages((void *)Info->Memory.Entry[i].BaseAddress, Info->Memory.Entry[i].Length / PAGE_SIZE + 1);
        this->ReservePages(0, 0x100); // Reserve the region between 0 and 0x100000
        this->LockPages(PageBitmap.Buffer, PageBitmap.Size / PAGE_SIZE + 1);
    }

    Physical::Physical() {}
    Physical::~Physical() {}
}
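A short usage sketch tying the pieces above together; the bitmap tracks one bit per 4 KiB physical frame, so an address maps to bit index Address / PAGE_SIZE. The page count below is only an illustrative value:

// Grab enough contiguous frames for a 16 KiB buffer, then release them.
void *Frames = KernelAllocator.RequestPages(4); // marks bits Index..Index+3 as used
// ... use the frames (identity-mapped earlier in InitializeMemoryManagement) ...
KernelAllocator.FreePages(Frames, 4);           // clears the bits and rewinds PageBitmapIndex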
@@ -1,130 +0,0 @@
#include <memory.hpp>

#include <string.h>
#include <debug.h>

namespace Memory
{
    void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
    {
        SMARTLOCK(this->MemoryLock);
        if (!this->Table)
        {
            error("No page table");
            return;
        }
        PageMapIndexer Index = PageMapIndexer((uint64_t)VirtualAddress);
        PageDirectoryEntry PDE = this->Table->Entries[Index.PDP_i];
        PageTable *PDP;
        if (!PDE.GetFlag(PTFlag::P))
        {
            PDP = (PageTable *)KernelAllocator.RequestPage();
            memset(PDP, 0, PAGE_SIZE);
            PDE.SetAddress((uint64_t)PDP >> 12);
            PDE.SetFlag(PTFlag::P, true);
            PDE.AddFlag(Flags);
            this->Table->Entries[Index.PDP_i] = PDE;
        }
        else
            PDP = (PageTable *)((uint64_t)PDE.GetAddress() << 12);

        PDE = PDP->Entries[Index.PD_i];
        PageTable *PD;
        if (!PDE.GetFlag(PTFlag::P))
        {
            PD = (PageTable *)KernelAllocator.RequestPage();
            memset(PD, 0, PAGE_SIZE);
            PDE.SetAddress((uint64_t)PD >> 12);
            PDE.SetFlag(PTFlag::P, true);
            PDE.AddFlag(Flags);
            PDP->Entries[Index.PD_i] = PDE;
        }
        else
            PD = (PageTable *)((uint64_t)PDE.GetAddress() << 12);

        PDE = PD->Entries[Index.PT_i];
        PageTable *PT;
        if (!PDE.GetFlag(PTFlag::P))
        {
            PT = (PageTable *)KernelAllocator.RequestPage();
            memset(PT, 0, PAGE_SIZE);
            PDE.SetAddress((uint64_t)PT >> 12);
            PDE.SetFlag(PTFlag::P, true);
            PDE.AddFlag(Flags);
            PD->Entries[Index.PT_i] = PDE;
        }
        else
            PT = (PageTable *)((uint64_t)PDE.GetAddress() << 12);

        PDE = PT->Entries[Index.P_i];
        PDE.SetAddress((uint64_t)PhysicalAddress >> 12);
        PDE.SetFlag(PTFlag::P, true);
        PDE.AddFlag(Flags);
        PT->Entries[Index.P_i] = PDE;
#if defined(__amd64__) || defined(__i386__)
        asmv("invlpg (%0)"
             :
             : "r"(VirtualAddress)
             : "memory");
#elif defined(__aarch64__)
        asmv("dsb sy");
        asmv("tlbi vae1is, %0"
             :
             : "r"(VirtualAddress)
             : "memory");
        asmv("dsb sy");
        asmv("isb");
#endif
    }

    void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t PageCount, uint64_t Flags)
    {
        for (uint64_t i = 0; i < PageCount; i++)
            this->Map((void *)((uint64_t)VirtualAddress + (i * PAGE_SIZE)), (void *)((uint64_t)PhysicalAddress + (i * PAGE_SIZE)), Flags);
    }

    void Virtual::Unmap(void *VirtualAddress)
    {
        SMARTLOCK(this->MemoryLock);
        if (!this->Table)
        {
            error("No page table");
            return;
        }

        PageMapIndexer Index = PageMapIndexer((uint64_t)VirtualAddress);
        PageDirectoryEntry PDE = this->Table->Entries[Index.PDP_i];
        PDE.ClearFlags();

#if defined(__amd64__) || defined(__i386__)
        asmv("invlpg (%0)"
             :
             : "r"(VirtualAddress)
             : "memory");
#elif defined(__aarch64__)
        asmv("dsb sy");
        asmv("tlbi vae1is, %0"
             :
             : "r"(VirtualAddress)
             : "memory");
        asmv("dsb sy");
        asmv("isb");
#endif
    }

    void Virtual::Unmap(void *VirtualAddress, uint64_t PageCount)
    {
        for (uint64_t i = 0; i < PageCount; i++)
            this->Unmap((void *)((uint64_t)VirtualAddress + (i * PAGE_SIZE)));
    }

    Virtual::Virtual(PageTable *Table)
    {
        if (Table)
            this->Table = Table;
        else
            this->Table = (PageTable *)CPU::PageTable();
    }

    Virtual::~Virtual() {}
}
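Elsewhere in this commit the walker is driven through Memory::Virtual(KernelPageTable); a minimal sketch of that pattern, with the address below chosen purely as an example of an identity mapping:

// Map one page read/write into the kernel page table, then tear it down again.
Memory::Virtual kva = Memory::Virtual(KernelPageTable);
kva.Map((void *)0xFEE00000, (void *)0xFEE00000, Memory::PTFlag::RW); // identity map, flushes the TLB entry
kva.Unmap((void *)0xFEE00000);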
@@ -1,22 +0,0 @@
# Core components

This directory contains the core components of the project. These components are used by the kernel to provide the basic functionality of the operating system.

---

## 💾 Memory

Contains the memory management code.
It is responsible for allocating and freeing memory.
It also provides the `kmalloc`, `kcalloc`, `krealloc` and `kfree` functions that are used by the rest of the kernel.
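
The `kmalloc` family itself is not shown in this commit; based on the heap entry points that are (`HeapMalloc`, `HeapCalloc`, `HeapRealloc`, `HeapFree` in the memory manager above), a plausible sketch of the mapping is:

```cpp
// Hypothetical thin wrappers; the names and placement are assumptions,
// only the Heap* functions below are defined in this commit.
static inline void *kmalloc(uint64_t Size) { return HeapMalloc(Size); }
static inline void *kcalloc(uint64_t n, uint64_t Size) { return HeapCalloc(n, Size); }
static inline void *krealloc(void *Address, uint64_t Size) { return HeapRealloc(Address, Size); }
static inline void kfree(void *Address) { HeapFree(Address); }
```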
## 📺 Video

Contains the video management code.
It is responsible for printing text to the screen.

## 🖥 CPU

Contains the CPU management code.
It is responsible for initializing the GDT and IDT.
More related code lives in the `arch` directory.
@@ -1,58 +0,0 @@
#include <types.h>
#include <debug.h>

#ifndef STACK_CHK_GUARD_VALUE
#if UINTPTR_MAX == UINT32_MAX
#define STACK_CHK_GUARD_VALUE 0x25F6CC8D
#else
#define STACK_CHK_GUARD_VALUE 0xBADFE2EC255A8572
#endif
#endif

__attribute__((weak)) uintptr_t __stack_chk_guard = 0;

__attribute__((weak)) uintptr_t __stack_chk_guard_init(void)
{
    return STACK_CHK_GUARD_VALUE;
}

static void __attribute__((constructor, no_stack_protector)) __construct_stk_chk_guard()
{
    if (__stack_chk_guard == 0)
        __stack_chk_guard = __stack_chk_guard_init();
}

// https://opensource.apple.com/source/xnu/xnu-1504.7.4/libkern/stack_protector.c.auto.html
// static void __guard_setup(void) __attribute__((constructor));
//
// static void __guard_setup(void)
// {
//     read_random(__stack_chk_guard, sizeof(__stack_chk_guard));
// }

__attribute__((weak, noreturn, no_stack_protector)) void __stack_chk_fail(void)
{
    error("Stack smashing detected!", false);
    for (;;)
    {
#if defined(__amd64__) || defined(__i386__)
        asmv("hlt");
#elif defined(__aarch64__)
        asmv("wfe");
#endif
    }
}

// https://github.com/gcc-mirror/gcc/blob/master/libssp/ssp.c
__attribute__((weak, noreturn, no_stack_protector)) void __chk_fail(void)
{
    error("Buffer overflow detected!", false);
    for (;;)
    {
#if defined(__amd64__) || defined(__i386__)
        asmv("hlt");
#elif defined(__aarch64__)
        asmv("wfe");
#endif
    }
}
core/Symbols.cpp
@@ -1,143 +0,0 @@
#include <symbols.hpp>
#include <memory.hpp>
#include <string.h>
#include <debug.h>

// #pragma GCC diagnostic ignored "-Wignored-qualifiers"

typedef struct
{
    unsigned char e_ident[16];
    uint16_t e_type;
    uint16_t e_machine;
    uint32_t e_version;
    uint64_t e_entry;
    uint64_t e_phoff;
    uint64_t e_shoff;
    uint32_t e_flags;
    uint16_t e_ehsize;
    uint16_t e_phentsize;
    uint16_t e_phnum;
    uint16_t e_shentsize;
    uint16_t e_shnum;
    uint16_t e_shstrndx;
} Elf64_Ehdr;

typedef struct
{
    uint32_t sh_name;
    uint32_t sh_type;
    uint64_t sh_flags;
    uint64_t sh_addr;
    uint64_t sh_offset;
    uint64_t sh_size;
    uint32_t sh_link;
    uint32_t sh_info;
    uint64_t sh_addralign;
    uint64_t sh_entsize;
} Elf64_Shdr;

typedef struct
{
    uint32_t st_name;
    unsigned char st_info;
    unsigned char st_other;
    uint16_t st_shndx;
    uint64_t st_value;
    uint64_t st_size;
} Elf64_Sym;

#define SHT_SYMTAB 2
#define SHT_STRTAB 3

namespace SymbolResolver
{
    Symbols::SymbolTable *SymTable = nullptr;
    uint64_t TotalEntries = 0;

    Symbols::Symbols(uint64_t Address)
    {
        debug("Resolving symbols for address: %#lx", Address);
        Elf64_Ehdr *Header = (Elf64_Ehdr *)Address;
        if (Header->e_ident[0] != 0x7F ||
            Header->e_ident[1] != 'E' ||
            Header->e_ident[2] != 'L' ||
            Header->e_ident[3] != 'F')
        {
            error("Invalid ELF header");
            return;
        }
        Elf64_Shdr *ElfSections = (Elf64_Shdr *)(Address + Header->e_shoff);
        Elf64_Sym *ElfSymbols = nullptr;
        char *strtab = nullptr;

        for (uint64_t i = 0; i < Header->e_shnum; i++)
            switch (ElfSections[i].sh_type)
            {
            case SHT_SYMTAB:
                ElfSymbols = (Elf64_Sym *)(Address + ElfSections[i].sh_offset);
                TotalEntries = ElfSections[i].sh_size / sizeof(Elf64_Sym);
                debug("Symbol table found, %d entries", TotalEntries);
                break;
            case SHT_STRTAB:
                if (Header->e_shstrndx == i)
                {
                    debug("Section header string table found, %d bytes", ElfSections[i].sh_size);
                }
                else
                {
                    strtab = (char *)Address + ElfSections[i].sh_offset;
                    debug("String table found, %d bytes", ElfSections[i].sh_size);
                }
                break;
            }

        if (ElfSymbols != nullptr && strtab != nullptr)
        {
            size_t Index, MinimumIndex;
            for (size_t i = 0; i < TotalEntries - 1; i++) // selection sort by symbol address
            {
                MinimumIndex = i;
                for (Index = i + 1; Index < TotalEntries; Index++)
                    if (ElfSymbols[Index].st_value < ElfSymbols[MinimumIndex].st_value)
                        MinimumIndex = Index;
                Elf64_Sym tmp = ElfSymbols[MinimumIndex];
                ElfSymbols[MinimumIndex] = ElfSymbols[i];
                ElfSymbols[i] = tmp;
            }

            while (ElfSymbols[0].st_value == 0) // skip null symbols at the front
            {
                ElfSymbols++;
                TotalEntries--;
            }

            trace("Symbol table loaded, %d entries (%ldKB)", TotalEntries, TO_KB(TotalEntries * sizeof(SymbolTable)));
            // TODO: broken?
            // SymTable = new SymbolTable[TotalEntries];
            SymTable = (SymbolTable *)KernelAllocator.RequestPages((TotalEntries * sizeof(SymbolTable)) / PAGE_SIZE + 1);
            // do_mem_test();

            for (size_t i = 0, g = TotalEntries; i < g; i++)
            {
                SymTable[i].Address = ElfSymbols[i].st_value;
                SymTable[i].FunctionName = &strtab[ElfSymbols[i].st_name];
            }
        }
    }

    Symbols::~Symbols()
    {
        // delete SymTable;
        KernelAllocator.FreePages(SymTable, (TotalEntries * sizeof(SymbolTable)) / PAGE_SIZE + 1);
    }

    const char *Symbols::GetSymbolFromAddress(uint64_t Address)
    {
        Symbols::SymbolTable Result{0, (char *)"<unknown>"};
        for (size_t i = 0; i < TotalEntries; i++)
            if (SymTable[i].Address <= Address && SymTable[i].Address > Result.Address)
                Result = SymTable[i];
        return Result.FunctionName;
    }
}
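The resolver keeps SymTable sorted by address, so the closest preceding symbol wins in GetSymbolFromAddress. A minimal sketch of how a panic or stack-trace path might use it; both variable names below are placeholders, not identifiers from this commit:

// KernelElfImageAddress stands for wherever the kernel's ELF image lives in memory;
// ReturnAddress would come from a stack walk.
SymbolResolver::Symbols KernelSymbols(KernelElfImageAddress);
const char *Name = KernelSymbols.GetSymbolFromAddress(ReturnAddress);
trace("called from %s", Name);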
@@ -1,121 +0,0 @@
#include <display.hpp>
#include <lock.hpp>
#include <debug.h>

extern uint64_t _binary_files_ter_powerline_v12n_psf_start;
extern uint64_t _binary_files_ter_powerline_v12n_psf_end;
extern uint64_t _binary_files_ter_powerline_v12n_psf_size;

NEWLOCK(PrintLock);

namespace Video
{
    char Display::Print(char Char, int Index)
    {
        SMARTLOCK(PrintLock);
        switch (Char)
        {
        case '\b':
        {
            if (this->Buffers[Index]->CursorX > 0)
                this->Buffers[Index]->CursorX -= this->GetCurrentFont()->GetInfo().Width;
            return Char;
        }
        case '\t':
        {
            this->Buffers[Index]->CursorX = (this->Buffers[Index]->CursorX + 8) & ~(8 - 1);
            return Char;
        }
        case '\r':
        {
            this->Buffers[Index]->CursorX = 0;
            return Char;
        }
        case '\n':
        {
            this->Buffers[Index]->CursorX = 0;
            this->Buffers[Index]->CursorY += this->GetCurrentFont()->GetInfo().Height;
            return Char;
        }
        }

        if (this->Buffers[Index]->CursorY + this->GetCurrentFont()->GetInfo().Height >= this->Buffers[Index]->Height)
        {
            this->Buffers[Index]->CursorY -= this->GetCurrentFont()->GetInfo().Height;
            this->Scroll(Index, 1);
        }

        if (this->Buffers[Index]->CursorX + this->GetCurrentFont()->GetInfo().Width >= this->Buffers[Index]->Width)
        {
            this->Buffers[Index]->CursorX = 0;
            this->Buffers[Index]->CursorY += this->GetCurrentFont()->GetInfo().Height;
        }

        switch (this->CurrentFont->GetInfo().Type)
        {
        case FontType::PCScreenFont1:
        {
            uint32_t *PixelPtr = (uint32_t *)this->Buffers[Index]->Buffer;
            char *FontPtr = (char *)this->CurrentFont->GetInfo().PSF1Font->GlyphBuffer + (Char * this->CurrentFont->GetInfo().PSF1Font->Header->charsize);
            for (uint64_t Y = this->Buffers[Index]->CursorY; Y < this->Buffers[Index]->CursorY + 16; Y++)
            {
                for (uint64_t X = this->Buffers[Index]->CursorX; X < this->Buffers[Index]->CursorX + 8; X++)
                    if ((*FontPtr & (0b10000000 >> (X - this->Buffers[Index]->CursorX))) > 0)
                        *(unsigned int *)(PixelPtr + X + (Y * this->Buffers[Index]->Width)) = this->Buffers[Index]->Color;
                FontPtr++;
            }
            this->Buffers[Index]->CursorX += 8;

            break;
        }
        case FontType::PCScreenFont2:
        {
            // if (this->CurrentFont->PSF2Font->GlyphBuffer == (uint16_t *)0x01) // HAS UNICODE TABLE
            //     Char = this->CurrentFont->PSF2Font->GlyphBuffer[Char];
            int BytesPerLine = (this->CurrentFont->GetInfo().PSF2Font->Header->width + 7) / 8;
            char *FontPtr = (char *)this->CurrentFont->GetInfo().StartAddress +
                            this->CurrentFont->GetInfo().PSF2Font->Header->headersize +
                            (Char > 0 && (unsigned char)Char < this->CurrentFont->GetInfo().PSF2Font->Header->length ? Char : 0) *
                                this->CurrentFont->GetInfo().PSF2Font->Header->charsize;

            uint32_t fonthdrWidth = this->CurrentFont->GetInfo().PSF2Font->Header->width;
            uint32_t fonthdrHeight = this->CurrentFont->GetInfo().PSF2Font->Header->height;

            for (uint64_t Y = this->Buffers[Index]->CursorY; Y < this->Buffers[Index]->CursorY + fonthdrHeight; Y++)
            {
                for (uint64_t X = this->Buffers[Index]->CursorX; X < this->Buffers[Index]->CursorX + fonthdrWidth; X++)
                    if ((*FontPtr & (0b10000000 >> (X - this->Buffers[Index]->CursorX))) > 0)
                        *(uint32_t *)((uint64_t)this->Buffers[Index]->Buffer +
                                      (Y * this->Buffers[Index]->Width + X) * (this->framebuffer.BitsPerPixel / 8)) = 0xFFFFFF;

                FontPtr += BytesPerLine;
            }
            this->Buffers[Index]->CursorX += fonthdrWidth;
            break;
        }
        default:
            warn("Unsupported font type");
            break;
        }
        return Char;
    }

    Display::Display(BootInfo::FramebufferInfo Info, bool LoadDefaultFont)
    {
        this->framebuffer = Info;
        if (LoadDefaultFont)
        {
            this->CurrentFont = new Font(&_binary_files_ter_powerline_v12n_psf_start, &_binary_files_ter_powerline_v12n_psf_end, FontType::PCScreenFont2);
            FontInfo Info = this->CurrentFont->GetInfo();
            debug("Font loaded: %dx%d %s",
                  Info.Width, Info.Height, Info.Type == FontType::PCScreenFont1 ? "PSF1" : "PSF2");
        }
        this->CreateBuffer(Info.Width, Info.Height, 0);
    }

    Display::~Display()
    {
        for (int i = 0; i < 16; i++)
            DeleteBuffer(i);
    }
}
@@ -1,54 +0,0 @@
#include <display.hpp>
#include <debug.h>
#include <cstring>

namespace Video
{
    Font::Font(uint64_t *Start, uint64_t *End, FontType Type)
    {
        trace("Initializing font with start %#llx and end %#llx Type: %d", Start, End, Type);
        this->Info.StartAddress = Start;
        this->Info.EndAddress = End;
        this->Info.Type = Type;
        if (Type == FontType::PCScreenFont2)
        {
            this->Info.PSF2Font = new PSF2_FONT;

            uint64_t FontDataLength = (uint64_t)End - (uint64_t)Start; // byte length, not element count
            PSF2_HEADER *font2 = (PSF2_HEADER *)KernelAllocator.RequestPages(FontDataLength / PAGE_SIZE + 1);
            for (uint64_t i = 0; i < FontDataLength / PAGE_SIZE + 1; i++)
                Memory::Virtual().Map((void *)((uint64_t)font2 + (i * PAGE_SIZE)), (void *)((uint64_t)font2 + (i * PAGE_SIZE)), Memory::PTFlag::RW);
            memcpy((void *)font2, Start, FontDataLength);

            this->Info.Width = font2->width;
            this->Info.Height = font2->height;
            if (font2->magic[0] != PSF2_MAGIC0 || font2->magic[1] != PSF2_MAGIC1 || font2->magic[2] != PSF2_MAGIC2 || font2->magic[3] != PSF2_MAGIC3)
                error("Font2 magic mismatch.");

            this->Info.PSF2Font->Header = font2;
            this->Info.PSF2Font->GlyphBuffer = reinterpret_cast<void *>(reinterpret_cast<uint64_t>(Start) + sizeof(PSF2_HEADER));
        }
        else if (Type == FontType::PCScreenFont1)
        {
            this->Info.PSF1Font = new PSF1_FONT;
            PSF1_HEADER *font1 = (PSF1_HEADER *)Start;
            if (font1->magic[0] != PSF1_MAGIC0 || font1->magic[1] != PSF1_MAGIC1)
                error("Font1 magic mismatch.");
            uint32_t glyphBufferSize = font1->charsize * 256;
            if (font1->mode == 1) // 512 glyph mode
                glyphBufferSize = font1->charsize * 512;
            void *glyphBuffer = reinterpret_cast<void *>(reinterpret_cast<uint64_t>(Start) + sizeof(PSF1_HEADER));
            this->Info.PSF1Font->Header = font1;
            this->Info.PSF1Font->GlyphBuffer = glyphBuffer;
            UNUSED(glyphBufferSize); // TODO: Use this in the future?

            // TODO: Get the real glyph size from the header.
            this->Info.Width = 16;
            this->Info.Height = 8;
        }
    }

    Font::~Font()
    {
    }
}