Mirror of https://github.com/EnderIce2/Fennix.git (synced 2025-07-16 09:41:42 +00:00)
QoL and bug fixes
@@ -20,8 +20,8 @@
 Xalloc_def;
 
 #define XALLOC_CONCAT(x, y) x##y
-#define XStoP(x) (x / Xalloc_PAGE_SIZE + 1)
-#define XPtoS(x) (x * Xalloc_PAGE_SIZE)
+#define XStoP(d) (((d) + PAGE_SIZE - 1) / PAGE_SIZE)
+#define XPtoS(d) ((d)*PAGE_SIZE)
 #define Xalloc_BlockChecksum 0xA110C
 
 extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages);
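The new XStoP is a plain ceiling division from a byte count to a page count, where the old form always added a whole extra page. A minimal, self-contained check of the new rounding, assuming a 4 KiB page size (values are illustrative, not from the commit):

    // Illustrative sketch; PageSize and StoP stand in for PAGE_SIZE and XStoP.
    constexpr unsigned long PageSize = 4096;
    constexpr unsigned long StoP(unsigned long d) { return (d + PageSize - 1) / PageSize; }
    static_assert(StoP(1) == 1, "1 byte still needs one page");
    static_assert(StoP(4096) == 1, "an exact page no longer rounds up to two");
    static_assert(StoP(4097) == 2, "one byte past a boundary needs two pages");

With the old definition the second case evaluated to 2, i.e. an extra page for every request that was already an exact multiple of the page size.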
@@ -70,14 +70,14 @@ namespace Xalloc
 	Block(Xsize_t Size)
 	{
-		this->Address = Xalloc_REQUEST_PAGES(XStoP(Size));
+		this->Address = Xalloc_REQUEST_PAGES(XStoP(Size + 1));
 		this->Size = Size;
 		Xmemset(this->Address, 0, Size);
 	}
 
 	~Block()
 	{
-		Xalloc_FREE_PAGES(this->Address, XStoP(this->Size));
+		Xalloc_FREE_PAGES(this->Address, XStoP(this->Size + 1));
 	}
 
 	/**
@@ -120,7 +120,7 @@ namespace Xalloc
 	{
 		if (this->SMAPUsed)
 		{
-#if defined(a64) || defined(a32)
+#if defined(a86)
 			asm volatile("stac" ::
 						 : "cc");
 #endif
@@ -131,7 +131,7 @@ namespace Xalloc
 	{
 		if (this->SMAPUsed)
 		{
-#if defined(a64) || defined(a32)
+#if defined(a86)
 			asm volatile("clac" ::
 						 : "cc");
 #endif
@@ -209,7 +209,8 @@ namespace Xalloc
 		{
 			if (!CurrentBlock->Check())
 			{
-				Xalloc_err("Block %#lx checksum failed!", (Xuint64_t)CurrentBlock);
+				Xalloc_err("Block %#lx has an invalid checksum! (%#x != %#x)",
+						   (Xuint64_t)CurrentBlock, CurrentBlock->Checksum, Xalloc_BlockChecksum);
				while (Xalloc_StopOnFail)
					;
			}
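The new message prints both the stored checksum and the expected magic value. A minimal sketch of the consistency test this implies, assuming Check() simply compares the block's Checksum field against the Xalloc_BlockChecksum magic stamped at allocation time (that assignment is not shown in this hunk, so it is an assumption):

    // Sketch only, not the allocator's actual code.
    #define Xalloc_BlockChecksum 0xA110C
    class BlockSketch
    {
    public:
        unsigned int Checksum = Xalloc_BlockChecksum; // assumed to be set on construction

        bool Check()
        {
            // A heap overrun from a neighbouring allocation usually tramples this
            // field first, so the comparison fails and the caller can report both values.
            return this->Checksum == Xalloc_BlockChecksum;
        }
    };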
@@ -48,7 +48,6 @@ using namespace Memory;
 
 Physical KernelAllocator;
 PageTable4 *KernelPageTable = nullptr;
-PageTable4 *UserspaceKernelOnlyPageTable = nullptr;
 bool Page1GBSupport = false;
 bool PSESupport = false;
 
@@ -102,9 +101,7 @@ NIF void MapFromZero(PageTable4 *PT, BootInfo *Info)
 	else
 		va.Map((void *)0, (void *)0, MemSize, PTFlag::RW);
 
-	void *NullAddress = KernelAllocator.RequestPage();
-	memset(NullAddress, 0, PAGE_SIZE); // TODO: If the CPU instruction pointer hits this page, there should be function to handle it. (memcpy assembly code?)
-	va.Remap((void *)0, (void *)NullAddress, PTFlag::RW | PTFlag::US);
+	va.Unmap((void *)0);
 }
 
 NIF void MapFramebuffer(PageTable4 *PT, BootInfo *Info)
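Leaving virtual page 0 unmapped (instead of remapping it to a zero-filled, user-accessible frame) means a stray null dereference now traps immediately. Illustrative only:

    // With page 0 unmapped by MapFromZero(), this raises a page fault (#PF on x86)
    // instead of silently reading whatever memory used to be identity-mapped there.
    int *p = nullptr;
    int v = *p;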
@@ -171,7 +168,7 @@ NIF void MapKernel(PageTable4 *PT, BootInfo *Info)
 	for (k = KernelStart; k < KernelTextEnd; k += PAGE_SIZE)
 	{
 		va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
-		KernelAllocator.LockPage((void *)BaseKernelMapAddress);
+		KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
 		BaseKernelMapAddress += PAGE_SIZE;
 	}
 
@@ -179,7 +176,7 @@ NIF void MapKernel(PageTable4 *PT, BootInfo *Info)
 	for (k = KernelTextEnd; k < KernelDataEnd; k += PAGE_SIZE)
 	{
 		va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
-		KernelAllocator.LockPage((void *)BaseKernelMapAddress);
+		KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
 		BaseKernelMapAddress += PAGE_SIZE;
 	}
 
@@ -187,7 +184,7 @@ NIF void MapKernel(PageTable4 *PT, BootInfo *Info)
 	for (k = KernelDataEnd; k < KernelRoDataEnd; k += PAGE_SIZE)
 	{
 		va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::G);
-		KernelAllocator.LockPage((void *)BaseKernelMapAddress);
+		KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
 		BaseKernelMapAddress += PAGE_SIZE;
 	}
 
@@ -195,7 +192,7 @@ NIF void MapKernel(PageTable4 *PT, BootInfo *Info)
 	for (k = KernelRoDataEnd; k < KernelEnd; k += PAGE_SIZE)
 	{
 		va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
-		KernelAllocator.LockPage((void *)BaseKernelMapAddress);
+		KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
 		BaseKernelMapAddress += PAGE_SIZE;
 	}
 
@@ -203,7 +200,7 @@ NIF void MapKernel(PageTable4 *PT, BootInfo *Info)
 	for (k = KernelFileStart; k < KernelFileEnd; k += PAGE_SIZE)
 	{
 		va.Map((void *)k, (void *)k, PTFlag::G);
-		KernelAllocator.LockPage((void *)k);
+		KernelAllocator.ReservePage((void *)k);
 	}
 
 #ifdef DEBUG
@@ -243,8 +240,8 @@ NIF void InitializeMemoryManagement(BootInfo *Info)
 #ifdef DEBUG
 	for (uint64_t i = 0; i < Info->Memory.Entries; i++)
 	{
-		uintptr_t Base = reinterpret_cast<uintptr_t>(Info->Memory.Entry[i].BaseAddress);
-		uintptr_t Length = Info->Memory.Entry[i].Length;
+		uintptr_t Base = r_cst(uintptr_t, Info->Memory.Entry[i].BaseAddress);
+		size_t Length = Info->Memory.Entry[i].Length;
 		uintptr_t End = Base + Length;
 		const char *Type = "Unknown";
 
@@ -293,12 +290,21 @@ NIF void InitializeMemoryManagement(BootInfo *Info)
 		  TO_MB(KernelAllocator.GetTotalMemory()),
 		  TO_MB(KernelAllocator.GetReservedMemory()));
 
-	trace("Initializing Virtual Memory Manager");
-	KernelPageTable = (PageTable4 *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE));
-	memset(KernelPageTable, 0, PAGE_SIZE);
-	UserspaceKernelOnlyPageTable = (PageTable4 *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE));
-	memset(UserspaceKernelOnlyPageTable, 0, PAGE_SIZE);
+	/* -- Debugging --
+	size_t bmap_size = KernelAllocator.GetPageBitmap().Size;
+	for (size_t i = 0; i < bmap_size; i++)
+	{
+		bool idx = KernelAllocator.GetPageBitmap().Get(i);
+		if (idx == true)
+			debug("Page %04d: %#lx", i, i * PAGE_SIZE);
+	}
+
+	inf_loop debug("Alloc.: %#lx", KernelAllocator.RequestPage());
+	*/
+
+	trace("Initializing Virtual Memory Manager");
+	KernelPageTable = (PageTable4 *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE + 1));
+	memset(KernelPageTable, 0, PAGE_SIZE);
 
 	if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_AMD) == 0)
 	{
@@ -333,16 +339,11 @@ NIF void InitializeMemoryManagement(BootInfo *Info)
 	MapFramebuffer(KernelPageTable, Info);
 	MapKernel(KernelPageTable, Info);
 
-	memcpy(UserspaceKernelOnlyPageTable, KernelPageTable, sizeof(PageTable4));
-
-	trace("Applying new page table from address %p", KernelPageTable);
+	trace("Applying new page table from address %#lx", KernelPageTable);
 #ifdef DEBUG
 	debug("Kernel:");
 	tracepagetable(KernelPageTable);
-	debug("Userspace:");
-	tracepagetable(UserspaceKernelOnlyPageTable);
 #endif
-#if defined(a64) || defined(a32)
+#if defined(a86)
 	asmv("mov %0, %%cr3" ::"r"(KernelPageTable));
 #elif defined(aa64)
 	asmv("msr ttbr0_el1, %0" ::"r"(KernelPageTable));
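Loading CR3 activates the new page table and flushes every non-global TLB entry; the kernel mappings created in MapKernel above carry PTFlag::G precisely so their translations survive such a switch. A minimal sketch of the switch helper, assuming x86-64 and GCC-style inline asm comparable to the asmv macro used here:

    // Sketch under the assumption above; not the project's helper.
    static inline void SwitchPageTable(void *PageTablePhysical)
    {
        // Writing CR3 installs the new top-level table and invalidates
        // all non-global TLB entries in one step.
        asm volatile("mov %0, %%cr3" ::"r"(PageTablePhysical) : "memory");
    }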
@@ -372,7 +373,7 @@ void *malloc(size_t Size)
 	{
 	case MemoryAllocatorType::Pages:
 	{
-		ret = KernelAllocator.RequestPages(TO_PAGES(Size));
+		ret = KernelAllocator.RequestPages(TO_PAGES(Size + 1));
 		memset(ret, 0, Size);
 		break;
 	}
@@ -425,7 +426,7 @@ void *calloc(size_t n, size_t Size)
 	{
 	case MemoryAllocatorType::Pages:
 	{
-		ret = KernelAllocator.RequestPages(TO_PAGES(n * Size));
+		ret = KernelAllocator.RequestPages(TO_PAGES(n * Size + 1));
 		memset(ret, 0, n * Size);
 		break;
 	}
@@ -478,7 +479,7 @@ void *realloc(void *Address, size_t Size)
 	{
 	case unlikely(MemoryAllocatorType::Pages):
 	{
-		ret = KernelAllocator.RequestPages(TO_PAGES(Size)); // WARNING: Potential memory leak
+		ret = KernelAllocator.RequestPages(TO_PAGES(Size + 1)); // WARNING: Potential memory leak
 		memset(ret, 0, Size);
 		break;
 	}
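The WARNING comment still applies: the page-backed realloc path neither copies the old contents nor frees the old pages. A hypothetical leak-free variant is sketched below; GetAllocatedPageCount() does not exist in this diff and is assumed purely for illustration:

    // Hypothetical sketch only -- not code from this commit.
    void *PageRealloc(void *Address, size_t Size)
    {
        void *NewAddress = KernelAllocator.RequestPages(TO_PAGES(Size + 1));
        memset(NewAddress, 0, Size);
        if (Address != nullptr)
        {
            size_t OldPages = GetAllocatedPageCount(Address); /* assumed helper */
            size_t OldBytes = OldPages * PAGE_SIZE;
            memcpy(NewAddress, Address, OldBytes < Size ? OldBytes : Size); /* keep old data */
            KernelAllocator.FreePages(Address, OldPages);                   /* plug the leak */
        }
        return NewAddress;
    }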
@@ -295,7 +295,7 @@ namespace Memory
 		if (unlikely(Address == nullptr))
 			warn("Trying to reserve null address.");
 
-		uintptr_t Index = (uintptr_t)Address / PAGE_SIZE;
+		uintptr_t Index = (Address == NULL) ? 0 : (uintptr_t)Address / PAGE_SIZE;
 
 		if (unlikely(PageBitmap[Index] == true))
 			return;
@@ -313,7 +313,18 @@ namespace Memory
 			warn("Trying to reserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
 
 		for (size_t t = 0; t < PageCount; t++)
-			this->ReservePage((void *)((uintptr_t)Address + (t * PAGE_SIZE)));
+		{
+			uintptr_t Index = ((uintptr_t)Address + (t * PAGE_SIZE)) / PAGE_SIZE;
+
+			if (unlikely(PageBitmap[Index] == true))
+				return;
+
+			if (PageBitmap.Set(Index, true))
+			{
+				FreeMemory -= PAGE_SIZE;
+				ReservedMemory += PAGE_SIZE;
+			}
+		}
 	}
 
 	void Physical::UnreservePage(void *Address)
@@ -321,7 +332,7 @@ namespace Memory
 		if (unlikely(Address == nullptr))
 			warn("Trying to unreserve null address.");
 
-		uintptr_t Index = (uintptr_t)Address / PAGE_SIZE;
+		uintptr_t Index = (Address == NULL) ? 0 : (uintptr_t)Address / PAGE_SIZE;
 
 		if (unlikely(PageBitmap[Index] == false))
 			return;
@@ -341,7 +352,20 @@ namespace Memory
 			warn("Trying to unreserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
 
 		for (size_t t = 0; t < PageCount; t++)
-			this->UnreservePage((void *)((uintptr_t)Address + (t * PAGE_SIZE)));
+		{
+			uintptr_t Index = ((uintptr_t)Address + (t * PAGE_SIZE)) / PAGE_SIZE;
+
+			if (unlikely(PageBitmap[Index] == false))
+				return;
+
+			if (PageBitmap.Set(Index, false))
+			{
+				FreeMemory += PAGE_SIZE;
+				ReservedMemory -= PAGE_SIZE;
+				if (PageBitmapIndex > Index)
+					PageBitmapIndex = Index;
+			}
+		}
 	}
 
 	void Physical::Init(BootInfo *Info)
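Both inlined loops follow the same bitmap discipline: one bit per physical page, with the byte counters adjusted only when PageBitmap.Set() reports an actual state change. Note that the inlined versions appear to return from the whole function as soon as one page is already in the target state, whereas the per-page helpers they replace merely skipped that page. A small standalone model of the accounting, assuming a 4 KiB page size (not the kernel's Bitmap class):

    #include <cstdint>
    #include <vector>

    constexpr uint64_t ModelPageSize = 4096;

    struct BitmapModel
    {
        std::vector<bool> Bits;
        uint64_t FreeMemory = 0, ReservedMemory = 0;

        // Returns true only when the bit really flips, mirroring Set() above.
        bool Set(size_t Index, bool Value)
        {
            if (Bits[Index] == Value)
                return false;
            Bits[Index] = Value;
            return true;
        }

        void Reserve(uintptr_t Address, size_t PageCount)
        {
            for (size_t t = 0; t < PageCount; t++)
            {
                size_t Index = (Address + t * ModelPageSize) / ModelPageSize;
                if (Set(Index, true)) // counters move only on a real transition
                {
                    FreeMemory -= ModelPageSize;
                    ReservedMemory += ModelPageSize;
                }
            }
        }
    };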
@@ -349,6 +373,7 @@ namespace Memory
 		SmartLock(this->MemoryLock);
 
 		uint64_t MemorySize = Info->Memory.Size;
+		debug("Memory size: %lld bytes (%ld pages)", MemorySize, TO_PAGES(MemorySize));
 		TotalMemory = MemorySize;
 		FreeMemory = MemorySize;
 
@@ -381,8 +406,9 @@ namespace Memory
 			CPU::Stop();
 		}
 
+		/* TODO: Read swap config and make the configure the bitmap size correctly */
 		size_t BitmapSize = (MemorySize / PAGE_SIZE) / 8 + 1;
-		trace("Initializing Bitmap at %llp-%llp (%lld Bytes)",
+		debug("Initializing Bitmap at %llp-%llp (%lld Bytes)",
 			  LargestFreeMemorySegment,
 			  (void *)((uintptr_t)LargestFreeMemorySegment + BitmapSize),
 			  BitmapSize);
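The bitmap stores one bit per physical page, so its size is pages / 8 bytes plus one byte of slack. A worked example, assuming 4 GiB of RAM and 4 KiB pages (values chosen for illustration):

    // 4 GiB / 4096 = 1,048,576 pages; / 8 = 131,072 bytes; + 1 = 131,073 bytes (~128 KiB)
    uint64_t MemorySize = 4ULL * 1024 * 1024 * 1024;
    size_t BitmapSize = (MemorySize / 4096) / 8 + 1; // 131073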
@@ -392,16 +418,27 @@ namespace Memory
 		for (size_t i = 0; i < BitmapSize; i++)
 			*(uint8_t *)(PageBitmap.Buffer + i) = 0;
 
-		trace("Reserving pages...");
+		debug("Reserving pages...");
 		for (uint64_t i = 0; i < Info->Memory.Entries; i++)
 		{
 			if (Info->Memory.Entry[i].Type != Usable)
 				this->ReservePages(Info->Memory.Entry[i].BaseAddress, TO_PAGES(Info->Memory.Entry[i].Length));
 		}
 
-		trace("Locking bitmap pages...");
-		this->ReservePages(0, 0x100);
-		this->LockPages(PageBitmap.Buffer, TO_PAGES(PageBitmap.Size));
+		/* Making sure that the lower memory area is properly reserved. */
+		this->ReservePages(0, 0x200);
+		for (uint64_t i = 0; i < Info->Memory.Entries; i++)
+		{
+			if (Info->Memory.Entry[i].Type == Usable)
+				this->UnreservePages(Info->Memory.Entry[i].BaseAddress, TO_PAGES(Info->Memory.Entry[i].Length));
+		}
+
+		debug("Reserving pages for SMP...");
+		this->ReservePage((void *)0x0);	   /* Trampoline stack, gdt, idt, etc... */
+		this->ReservePage((void *)0x2000); /* TRAMPOLINE_START */
+
+		debug("Reserving bitmap pages...");
+		this->ReservePages(PageBitmap.Buffer, TO_PAGES(PageBitmap.Size));
 	}
 
 	Physical::Physical() {}
@@ -28,7 +28,7 @@ namespace Memory
 
 		if (this->UserMode)
 		{
-			void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
+			void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE + 1));
 			memset(AllocatedStack, 0, USER_STACK_SIZE);
 			debug("AllocatedStack: %p", AllocatedStack);
 
@@ -52,7 +52,7 @@ namespace Memory
 		}
 		else
 		{
-			this->StackBottom = KernelAllocator.RequestPages(TO_PAGES(STACK_SIZE));
+			this->StackBottom = KernelAllocator.RequestPages(TO_PAGES(STACK_SIZE + 1));
 			memset(this->StackBottom, 0, STACK_SIZE);
 			debug("StackBottom: %p", this->StackBottom);
 
@@ -70,7 +70,7 @@ namespace Memory
 	StackGuard::~StackGuard()
 	{
 		fixme("Temporarily disabled stack guard deallocation");
-		// KernelAllocator.FreePages(this->StackBottom, TO_PAGES(this->Size));
+		// KernelAllocator.FreePages(this->StackBottom, TO_PAGES(this->Size + 1));
 		// debug("Freed stack at %p", this->StackBottom);
 	}
 
@@ -85,7 +85,7 @@ namespace Memory
 		}
 		else
 		{
-			void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
+			void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE + 1));
 			debug("AllocatedStack: %p", AllocatedStack);
 			memset(AllocatedStack, 0, USER_STACK_SIZE);
 			for (uintptr_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
@@ -131,8 +131,8 @@ namespace Memory
 	PageDirectoryPointerTableEntryPtr *PDPTEPtr = nullptr;
 	if (!PML4->Present)
 	{
-		PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageDirectoryPointerTableEntryPtr)));
-		memset(PDPTEPtr, 0, PAGE_SIZE);
+		PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageDirectoryPointerTableEntryPtr) + 1));
+		memset(PDPTEPtr, 0, sizeof(PageDirectoryPointerTableEntryPtr));
 		PML4->Present = true;
 		PML4->SetAddress((uintptr_t)PDPTEPtr >> 12);
 	}
@@ -153,8 +153,8 @@ namespace Memory
 	PageDirectoryEntryPtr *PDEPtr = nullptr;
 	if (!PDPTE->Present)
 	{
-		PDEPtr = (PageDirectoryEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageDirectoryEntryPtr)));
-		memset(PDEPtr, 0, PAGE_SIZE);
+		PDEPtr = (PageDirectoryEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageDirectoryEntryPtr) + 1));
+		memset(PDEPtr, 0, sizeof(PageDirectoryEntryPtr));
 		PDPTE->Present = true;
 		PDPTE->SetAddress((uintptr_t)PDEPtr >> 12);
 	}
@@ -175,8 +175,8 @@ namespace Memory
 	PageTableEntryPtr *PTEPtr = nullptr;
 	if (!PDE->Present)
 	{
-		PTEPtr = (PageTableEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageTableEntryPtr)));
-		memset(PTEPtr, 0, PAGE_SIZE);
+		PTEPtr = (PageTableEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageTableEntryPtr) + 1));
+		memset(PTEPtr, 0, sizeof(PageTableEntryPtr));
 		PDE->Present = true;
 		PDE->SetAddress((uintptr_t)PTEPtr >> 12);
 	}
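Each SetAddress() call stores the physical frame number rather than the raw pointer: page-table entries keep bits 12 and up of a 4 KiB-aligned physical address, so the low 12 bits are shifted away. A quick worked example, assuming the newly requested table lands at physical 0x210000:

    // Illustrative values only.
    uintptr_t TablePhys = 0x210000;    // page-aligned allocation
    uintptr_t Frame = TablePhys >> 12; // 0x210: the value stored in the entry's address field
    // The hardware later reconstructs the table's base address as Frame << 12.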
@@ -217,7 +217,7 @@ namespace Memory
 	(byte & 0x01 ? '1' : '0')
 
 	if (!this->Check(VirtualAddress, (PTFlag)Flags, Type)) // quick workaround just to see where it fails
-		warn("Failed to map %#lx - %#lx with flags: " BYTE_TO_BINARY_PATTERN, VirtualAddress, PhysicalAddress, BYTE_TO_BINARY(Flags));
+		warn("Failed to map v:%#lx p:%#lx with flags: " BYTE_TO_BINARY_PATTERN, VirtualAddress, PhysicalAddress, BYTE_TO_BINARY(Flags));
 #endif
 }