fork() stub and QoL improvements

Author: Alex
Date: 2023-05-03 06:37:39 +03:00
Parent: 6e6d22403c
Commit: 61aea6aa8d
25 changed files with 426 additions and 185 deletions


@@ -47,7 +47,7 @@ NewLock(OperatorAllocatorLock);
using namespace Memory;
Physical KernelAllocator;
-PageTable4 *KernelPageTable = nullptr;
+PageTable *KernelPageTable = nullptr;
bool Page1GBSupport = false;
bool PSESupport = false;
@@ -55,7 +55,7 @@ static MemoryAllocatorType AllocatorType = MemoryAllocatorType::Pages;
Xalloc::V1 *XallocV1Allocator = nullptr;
#ifdef DEBUG
-NIF void tracepagetable(PageTable4 *pt)
+NIF void tracepagetable(PageTable *pt)
{
for (int i = 0; i < 512; i++)
{
@@ -74,7 +74,7 @@ NIF void tracepagetable(PageTable4 *pt)
}
#endif
-NIF void MapFromZero(PageTable4 *PT, BootInfo *Info)
+NIF void MapFromZero(PageTable *PT, BootInfo *Info)
{
debug("Mapping from 0x0 to %#llx", Info->Memory.Size);
Virtual va = Virtual(PT);
@@ -104,7 +104,7 @@ NIF void MapFromZero(PageTable4 *PT, BootInfo *Info)
va.Unmap((void *)0);
}
-NIF void MapFramebuffer(PageTable4 *PT, BootInfo *Info)
+NIF void MapFramebuffer(PageTable *PT, BootInfo *Info)
{
debug("Mapping Framebuffer");
Virtual va = Virtual(PT);
@@ -143,7 +143,7 @@ NIF void MapFramebuffer(PageTable4 *PT, BootInfo *Info)
}
}
-NIF void MapKernel(PageTable4 *PT, BootInfo *Info)
+NIF void MapKernel(PageTable *PT, BootInfo *Info)
{
debug("Mapping Kernel");
uintptr_t KernelStart = (uintptr_t)&_kernel_start;
@@ -303,7 +303,7 @@ NIF void InitializeMemoryManagement(BootInfo *Info)
*/
trace("Initializing Virtual Memory Manager");
-KernelPageTable = (PageTable4 *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE + 1));
+KernelPageTable = (PageTable *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE + 1));
memset(KernelPageTable, 0, PAGE_SIZE);
if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_AMD) == 0)
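
The hunks above only swap the identifier. For orientation, a minimal sketch of the shape the renamed type presumably keeps, inferred from the 512-iteration loop in tracepagetable and the memset of PAGE_SIZE; the PageTableEntry, Entries, and raw names are assumptions, not part of this commit:

    #include <cstdint>

    struct PageTableEntry
    {
        uint64_t raw; /* assumed: Present/RW/US flag bits plus the physical frame address */
    };

    struct PageTable
    {
        PageTableEntry Entries[512]; /* 512 * 8 bytes == 4 KiB, matching the memset of PAGE_SIZE */
    };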


@@ -125,7 +125,7 @@ namespace Memory
if (User)
Flags |= Memory::PTFlag::US;
-Memory::Virtual(this->PageTable).Remap((void *)((uintptr_t)Address + (i * PAGE_SIZE)), (void *)((uint64_t)Address + (i * PAGE_SIZE)), Flags);
+Memory::Virtual(this->Table).Remap((void *)((uintptr_t)Address + (i * PAGE_SIZE)), (void *)((uint64_t)Address + (i * PAGE_SIZE)), Flags);
}
if (this->Directory)
@@ -173,8 +173,8 @@ namespace Memory
for (size_t i = 0; i < Count; i++)
{
-Memory::Virtual(this->PageTable).Remap((void *)((uintptr_t)Address + (i * PAGE_SIZE)), (void *)((uint64_t)Address + (i * PAGE_SIZE)), Memory::PTFlag::RW);
-// Memory::Virtual(this->PageTable).Unmap((void *)((uintptr_t)Address + (i * PAGE_SIZE)));
+Memory::Virtual(this->Table).Remap((void *)((uintptr_t)Address + (i * PAGE_SIZE)), (void *)((uint64_t)Address + (i * PAGE_SIZE)), Memory::PTFlag::RW);
+// Memory::Virtual(this->Table).Unmap((void *)((uintptr_t)Address + (i * PAGE_SIZE)));
}
if (this->Directory)
@@ -213,16 +213,16 @@ namespace Memory
}
}
-MemMgr::MemMgr(PageTable4 *PageTable, VirtualFileSystem::Node *Directory)
+MemMgr::MemMgr(PageTable *Table, VirtualFileSystem::Node *Directory)
{
-if (PageTable)
-this->PageTable = PageTable;
+if (Table)
+this->Table = Table;
else
{
#if defined(a64)
-this->PageTable = (PageTable4 *)CPU::x64::readcr3().raw;
+this->Table = (PageTable *)CPU::x64::readcr3().raw;
#elif defined(a32)
-this->PageTable = (PageTable4 *)CPU::x32::readcr3().raw;
+this->Table = (PageTable *)CPU::x32::readcr3().raw;
#endif
}
@@ -236,7 +236,7 @@ namespace Memory
{
KernelAllocator.FreePages(ap.Address, ap.PageCount);
for (size_t i = 0; i < ap.PageCount; i++)
-Memory::Virtual(this->PageTable).Remap((void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)), (void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)), Memory::PTFlag::RW);
+Memory::Virtual(this->Table).Remap((void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)), (void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)), Memory::PTFlag::RW);
}
if (this->Directory)
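
With the fallback above, a MemMgr constructed without an explicit table binds to whatever address space is currently loaded. A hypothetical call site; ProcTable and ProcDirectory are placeholders, not names from this commit:

    Memory::MemMgr PerProcess(ProcTable, ProcDirectory); /* manage a specific process's space */
    Memory::MemMgr Current(nullptr, nullptr);            /* falls back to readcr3().raw */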

Core/Memory/PageTable.cpp (new file, 20 lines)

@@ -0,0 +1,20 @@
+#include <memory.hpp>
+
+namespace Memory
+{
+    void PageTable::Update()
+    {
+#if defined(a86)
+        asmv("mov %0, %%cr3" ::"r"(this));
+#elif defined(aa64)
+        asmv("msr ttbr0_el1, %0" ::"r"(this));
+#endif
+    }
+
+    PageTable PageTable::Fork()
+    {
+        PageTable NewTable;
+        memcpy(&NewTable, this, sizeof(PageTable));
+        return NewTable;
+    }
+}
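
Note that Fork() duplicates only the root table: one memcpy of sizeof(PageTable), i.e. the 512 top-level entries. Parent and child still share every lower-level table and every mapped frame, so per-page copying is left to callers such as StackGuard::Fork() further down. A hypothetical call site; the allocation line is an assumption modeled on how KernelPageTable is obtained above:

    PageTable *Child = (PageTable *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageTable) + 1));
    *Child = Parent->Fork(); /* shallow copy of the 512 root entries */
    Child->Update();         /* reload CR3 (x86) or TTBR0_EL1 (AArch64) */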


@@ -21,60 +21,6 @@
namespace Memory
{
-StackGuard::StackGuard(bool User, PageTable4 *Table)
-{
-this->UserMode = User;
-this->Table = Table;
-if (this->UserMode)
-{
-void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE + 1));
-memset(AllocatedStack, 0, USER_STACK_SIZE);
-debug("AllocatedStack: %p", AllocatedStack);
-Virtual va = Virtual(Table);
-for (size_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
-{
-va.Map((void *)(USER_STACK_BASE + (i * PAGE_SIZE)),
-(void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)),
-PTFlag::RW | PTFlag::US);
-debug("Mapped %p to %p", (void *)(USER_STACK_BASE + (i * PAGE_SIZE)),
-(void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)));
-}
-this->StackBottom = (void *)USER_STACK_BASE;
-this->StackTop = (void *)(USER_STACK_BASE + USER_STACK_SIZE);
-this->StackPhyiscalBottom = AllocatedStack;
-this->StackPhyiscalTop = (void *)((uintptr_t)AllocatedStack + USER_STACK_SIZE);
-this->Size = USER_STACK_SIZE;
-}
-else
-{
-this->StackBottom = KernelAllocator.RequestPages(TO_PAGES(STACK_SIZE + 1));
-memset(this->StackBottom, 0, STACK_SIZE);
-debug("StackBottom: %p", this->StackBottom);
-this->StackTop = (void *)((uintptr_t)this->StackBottom + STACK_SIZE);
-this->StackPhyiscalBottom = this->StackBottom;
-this->StackPhyiscalTop = this->StackTop;
-this->Size = STACK_SIZE;
-}
-debug("Allocated stack at %p", this->StackBottom);
-}
-StackGuard::~StackGuard()
-{
-fixme("Temporarily disabled stack guard deallocation");
-// KernelAllocator.FreePages(this->StackBottom, TO_PAGES(this->Size + 1));
-// debug("Freed stack at %p", this->StackBottom);
-}
bool StackGuard::Expand(uintptr_t FaultAddress)
{
if (this->UserMode)
@@ -82,22 +28,35 @@ namespace Memory
if (FaultAddress < (uintptr_t)this->StackBottom - USER_STACK_SIZE ||
FaultAddress > (uintptr_t)this->StackTop)
{
info("Fault address %#lx is not in range of stack %#lx - %#lx", FaultAddress,
(uintptr_t)this->StackBottom - USER_STACK_SIZE, (uintptr_t)this->StackTop);
return false; /* It's not about the stack. */
}
else
{
void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE + 1));
debug("AllocatedStack: %p", AllocatedStack);
debug("AllocatedStack: %#lx", AllocatedStack);
memset(AllocatedStack, 0, USER_STACK_SIZE);
Virtual va = Virtual(this->Table);
-for (uintptr_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
+for (size_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
{
-va.Map((void *)((uintptr_t)this->StackBottom - (i * PAGE_SIZE)), (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)), PTFlag::RW | PTFlag::US);
-debug("Mapped %p to %p", (void *)((uintptr_t)this->StackBottom - (i * PAGE_SIZE)), (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)));
+void *VirtualPage = (void *)((uintptr_t)this->StackBottom - (i * PAGE_SIZE));
+void *PhysicalPage = (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE));
+va.Map(VirtualPage, PhysicalPage, PTFlag::RW | PTFlag::US);
+AllocatedPages pa = {
+.PhysicalAddress = PhysicalPage,
+.VirtualAddress = VirtualPage,
+};
+AllocatedPagesList.push_back(pa);
+debug("Mapped %#lx to %#lx", PhysicalPage, VirtualPage);
}
this->StackBottom = (void *)((uintptr_t)this->StackBottom - USER_STACK_SIZE);
this->Size += USER_STACK_SIZE;
info("Stack expanded to %p", this->StackBottom);
info("Stack expanded to %#lx", this->StackBottom);
this->Expanded = true;
return true;
}
}
@@ -107,4 +66,116 @@ namespace Memory
return false;
}
}
+void StackGuard::Fork(StackGuard *Parent)
+{
+this->UserMode = Parent->GetUserMode();
+this->StackBottom = Parent->GetStackBottom();
+this->StackTop = Parent->GetStackTop();
+this->StackPhysicalBottom = Parent->GetStackPhysicalBottom();
+this->StackPhysicalTop = Parent->GetStackPhysicalTop();
+this->Size = Parent->GetSize();
+this->Expanded = Parent->IsExpanded();
+if (this->UserMode)
+{
+std::vector<AllocatedPages> ParentAllocatedPages = Parent->GetAllocatedPages();
+Virtual va = Virtual(Table);
+foreach (auto Page in AllocatedPagesList)
+{
+va.Unmap(Page.VirtualAddress);
+KernelAllocator.FreePage(Page.PhysicalAddress);
+debug("Freed %#lx and unmapped %#lx", Page.PhysicalAddress, Page.VirtualAddress);
+}
+foreach (auto Page in ParentAllocatedPages)
+{
+void *NewPhysical = KernelAllocator.RequestPage();
+memcpy(NewPhysical, Page.PhysicalAddress, PAGE_SIZE);
+va.Map(Page.VirtualAddress, NewPhysical, PTFlag::RW | PTFlag::US);
+AllocatedPages pa = {
+.PhysicalAddress = NewPhysical,
+.VirtualAddress = Page.VirtualAddress,
+};
+AllocatedPagesList.push_back(pa);
+debug("Mapped %#lx to %#lx", NewPhysical, Page.VirtualAddress);
+}
+}
+else
+{
+fixme("Kernel mode stack fork not implemented");
+}
+}
+StackGuard::StackGuard(bool User, PageTable *Table)
+{
+this->UserMode = User;
+this->Table = Table;
+if (this->UserMode)
+{
+void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE + 1));
+memset(AllocatedStack, 0, USER_STACK_SIZE);
+debug("AllocatedStack: %#lx", AllocatedStack);
+Virtual va = Virtual(Table);
+for (size_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
+{
+void *VirtualPage = (void *)(USER_STACK_BASE + (i * PAGE_SIZE));
+void *PhysicalPage = (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE));
+va.Map(VirtualPage, PhysicalPage, PTFlag::RW | PTFlag::US);
+AllocatedPages pa = {
+.PhysicalAddress = PhysicalPage,
+.VirtualAddress = VirtualPage,
+};
+AllocatedPagesList.push_back(pa);
+debug("Mapped %#lx to %#lx", PhysicalPage, VirtualPage);
+}
+this->StackBottom = (void *)USER_STACK_BASE;
+this->StackTop = (void *)(USER_STACK_BASE + USER_STACK_SIZE);
+this->StackPhysicalBottom = AllocatedStack;
+this->StackPhysicalTop = (void *)((uintptr_t)AllocatedStack + USER_STACK_SIZE);
+this->Size = USER_STACK_SIZE;
+}
+else
+{
+this->StackBottom = KernelAllocator.RequestPages(TO_PAGES(STACK_SIZE + 1));
+memset(this->StackBottom, 0, STACK_SIZE);
+debug("StackBottom: %#lx", this->StackBottom);
+this->StackTop = (void *)((uintptr_t)this->StackBottom + STACK_SIZE);
+this->StackPhysicalBottom = this->StackBottom;
+this->StackPhysicalTop = this->StackTop;
+this->Size = STACK_SIZE;
+for (size_t i = 0; i < TO_PAGES(STACK_SIZE); i++)
+{
+AllocatedPages pa = {
+.PhysicalAddress = (void *)((uintptr_t)this->StackBottom + (i * PAGE_SIZE)),
+.VirtualAddress = (void *)((uintptr_t)this->StackBottom + (i * PAGE_SIZE)),
+};
+AllocatedPagesList.push_back(pa);
+}
+}
+debug("Allocated stack at %#lx", this->StackBottom);
+}
+StackGuard::~StackGuard()
+{
+foreach (auto Page in AllocatedPagesList)
+{
+KernelAllocator.FreePage(Page.PhysicalAddress);
+debug("Freed page at %#lx", Page.PhysicalAddress);
+}
+}
}
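
StackGuard::Fork() copies eagerly: the child first unmaps and frees its own fresh pages, then clones each page the parent tracked into a new frame with memcpy, so the cost grows with stack size and there is no copy-on-write yet. A hypothetical fork path, assuming the child's table came from PageTable::Fork() above; ChildTable and ParentStack are placeholders:

    StackGuard *ChildStack = new StackGuard(true /* user */, ChildTable);
    ChildStack->Fork(ParentStack); /* discard the fresh pages, clone the parent's instead */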


@@ -292,12 +292,12 @@ namespace Memory
#endif
}
-Virtual::Virtual(PageTable4 *Table)
+Virtual::Virtual(PageTable *Table)
{
if (Table)
this->Table = Table;
else
-this->Table = (PageTable4 *)CPU::PageTable();
+this->Table = (PageTable *)CPU::PageTable();
}
Virtual::~Virtual() {}
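
Same fallback pattern as MemMgr: passing nullptr binds the wrapper to the live table. A hypothetical call site; VirtAddr and PhysAddr are placeholders:

    Memory::Virtual va(nullptr); /* falls back to CPU::PageTable() */
    va.Map((void *)VirtAddr, (void *)PhysAddr, Memory::PTFlag::RW);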