Refactor stack expansion and stack fork implementation

This commit is contained in:
EnderIce2 2024-03-21 02:09:48 +02:00
parent 70cbbea3c0
commit 49ef1dc454
Signed by untrusted user who does not match committer: enderice2
GPG Key ID: EACC3AD603BAB4DD
2 changed files with 93 additions and 105 deletions

View File

@@ -23,134 +23,119 @@ namespace Memory
{ {
bool StackGuard::Expand(uintptr_t FaultAddress) bool StackGuard::Expand(uintptr_t FaultAddress)
{ {
if (this->UserMode) if (!this->UserMode)
assert(!"Kernel mode stack expansion not implemented");
if (FaultAddress < USER_STACK_END ||
FaultAddress > USER_STACK_BASE)
{ {
if (FaultAddress < (uintptr_t)this->StackBottom - 0x100 || info("Fault address %#lx is not in range of stack %#lx - %#lx",
FaultAddress > (uintptr_t)this->StackTop) FaultAddress, USER_STACK_END, USER_STACK_BASE);
{ return false; /* It's not about the stack. */
info("Fault address %#lx is not in range of stack %#lx - %#lx", FaultAddress,
(uintptr_t)this->StackBottom - 0x100, (uintptr_t)this->StackTop);
return false; /* It's not about the stack. */
}
else
{
void *AllocatedStack = this->vma->RequestPages(TO_PAGES(USER_STACK_SIZE) + 1);
debug("AllocatedStack: %#lx", AllocatedStack);
memset(AllocatedStack, 0, USER_STACK_SIZE);
Virtual vmm = Virtual(this->vma->Table);
for (size_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
{
void *VirtualPage = (void *)((uintptr_t)this->StackBottom - (i * PAGE_SIZE));
void *PhysicalPage = (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE));
vmm.Map(VirtualPage, PhysicalPage, PTFlag::RW | PTFlag::US);
AllocatedPages ap = {
.PhysicalAddress = PhysicalPage,
.VirtualAddress = VirtualPage,
};
AllocatedPagesList.push_back(ap);
debug("Mapped %#lx to %#lx", PhysicalPage, VirtualPage);
}
this->StackBottom = (void *)((uintptr_t)this->StackBottom - USER_STACK_SIZE);
this->Size += USER_STACK_SIZE;
debug("Stack expanded to %#lx", this->StackBottom);
this->Expanded = true;
return true;
}
} }
else
uintptr_t roundFA = ROUND_DOWN(FaultAddress, PAGE_SIZE);
uintptr_t diff = (uintptr_t)this->StackBottom - roundFA;
size_t stackPages = TO_PAGES(diff);
stackPages = stackPages < 1 ? 1 : stackPages;
debug("roundFA: %#lx, sb: %#lx, diff: %#lx, stackPages: %d",
roundFA, this->StackBottom, diff, stackPages);
void *AllocatedStack = vma->RequestPages(stackPages);
debug("AllocatedStack: %#lx", AllocatedStack);
for (size_t i = 0; i < stackPages; i++)
{ {
fixme("Not implemented and probably not needed"); void *vAddress = (void *)((uintptr_t)this->StackBottom - (i * PAGE_SIZE));
return false; void *pAddress = (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE));
vma->Map(vAddress, pAddress, PAGE_SIZE, PTFlag::RW | PTFlag::US);
AllocatedPages ap = {
.PhysicalAddress = pAddress,
.VirtualAddress = vAddress,
};
AllocatedPagesList.push_back(ap);
debug("Mapped %#lx to %#lx", pAddress, vAddress);
} }
this->StackBottom = (void *)((uintptr_t)this->StackBottom - (stackPages * PAGE_SIZE));
this->Size += stackPages * PAGE_SIZE;
debug("Stack expanded to %#lx", this->StackBottom);
this->Expanded = true;
return true;
} }
void StackGuard::Fork(StackGuard *Parent) void StackGuard::Fork(StackGuard *Parent)
{ {
this->UserMode = Parent->GetUserMode(); if (!this->UserMode)
this->StackBottom = Parent->GetStackBottom(); assert(!"Kernel mode stack fork not implemented");
this->StackTop = Parent->GetStackTop();
this->StackPhysicalBottom = Parent->GetStackPhysicalBottom();
this->StackPhysicalTop = Parent->GetStackPhysicalTop();
this->Size = Parent->GetSize();
this->Expanded = Parent->IsExpanded();
if (this->UserMode) this->UserMode = Parent->UserMode;
{ this->StackBottom = Parent->StackBottom;
std::list<AllocatedPages> ParentAllocatedPages = Parent->GetAllocatedPages(); this->StackTop = Parent->StackTop;
Virtual vma(this->vma->Table); this->StackPhysicalBottom = Parent->StackPhysicalBottom;
foreach (auto Page in ParentAllocatedPages) this->StackPhysicalTop = Parent->StackPhysicalTop;
{ this->Size = Parent->Size;
void *NewPhysical = this->vma->RequestPages(1); this->Expanded = Parent->Expanded;
debug("Forking address %#lx to %#lx", Page.PhysicalAddress, NewPhysical);
memcpy(NewPhysical, Page.PhysicalAddress, PAGE_SIZE);
vma.Map(Page.VirtualAddress, NewPhysical, PTFlag::RW | PTFlag::US);
AllocatedPages ap = { std::list<AllocatedPages> ParentAllocatedPages = Parent->GetAllocatedPages();
.PhysicalAddress = NewPhysical, foreach (auto Page in ParentAllocatedPages)
.VirtualAddress = Page.VirtualAddress,
};
AllocatedPagesList.push_back(ap);
debug("Mapped %#lx to %#lx", NewPhysical, Page.VirtualAddress);
}
}
else
{ {
fixme("Kernel mode stack fork not implemented"); void *NewPhysical = vma->RequestPages(1);
debug("Forking address %#lx to %#lx", Page.PhysicalAddress, NewPhysical);
memcpy(NewPhysical, Page.PhysicalAddress, PAGE_SIZE);
vma->Remap(Page.VirtualAddress, NewPhysical, PTFlag::RW | PTFlag::US);
AllocatedPages ap = {
.PhysicalAddress = NewPhysical,
.VirtualAddress = Page.VirtualAddress,
};
AllocatedPagesList.push_back(ap);
debug("Mapped %#lx to %#lx", NewPhysical, Page.VirtualAddress);
} }
} }
StackGuard::StackGuard(bool User, VirtualMemoryArea *vma) StackGuard::StackGuard(bool User, VirtualMemoryArea *_vma)
{ {
this->UserMode = User; this->UserMode = User;
this->vma = vma; this->vma = _vma;
if (this->UserMode) if (this->UserMode)
{ {
void *AllocatedStack = vma->RequestPages(TO_PAGES(USER_STACK_SIZE) + 1); void *AllocatedStack = vma->RequestPages(TO_PAGES(USER_STACK_SIZE));
memset(AllocatedStack, 0, USER_STACK_SIZE);
debug("AllocatedStack: %#lx", AllocatedStack);
{
Virtual vmm = Virtual(vma->Table);
for (size_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
{
void *VirtualPage = (void *)(USER_STACK_BASE + (i * PAGE_SIZE));
void *PhysicalPage = (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE));
vmm.Map(VirtualPage, PhysicalPage, PTFlag::RW | PTFlag::US);
AllocatedPages ap = {
.PhysicalAddress = PhysicalPage,
.VirtualAddress = VirtualPage,
};
AllocatedPagesList.push_back(ap);
debug("Mapped %#lx to %#lx", PhysicalPage, VirtualPage);
}
}
this->StackBottom = (void *)USER_STACK_BASE; this->StackBottom = (void *)USER_STACK_BASE;
this->StackTop = (void *)(USER_STACK_BASE + USER_STACK_SIZE); this->StackTop = (void *)(USER_STACK_BASE + USER_STACK_SIZE);
this->StackPhysicalBottom = AllocatedStack; this->StackPhysicalBottom = AllocatedStack;
this->StackPhysicalTop = (void *)((uintptr_t)AllocatedStack + USER_STACK_SIZE); this->StackPhysicalTop = (void *)((uintptr_t)AllocatedStack + USER_STACK_SIZE);
this->Size = USER_STACK_SIZE; this->Size = USER_STACK_SIZE;
debug("AllocatedStack: %#lx", AllocatedStack);
for (size_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
{
void *vAddress = (void *)(USER_STACK_BASE + (i * PAGE_SIZE));
void *pAddress = (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE));
vma->Map(vAddress, pAddress, PAGE_SIZE, PTFlag::RW | PTFlag::US);
AllocatedPages ap = {
.PhysicalAddress = pAddress,
.VirtualAddress = vAddress,
};
AllocatedPagesList.push_back(ap);
debug("Mapped %#lx to %#lx", pAddress, vAddress);
}
} }
else else
{ {
this->StackBottom = vma->RequestPages(TO_PAGES(STACK_SIZE) + 1); this->StackBottom = vma->RequestPages(TO_PAGES(STACK_SIZE));
memset(this->StackBottom, 0, STACK_SIZE);
debug("StackBottom: %#lx", this->StackBottom);
this->StackTop = (void *)((uintptr_t)this->StackBottom + STACK_SIZE); this->StackTop = (void *)((uintptr_t)this->StackBottom + STACK_SIZE);
this->StackPhysicalBottom = this->StackBottom; this->StackPhysicalBottom = this->StackBottom;
this->StackPhysicalTop = this->StackTop; this->StackPhysicalTop = this->StackTop;
this->Size = STACK_SIZE; this->Size = STACK_SIZE;
debug("StackBottom: %#lx", this->StackBottom);
for (size_t i = 0; i < TO_PAGES(STACK_SIZE); i++) for (size_t i = 0; i < TO_PAGES(STACK_SIZE); i++)
{ {
AllocatedPages pa = { AllocatedPages pa = {

View File

@@ -33,28 +33,31 @@
// exobyte // exobyte
#define TO_EiB(d) ((d) / 1024 / 1024 / 1024 / 1024 / 1024 / 1024) #define TO_EiB(d) ((d) / 1024 / 1024 / 1024 / 1024 / 1024 / 1024)
#define PAGE_SIZE 0x1000 // 4KB #define PAGE_SIZE 0x1000 /* 4 KiB */
#define PAGE_SIZE_4K PAGE_SIZE // 4KB #define PAGE_SIZE_4K PAGE_SIZE /* 4 KiB */
#define PAGE_SIZE_2M 0x200000 // 2MB #define PAGE_SIZE_2M 0x200000 /* 2 MiB */
#define PAGE_SIZE_4M 0x400000 // 4MB #define PAGE_SIZE_4M 0x400000 /* 4 MiB */
#define PAGE_SIZE_1G 0x40000000 // 1GB #define PAGE_SIZE_1G 0x40000000 /* 1 GiB */
#define STACK_SIZE 0x4000 // 16kb #define STACK_SIZE 0x4000 /* 16 KiB */
#define USER_STACK_SIZE 0x2000 // 8kb #define USER_STACK_SIZE 0x2000 /* 8 KiB */
// To pages /* To pages */
#define TO_PAGES(d) (((d) + PAGE_SIZE - 1) / PAGE_SIZE) #define TO_PAGES(d) (((d) + PAGE_SIZE - 1) / PAGE_SIZE)
// From pages /* From pages */
#define FROM_PAGES(d) ((d) * PAGE_SIZE) #define FROM_PAGES(d) ((d) * PAGE_SIZE)
#if defined(a64) || defined(aa64) #if defined(a64) || defined(aa64)
#define KERNEL_VMA_OFFSET 0xFFFFFFFF80000000 #define KERNEL_VMA_OFFSET 0xFFFFFFFF80000000
#define KERNEL_HEAP_BASE 0xFFFFFF0000000000 #define KERNEL_HEAP_BASE 0xFFFFFF0000000000
#define USER_STACK_BASE 0xFFFFEFFFFFFF0000 #define USER_STACK_BASE 0xFFFFEFFFFFFF0000
#define USER_STACK_END 0xFFFFEFFF00000000 /* 256 MiB */
#elif defined(a32) #elif defined(a32)
#define KERNEL_VMA_OFFSET 0xC0000000 #define KERNEL_VMA_OFFSET 0xC0000000
#define KERNEL_HEAP_BASE 0xA0000000 #define KERNEL_HEAP_BASE 0xA0000000
#define USER_STACK_BASE 0xEFFFFFFF #define USER_STACK_BASE 0xEFFFFFFF
#define USER_STACK_END 0xE0000000
#endif #endif
#endif // !__FENNIX_KERNEL_MEMORY_MACROS_H__ #endif // !__FENNIX_KERNEL_MEMORY_MACROS_H__