mirror of https://github.com/Fennix-Project/Kernel.git
synced 2025-05-28 07:24:37 +00:00

Stack guard implementation

parent: 1afe255fed
commit: eaaf8427dc
@@ -299,6 +299,24 @@ namespace CrashHandler
 	if (Frame->cs != GDT_USER_CODE && Frame->cs != GDT_USER_DATA)
 	{
 		debug("Exception in kernel mode");
+		if (Frame->InterruptNumber == CPU::x64::PageFault)
+		{
+			CPUData *data = GetCurrentCPU();
+			if (data)
+			{
+				if (data->CurrentThread->Stack->Expand(CPU::x64::readcr2().raw))
+				{
+					debug("Stack expanded");
+					CPU::Interrupts(CPU::Enable);
+					return;
+				}
+				else
+				{
+					error("Stack expansion failed");
+				}
+			}
+		}
+
 		if (TaskManager)
 			TaskManager->Panic();
 		Display->CreateBuffer(0, 0, SBIdx);
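To make the added control flow explicit: on x86-64, CR2 holds the faulting linear address, so the handler can ask the current thread's StackGuard whether that address falls just below its guarded stack; only when Expand() declines does the kernel fall through to Panic(). A minimal sketch of that decision, using the interfaces from the diff (PageFaultRecover itself is a hypothetical helper, not part of the commit):

```cpp
// Sketch only: how a #PF handler can defer to a guarded stack before
// panicking. PageFaultRecover is hypothetical; readcr2() and
// StackGuard::Expand() are the interfaces used by the hunk above.
bool PageFaultRecover(CPUData *Cpu)
{
	if (!Cpu || !Cpu->CurrentThread)
		return false;                             // no thread context to grow
	uint64_t Fault = CPU::x64::readcr2().raw;     // CR2 = faulting address
	return Cpu->CurrentThread->Stack->Expand(Fault); // true => retry faulting access
}
```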
@@ -30,7 +30,8 @@ __no_stack_protector void UserModeExceptionHandler(CHArchTrapFrame *Frame)
 	CriticalSection cs;
 	debug("Interrupts? %s.", cs.IsInterruptsEnabled() ? "Yes" : "No");
 	fixme("Handling user mode exception");
-	TaskManager->GetCurrentThread()->Status = Tasking::TaskStatus::Terminated;
+	TaskManager->GetCurrentThread()->Status = Tasking::TaskStatus::Stopped;
+	CPUData *CurCPU = GetCurrentCPU();
 
 	{
 		CPU::x64::CR0 cr0 = CPU::x64::readcr0();
@@ -41,7 +42,7 @@ __no_stack_protector void UserModeExceptionHandler(CHArchTrapFrame *Frame)
 	CPU::x64::EFER efer;
 	efer.raw = CPU::x64::rdmsr(CPU::x64::MSR_EFER);
 
-	error("Technical Informations on CPU %lld:", GetCurrentCPU()->ID);
+	error("Technical Informations on CPU %lld:", CurCPU->ID);
 #if defined(__amd64__)
 	uint64_t ds;
 	asmv("mov %%ds, %0"
@@ -176,6 +177,13 @@ __no_stack_protector void UserModeExceptionHandler(CHArchTrapFrame *Frame)
 	}
 	case CPU::x64::PageFault:
 	{
+		if (CurCPU)
+			if (CurCPU->CurrentThread->Stack->Expand(CPU::x64::readcr2().raw))
+			{
+				debug("Stack expanded");
+				return;
+			}
+
 		CPU::x64::PageFaultErrorCode params = {.raw = (uint32_t)Frame->ErrorCode};
 #if defined(__amd64__)
 		error("An exception occurred at %#lx by %#lx", CPU::x64::readcr2().PFLA, Frame->rip);
@@ -226,6 +234,9 @@ __no_stack_protector void UserModeExceptionHandler(CHArchTrapFrame *Frame)
 		break;
 	}
 	}
+
+	TaskManager->GetCurrentThread()->Status = Tasking::TaskStatus::Terminated;
+	__sync_synchronize();
 	error("End of report.");
 	CPU::Interrupts(CPU::Enable);
 	debug("Interrupts enabled back.");
@@ -43,8 +43,8 @@ void MapFromZero(PageTable *PT, BootInfo *Info)
 	uint64_t VirtualOffsetNormalVMA = NORMAL_VMA_OFFSET;
 	for (uint64_t t = 0; t < Info->Memory.Size; t += PAGE_SIZE)
 	{
-		va.Map((void *)t, (void *)t, PTFlag::RW | PTFlag::US);
-		va.Map((void *)VirtualOffsetNormalVMA, (void *)t, PTFlag::RW | PTFlag::US);
+		va.Map((void *)t, (void *)t, PTFlag::RW);
+		va.Map((void *)VirtualOffsetNormalVMA, (void *)t, PTFlag::RW);
 		VirtualOffsetNormalVMA += PAGE_SIZE;
 	}
 }
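Note the security effect of this hunk: dropping PTFlag::US makes the identity map and its NORMAL_VMA mirror supervisor-only, so user code can no longer reach arbitrary physical memory through them; user-visible pages are now mapped explicitly (for example by StackGuard below). A quick way to observe the change, assuming the Virtual::Check helper this commit uses elsewhere (SomeKernelAddress is a hypothetical placeholder):

```cpp
// After this commit, a user-accessibility check against the identity map
// should fail: the US (user/supervisor) bit is no longer set there.
// SomeKernelAddress is a hypothetical address inside the identity map.
Memory::Virtual va = Memory::Virtual(KernelPageTable);
bool UserVisible = va.Check((void *)SomeKernelAddress, Memory::PTFlag::US); // expect false now
```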
@@ -174,8 +174,10 @@ void InitializeMemoryManagement(BootInfo *Info)
 	UserspaceKernelOnlyPageTable = (PageTable *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE));
 	memset(UserspaceKernelOnlyPageTable, 0, PAGE_SIZE);
 
-	debug("Mapping from %#llx to %#llx", 0, Info->Memory.Size);
+	debug("Mapping from 0x0 to %#llx", Info->Memory.Size);
 	MapFromZero(KernelPageTable, Info);
+	debug("Mapping from 0x0 %#llx for Userspace Page Table", Info->Memory.Size);
+	MapFromZero(UserspaceKernelOnlyPageTable, Info);
 
 	/* Mapping Framebuffer address */
 	debug("Mapping Framebuffer");
@@ -289,14 +291,20 @@ void HeapFree(void *Address)
 	}
 }
 
-void *operator new(size_t Size) { return HeapMalloc(Size); }
-void *operator new[](size_t Size) { return HeapMalloc(Size); }
+void *operator new(size_t Size) {
+	return HeapMalloc(Size); }
+void *operator new[](size_t Size) {
+	return HeapMalloc(Size); }
 void *operator new(unsigned long Size, std::align_val_t Alignment)
 {
 	fixme("operator new with alignment(%#lx) is not implemented", Alignment);
 	return HeapMalloc(Size);
 }
-void operator delete(void *Pointer) { HeapFree(Pointer); }
-void operator delete[](void *Pointer) { HeapFree(Pointer); }
-void operator delete(void *Pointer, long unsigned int Size) { HeapFree(Pointer); }
-void operator delete[](void *Pointer, long unsigned int Size) { HeapFree(Pointer); }
+void operator delete(void *Pointer) {
+	HeapFree(Pointer); }
+void operator delete[](void *Pointer) {
+	HeapFree(Pointer); }
+void operator delete(void *Pointer, long unsigned int Size) {
+	HeapFree(Pointer); }
+void operator delete[](void *Pointer, long unsigned int Size) {
+	HeapFree(Pointer); }
Core/Memory/StackGuard.cpp (new file, 66 lines)
@@ -0,0 +1,66 @@
+#include <memory.hpp>
+
+#include <debug.h>
+
+namespace Memory
+{
+	StackGuard::StackGuard(bool User, PageTable *Table)
+	{
+		this->UserMode = User;
+		this->Table = Table;
+		if (this->UserMode)
+		{
+			void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
+			memset(AllocatedStack, 0, USER_STACK_SIZE);
+			for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
+			{
+				Virtual(Table).Map((void *)((uint64_t)AllocatedStack + (i * PAGE_SIZE)),
+								   (void *)(USER_STACK_BASE + (i * PAGE_SIZE)),
+								   PTFlag::RW | PTFlag::US);
+			}
+			this->StackBottom = (void *)USER_STACK_BASE;
+			this->StackTop = (void *)(USER_STACK_BASE + USER_STACK_SIZE);
+			this->Size = USER_STACK_SIZE;
+		}
+		else
+		{
+			this->StackBottom = KernelAllocator.RequestPages(TO_PAGES(STACK_SIZE));
+			memset(this->StackBottom, 0, STACK_SIZE);
+			this->StackTop = (void *)((uint64_t)this->StackBottom + STACK_SIZE);
+			this->Size = STACK_SIZE;
+		}
+		trace("Allocated stack at %p", this->StackBottom);
+	}
+
+	StackGuard::~StackGuard() { KernelAllocator.FreePages(this->StackBottom, TO_PAGES(this->Size)); }
+
+	bool StackGuard::Expand(uint64_t FaultAddress)
+	{
+		if (this->UserMode)
+		{
+			if (FaultAddress < (uint64_t)this->StackBottom - USER_STACK_SIZE ||
+				FaultAddress > (uint64_t)this->StackTop)
+			{
+				return false; // It's not about the stack.
+			}
+			else
+			{
+				void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
+				memset(AllocatedStack, 0, USER_STACK_SIZE);
+				for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
+					Virtual(this->Table).Map((void *)((uint64_t)AllocatedStack + (i * PAGE_SIZE)), (void *)((uint64_t)this->StackBottom - (i * PAGE_SIZE)), PTFlag::RW | PTFlag::US);
+				this->StackBottom = (void *)((uint64_t)this->StackBottom - USER_STACK_SIZE);
+				this->Size += USER_STACK_SIZE;
+				info("Stack expanded to %p", this->StackBottom);
+				return true;
+			}
+		}
+		else
+		{
+			fixme("Not implemented and probably not needed");
+			return false;
+		}
+		error("Reached end of function! How?");
+		return false;
+	}
+}
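The growth window in Expand() above is worth spelling out: a fault counts as stack growth only if it lands between one USER_STACK_SIZE below StackBottom and StackTop. With the new constants from this commit (USER_STACK_BASE = 0xFFFFEFFFFFFF0000, USER_STACK_SIZE = 0x2000), the first expansion is triggered only by faults in [0xFFFFEFFFFFFEE000, 0xFFFFEFFFFFFF2000]. A sketch of that predicate (InGrowthWindow is a hypothetical helper restating the check, not part of the commit):

```cpp
// Restates Expand()'s range test for a downward-growing stack: accept the
// fault only if it sits at most one expansion window below StackBottom.
bool InGrowthWindow(uint64_t Fault, uint64_t Bottom, uint64_t Top)
{
	return Fault >= Bottom - USER_STACK_SIZE && Fault <= Top;
}
```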
@@ -92,10 +92,7 @@ namespace Tasking
 		trace("Thread \"%s\"(%d) removed from process \"%s\"(%d)",
 			  Thread->Name, Thread->ID, Thread->Parent->Name, Thread->Parent->ID);
 		// Free memory
-		if (Thread->Security.TrustLevel == TaskTrustLevel::User)
-			KernelAllocator.FreePages((void *)((uint64_t)Thread->Stack - USER_STACK_SIZE), TO_PAGES(USER_STACK_SIZE) /* + 1*/);
-		else
-			KernelAllocator.FreePages((void *)((uint64_t)Thread->Stack - STACK_SIZE), TO_PAGES(STACK_SIZE) /* + 1*/);
+		delete Thread->Stack;
 		SecurityManager.DestroyToken(Thread->Security.UniqueToken);
 		delete Thread->Parent->Threads[i];
 		// Remove from the list
@@ -125,7 +122,8 @@ namespace Tasking
 		// Free memory
 		delete ListProcess[i]->IPCHandles;
 		SecurityManager.DestroyToken(ListProcess[i]->Security.UniqueToken);
-		KernelAllocator.FreePages((void *)ListProcess[i]->PageTable, TO_PAGES(PAGE_SIZE));
+		if (ListProcess[i]->Security.TrustLevel == TaskTrustLevel::User)
+			KernelAllocator.FreePages((void *)ListProcess[i]->PageTable, TO_PAGES(PAGE_SIZE));
 		delete ListProcess[i];
 		// Remove from the list
 		ListProcess.remove(i);
@@ -356,6 +354,7 @@ namespace Tasking
 			warn("Scheduler stopped.");
 			return;
 		}
+		CPU::x64::writecr3({.raw = (uint64_t)KernelPageTable}); // Restore kernel page table for safety reasons.
 		CPUData *CurrentCPU = GetCurrentCPU();
 		// if (CurrentCPU->ID != 0)
 		//	debug("Scheduler called from CPU %d", CurrentCPU->ID);
@@ -512,7 +511,7 @@ namespace Tasking
 			}
 		}
 		*Frame = CurrentCPU->CurrentThread->Registers;
-		GlobalDescriptorTable::SetKernelStack((void *)((uint64_t)CurrentCPU->CurrentThread->Stack + STACK_SIZE));
+		GlobalDescriptorTable::SetKernelStack((void *)((uint64_t)CurrentCPU->CurrentThread->Stack->GetStackTop()));
 		CPU::x64::writecr3({.raw = (uint64_t)CurrentCPU->CurrentProcess->PageTable});
 		CPU::x64::fxrstor(CurrentCPU->CurrentThread->FXRegion);
 		CPU::x64::wrmsr(CPU::x64::MSR_GS_BASE, CurrentCPU->CurrentThread->GSBase);
@@ -737,8 +736,7 @@ namespace Tasking
 	case TaskTrustLevel::Idle:
 	case TaskTrustLevel::Kernel:
 	{
-		Thread->Stack = KernelAllocator.RequestPages(TO_PAGES(STACK_SIZE) + 1);
-		memset(Thread->Stack, 0, STACK_SIZE);
+		Thread->Stack = new Memory::StackGuard(false, Parent->PageTable);
 #if defined(__amd64__)
 		SecurityManager.TrustToken(Thread->Security.UniqueToken, TokenTrustLevel::TrustedByKernel);
 		Thread->GSBase = CPU::x64::rdmsr(CPU::x64::MSRID::MSR_GS_BASE);
@@ -748,7 +746,7 @@ namespace Tasking
 		Thread->Registers.rflags.AlwaysOne = 1;
 		Thread->Registers.rflags.IF = 1;
 		Thread->Registers.rflags.ID = 1;
-		Thread->Registers.rsp = ((uint64_t)Thread->Stack + STACK_SIZE);
+		Thread->Registers.rsp = ((uint64_t)Thread->Stack->GetStackTop());
 		POKE(uint64_t, Thread->Registers.rsp) = (uint64_t)ThreadDoExit;
 #elif defined(__i386__)
 #elif defined(__aarch64__)
@@ -757,8 +755,7 @@ namespace Tasking
 	}
 	case TaskTrustLevel::User:
 	{
-		Thread->Stack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE) + 1);
-		memset(Thread->Stack, 0, USER_STACK_SIZE);
+		Thread->Stack = new Memory::StackGuard(true, Parent->PageTable);
 #if defined(__amd64__)
 		SecurityManager.TrustToken(Thread->Security.UniqueToken, TokenTrustLevel::Untrusted);
 		Thread->GSBase = 0;
@@ -771,7 +768,7 @@ namespace Tasking
 		// Thread->Registers.rflags.IOPL = 3;
 		Thread->Registers.rflags.IF = 1;
 		Thread->Registers.rflags.ID = 1;
-		Thread->Registers.rsp = ((uint64_t)Thread->Stack + USER_STACK_SIZE);
+		Thread->Registers.rsp = ((uint64_t)Thread->Stack->GetStackTop());
 
 		if (Compatibility == TaskCompatibility::Linux)
 		{
@@ -846,7 +843,7 @@ namespace Tasking
 			TmpStack -= sizeof(uint64_t);
 			// POKE(uint64_t, TmpStack) = argv.size() - 1;
 
-			Thread->Registers.rsp -= (uint64_t)Thread->Stack + STACK_SIZE - TmpStack;
+			Thread->Registers.rsp -= (uint64_t)Thread->Stack->GetStackTop() - TmpStack;
 		}
 		else // Native
 		{
@@ -906,9 +903,6 @@ namespace Tasking
 		/* We need to leave the libc's crt to make a syscall when the Thread is exited or we are going to get GPF or PF exception. */
 
 		Memory::Virtual uva = Memory::Virtual(Parent->PageTable);
-		for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
-			uva.Map((void *)((uint64_t)Thread->Stack + (i * USER_STACK_SIZE)), (void *)((uint64_t)Thread->Stack + (i * USER_STACK_SIZE)), Memory::PTFlag::RW | Memory::PTFlag::US);
-
 		if (!uva.Check((void *)Offset, Memory::PTFlag::US))
 		{
 			error("Offset is not user accessible");
@@ -925,7 +919,7 @@ namespace Tasking
 	default:
 	{
 		error("Unknown elevation.");
-		KernelAllocator.FreePages(Thread->Stack, TO_PAGES(STACK_SIZE));
+		delete Thread->Stack;
 		this->NextTID--;
 		delete Thread;
 		return nullptr;
@@ -954,9 +948,9 @@ namespace Tasking
 
 	debug("Thread offset is %#lx (EntryPoint:%#lx)", Thread->Offset, Thread->EntryPoint);
 	if (Parent->Security.TrustLevel == TaskTrustLevel::User)
-		debug("Thread stack region is %#lx-%#lx (U) and rsp is %#lx", Thread->Stack, (uint64_t)Thread->Stack + USER_STACK_SIZE, Thread->Registers.rsp);
+		debug("Thread stack region is %#lx-%#lx (U) and rsp is %#lx", Thread->Stack->GetStackBottom(), Thread->Stack->GetStackTop(), Thread->Registers.rsp);
 	else
-		debug("Thread stack region is %#lx-%#lx (K) and rsp is %#lx", Thread->Stack, (uint64_t)Thread->Stack + STACK_SIZE, Thread->Registers.rsp);
+		debug("Thread stack region is %#lx-%#lx (K) and rsp is %#lx", Thread->Stack->GetStackBottom(), Thread->Stack->GetStackTop(), Thread->Registers.rsp);
 	debug("Created thread \"%s\"(%d) in process \"%s\"(%d)",
 		  Thread->Name, Thread->ID,
 		  Thread->Parent->Name, Thread->Parent->ID);
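Taken together, the Task.cpp hunks above reduce the thread stack lifecycle to the StackGuard API: raw `Stack + STACK_SIZE` arithmetic disappears in favor of GetStackTop(), and cleanup becomes a destructor call. A condensed sketch of the lifecycle, using only names from this commit with the surrounding control flow simplified:

```cpp
// Condensed lifecycle, simplified from the hunks above.
Thread->Stack = new Memory::StackGuard(true, Parent->PageTable); // allocate + map pages
Thread->Registers.rsp = (uint64_t)Thread->Stack->GetStackTop();  // stack grows down from the top

// Later, on a page fault, the exception handler may attempt:
//     Thread->Stack->Expand(CPU::x64::readcr2().raw);

// And on thread removal:
delete Thread->Stack; // destructor frees TO_PAGES(Size) pages
```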
@@ -995,9 +989,7 @@ namespace Tasking
 	{
 		SecurityManager.TrustToken(Process->Security.UniqueToken, TokenTrustLevel::TrustedByKernel);
 #if defined(__amd64__)
-		Process->PageTable = (Memory::PageTable *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE));
-		memset(Process->PageTable, 0, PAGE_SIZE);
-		memcpy(Process->PageTable, (void *)CPU::x64::readcr3().raw, PAGE_SIZE);
+		Process->PageTable = (Memory::PageTable *)CPU::x64::readcr3().raw;
 #elif defined(__i386__)
 #elif defined(__aarch64__)
 #endif
@@ -1008,10 +1000,9 @@ namespace Tasking
 		SecurityManager.TrustToken(Process->Security.UniqueToken, TokenTrustLevel::Untrusted);
 #if defined(__amd64__)
 		Process->PageTable = (Memory::PageTable *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE));
-		memset(Process->PageTable, 0, PAGE_SIZE);
-		memcpy(Process->PageTable, (void *)CPU::x64::readcr3().raw, PAGE_SIZE);
-		fixme("User mode process page table is not implemented.");
-		// memcpy(Process->PageTable, (void *)UserspaceKernelOnlyPageTable, PAGE_SIZE);
+		memcpy(Process->PageTable, (void *)UserspaceKernelOnlyPageTable, PAGE_SIZE);
+		for (uint64_t i = 0; i < TO_PAGES(PAGE_SIZE); i++)
+			Memory::Virtual(Process->PageTable).Map((void *)Process->PageTable, (void *)Process->PageTable, Memory::PTFlag::RW); // Make sure the page table is mapped.
 #elif defined(__i386__)
 #elif defined(__aarch64__)
 #endif
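The pattern adopted here: a user process now gets a fresh page table cloned from UserspaceKernelOnlyPageTable (the kernel-only template built by MapFromZero earlier in this commit) instead of the live CR3, so user address spaces no longer inherit user-accessible kernel mappings; the extra Map call keeps the page-table page itself reachable after the CR3 switch. A sketch of the same pattern factored into a helper (SetupUserPageTable is hypothetical, not part of the commit):

```cpp
// Sketch of the clone-from-template pattern, assuming the identifiers
// from the hunk above (UserspaceKernelOnlyPageTable, KernelAllocator).
Memory::PageTable *SetupUserPageTable()
{
	using namespace Memory;
	PageTable *PT = (PageTable *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE));
	memcpy(PT, (void *)UserspaceKernelOnlyPageTable, PAGE_SIZE); // inherit kernel-only mappings
	Virtual(PT).Map((void *)PT, (void *)PT, PTFlag::RW);         // keep the PT itself mapped
	return PT;
}
```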
@@ -34,9 +34,9 @@ extern uint64_t _kernel_text_end, _kernel_data_end, _kernel_rodata_end;
 // geopbyte
 #define TO_GPB(d) (d / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024)
 
-#define PAGE_SIZE 0x1000
-#define STACK_SIZE 0x1000000
-#define USER_STACK_SIZE 0x1000000
+#define PAGE_SIZE 0x1000	   // 4KB
+#define STACK_SIZE 0x4000	   // 16kb
+#define USER_STACK_SIZE 0x2000 // 8kb
 
 // to pages
 #define TO_PAGES(d) (d / PAGE_SIZE + 1)
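Worth noting while reading the size changes above: TO_PAGES(d) expands to d / PAGE_SIZE + 1, so it adds a page even when d is an exact multiple of PAGE_SIZE; for example TO_PAGES(STACK_SIZE) = 0x4000 / 0x1000 + 1 = 5 pages for the 16 KiB kernel stack. For comparison only (this is not what the kernel uses), a classic round-up macro would allocate exactly 4 pages:

```cpp
// Hypothetical alternative, NOT part of the commit: round-up division
// that yields exactly 4 pages for a 16 KiB request.
#define TO_PAGES_EXACT(d) (((d) + PAGE_SIZE - 1) / PAGE_SIZE)
```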
@@ -49,11 +49,16 @@ extern uint64_t _kernel_text_end, _kernel_data_end, _kernel_rodata_end;
 /**
  * @brief KERNEL_HEAP_BASE is the base address of the kernel heap
  */
-#define KERNEL_HEAP_BASE 0xFFFFC00000000000
+#define KERNEL_HEAP_BASE 0xFFFFA00000000000
 /**
  * @brief USER_HEAP_BASE is the base address of the user heap allocated by the kernel
  */
-#define USER_HEAP_BASE 0xFFFFD00000000000
+#define USER_HEAP_BASE 0xFFFFB00000000000
+
+/**
+ * @brief USER_STACK_BASE is the base address of the user stack
+ */
+#define USER_STACK_BASE 0xFFFFEFFFFFFF0000
 
 namespace Memory
 {
@@ -401,6 +406,35 @@ namespace Memory
 	 */
 	~Virtual();
 	};
+
+	class StackGuard
+	{
+	private:
+		void *StackBottom = nullptr;
+		void *StackTop = nullptr;
+		void *SGB = nullptr;
+		void *SGT = nullptr;
+		uint64_t Size = 0;
+		bool UserMode = false;
+		PageTable *Table = nullptr;
+
+	public:
+		/** @brief For general info */
+		void *GetStackBottom() { return StackBottom; }
+		/** @brief For RSP */
+		void *GetStackTop() { return StackTop; }
+		/** @brief Called by exception handler */
+		bool Expand(uint64_t FaultAddress);
+		/**
+		 * @brief Construct a new Stack Guard object
+		 * @param User Stack for user mode?
+		 */
+		StackGuard(bool User, PageTable *Table);
+		/**
+		 * @brief Destroy the Stack Guard object
+		 */
+		~StackGuard();
+	};
 }
 
 /**
@@ -85,7 +85,7 @@ namespace Tasking
 		IP EntryPoint;
 		IPOffset Offset;
 		int ExitCode;
-		void *Stack __attribute__((aligned(16)));
+		Memory::StackGuard *Stack;
 		TaskStatus Status;
 #if defined(__amd64__)
 		CPU::x64::TrapFrame Registers;