Stack guard implementation

This commit is contained in:
Alex 2022-11-21 03:20:13 +02:00
parent 1afe255fed
commit eaaf8427dc
Signed by untrusted user who does not match committer: enderice2
GPG Key ID: EACC3AD603BAB4DD
7 changed files with 171 additions and 43 deletions

View File

@@ -299,6 +299,24 @@ namespace CrashHandler
     if (Frame->cs != GDT_USER_CODE && Frame->cs != GDT_USER_DATA)
     {
         debug("Exception in kernel mode");
+        if (Frame->InterruptNumber == CPU::x64::PageFault)
+        {
+            CPUData *data = GetCurrentCPU();
+            if (data)
+            {
+                if (data->CurrentThread->Stack->Expand(CPU::x64::readcr2().raw))
+                {
+                    debug("Stack expanded");
+                    CPU::Interrupts(CPU::Enable);
+                    return;
+                }
+                else
+                {
+                    error("Stack expansion failed");
+                }
+            }
+        }
         if (TaskManager)
             TaskManager->Panic();
         Display->CreateBuffer(0, 0, SBIdx);
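
The hunk above makes a kernel-mode page fault survivable when it is caused by guarded-stack growth. Below is a minimal, host-runnable sketch of that decision flow; the types and the GetCurrentCPU/ReadCR2 helpers are stand-ins for the kernel's real definitions, not taken from this tree:

    #include <cstdint>
    #include <cstdio>

    // Stand-ins for the kernel pieces the hunk touches (hypothetical).
    struct StackGuard { bool Expand(uint64_t) { return true; } };
    struct Thread { StackGuard *Stack; };
    struct CPUData { Thread *CurrentThread; };

    static StackGuard FakeGuard;
    static Thread FakeThread{&FakeGuard};
    static CPUData FakeCPU{&FakeThread};
    static CPUData *GetCurrentCPU() { return &FakeCPU; }     // per-CPU data in the kernel
    static uint64_t ReadCR2() { return 0xFFFFEFFFFFFEF000; } // faulting address (CR2) on x86-64

    // On a kernel-mode #PF, try to grow the faulting thread's guarded
    // stack before taking the panic path; on success the handler just
    // returns and the faulting push/call retries.
    static bool TryStackRecovery()
    {
        CPUData *data = GetCurrentCPU();
        if (!data)
            return false;
        if (data->CurrentThread->Stack->Expand(ReadCR2()))
        {
            std::puts("Stack expanded"); // interrupts re-enabled, handler returns
            return true;
        }
        std::puts("Stack expansion failed"); // fall through to TaskManager->Panic()
        return false;
    }

    int main() { return TryStackRecovery() ? 0 : 1; }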

View File

@@ -30,7 +30,8 @@ __no_stack_protector void UserModeExceptionHandler(CHArchTrapFrame *Frame)
     CriticalSection cs;
     debug("Interrupts? %s.", cs.IsInterruptsEnabled() ? "Yes" : "No");
     fixme("Handling user mode exception");
-    TaskManager->GetCurrentThread()->Status = Tasking::TaskStatus::Terminated;
+    TaskManager->GetCurrentThread()->Status = Tasking::TaskStatus::Stopped;
+    CPUData *CurCPU = GetCurrentCPU();
     {
         CPU::x64::CR0 cr0 = CPU::x64::readcr0();
@@ -41,7 +42,7 @@ __no_stack_protector void UserModeExceptionHandler(CHArchTrapFrame *Frame)
     CPU::x64::EFER efer;
     efer.raw = CPU::x64::rdmsr(CPU::x64::MSR_EFER);
-    error("Technical Informations on CPU %lld:", GetCurrentCPU()->ID);
+    error("Technical Informations on CPU %lld:", CurCPU->ID);
 #if defined(__amd64__)
     uint64_t ds;
     asmv("mov %%ds, %0"
@@ -176,6 +177,13 @@ __no_stack_protector void UserModeExceptionHandler(CHArchTrapFrame *Frame)
     }
     case CPU::x64::PageFault:
     {
+        if (CurCPU)
+            if (CurCPU->CurrentThread->Stack->Expand(CPU::x64::readcr2().raw))
+            {
+                debug("Stack expanded");
+                return;
+            }
         CPU::x64::PageFaultErrorCode params = {.raw = (uint32_t)Frame->ErrorCode};
 #if defined(__amd64__)
         error("An exception occurred at %#lx by %#lx", CPU::x64::readcr2().PFLA, Frame->rip);
@@ -226,6 +234,9 @@ __no_stack_protector void UserModeExceptionHandler(CHArchTrapFrame *Frame)
         break;
     }
     }
+    TaskManager->GetCurrentThread()->Status = Tasking::TaskStatus::Terminated;
+    __sync_synchronize();
     error("End of report.");
     CPU::Interrupts(CPU::Enable);
     debug("Interrupts enabled back.");

View File

@@ -43,8 +43,8 @@ void MapFromZero(PageTable *PT, BootInfo *Info)
     uint64_t VirtualOffsetNormalVMA = NORMAL_VMA_OFFSET;
     for (uint64_t t = 0; t < Info->Memory.Size; t += PAGE_SIZE)
     {
-        va.Map((void *)t, (void *)t, PTFlag::RW | PTFlag::US);
-        va.Map((void *)VirtualOffsetNormalVMA, (void *)t, PTFlag::RW | PTFlag::US);
+        va.Map((void *)t, (void *)t, PTFlag::RW);
+        va.Map((void *)VirtualOffsetNormalVMA, (void *)t, PTFlag::RW);
         VirtualOffsetNormalVMA += PAGE_SIZE;
     }
 }
@@ -174,8 +174,10 @@ void InitializeMemoryManagement(BootInfo *Info)
     UserspaceKernelOnlyPageTable = (PageTable *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE));
     memset(UserspaceKernelOnlyPageTable, 0, PAGE_SIZE);
-    debug("Mapping from %#llx to %#llx", 0, Info->Memory.Size);
+    debug("Mapping from 0x0 to %#llx", Info->Memory.Size);
     MapFromZero(KernelPageTable, Info);
+    debug("Mapping from 0x0 to %#llx for Userspace Page Table", Info->Memory.Size);
+    MapFromZero(UserspaceKernelOnlyPageTable, Info);
     /* Mapping Framebuffer address */
     debug("Mapping Framebuffer");
@@ -289,14 +291,20 @@ void HeapFree(void *Address)
     }
 }
-void *operator new(size_t Size) { return HeapMalloc(Size); }
-void *operator new[](size_t Size) { return HeapMalloc(Size); }
+void *operator new(size_t Size) {
+    return HeapMalloc(Size); }
+void *operator new[](size_t Size) {
+    return HeapMalloc(Size); }
 void *operator new(unsigned long Size, std::align_val_t Alignment)
 {
     fixme("operator new with alignment(%#lx) is not implemented", Alignment);
     return HeapMalloc(Size);
 }
-void operator delete(void *Pointer) { HeapFree(Pointer); }
-void operator delete[](void *Pointer) { HeapFree(Pointer); }
-void operator delete(void *Pointer, long unsigned int Size) { HeapFree(Pointer); }
-void operator delete[](void *Pointer, long unsigned int Size) { HeapFree(Pointer); }
+void operator delete(void *Pointer) {
+    HeapFree(Pointer); }
+void operator delete[](void *Pointer) {
+    HeapFree(Pointer); }
+void operator delete(void *Pointer, long unsigned int Size) {
+    HeapFree(Pointer); }
+void operator delete[](void *Pointer, long unsigned int Size) {
+    HeapFree(Pointer); }
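
MapFromZero, as adjusted above, installs two views of every physical page: an identity mapping and a higher-half alias at NORMAL_VMA_OFFSET. A tiny model of that address arithmetic; the offset value below is the conventional x86-64 higher-half base and is an assumption for illustration, not copied from this tree:

    #include <cstdint>
    #include <cstdio>

    // Assumed value for illustration; the real NORMAL_VMA_OFFSET is
    // defined elsewhere in this kernel.
    constexpr uint64_t NORMAL_VMA_OFFSET = 0xFFFF800000000000;
    constexpr uint64_t PAGE_SIZE = 0x1000;

    int main()
    {
        // Each physical page t is mapped at t (identity) and at
        // NORMAL_VMA_OFFSET + t (higher half), both RW and, after this
        // commit, without the user-accessible flag.
        for (uint64_t t = 0; t < 3 * PAGE_SIZE; t += PAGE_SIZE)
            std::printf("phys %#llx -> virt %#llx and %#llx\n",
                        (unsigned long long)t,
                        (unsigned long long)t,
                        (unsigned long long)(NORMAL_VMA_OFFSET + t));
    }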

View File

@@ -0,0 +1,66 @@
+#include <memory.hpp>
+
+#include <debug.h>
+
+namespace Memory
+{
+    StackGuard::StackGuard(bool User, PageTable *Table)
+    {
+        this->UserMode = User;
+        this->Table = Table;
+        if (this->UserMode)
+        {
+            void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
+            memset(AllocatedStack, 0, USER_STACK_SIZE);
+            for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
+            {
+                Virtual(Table).Map((void *)((uint64_t)AllocatedStack + (i * PAGE_SIZE)),
+                                   (void *)(USER_STACK_BASE + (i * PAGE_SIZE)),
+                                   PTFlag::RW | PTFlag::US);
+            }
+            this->StackBottom = (void *)USER_STACK_BASE;
+            this->StackTop = (void *)(USER_STACK_BASE + USER_STACK_SIZE);
+            this->Size = USER_STACK_SIZE;
+        }
+        else
+        {
+            this->StackBottom = KernelAllocator.RequestPages(TO_PAGES(STACK_SIZE));
+            memset(this->StackBottom, 0, STACK_SIZE);
+            this->StackTop = (void *)((uint64_t)this->StackBottom + STACK_SIZE);
+            this->Size = STACK_SIZE;
+        }
+        trace("Allocated stack at %p", this->StackBottom);
+    }
+
+    StackGuard::~StackGuard() { KernelAllocator.FreePages(this->StackBottom, TO_PAGES(this->Size)); }
+
+    bool StackGuard::Expand(uint64_t FaultAddress)
+    {
+        if (this->UserMode)
+        {
+            if (FaultAddress < (uint64_t)this->StackBottom - USER_STACK_SIZE ||
+                FaultAddress > (uint64_t)this->StackTop)
+            {
+                return false; // It's not about the stack.
+            }
+            else
+            {
+                void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE));
+                memset(AllocatedStack, 0, USER_STACK_SIZE);
+                for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
+                    Virtual(this->Table).Map((void *)((uint64_t)AllocatedStack + (i * PAGE_SIZE)),
+                                             (void *)((uint64_t)this->StackBottom - (i * PAGE_SIZE)),
+                                             PTFlag::RW | PTFlag::US);
+                this->StackBottom = (void *)((uint64_t)this->StackBottom - USER_STACK_SIZE);
+                this->Size += USER_STACK_SIZE;
+                info("Stack expanded to %p", this->StackBottom);
+                return true;
+            }
+        }
+        else
+        {
+            fixme("Not implemented and probably not needed");
+            return false;
+        }
+        error("Reached end of function! How?");
+        return false;
+    }
+}
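
To see the growth logic in isolation: a host-runnable model of Expand's bounds check and downward growth, with page mapping replaced by plain bookkeeping. The constants mirror USER_STACK_BASE and USER_STACK_SIZE from this commit:

    #include <cstdint>
    #include <cstdio>

    // Bookkeeping-only model of StackGuard::Expand (no real page mapping).
    class StackGuardModel
    {
        uint64_t Bottom, Top, ChunkSize;

    public:
        StackGuardModel(uint64_t Base, uint64_t Size)
            : Bottom(Base), Top(Base + Size), ChunkSize(Size) {}

        bool Expand(uint64_t FaultAddress)
        {
            // Same check as above: a fault more than one chunk below
            // the current bottom, or above the top, is not stack growth.
            if (FaultAddress < Bottom - ChunkSize || FaultAddress > Top)
                return false;
            Bottom -= ChunkSize; // fresh chunk mapped just below the old bottom
            return true;
        }

        uint64_t GetStackBottom() const { return Bottom; }
    };

    int main()
    {
        StackGuardModel sg(0xFFFFEFFFFFFF0000, 0x2000); // USER_STACK_BASE/SIZE

        // A push that undershoots the current bottom by one page:
        bool ok = sg.Expand(0xFFFFEFFFFFFEF000);
        std::printf("expanded: %d, bottom now %#llx\n",
                    ok, (unsigned long long)sg.GetStackBottom());
    }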

View File

@@ -92,10 +92,7 @@ namespace Tasking
             trace("Thread \"%s\"(%d) removed from process \"%s\"(%d)",
                   Thread->Name, Thread->ID, Thread->Parent->Name, Thread->Parent->ID);
             // Free memory
-            if (Thread->Security.TrustLevel == TaskTrustLevel::User)
-                KernelAllocator.FreePages((void *)((uint64_t)Thread->Stack - USER_STACK_SIZE), TO_PAGES(USER_STACK_SIZE) /* + 1*/);
-            else
-                KernelAllocator.FreePages((void *)((uint64_t)Thread->Stack - STACK_SIZE), TO_PAGES(STACK_SIZE) /* + 1*/);
+            delete Thread->Stack;
             SecurityManager.DestroyToken(Thread->Security.UniqueToken);
             delete Thread->Parent->Threads[i];
             // Remove from the list
@@ -125,7 +122,8 @@ namespace Tasking
             // Free memory
             delete ListProcess[i]->IPCHandles;
             SecurityManager.DestroyToken(ListProcess[i]->Security.UniqueToken);
-            KernelAllocator.FreePages((void *)ListProcess[i]->PageTable, TO_PAGES(PAGE_SIZE));
+            if (ListProcess[i]->Security.TrustLevel == TaskTrustLevel::User)
+                KernelAllocator.FreePages((void *)ListProcess[i]->PageTable, TO_PAGES(PAGE_SIZE));
             delete ListProcess[i];
             // Remove from the list
             ListProcess.remove(i);
@@ -356,6 +354,7 @@ namespace Tasking
             warn("Scheduler stopped.");
             return;
         }
+        CPU::x64::writecr3({.raw = (uint64_t)KernelPageTable}); // Restore kernel page table for safety reasons.
         CPUData *CurrentCPU = GetCurrentCPU();
         // if (CurrentCPU->ID != 0)
         //     debug("Scheduler called from CPU %d", CurrentCPU->ID);
@@ -512,7 +511,7 @@ namespace Tasking
             }
         }
         *Frame = CurrentCPU->CurrentThread->Registers;
-        GlobalDescriptorTable::SetKernelStack((void *)((uint64_t)CurrentCPU->CurrentThread->Stack + STACK_SIZE));
+        GlobalDescriptorTable::SetKernelStack((void *)((uint64_t)CurrentCPU->CurrentThread->Stack->GetStackTop()));
         CPU::x64::writecr3({.raw = (uint64_t)CurrentCPU->CurrentProcess->PageTable});
         CPU::x64::fxrstor(CurrentCPU->CurrentThread->FXRegion);
         CPU::x64::wrmsr(CPU::x64::MSR_GS_BASE, CurrentCPU->CurrentThread->GSBase);
@@ -737,8 +736,7 @@ namespace Tasking
         case TaskTrustLevel::Idle:
         case TaskTrustLevel::Kernel:
         {
-            Thread->Stack = KernelAllocator.RequestPages(TO_PAGES(STACK_SIZE) + 1);
-            memset(Thread->Stack, 0, STACK_SIZE);
+            Thread->Stack = new Memory::StackGuard(false, Parent->PageTable);
 #if defined(__amd64__)
             SecurityManager.TrustToken(Thread->Security.UniqueToken, TokenTrustLevel::TrustedByKernel);
             Thread->GSBase = CPU::x64::rdmsr(CPU::x64::MSRID::MSR_GS_BASE);
@@ -748,7 +746,7 @@ namespace Tasking
             Thread->Registers.rflags.AlwaysOne = 1;
             Thread->Registers.rflags.IF = 1;
             Thread->Registers.rflags.ID = 1;
-            Thread->Registers.rsp = ((uint64_t)Thread->Stack + STACK_SIZE);
+            Thread->Registers.rsp = ((uint64_t)Thread->Stack->GetStackTop());
             POKE(uint64_t, Thread->Registers.rsp) = (uint64_t)ThreadDoExit;
 #elif defined(__i386__)
 #elif defined(__aarch64__)
@@ -757,8 +755,7 @@ namespace Tasking
         }
         case TaskTrustLevel::User:
         {
-            Thread->Stack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE) + 1);
-            memset(Thread->Stack, 0, USER_STACK_SIZE);
+            Thread->Stack = new Memory::StackGuard(true, Parent->PageTable);
 #if defined(__amd64__)
             SecurityManager.TrustToken(Thread->Security.UniqueToken, TokenTrustLevel::Untrusted);
             Thread->GSBase = 0;
@@ -771,7 +768,7 @@ namespace Tasking
             // Thread->Registers.rflags.IOPL = 3;
             Thread->Registers.rflags.IF = 1;
             Thread->Registers.rflags.ID = 1;
-            Thread->Registers.rsp = ((uint64_t)Thread->Stack + USER_STACK_SIZE);
+            Thread->Registers.rsp = ((uint64_t)Thread->Stack->GetStackTop());
             if (Compatibility == TaskCompatibility::Linux)
             {
@@ -846,7 +843,7 @@ namespace Tasking
                 TmpStack -= sizeof(uint64_t);
                 // POKE(uint64_t, TmpStack) = argv.size() - 1;
-                Thread->Registers.rsp -= (uint64_t)Thread->Stack + STACK_SIZE - TmpStack;
+                Thread->Registers.rsp -= (uint64_t)Thread->Stack->GetStackTop() - TmpStack;
             }
             else // Native
             {
@@ -906,9 +903,6 @@ namespace Tasking
             /* We need to leave the libc's crt to make a syscall when the Thread is exited or we are going to get GPF or PF exception. */
             Memory::Virtual uva = Memory::Virtual(Parent->PageTable);
-            for (uint64_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
-                uva.Map((void *)((uint64_t)Thread->Stack + (i * USER_STACK_SIZE)), (void *)((uint64_t)Thread->Stack + (i * USER_STACK_SIZE)), Memory::PTFlag::RW | Memory::PTFlag::US);
             if (!uva.Check((void *)Offset, Memory::PTFlag::US))
             {
                 error("Offset is not user accessible");
@@ -925,7 +919,7 @@ namespace Tasking
         default:
         {
             error("Unknown elevation.");
-            KernelAllocator.FreePages(Thread->Stack, TO_PAGES(STACK_SIZE));
+            delete Thread->Stack;
             this->NextTID--;
             delete Thread;
             return nullptr;
@@ -954,9 +948,9 @@ namespace Tasking
         debug("Thread offset is %#lx (EntryPoint:%#lx)", Thread->Offset, Thread->EntryPoint);
         if (Parent->Security.TrustLevel == TaskTrustLevel::User)
-            debug("Thread stack region is %#lx-%#lx (U) and rsp is %#lx", Thread->Stack, (uint64_t)Thread->Stack + USER_STACK_SIZE, Thread->Registers.rsp);
+            debug("Thread stack region is %#lx-%#lx (U) and rsp is %#lx", Thread->Stack->GetStackBottom(), Thread->Stack->GetStackTop(), Thread->Registers.rsp);
         else
-            debug("Thread stack region is %#lx-%#lx (K) and rsp is %#lx", Thread->Stack, (uint64_t)Thread->Stack + STACK_SIZE, Thread->Registers.rsp);
+            debug("Thread stack region is %#lx-%#lx (K) and rsp is %#lx", Thread->Stack->GetStackBottom(), Thread->Stack->GetStackTop(), Thread->Registers.rsp);
         debug("Created thread \"%s\"(%d) in process \"%s\"(%d)",
               Thread->Name, Thread->ID,
               Thread->Parent->Name, Thread->Parent->ID);
@@ -995,9 +989,7 @@ namespace Tasking
         {
             SecurityManager.TrustToken(Process->Security.UniqueToken, TokenTrustLevel::TrustedByKernel);
 #if defined(__amd64__)
-            Process->PageTable = (Memory::PageTable *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE));
-            memset(Process->PageTable, 0, PAGE_SIZE);
-            memcpy(Process->PageTable, (void *)CPU::x64::readcr3().raw, PAGE_SIZE);
+            Process->PageTable = (Memory::PageTable *)CPU::x64::readcr3().raw;
 #elif defined(__i386__)
 #elif defined(__aarch64__)
 #endif
@@ -1008,10 +1000,9 @@ namespace Tasking
             SecurityManager.TrustToken(Process->Security.UniqueToken, TokenTrustLevel::Untrusted);
 #if defined(__amd64__)
             Process->PageTable = (Memory::PageTable *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE));
-            memset(Process->PageTable, 0, PAGE_SIZE);
-            memcpy(Process->PageTable, (void *)CPU::x64::readcr3().raw, PAGE_SIZE);
-            fixme("User mode process page table is not implemented.");
-            // memcpy(Process->PageTable, (void *)UserspaceKernelOnlyPageTable, PAGE_SIZE);
+            memcpy(Process->PageTable, (void *)UserspaceKernelOnlyPageTable, PAGE_SIZE);
+            for (uint64_t i = 0; i < TO_PAGES(PAGE_SIZE); i++)
+                Memory::Virtual(Process->PageTable).Map((void *)Process->PageTable, (void *)Process->PageTable, Memory::PTFlag::RW); // Make sure the page table is mapped.
 #elif defined(__i386__)
 #elif defined(__aarch64__)
 #endif
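
The rsp wiring in the hunks above is easiest to read as plain pointer arithmetic: the register starts at the guard's GetStackTop() and the first quadword consumed by a ret is the ThreadDoExit trampoline. A host-runnable sketch under that reading; the kernel's POKE macro is modeled as a typed store into a real buffer, and the store is placed one quadword below the top so it stays in bounds on a host (the kernel's exact offset may differ):

    #include <cstdint>
    #include <cstdio>

    static void ThreadDoExit() {} // trampoline the thread "returns" into

    alignas(16) static uint64_t FakeStack[512]; // stands in for StackGuard pages

    int main()
    {
        // Thread->Registers.rsp = Thread->Stack->GetStackTop();
        uint64_t StackTop = (uint64_t)&FakeStack[512];

        // POKE(uint64_t, rsp) = (uint64_t)ThreadDoExit, modeled in bounds:
        uint64_t rsp = StackTop - sizeof(uint64_t);
        *(uint64_t *)rsp = (uint64_t)&ThreadDoExit;

        std::printf("rsp=%#llx, return target=%#llx\n",
                    (unsigned long long)rsp,
                    (unsigned long long)*(uint64_t *)rsp);
    }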

View File

@@ -34,9 +34,9 @@ extern uint64_t _kernel_text_end, _kernel_data_end, _kernel_rodata_end;
 // geopbyte
 #define TO_GPB(d) (d / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024 / 1024)
-#define PAGE_SIZE 0x1000
-#define STACK_SIZE 0x1000000
-#define USER_STACK_SIZE 0x1000000
+#define PAGE_SIZE 0x1000       // 4 KiB
+#define STACK_SIZE 0x4000      // 16 KiB
+#define USER_STACK_SIZE 0x2000 // 8 KiB
 // to pages
 #define TO_PAGES(d) (d / PAGE_SIZE + 1)
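
Worth checking against the new sizes: TO_PAGES rounds by integer division plus one, so page-aligned sizes get one extra page of slack. A few worked values, verifiable at compile time on a host:

    #include <cstdint>

    constexpr uint64_t PAGE_SIZE = 0x1000;
    constexpr uint64_t TO_PAGES(uint64_t d) { return d / PAGE_SIZE + 1; }

    // For page-aligned sizes the "+ 1" over-allocates one page:
    static_assert(TO_PAGES(0x4000) == 5, "16 KiB kernel stack -> 5 pages");
    static_assert(TO_PAGES(0x2000) == 3, "8 KiB user stack -> 3 pages");
    static_assert(TO_PAGES(1) == 1, "sub-page sizes still get one page");

    int main() {}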
@@ -49,11 +49,16 @@ extern uint64_t _kernel_text_end, _kernel_data_end, _kernel_rodata_end;
 /**
  * @brief KERNEL_HEAP_BASE is the base address of the kernel heap
  */
-#define KERNEL_HEAP_BASE 0xFFFFC00000000000
+#define KERNEL_HEAP_BASE 0xFFFFA00000000000
 /**
  * @brief USER_HEAP_BASE is the base address of the user heap allocated by the kernel
  */
-#define USER_HEAP_BASE 0xFFFFD00000000000
+#define USER_HEAP_BASE 0xFFFFB00000000000
+/**
+ * @brief USER_STACK_BASE is the base address of the user stack
+ */
+#define USER_STACK_BASE 0xFFFFEFFFFFFF0000
 namespace Memory
 {
@@ -401,6 +406,35 @@ namespace Memory
      */
     ~Virtual();
     };
+    class StackGuard
+    {
+    private:
+        void *StackBottom = nullptr;
+        void *StackTop = nullptr;
+        void *SGB = nullptr;
+        void *SGT = nullptr;
+        uint64_t Size = 0;
+        bool UserMode = false;
+        PageTable *Table = nullptr;
+
+    public:
+        /** @brief For general info */
+        void *GetStackBottom() { return StackBottom; }
+        /** @brief For RSP */
+        void *GetStackTop() { return StackTop; }
+        /** @brief Called by exception handler */
+        bool Expand(uint64_t FaultAddress);
+        /**
+         * @brief Construct a new Stack Guard object
+         * @param User Stack for user mode?
+         */
+        StackGuard(bool User, PageTable *Table);
+        /**
+         * @brief Destroy the Stack Guard object
+         */
+        ~StackGuard();
+    };
 }
 /**
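
Taken together, the revised bases put the guarded user stack far above both heaps. A one-glance summary of the layout, with values copied from the hunk above:

    #include <cstdint>
    #include <cstdio>

    constexpr uint64_t KERNEL_HEAP_BASE = 0xFFFFA00000000000;
    constexpr uint64_t USER_HEAP_BASE   = 0xFFFFB00000000000;
    constexpr uint64_t USER_STACK_BASE  = 0xFFFFEFFFFFFF0000;
    constexpr uint64_t USER_STACK_SIZE  = 0x2000;

    int main()
    {
        // The initial stack occupies [BASE, BASE + SIZE); Expand() maps
        // further chunks below BASE as the stack grows downward.
        std::printf("kernel heap %#llx\n", (unsigned long long)KERNEL_HEAP_BASE);
        std::printf("user heap   %#llx\n", (unsigned long long)USER_HEAP_BASE);
        std::printf("user stack  %#llx-%#llx\n",
                    (unsigned long long)USER_STACK_BASE,
                    (unsigned long long)(USER_STACK_BASE + USER_STACK_SIZE));
    }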

View File

@@ -85,7 +85,7 @@ namespace Tasking
         IP EntryPoint;
         IPOffset Offset;
         int ExitCode;
-        void *Stack __attribute__((aligned(16)));
+        Memory::StackGuard *Stack;
         TaskStatus Status;
 #if defined(__amd64__)
         CPU::x64::TrapFrame Registers;