Implement VirtualAllocation class

This commit is contained in:
EnderIce2 2024-03-27 19:51:15 +02:00
parent d7cc1f5c47
commit 3dc1981820
Signed by untrusted user who does not match committer: enderice2
GPG Key ID: EACC3AD603BAB4DD
5 changed files with 220 additions and 8 deletions

142
core/memory/va.cpp Normal file
View File

@ -0,0 +1,142 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory/table.hpp>
#include <memory/va.hpp>
#include <cpu.hpp>
#include <debug.h>
#include <bitset>
#include "../../kernel.h"
namespace Memory
{
/**
 * Allocate @Count pages of physical memory, map them into this
 * allocator's page table and return the bookkeeping record.
 *
 * The virtual range is reused from a previously freed region when one
 * fits; otherwise a new range is carved from the bump pointer
 * (CurrentBase). Note that freed regions returned their *physical*
 * pages to the kernel allocator in FreePages(), so only the virtual
 * range is recycled — the physical backing is always freshly allocated
 * here.
 */
VirtualAllocation::AllocatedPages VirtualAllocation::RequestPages(size_t Count)
{
	function("%lld", Count);

	/* Fresh, zeroed physical backing for this request. */
	void *pAddress = KernelAllocator.RequestPages(Count);
	assert(pAddress != nullptr);
	memset(pAddress, 0, FROM_PAGES(Count));

	Virtual vmm(this->Table);
	SmartLock(MgrLock);
	forItr(itr, AllocatedPagesList)
	{
		if (likely(itr->Free == false))
			continue;

		if (itr->PageCount == Count)
		{
			/* Exact fit: reuse the virtual range. Record the new
			   physical address, otherwise the caller would receive
			   the stale (already freed) one. */
			itr->Free = false;
			itr->PhysicalAddress = pAddress;
			vmm.Map(itr->VirtualAddress, pAddress, FROM_PAGES(Count), RW | KRsv | G);
			return *itr;
		}

		if (itr->PageCount > Count)
		{
			/* Split the block */
			void *vAddress = itr->VirtualAddress;
			/* NOTE: oldPhys points to pages already returned to the
			   kernel allocator; it is kept for bookkeeping only and
			   must never be mapped again. */
			void *oldPhys = itr->PhysicalAddress;
			size_t PageCount = itr->PageCount;
			AllocatedPagesList.erase(itr);

			/* Remainder of the hole stays free. */
			AllocatedPagesList.push_back({(void *)((uintptr_t)oldPhys + FROM_PAGES(Count)),
										  (void *)((uintptr_t)vAddress + FROM_PAGES(Count)),
										  PageCount - Count, true});
			/* The carved-out region is backed by the fresh physical
			   pages — not by the stale oldPhys. */
			AllocatedPagesList.push_back({pAddress, vAddress, Count, false});
			vmm.Map(vAddress, pAddress, FROM_PAGES(Count), RW | KRsv | G);
			debug("Split region %#lx-%#lx", vAddress, (uintptr_t)vAddress + FROM_PAGES(Count));
			debug("Free region %#lx-%#lx", (uintptr_t)vAddress + FROM_PAGES(Count), (uintptr_t)vAddress + FROM_PAGES(PageCount - Count));
			return AllocatedPagesList.back();
		}
	}

	/* No reusable hole: allocate a new region at the bump pointer. */
	void *vAddress = CurrentBase;
	vmm.Map(vAddress, pAddress, FROM_PAGES(Count), RW | KRsv | G);
	AllocatedPagesList.push_back({pAddress, vAddress, Count, false});
	debug("New region %#lx-%#lx", vAddress, (uintptr_t)vAddress + FROM_PAGES(Count));

	CurrentBase = (void *)((uintptr_t)CurrentBase + FROM_PAGES(Count));
	assert(USER_ALLOC_END > (uintptr_t)CurrentBase);
	return AllocatedPagesList.back();
}
void VirtualAllocation::FreePages(void *Address, size_t Count)
{
function("%#lx, %lld", Address, Count);
SmartLock(MgrLock);
foreach (auto &apl in AllocatedPagesList)
{
if (apl.VirtualAddress != Address)
continue;
if (apl.PageCount != Count)
{
error("Page count mismatch! (Allocated: %lld, Requested: %lld)",
apl.PageCount, Count);
return;
}
Virtual vmm(this->Table);
for (size_t i = 0; i < Count; i++)
{
void *AddressToUnmap = (void *)((uintptr_t)Address + FROM_PAGES(i));
vmm.Unmap(AddressToUnmap);
}
KernelAllocator.FreePages(Address, Count);
apl.Free = true;
debug("Freed region %#lx-%#lx", Address, (uintptr_t)Address + FROM_PAGES(Count));
return;
}
}
/**
 * Mirror an allocation into another page table so the region is
 * reachable at the same virtual address from that address space
 * (e.g. a process page table in addition to this allocator's own).
 */
void VirtualAllocation::MapTo(AllocatedPages ap, PageTable *TargetTable)
{
	function("%#lx, %#lx", ap.VirtualAddress, TargetTable);

	Virtual target(TargetTable);
	target.Map(ap.VirtualAddress, ap.PhysicalAddress,
			   FROM_PAGES(ap.PageCount), RW | KRsv | G);
}
/**
 * Create an allocator that hands out virtual ranges starting at @Base,
 * operating on the page table that is active at construction time.
 */
VirtualAllocation::VirtualAllocation(void *Base)
	: BaseAddress(Base),
	  CurrentBase(Base), /* bump pointer; advances as regions are carved */
	  Table((PageTable *)CPU::PageTable())
{
	function("%#lx", Base);
}
/**
 * Tear down the allocator: return every still-allocated region's
 * physical pages and unmap its virtual range.
 *
 * Regions already released through FreePages() were unmapped and gave
 * their physical pages back there; freeing them again here would be a
 * double free, so they are skipped.
 */
VirtualAllocation::~VirtualAllocation()
{
	/* No need to remap pages, the page table will be destroyed */
	Virtual vmm(this->Table);
	foreach (auto &ap in AllocatedPagesList)
	{
		if (ap.Free)
			continue;

		KernelAllocator.FreePages(ap.PhysicalAddress, ap.PageCount);
		for (size_t i = 0; i < ap.PageCount; i++)
		{
			void *AddressToUnmap = (void *)((uintptr_t)ap.VirtualAddress + FROM_PAGES(i));
			vmm.Unmap(AddressToUnmap);
		}
	}
}
}

View File

@ -49,13 +49,18 @@
#if defined(a64) || defined(aa64)
#define KERNEL_VMA_OFFSET 0xFFFFFFFF80000000
#define KERNEL_HEAP_BASE 0xFFFFFF0000000000
#define USER_STACK_BASE 0xFFFFEFFFFFFF0000
#define USER_ALLOC_BASE 0xFFFFA00000000000 /* 256 GiB */
#define USER_ALLOC_END 0xFFFFB00000000000
#define USER_STACK_END 0xFFFFEFFF00000000 /* 256 MiB */
#define USER_STACK_BASE 0xFFFFEFFFFFFF0000
#elif defined(a32)
#define KERNEL_VMA_OFFSET 0xC0000000
#define KERNEL_HEAP_BASE 0xA0000000
#define USER_ALLOC_BASE 0x80000000
#define USER_ALLOC_END 0xA0000000
#define USER_STACK_BASE 0xEFFFFFFF
#define USER_STACK_END 0xE0000000
#endif

60
include/memory/va.hpp Normal file
View File

@ -0,0 +1,60 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef __FENNIX_KERNEL_MEMORY_VA_H__
#define __FENNIX_KERNEL_MEMORY_VA_H__
#include <types.h>
#include <lock.hpp>
#include <list>
#include <memory/table.hpp>
namespace Memory
{
	/* Bump-pointer virtual address allocator with a free list.
	   Hands out page-granular regions starting at a caller-supplied
	   base, maps them into its own page table (captured at
	   construction), and can mirror a region into other page tables
	   via MapTo(). Freed regions are kept on the list (Free == true)
	   so their virtual ranges can be recycled by RequestPages(). */
	class VirtualAllocation
	{
	public:
		/* Bookkeeping record for one contiguous region. */
		struct AllocatedPages
		{
			void *PhysicalAddress; /* backing pages from KernelAllocator */
			void *VirtualAddress;  /* where the region is mapped */
			size_t PageCount;      /* region length in pages */
			bool Free;             /* true once released via FreePages() */
		};

	private:
		NewLock(MgrLock); /* guards AllocatedPagesList */
		std::list<AllocatedPages> AllocatedPagesList;
		void *BaseAddress; /* start of the managed virtual window */
		void *CurrentBase; /* bump pointer; next fresh region goes here */

	public:
		/* Page table all regions are mapped into. */
		PageTable *Table;

		/* Allocate, zero and map Count pages; returns the record. */
		AllocatedPages RequestPages(size_t Count);
		/* Unmap and release an exact region previously requested. */
		void FreePages(void *Address, size_t Count);
		/* Map an existing region into another page table. */
		void MapTo(AllocatedPages ap, PageTable *Table);

		VirtualAllocation(void *Base);
		~VirtualAllocation();
	};
}
#endif // !__FENNIX_KERNEL_MEMORY_VA_H__

View File

@ -21,6 +21,7 @@
#include <types.h>
#include <filesystem.hpp>
#include <memory/va.hpp>
#include <symbols.hpp>
#include <memory.hpp>
#include <signal.hpp>
@ -498,11 +499,10 @@ namespace Tasking
NewLock(TaskingLock);
PID NextPID = 0;
PCB *KernelProcess = nullptr;
void *Scheduler = nullptr;
void *__sched_ctx = nullptr;
Memory::VirtualAllocation va = (void *)0xFFFFA00000000000;
constexpr TaskArchitecture GetKArch()
{

View File

@ -516,13 +516,18 @@ namespace Tasking
{
this->Stack = new Memory::StackGuard(true, this->vma);
gsTCB *gsT = (gsTCB *)this->vma->RequestPages(TO_PAGES(sizeof(gsTCB)), false, true);
Memory::VirtualAllocation::AllocatedPages gst = this->ctx->va.RequestPages(TO_PAGES(sizeof(gsTCB)));
this->ctx->va.MapTo(gst, this->Parent->PageTable);
gsTCB *gsT = (gsTCB *)gst.PhysicalAddress;
#ifdef DEBUG
gsT->__stub = 0xFFFFFFFFFFFFFFFF;
#endif
gsT->ScPages = TO_PAGES(STACK_SIZE);
gsT->SyscallStackBase = this->vma->RequestPages(gsT->ScPages, false, true);
Memory::VirtualAllocation::AllocatedPages ssb = this->ctx->va.RequestPages(gsT->ScPages);
this->ctx->va.MapTo(ssb, this->Parent->PageTable);
gsT->SyscallStackBase = ssb.VirtualAddress;
gsT->SyscallStack = (void *)((uintptr_t)gsT->SyscallStackBase + STACK_SIZE - 0x10);
debug("New syscall stack created: %#lx (base: %#lx) with gs base at %#lx",
gsT->SyscallStack, gsT->SyscallStackBase, gsT);
@ -530,7 +535,7 @@ namespace Tasking
gsT->TempStack = 0x0;
gsT->t = this;
#if defined(a64)
this->ShadowGSBase = (uintptr_t)gsT;
this->ShadowGSBase = (uintptr_t)gst.VirtualAddress;
this->GSBase = 0;
this->FSBase = 0;
this->Registers.cs = GDT_USER_CODE;