Merge remote-tracking branch 'Kernel/mb2_32_64_test' into Kernel-mb2_32_64_test

EnderIce2
2024-11-20 05:15:06 +02:00
parent 47cf2c24d1
commit b348932172
353 changed files with 77068 additions and 0 deletions

View File

@@ -0,0 +1,137 @@
# Xalloc
Xalloc is a custom memory allocator designed for hobby operating systems. It is written in C++ and provides a simple and efficient way to manage memory in your kernel.
#### ❗ This project is still in development and is not ready for use in production environments. ❗
---
## Features
- **Simple API** - Xalloc provides a simple API for allocating and freeing memory. It is designed to be easy to use and understand.
- [ ] TODO: complete this list
---
## Getting Started
### Implementing missing functions
You will need to implement the following functions in your OS:
##### Wrapper.cpp
```cpp
extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages)
{
// ...
}
extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages)
{
// ...
}
extern "C" void Xalloc_MAP_MEMORY(void *VirtualAddress, void *PhysicalAddress, Xsize_t Flags)
{
// ...
}
extern "C" void Xalloc_UNMAP_MEMORY(void *VirtualAddress)
{
// ...
}
```
##### Xalloc.hpp
```cpp
#define Xalloc_PAGE_SIZE <page size> /* <-- Replace with your page size */
#define Xalloc_trace(m, ...) <trace function>
#define Xalloc_warn(m, ...) <warning function>
#define Xalloc_err(m, ...) <error function>
#define Xalloc_def <define a lock> /* e.g. std::mutex XallocMutex; */
#define Xalloc_lock <lock function>
#define Xalloc_unlock <unlock function>
```
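For reference, a minimal hosted configuration might look like the sketch below. The `XallocMutex` name, the 4096-byte page size, and the `printf`-based log wrappers are assumptions made for this example; substitute your kernel's own primitives:
```cpp
#include <cstdio>
#include <mutex>

#define Xalloc_PAGE_SIZE 4096 /* assumed 4 KiB pages */

#define Xalloc_trace(m, ...) std::printf("TRACE: " m "\n", ##__VA_ARGS__)
#define Xalloc_warn(m, ...) std::printf("WARN: " m "\n", ##__VA_ARGS__)
#define Xalloc_err(m, ...) std::printf("ERROR: " m "\n", ##__VA_ARGS__)

/* Xalloc_def is expanded once at global scope to define the lock object. */
#define Xalloc_def std::mutex XallocMutex
#define Xalloc_lock XallocMutex.lock()
#define Xalloc_unlock XallocMutex.unlock()
```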
### Typical usage
```cpp
#include "Xalloc.hpp"
Xalloc::V1 *XallocV1Allocator = nullptr;
int main()
{
/* Arguments: BaseVirtualAddress, UserMode, SMAPEnabled */
XallocV1Allocator = new Xalloc::V1((void *)0xFFFFA00000000000, false, false);
void *p = XallocV1Allocator->malloc(1234);
/* ... */
XallocV1Allocator->free(p);
delete XallocV1Allocator;
return 0;
}
```
or
```cpp
#include "Xalloc.hpp"
int main()
{
/* Arguments: BaseVirtualAddress, UserMode, SMAPEnabled */
Xalloc::V1 XallocV1Allocator((void *)0xFFFFA00000000000, false, false);
void *p = XallocV1Allocator.malloc(1234);
/* ... */
XallocV1Allocator.free(p);
return 0;
}
```
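Note that the second form constructs the allocator in place instead of on the heap, which is convenient in a kernel where no heap exists yet at the time the allocator is created.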
---
## API
### Xalloc::V1
```cpp
void *malloc(Xsize_t Size);
```
Allocates a block of memory of size `Size` bytes.
If `Size` is 0, then `nullptr` is returned.
- `Size` - The size of the block to allocate in bytes.
<br><br>
```cpp
void free(void *Address);
```
Frees the memory block pointed to by `Address`.
If `Address` is `nullptr`, then no operation is performed.
- `Address` - The address of the memory block to free.
<br><br>
```cpp
void *calloc(Xsize_t NumberOfBlocks, Xsize_t Size);
```
Allocates a block of memory for an array of `NumberOfBlocks` elements, each of them `Size` bytes long.
If `NumberOfBlocks` or `Size` is 0, then `nullptr` is returned.
- `NumberOfBlocks` - The number of elements to allocate.
- `Size` - The size of each element in bytes.
<br><br>
```cpp
void *realloc(void *Address, Xsize_t Size);
```
Changes the size of the memory block pointed to by `Address` to `Size` bytes.
If `Address` is `nullptr`, then the call is equivalent to `malloc(Size)`.
If `Size` is equal to zero, and `Address` is not `nullptr`, then the call is equivalent to `free(Address)`.
- `Address` - The address of the memory block to resize.
- `Size` - The new size of the memory block in bytes.
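A short illustration of these semantics (the sizes are arbitrary; `XallocV1Allocator` is the instance from the usage examples above):
```cpp
void *p = XallocV1Allocator->malloc(64); /* allocate a 64-byte block */
p = XallocV1Allocator->realloc(p, 128);  /* resize it to 128 bytes */
p = XallocV1Allocator->realloc(p, 0);    /* equivalent to free(p); yields nullptr */
```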
---

View File

@@ -0,0 +1,40 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include "Xalloc.hpp"
#include <memory.hpp>
extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages)
{
return KernelAllocator.RequestPages(Pages);
}
extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages)
{
KernelAllocator.FreePages(Address, Pages);
}
extern "C" void Xalloc_MAP_MEMORY(void *VirtualAddress, void *PhysicalAddress, Xsize_t Flags)
{
Memory::Virtual(KernelPageTable).Map(VirtualAddress, PhysicalAddress, Flags);
}
extern "C" void Xalloc_UNMAP_MEMORY(void *VirtualAddress)
{
Memory::Virtual(KernelPageTable).Unmap(VirtualAddress);
}

View File

@@ -0,0 +1,119 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef __FENNIX_KERNEL_Xalloc_H__
#define __FENNIX_KERNEL_Xalloc_H__
#include <memory.hpp>
#include <lock.hpp>
#include <debug.h>
typedef long unsigned Xuint64_t;
typedef long unsigned Xsize_t;
#define Xalloc_StopOnFail true
#define Xalloc_PAGE_SIZE PAGE_SIZE
#define Xalloc_trace(m, ...) trace(m, ##__VA_ARGS__)
#define Xalloc_warn(m, ...) warn(m, ##__VA_ARGS__)
#define Xalloc_err(m, ...) error(m, ##__VA_ARGS__)
#define Xalloc_def NewLock(XallocLock)
#define Xalloc_lock XallocLock.Lock(__FUNCTION__)
#define Xalloc_unlock XallocLock.Unlock()
namespace Xalloc
{
class V1
{
private:
void *BaseVirtualAddress = nullptr;
void *FirstBlock = nullptr;
void *LastBlock = nullptr;
bool UserMapping = false;
bool SMAPUsed = false;
public:
/** @brief Execute "stac" instruction if the kernel has SMAP enabled */
void Xstac();
/** @brief Execute "clac" instruction if the kernel has SMAP enabled */
void Xclac();
/**
* @brief Arrange the blocks to optimize the memory usage
* The allocator is not arranged by default
* to avoid performance issues.
* This function will defragment the memory
* and free the unused blocks.
*
* You should call this function when the
* kernel is idle or when it is not using
* the allocator.
*/
void Arrange();
/**
* @brief Allocate a new memory block
*
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated block.
*/
void *malloc(Xsize_t Size);
/**
* @brief Free a previously allocated block
*
* @param Address Address of the block to free.
*/
void free(void *Address);
/**
* @brief Allocate a new memory block
*
* @param NumberOfBlocks Number of blocks to allocate.
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated block.
*/
void *calloc(Xsize_t NumberOfBlocks, Xsize_t Size);
/**
* @brief Reallocate a previously allocated block
*
* @param Address Address of the block to reallocate.
* @param Size New size of the block.
* @return void* Pointer to the reallocated block.
*/
void *realloc(void *Address, Xsize_t Size);
/**
* @brief Construct a new Allocator object
*
* @param BaseVirtualAddress Virtual address to map the pages.
* @param UserMode Map the new pages with the USER flag?
* @param SMAPEnabled Does the kernel have Supervisor Mode Access Prevention (SMAP) enabled?
*/
V1(void *BaseVirtualAddress, bool UserMode, bool SMAPEnabled);
/**
* @brief Destroy the Allocator object
*
*/
~V1();
};
}
#endif // !__FENNIX_KERNEL_Xalloc_H__

View File

@@ -0,0 +1,290 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include "Xalloc.hpp"
Xalloc_def;
#define XALLOC_CONCAT_(x, y) x##y
#define XALLOC_CONCAT(x, y) XALLOC_CONCAT_(x, y) /* Two levels so __COUNTER__ expands before pasting */
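/* XStoP: convert a size in bytes to a page count (rounded up).
XPtoS: convert a page count back to a size in bytes. */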
#define XStoP(d) (((d) + PAGE_SIZE - 1) / PAGE_SIZE)
#define XPtoS(d) ((d)*PAGE_SIZE)
#define Xalloc_BlockChecksum 0xA110C
extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages);
extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages);
extern "C" void Xalloc_MAP_MEMORY(void *VirtualAddress, void *PhysicalAddress, Xsize_t Flags);
extern "C" void Xalloc_UNMAP_MEMORY(void *VirtualAddress);
// TODO: Change memcpy with an optimized version
void *Xmemcpy(void *__restrict__ Destination, const void *__restrict__ Source, Xuint64_t Length)
{
unsigned char *dst = (unsigned char *)Destination;
const unsigned char *src = (const unsigned char *)Source;
for (Xuint64_t i = 0; i < Length; i++)
dst[i] = src[i];
return Destination;
}
// TODO: Change memset with an optimized version
void *Xmemset(void *__restrict__ Destination, int Data, Xuint64_t Length)
{
unsigned char *Buffer = (unsigned char *)Destination;
for (Xuint64_t i = 0; i < Length; i++)
Buffer[i] = (unsigned char)Data;
return Destination;
}
namespace Xalloc
{
class Block
{
public:
void *Address = nullptr;
int Checksum = Xalloc_BlockChecksum;
Xsize_t Size = 0;
Block *Next = nullptr;
Block *Last = nullptr;
bool IsFree = true;
bool Check()
{
if (this->Checksum != Xalloc_BlockChecksum)
return false;
return true;
}
Block(Xsize_t Size)
{
this->Address = Xalloc_REQUEST_PAGES(XStoP(Size + 1));
this->Size = Size;
Xmemset(this->Address, 0, Size);
}
~Block()
{
Xalloc_FREE_PAGES(this->Address, XStoP(this->Size + 1));
}
/**
* @brief Overload new operator to allocate memory from the heap
* @param Size Unused
* @return void* Pointer to the allocated memory
*/
void *operator new(Xsize_t Size)
{
(void)Size; /* Size is unused; Block allocations are fixed-size. */
void *ptr = Xalloc_REQUEST_PAGES(XStoP(sizeof(Block)));
return ptr;
}
/**
* @brief Overload delete operator to free memory from the heap
* @param Address Pointer to the memory to free
*/
void operator delete(void *Address)
{
Xalloc_FREE_PAGES(Address, XStoP(sizeof(Block)));
}
} __attribute__((packed, aligned((16))));
class SmartSMAPClass
{
private:
V1 *allocator = nullptr;
public:
SmartSMAPClass(V1 *allocator)
{
this->allocator = allocator;
this->allocator->Xstac();
}
~SmartSMAPClass() { this->allocator->Xclac(); }
};
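/* Declares a scope-local SmartSMAPClass guard with a unique name (via
__COUNTER__): "SmartSMAP;" runs stac on entry and clac when the enclosing
scope ends. */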
#define SmartSMAP SmartSMAPClass XALLOC_CONCAT(SmartSMAP##_, __COUNTER__)(this)
void V1::Xstac()
{
if (this->SMAPUsed)
{
#if defined(a86)
asm volatile("stac" ::
: "cc");
#endif
}
}
void V1::Xclac()
{
if (this->SMAPUsed)
{
#if defined(a86)
asm volatile("clac" ::
: "cc");
#endif
}
}
void V1::Arrange()
{
Xalloc_err("Arrange() is not implemented yet!");
}
void *V1::malloc(Xsize_t Size)
{
if (Size == 0)
{
Xalloc_warn("Attempted to allocate 0 bytes!");
return nullptr;
}
SmartSMAP;
Xalloc_lock;
if (this->FirstBlock == nullptr)
{
this->FirstBlock = new Block(Size);
((Block *)this->FirstBlock)->IsFree = false;
Xalloc_unlock;
return ((Block *)this->FirstBlock)->Address;
}
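/* First fit: reuse the first free block in the list that is large enough. */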
Block *CurrentBlock = ((Block *)this->FirstBlock);
while (CurrentBlock != nullptr)
{
if (!CurrentBlock->Check())
{
Xalloc_err("Block %#lx has an invalid checksum! (%#x != %#x)",
(Xuint64_t)CurrentBlock, CurrentBlock->Checksum, Xalloc_BlockChecksum);
while (Xalloc_StopOnFail)
;
}
else if (CurrentBlock->IsFree && CurrentBlock->Size >= Size)
{
CurrentBlock->IsFree = false;
Xmemset(CurrentBlock->Address, 0, Size);
Xalloc_unlock;
return CurrentBlock->Address;
}
CurrentBlock = CurrentBlock->Next;
}
CurrentBlock = ((Block *)this->FirstBlock);
while (CurrentBlock->Next != nullptr)
CurrentBlock = CurrentBlock->Next;
CurrentBlock->Next = new Block(Size);
((Block *)CurrentBlock->Next)->Last = CurrentBlock;
((Block *)CurrentBlock->Next)->IsFree = false;
Xalloc_unlock;
return ((Block *)CurrentBlock->Next)->Address;
}
void V1::free(void *Address)
{
if (Address == nullptr)
{
Xalloc_warn("Attempted to free a null pointer!");
return;
}
SmartSMAP;
Xalloc_lock;
Block *CurrentBlock = ((Block *)this->FirstBlock);
while (CurrentBlock != nullptr)
{
if (!CurrentBlock->Check())
{
Xalloc_err("Block %#lx has an invalid checksum! (%#x != %#x)",
(Xuint64_t)CurrentBlock, CurrentBlock->Checksum, Xalloc_BlockChecksum);
while (Xalloc_StopOnFail)
;
}
else if (CurrentBlock->Address == Address)
{
if (CurrentBlock->IsFree)
{
Xalloc_warn("Attempted to free an already freed pointer!");
Xalloc_unlock;
return;
}
CurrentBlock->IsFree = true;
Xalloc_unlock;
return;
}
CurrentBlock = CurrentBlock->Next;
}
Xalloc_err("Invalid address %#lx.", Address);
Xalloc_unlock;
}
void *V1::calloc(Xsize_t NumberOfBlocks, Xsize_t Size)
{
if (NumberOfBlocks == 0 || Size == 0)
{
Xalloc_warn("The %s%s%s is 0!",
NumberOfBlocks == 0 ? "NumberOfBlocks" : "",
NumberOfBlocks == 0 && Size == 0 ? " and " : "",
Size == 0 ? "Size" : "");
return nullptr;
}
return this->malloc(NumberOfBlocks * Size);
}
void *V1::realloc(void *Address, Xsize_t Size)
{
if (Address == nullptr)
return this->malloc(Size);
if (Size == 0)
{
this->free(Address);
return nullptr;
}
// SmartSMAP;
// Xalloc_lock;
// ...
// Xalloc_unlock;
// TODO: Implement realloc
this->free(Address);
return this->malloc(Size);
}
V1::V1(void *BaseVirtualAddress, bool UserMode, bool SMAPEnabled)
{
SmartSMAP;
Xalloc_lock;
this->SMAPUsed = SMAPEnabled;
this->UserMapping = UserMode;
this->BaseVirtualAddress = BaseVirtualAddress;
Xalloc_unlock;
}
V1::~V1()
{
SmartSMAP;
Xalloc_lock;
Xalloc_trace("Destructor not implemented yet.");
Xalloc_unlock;
}
}

View File

@@ -0,0 +1,818 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <convert.h>
#include <lock.hpp>
#include <debug.h>
#ifdef DEBUG
#include <uart.hpp>
#endif
#include "HeapAllocators/Xalloc/Xalloc.hpp"
#include "../Library/liballoc_1_1.h"
#include "../../kernel.h"
// #define DEBUG_ALLOCATIONS_SL 1
// #define DEBUG_ALLOCATIONS 1
#ifdef DEBUG_ALLOCATIONS
#define memdbg(m, ...) \
debug(m, ##__VA_ARGS__); \
__sync
#else
#define memdbg(m, ...)
#endif
#ifdef DEBUG_ALLOCATIONS_SL
NewLock(AllocatorLock);
NewLock(OperatorAllocatorLock);
#endif
using namespace Memory;
Physical KernelAllocator;
PageTable4 *KernelPageTable = nullptr;
bool Page1GBSupport = false;
bool PSESupport = false;
static MemoryAllocatorType AllocatorType = MemoryAllocatorType::Pages;
Xalloc::V1 *XallocV1Allocator = nullptr;
#ifdef DEBUG
NIF void tracepagetable(PageTable4 *pt)
{
for (int i = 0; i < 512; i++)
{
#if defined(a64)
if (pt->Entries[i].Present)
debug("Entry %03d: %x %x %x %x %x %x %x %p-%#llx", i,
pt->Entries[i].Present, pt->Entries[i].ReadWrite,
pt->Entries[i].UserSupervisor, pt->Entries[i].WriteThrough,
pt->Entries[i].CacheDisable, pt->Entries[i].Accessed,
pt->Entries[i].ExecuteDisable, pt->Entries[i].Address << 12,
pt->Entries[i]);
#elif defined(a32)
#elif defined(aa64)
#endif
}
}
#endif
NIF void MapFromZero(PageTable4 *PT, BootInfo *Info)
{
debug("Mapping from 0x0 to %#llx", Info->Memory.Size);
Virtual va = Virtual(PT);
size_t MemSize = Info->Memory.Size;
if (Page1GBSupport && PSESupport)
{
/* Map the first 100MB of memory as 4KB pages */
// uintptr_t Physical4KBSectionStart = 0x10000000;
// va.Map((void *)0,
// (void *)0,
// Physical4KBSectionStart,
// PTFlag::RW);
// va.Map((void *)Physical4KBSectionStart,
// (void *)Physical4KBSectionStart,
// MemSize - Physical4KBSectionStart,
// PTFlag::RW,
// Virtual::MapType::OneGB);
va.Map((void *)0, (void *)0, MemSize, PTFlag::RW);
}
else
va.Map((void *)0, (void *)0, MemSize, PTFlag::RW);
va.Unmap((void *)0);
}
NIF void MapFramebuffer(PageTable4 *PT, BootInfo *Info)
{
debug("Mapping Framebuffer");
Virtual va = Virtual(PT);
int itrfb = 0;
while (1)
{
if (!Info->Framebuffer[itrfb].BaseAddress)
break;
va.OptimizedMap((void *)Info->Framebuffer[itrfb].BaseAddress,
(void *)Info->Framebuffer[itrfb].BaseAddress,
Info->Framebuffer[itrfb].Pitch * Info->Framebuffer[itrfb].Height,
PTFlag::RW | PTFlag::US | PTFlag::G);
itrfb++;
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "Rsrv( %p %ld )\n\r",
Info->Framebuffer[itrfb].BaseAddress,
(Info->Framebuffer[itrfb].Pitch * Info->Framebuffer[itrfb].Height) + PAGE_SIZE);
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
}
}
NIF void MapKernel(PageTable4 *PT, BootInfo *Info)
{
debug("Mapping Kernel");
uintptr_t BootstrapStart = (uintptr_t)&_bootstrap_start;
uintptr_t KernelStart = (uintptr_t)&_kernel_start;
uintptr_t KernelTextEnd = (uintptr_t)&_kernel_text_end;
uintptr_t KernelDataEnd = (uintptr_t)&_kernel_data_end;
uintptr_t KernelRoDataEnd = (uintptr_t)&_kernel_rodata_end;
uintptr_t KernelEnd = (uintptr_t)&_kernel_end;
uintptr_t KernelFileStart = (uintptr_t)Info->Kernel.FileBase;
uintptr_t KernelFileEnd = KernelFileStart + Info->Kernel.Size;
debug("Kernel physical address: %#lx - %#lx", Info->Kernel.PhysicalBase, (uintptr_t)Info->Kernel.PhysicalBase + Info->Kernel.Size);
debug("Kernel file base: %#lx - %#lx", KernelFileStart, KernelFileEnd);
debug("File size: %ld KB", TO_KB(Info->Kernel.Size));
debug(".text size: %ld KB", TO_KB(KernelTextEnd - KernelStart));
debug(".data size: %ld KB", TO_KB(KernelDataEnd - KernelTextEnd));
debug(".rodata size: %ld KB", TO_KB(KernelRoDataEnd - KernelDataEnd));
debug(".bss size: %ld KB", TO_KB(KernelEnd - KernelRoDataEnd));
uintptr_t BaseKernelMapAddress = (uintptr_t)Info->Kernel.PhysicalBase;
uintptr_t k;
Virtual va = Virtual(PT);
/* Bootstrap section */
for (k = BootstrapStart; k < KernelStart - KERNEL_VMA_OFFSET; k += PAGE_SIZE)
{
#ifdef DEBUG /* vscode debugging */
void *BKMA = (void *)BaseKernelMapAddress, *K_ = (void *)k;
#endif
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
/* Text section */
for (k = KernelStart; k < KernelTextEnd; k += PAGE_SIZE)
{
#ifdef DEBUG /* vscode debugging */
void *BKMA = (void *)BaseKernelMapAddress, *K_ = (void *)k;
#endif
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
/* Data section */
for (k = KernelTextEnd; k < KernelDataEnd; k += PAGE_SIZE)
{
#ifdef DEBUG /* vscode debugging */
void *BKMA = (void *)BaseKernelMapAddress, *K_ = (void *)k;
#endif
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
/* Read only data section */
for (k = KernelDataEnd; k < KernelRoDataEnd; k += PAGE_SIZE)
{
#ifdef DEBUG /* vscode debugging */
void *BKMA = (void *)BaseKernelMapAddress, *K_ = (void *)k;
#endif
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::G);
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
/* BSS section */
for (k = KernelRoDataEnd; k < KernelEnd; k += PAGE_SIZE)
{
#ifdef DEBUG /* vscode debugging */
void *BKMA = (void *)BaseKernelMapAddress, *K_ = (void *)k;
#endif
va.Map((void *)k, (void *)BaseKernelMapAddress, PTFlag::RW | PTFlag::G);
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
debug("BaseKernelMapAddress: %#lx - %#lx", Info->Kernel.PhysicalBase, BaseKernelMapAddress);
/* Kernel file */
if (KernelFileStart != 0)
for (k = KernelFileStart; k < KernelFileEnd; k += PAGE_SIZE)
{
va.Map((void *)k, (void *)k, PTFlag::G);
KernelAllocator.ReservePage((void *)k);
}
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "Rsrv( %p %ld )\n\r",
Info->Kernel.PhysicalBase,
Info->Kernel.Size);
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
sprintf(mExtTrkLog, "Rsrv( %p %ld )\n\r",
Info->Kernel.VirtualBase,
Info->Kernel.Size);
mExtTrkLock.Unlock();
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
}
#endif
}
NIF void InitializeMemoryManagement(BootInfo *Info)
{
#ifdef DEBUG
for (uint64_t i = 0; i < Info->Memory.Entries; i++)
{
uintptr_t Base = r_cst(uintptr_t, Info->Memory.Entry[i].BaseAddress);
size_t Length = Info->Memory.Entry[i].Length;
uintptr_t End = Base + Length;
const char *Type = "Unknown";
switch (Info->Memory.Entry[i].Type)
{
case likely(Usable):
Type = "Usable";
break;
case Reserved:
Type = "Reserved";
break;
case ACPIReclaimable:
Type = "ACPI Reclaimable";
break;
case ACPINVS:
Type = "ACPI NVS";
break;
case BadMemory:
Type = "Bad Memory";
break;
case BootloaderReclaimable:
Type = "Bootloader Reclaimable";
break;
case KernelAndModules:
Type = "Kernel and Modules";
break;
case Framebuffer:
Type = "Framebuffer";
break;
default:
break;
}
debug("%ld: %p-%p %s",
i,
Base,
End,
Type);
}
#endif
trace("Initializing Physical Memory Manager");
// KernelAllocator = Physical(); <- Already called in the constructor
KernelAllocator.Init(Info);
debug("Memory Info: %lldMB / %lldMB (%lldMB reserved)",
TO_MB(KernelAllocator.GetUsedMemory()),
TO_MB(KernelAllocator.GetTotalMemory()),
TO_MB(KernelAllocator.GetReservedMemory()));
/* -- Debugging --
size_t bmap_size = KernelAllocator.GetPageBitmap().Size;
for (size_t i = 0; i < bmap_size; i++)
{
bool idx = KernelAllocator.GetPageBitmap().Get(i);
if (idx == true)
debug("Page %04d: %#lx", i, i * PAGE_SIZE);
}
inf_loop debug("Alloc.: %#lx", KernelAllocator.RequestPage());
*/
trace("Initializing Virtual Memory Manager");
KernelPageTable = (PageTable4 *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE + 1));
debug("Page table allocated at %#lx", KernelPageTable);
memset(KernelPageTable, 0, PAGE_SIZE);
if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_AMD) == 0)
{
CPU::x86::AMD::CPUID0x80000001 cpuid;
cpuid.Get();
PSESupport = cpuid.EDX.PSE;
Page1GBSupport = cpuid.EDX.Page1GB;
}
else if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_INTEL) == 0)
{
CPU::x86::Intel::CPUID0x80000001 cpuid;
cpuid.Get();
fixme("Intel PSE support");
}
if (Page1GBSupport && PSESupport)
{
debug("1GB Page Support Enabled");
#if defined(a64)
CPU::x64::CR4 cr4 = CPU::x64::readcr4();
cr4.PSE = 1;
CPU::x64::writecr4(cr4);
#elif defined(a32)
CPU::x32::CR4 cr4 = CPU::x32::readcr4();
cr4.PSE = 1;
CPU::x32::writecr4(cr4);
#elif defined(aa64)
#endif
}
MapFromZero(KernelPageTable, Info);
MapFramebuffer(KernelPageTable, Info);
MapKernel(KernelPageTable, Info);
trace("Applying new page table from address %#lx", KernelPageTable);
#ifdef DEBUG
tracepagetable(KernelPageTable);
#endif
#if defined(a86)
asmv("mov %0, %%cr3" ::"r"(KernelPageTable));
#elif defined(aa64)
asmv("msr ttbr0_el1, %0" ::"r"(KernelPageTable));
#endif
debug("Page table updated.");
if (strstr(Info->Kernel.CommandLine, "xallocv1"))
{
XallocV1Allocator = new Xalloc::V1((void *)KERNEL_HEAP_BASE, false, false);
AllocatorType = MemoryAllocatorType::XallocV1;
trace("XallocV1 Allocator initialized (%p)", XallocV1Allocator);
}
else if (strstr(Info->Kernel.CommandLine, "liballoc11"))
{
AllocatorType = MemoryAllocatorType::liballoc11;
}
}
void *malloc(size_t Size)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(AllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("malloc(%d)->[%s]", Size, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
void *ret = nullptr;
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
{
ret = KernelAllocator.RequestPages(TO_PAGES(Size + 1));
memset(ret, 0, Size);
break;
}
case MemoryAllocatorType::XallocV1:
{
ret = XallocV1Allocator->malloc(Size);
break;
}
case MemoryAllocatorType::liballoc11:
{
ret = PREFIX(malloc)(Size);
memset(ret, 0, Size);
break;
}
default:
throw;
}
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "malloc( %ld )=%p~%p\n\r",
Size,
ret, __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return ret;
}
void *calloc(size_t n, size_t Size)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(AllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("calloc(%d, %d)->[%s]", n, Size, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
void *ret = nullptr;
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
{
ret = KernelAllocator.RequestPages(TO_PAGES(n * Size + 1));
memset(ret, 0, n * Size);
break;
}
case MemoryAllocatorType::XallocV1:
{
ret = XallocV1Allocator->calloc(n, Size);
break;
}
case MemoryAllocatorType::liballoc11:
{
void *ret = PREFIX(calloc)(n, Size);
memset(ret, 0, n * Size);
return ret;
}
default:
throw;
}
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "calloc( %ld %ld )=%p~%p\n\r",
n, Size,
ret, __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return ret;
}
void *realloc(void *Address, size_t Size)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(AllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("realloc(%#lx, %d)->[%s]", Address, Size, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
void *ret = nullptr;
switch (AllocatorType)
{
case unlikely(MemoryAllocatorType::Pages):
{
ret = KernelAllocator.RequestPages(TO_PAGES(Size + 1)); // WARNING: Potential memory leak
memset(ret, 0, Size);
break;
}
case MemoryAllocatorType::XallocV1:
{
ret = XallocV1Allocator->realloc(Address, Size);
break;
}
case MemoryAllocatorType::liballoc11:
{
void *ret = PREFIX(realloc)(Address, Size);
/* No memset here: realloc must preserve the block's existing contents. */
return ret;
}
default:
throw;
}
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "realloc( %p %ld )=%p~%p\n\r",
Address, Size,
ret, __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return ret;
}
void free(void *Address)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(AllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("free(%#lx)->[%s]", Address, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
switch (AllocatorType)
{
case unlikely(MemoryAllocatorType::Pages):
{
KernelAllocator.FreePage(Address); // WARNING: Potential memory leak
break;
}
case MemoryAllocatorType::XallocV1:
{
XallocV1Allocator->free(Address);
break;
}
case MemoryAllocatorType::liballoc11:
{
PREFIX(free)
(Address);
break;
}
default:
throw;
}
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "free( %p )~%p\n\r",
Address,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
}
void *operator new(size_t Size)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("new(%d)->[%s]", Size, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
void *ret = malloc(Size);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "new( %ld )=%p~%p\n\r",
Size,
ret, __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return ret;
}
void *operator new[](size_t Size)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("new[](%d)->[%s]", Size, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
void *ret = malloc(Size);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "new[]( %ld )=%p~%p\n\r",
Size,
ret, __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return ret;
}
void *operator new(unsigned long Size, std::align_val_t Alignment)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("new(%d, %d)->[%s]", Size, Alignment, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
fixme("operator new with alignment(%#lx) is not implemented", Alignment);
void *ret = malloc(Size);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "new( %ld %#lx )=%p~%p\n\r",
Size, (uintptr_t)Alignment,
ret, __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return ret;
}
void operator delete(void *Pointer)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("delete(%#lx)->[%s]", Pointer, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
free(Pointer);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "delete( %p )~%p\n\r",
Pointer,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
}
void operator delete[](void *Pointer)
{
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("delete[](%#lx)->[%s]", Pointer, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
free(Pointer);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "delete[]( %p )~%p\n\r",
Pointer,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
}
void operator delete(void *Pointer, long unsigned int Size)
{
UNUSED(Size);
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("delete(%#lx, %d)->[%s]", Pointer, Size, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
free(Pointer);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "delete( %p %ld )~%p\n\r",
Pointer, Size,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
}
void operator delete[](void *Pointer, long unsigned int Size)
{
UNUSED(Size);
#ifdef DEBUG_ALLOCATIONS_SL
SmartLockClass lock___COUNTER__(OperatorAllocatorLock, (KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown"));
#endif
memdbg("delete[](%#lx, %d)->[%s]", Pointer, Size, KernelSymbolTable ? KernelSymbolTable->GetSymbolFromAddress((uintptr_t)__builtin_return_address(0)) : "Unknown");
free(Pointer);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "delete[]( %p %ld )~%p\n\r",
Pointer, Size,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
}

View File

@@ -0,0 +1,250 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <debug.h>
#include "../../kernel.h"
namespace Memory
{
ReadFSFunction(MEM_Read)
{
if (!Size)
Size = node->Length;
if (Offset > node->Length)
return 0;
if (Offset + Size > node->Length)
Size = node->Length - Offset;
memcpy(Buffer, (uint8_t *)(node->Address + Offset), Size);
return Size;
}
WriteFSFunction(MEM_Write)
{
if (!Size)
Size = node->Length;
if (Offset > node->Length)
return 0;
if (Offset + Size > node->Length)
Size = node->Length - Offset;
memcpy((uint8_t *)(node->Address + Offset), Buffer, Size);
return Size;
}
VirtualFileSystem::FileSystemOperations mem_op = {
.Name = "mem",
.Read = MEM_Read,
.Write = MEM_Write,
};
uint64_t MemMgr::GetAllocatedMemorySize()
{
uint64_t Size = 0;
foreach (auto ap in AllocatedPagesList)
Size += ap.PageCount;
return FROM_PAGES(Size);
}
bool MemMgr::Add(void *Address, size_t Count)
{
if (Address == nullptr)
{
error("Address is null!");
return false;
}
if (Count == 0)
{
error("Count is 0!");
return false;
}
for (size_t i = 0; i < AllocatedPagesList.size(); i++)
{
if (AllocatedPagesList[i].Address == Address)
{
error("Address already exists!");
return false;
}
else if ((uintptr_t)Address < (uintptr_t)AllocatedPagesList[i].Address)
{
if ((uintptr_t)Address + (Count * PAGE_SIZE) > (uintptr_t)AllocatedPagesList[i].Address)
{
error("Address intersects with an allocated page!");
return false;
}
}
else
{
if ((uintptr_t)AllocatedPagesList[i].Address + (AllocatedPagesList[i].PageCount * PAGE_SIZE) > (uintptr_t)Address)
{
error("Address intersects with an allocated page!");
return false;
}
}
}
if (this->Directory)
{
char FileName[64];
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, Count);
VirtualFileSystem::Node *n = vfs->Create(FileName, VirtualFileSystem::NodeFlags::FILE, this->Directory);
if (n)
{
n->Address = (uintptr_t)Address;
n->Length = Count * PAGE_SIZE;
n->Operator = &mem_op;
}
}
AllocatedPagesList.push_back({Address, Count});
return true;
}
void *MemMgr::RequestPages(size_t Count, bool User)
{
void *Address = KernelAllocator.RequestPages(Count);
for (size_t i = 0; i < Count; i++)
{
int Flags = Memory::PTFlag::RW;
if (User)
Flags |= Memory::PTFlag::US;
Memory::Virtual(this->PageTable).Remap((void *)((uintptr_t)Address + (i * PAGE_SIZE)), (void *)((uint64_t)Address + (i * PAGE_SIZE)), Flags);
}
if (this->Directory)
{
char FileName[64];
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, Count);
VirtualFileSystem::Node *n = vfs->Create(FileName, VirtualFileSystem::NodeFlags::FILE, this->Directory);
if (n) // If null, error or file already exists
{
n->Address = (uintptr_t)Address;
n->Length = Count * PAGE_SIZE;
n->Operator = &mem_op;
}
}
AllocatedPagesList.push_back({Address, Count});
/* For security reasons, we clear the allocated page
if it's a user page. */
if (User)
memset(Address, 0, Count * PAGE_SIZE);
return Address;
}
void MemMgr::FreePages(void *Address, size_t Count)
{
for (size_t i = 0; i < AllocatedPagesList.size(); i++)
{
if (AllocatedPagesList[i].Address == Address)
{
/** TODO: Advanced checks. Allow if the page count is less than the requested one.
* This will allow the user to free only a part of the allocated pages.
*
* But this will be in a separate function because we need to specify if we
* want to free from the start or from the end and return the new address.
*/
if (AllocatedPagesList[i].PageCount != Count)
{
error("Page count mismatch! (Allocated: %lld, Requested: %lld)", AllocatedPagesList[i].PageCount, Count);
return;
}
KernelAllocator.FreePages(Address, Count);
for (size_t i = 0; i < Count; i++)
{
Memory::Virtual(this->PageTable).Remap((void *)((uintptr_t)Address + (i * PAGE_SIZE)), (void *)((uint64_t)Address + (i * PAGE_SIZE)), Memory::PTFlag::RW);
// Memory::Virtual(this->PageTable).Unmap((void *)((uintptr_t)Address + (i * PAGE_SIZE)));
}
if (this->Directory)
{
char FileName[64];
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, Count);
VirtualFileSystem::FileStatus s = vfs->Delete(FileName, false, this->Directory);
if (s != VirtualFileSystem::FileStatus::OK)
error("Failed to delete file %s", FileName);
}
AllocatedPagesList.remove(i);
return;
}
}
}
void MemMgr::DetachAddress(void *Address)
{
for (size_t i = 0; i < AllocatedPagesList.size(); i++)
{
if (AllocatedPagesList[i].Address == Address)
{
if (this->Directory)
{
char FileName[64];
sprintf(FileName, "%lx-%ld", (uintptr_t)Address, AllocatedPagesList[i].PageCount);
VirtualFileSystem::FileStatus s = vfs->Delete(FileName, false, this->Directory);
if (s != VirtualFileSystem::FileStatus::OK)
error("Failed to delete file %s", FileName);
}
AllocatedPagesList.remove(i);
return;
}
}
}
MemMgr::MemMgr(PageTable4 *PageTable, VirtualFileSystem::Node *Directory)
{
if (PageTable)
this->PageTable = PageTable;
else
{
#if defined(a64)
this->PageTable = (PageTable4 *)CPU::x64::readcr3().raw;
#elif defined(a32)
this->PageTable = (PageTable4 *)CPU::x32::readcr3().raw;
#endif
}
this->Directory = Directory;
debug("+ %#lx", this);
}
MemMgr::~MemMgr()
{
foreach (auto ap in AllocatedPagesList)
{
KernelAllocator.FreePages(ap.Address, ap.PageCount);
for (size_t i = 0; i < ap.PageCount; i++)
Memory::Virtual(this->PageTable).Remap((void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)), (void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)), Memory::PTFlag::RW);
}
if (this->Directory)
{
foreach (auto Child in this->Directory->Children)
vfs->Delete(Child, true);
}
debug("- %#lx", this);
}
}

View File

@@ -0,0 +1,45 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
namespace Memory
{
Virtual::PageMapIndexer::PageMapIndexer(uintptr_t VirtualAddress)
{
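/* Split the virtual address into per-level table indices: on x86-64,
four 9-bit indices (PT, PD, PDPT, PML4) starting at bit 12; the 32-bit
build extracts 10-bit indices instead. */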
#if defined(a64)
uintptr_t Address = VirtualAddress;
Address >>= 12;
this->PTEIndex = Address & 0x1FF;
Address >>= 9;
this->PDEIndex = Address & 0x1FF;
Address >>= 9;
this->PDPTEIndex = Address & 0x1FF;
Address >>= 9;
this->PMLIndex = Address & 0x1FF;
#elif defined(a32)
uintptr_t Address = VirtualAddress;
Address >>= 12;
this->PTEIndex = Address & 0x3FF;
Address >>= 10;
this->PDEIndex = Address & 0x3FF;
Address >>= 10;
this->PDPTEIndex = Address & 0x3FF;
#elif defined(aa64)
#endif
}
}

View File

@@ -0,0 +1,541 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <debug.h>
#ifdef DEBUG
#include <uart.hpp>
#endif
#include "../../Architecture/amd64/acpi.hpp"
#include "../../kernel.h"
extern "C" char BootPageTable[]; // 0x10000 in length
namespace Memory
{
uint64_t Physical::GetTotalMemory()
{
SmartLock(this->MemoryLock);
return this->TotalMemory;
}
uint64_t Physical::GetFreeMemory()
{
SmartLock(this->MemoryLock);
return this->FreeMemory;
}
uint64_t Physical::GetReservedMemory()
{
SmartLock(this->MemoryLock);
return this->ReservedMemory;
}
uint64_t Physical::GetUsedMemory()
{
SmartLock(this->MemoryLock);
return this->UsedMemory;
}
bool Physical::SwapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::SwapPages(void *Address, size_t PageCount)
{
for (size_t i = 0; i < PageCount; i++)
{
if (!this->SwapPage((void *)((uintptr_t)Address + (i * PAGE_SIZE))))
return false;
}
return true;
}
bool Physical::UnswapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::UnswapPages(void *Address, size_t PageCount)
{
for (size_t i = 0; i < PageCount; i++)
{
if (!this->UnswapPage((void *)((uintptr_t)Address + (i * PAGE_SIZE))))
return false;
}
return true;
}
void *Physical::RequestPage()
{
SmartLock(this->MemoryLock);
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "RequestPage( )=%p~%p\n\r",
(void *)(PageBitmapIndex * PAGE_SIZE), __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
if (this->SwapPage((void *)(PageBitmapIndex * PAGE_SIZE)))
{
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
error("Out of memory! (Free: %ldMB; Used: %ldMB; Reserved: %ldMB)", TO_MB(FreeMemory), TO_MB(UsedMemory), TO_MB(ReservedMemory));
CPU::Stop();
__builtin_unreachable();
}
void *Physical::RequestPages(size_t Count)
{
SmartLock(this->MemoryLock);
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
for (uint64_t Index = PageBitmapIndex; Index < PageBitmap.Size * 8; Index++)
{
if (PageBitmap[Index] == true)
continue;
for (size_t i = 0; i < Count; i++)
{
if (PageBitmap[Index + i] == true)
goto NextPage;
}
this->LockPages((void *)(Index * PAGE_SIZE), Count);
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "RequestPages( %ld )=%p~%p\n\r",
Count,
(void *)(Index * PAGE_SIZE), __builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
return (void *)(Index * PAGE_SIZE);
NextPage:
Index += Count;
continue;
}
}
if (this->SwapPages((void *)(PageBitmapIndex * PAGE_SIZE), Count))
{
this->LockPages((void *)(PageBitmapIndex * PAGE_SIZE), Count);
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
error("Out of memory! (Free: %ldMB; Used: %ldMB; Reserved: %ldMB)", TO_MB(FreeMemory), TO_MB(UsedMemory), TO_MB(ReservedMemory));
CPU::Halt(true);
__builtin_unreachable();
}
void Physical::FreePage(void *Address)
{
SmartLock(this->MemoryLock);
if (unlikely(Address == nullptr))
{
warn("Null pointer passed to FreePage.");
return;
}
size_t Index = (size_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
{
warn("Tried to free an already free page. (%p)", Address);
return;
}
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
UsedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "FreePage( %p )~%p\n\r",
Address,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
}
void Physical::FreePages(void *Address, size_t Count)
{
if (unlikely(Address == nullptr || Count == 0))
{
warn("%s%s%s passed to FreePages.", Address == nullptr ? "Null pointer " : "", Address == nullptr && Count == 0 ? "and " : "", Count == 0 ? "Zero count" : "");
return;
}
#ifdef DEBUG
if (EnableExternalMemoryTracer)
{
char LockTmpStr[64];
strcpy_unsafe(LockTmpStr, __FUNCTION__);
strcat_unsafe(LockTmpStr, "_memTrk");
mExtTrkLock.TimeoutLock(LockTmpStr, 10000);
sprintf(mExtTrkLog, "!FreePages( %p %ld )~%p\n\r",
Address, Count,
__builtin_return_address(0));
UniversalAsynchronousReceiverTransmitter::UART mTrkUART = UniversalAsynchronousReceiverTransmitter::UART(UniversalAsynchronousReceiverTransmitter::COM3);
for (short i = 0; i < MEM_TRK_MAX_SIZE; i++)
{
if (mExtTrkLog[i] == '\r')
break;
mTrkUART.Write(mExtTrkLog[i]);
}
mExtTrkLock.Unlock();
}
#endif
for (size_t t = 0; t < Count; t++)
this->FreePage((void *)((uintptr_t)Address + (t * PAGE_SIZE)));
}
void Physical::LockPage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to lock null address.");
uintptr_t Index = (uintptr_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
UsedMemory += PAGE_SIZE;
}
}
void Physical::LockPages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to lock %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
for (size_t i = 0; i < PageCount; i++)
this->LockPage((void *)((uintptr_t)Address + (i * PAGE_SIZE)));
}
void Physical::ReservePage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to reserve null address.");
uintptr_t Index = (Address == NULL) ? 0 : (uintptr_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
ReservedMemory += PAGE_SIZE;
}
}
void Physical::ReservePages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to reserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
for (size_t t = 0; t < PageCount; t++)
{
uintptr_t Index = ((uintptr_t)Address + (t * PAGE_SIZE)) / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory -= PAGE_SIZE;
ReservedMemory += PAGE_SIZE;
}
}
}
void Physical::UnreservePage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to unreserve null address.");
uintptr_t Index = (Address == NULL) ? 0 : (uintptr_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
return;
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
ReservedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
void Physical::UnreservePages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to unreserve %s%s.", Address ? "null address" : "", PageCount ? "0 pages" : "");
for (size_t t = 0; t < PageCount; t++)
{
uintptr_t Index = ((uintptr_t)Address + (t * PAGE_SIZE)) / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
return;
if (PageBitmap.Set(Index, false))
{
FreeMemory += PAGE_SIZE;
ReservedMemory -= PAGE_SIZE;
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
}
void Physical::Init(BootInfo *Info)
{
SmartLock(this->MemoryLock);
uint64_t MemorySize = Info->Memory.Size;
debug("Memory size: %lld bytes (%ld pages)", MemorySize, TO_PAGES(MemorySize));
TotalMemory = MemorySize;
FreeMemory = MemorySize;
size_t BitmapSize = (MemorySize / PAGE_SIZE) / 8 + 1;
void *LargestFreeMemorySegment = nullptr;
uint64_t LargestFreeMemorySegmentSize = 0;
for (uint64_t i = 0; i < Info->Memory.Entries; i++)
{
if (Info->Memory.Entry[i].Type == Usable)
{
if (Info->Memory.Entry[i].Length > LargestFreeMemorySegmentSize)
{
/* We don't want to use 0 as a memory address. */
if (Info->Memory.Entry[i].BaseAddress == 0x0)
{
debug("Ignoring memory segment at 0x0");
continue;
}
if (Info->Memory.Entry[i].Length > BitmapSize + 0x1000)
{
LargestFreeMemorySegment = (void *)Info->Memory.Entry[i].BaseAddress;
LargestFreeMemorySegmentSize = Info->Memory.Entry[i].Length;
#define ROUND_UP(N, S) ((((N) + (S)-1) / (S)) * (S))
if (LargestFreeMemorySegment >= Info->Kernel.PhysicalBase &&
LargestFreeMemorySegment <= (void *)((uintptr_t)Info->Kernel.PhysicalBase + Info->Kernel.Size))
{
debug("Kernel range: %#lx-%#lx", Info->Kernel.PhysicalBase, (void *)((uintptr_t)Info->Kernel.PhysicalBase + Info->Kernel.Size));
void *NewLargestFreeMemorySegment = (void *)((uintptr_t)Info->Kernel.PhysicalBase + Info->Kernel.Size);
void *RoundNewLargestFreeMemorySegment = (void *)ROUND_UP((uintptr_t)NewLargestFreeMemorySegment, PAGE_SIZE);
RoundNewLargestFreeMemorySegment = (void *)((uintptr_t)RoundNewLargestFreeMemorySegment + PAGE_SIZE); /* Leave a page between the kernel and the bitmap */
debug("Rounding %p to %p", NewLargestFreeMemorySegment, RoundNewLargestFreeMemorySegment);
info("Memory bitmap's memory segment is in the kernel, moving it to %p", RoundNewLargestFreeMemorySegment);
LargestFreeMemorySegmentSize = (uintptr_t)LargestFreeMemorySegmentSize - ((uintptr_t)RoundNewLargestFreeMemorySegment - (uintptr_t)LargestFreeMemorySegment);
LargestFreeMemorySegment = RoundNewLargestFreeMemorySegment;
}
#undef ROUND_UP
if (LargestFreeMemorySegmentSize < BitmapSize + 0x1000)
{
trace("Largest free memory segment is too small (%lld bytes), skipping...",
LargestFreeMemorySegmentSize);
continue;
}
debug("Found a memory segment of %lld bytes (%lldMB) at %llp (out segment is %lld bytes (%lldKB)))",
LargestFreeMemorySegmentSize,
TO_MB(LargestFreeMemorySegmentSize),
LargestFreeMemorySegment,
BitmapSize,
TO_KB(BitmapSize));
break;
}
// LargestFreeMemorySegment = (void *)Info->Memory.Entry[i].BaseAddress;
// LargestFreeMemorySegmentSize = Info->Memory.Entry[i].Length;
// debug("Largest free memory segment: %llp (%lldMB)",
// (void *)Info->Memory.Entry[i].BaseAddress,
// TO_MB(Info->Memory.Entry[i].Length));
}
}
}
if (LargestFreeMemorySegment == nullptr)
{
error("No free memory found!");
CPU::Stop();
}
/* TODO: Read the swap config and configure the bitmap size correctly */
debug("Initializing Bitmap at %llp-%llp (%lld Bytes)",
LargestFreeMemorySegment,
(void *)((uintptr_t)LargestFreeMemorySegment + BitmapSize),
BitmapSize);
PageBitmap.Size = BitmapSize;
PageBitmap.Buffer = (uint8_t *)LargestFreeMemorySegment;
for (size_t i = 0; i < BitmapSize; i++)
*(uint8_t *)(PageBitmap.Buffer + i) = 0;
debug("Reserving pages...");
this->ReservePages(0, TO_PAGES(Info->Memory.Size));
debug("Unreserving usable pages...");
for (uint64_t i = 0; i < Info->Memory.Entries; i++)
{
if (Info->Memory.Entry[i].Type == Usable && Info->Memory.Entry[i].BaseAddress != 0x0)
this->UnreservePages(Info->Memory.Entry[i].BaseAddress, TO_PAGES(Info->Memory.Entry[i].Length));
}
debug("Reserving pages for SMP...");
this->ReservePage((void *)0x0); /* Trampoline stack, gdt, idt, etc... */
this->ReservePages((void *)0x2000, 4); /* TRAMPOLINE_START */
debug("Reserving bitmap region %#lx-%#lx...", PageBitmap.Buffer, (void *)((uintptr_t)PageBitmap.Buffer + PageBitmap.Size));
this->ReservePages(PageBitmap.Buffer, TO_PAGES(PageBitmap.Size));
// debug("Reserving page table...");
// this->ReservePages(BootPageTable, TO_PAGES(0x10000)); << in the bootstrap region
debug("Reserving kernel bootstrap region %#lx-%#lx...", &_bootstrap_start, &_bootstrap_end);
this->ReservePages(&_bootstrap_start, TO_PAGES((uintptr_t)&_bootstrap_end - (uintptr_t)&_bootstrap_start));
void *KernelPhysicalStart = (void *)(((uintptr_t)&_kernel_start - KERNEL_VMA_OFFSET));
void *KernelPhysicalEnd = (void *)(((uintptr_t)&_kernel_end - KERNEL_VMA_OFFSET));
debug("Reserving kernel region %#lx-%#lx...", KernelPhysicalStart, KernelPhysicalEnd);
this->ReservePages((void *)KernelPhysicalStart, TO_PAGES((uintptr_t)&_kernel_end - (uintptr_t)&_kernel_start));
ACPI::ACPI::ACPIHeader *hdr = nullptr;
bool XSDT = false;
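	/* ACPI 2.0+ (revision >= 2) exposes the XSDT with 64-bit table pointers; fall back to the 32-bit RSDT on older firmware. */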
if (Info->RSDP->Revision >= 2 && Info->RSDP->XSDTAddress)
{
hdr = (ACPI::ACPI::ACPIHeader *)(Info->RSDP->XSDTAddress);
XSDT = true;
}
else
{
hdr = (ACPI::ACPI::ACPIHeader *)(uintptr_t)Info->RSDP->RSDTAddress;
}
debug("Reserving RSDT...");
this->ReservePages((void*)Info->RSDP, TO_PAGES(sizeof(BootInfo::RSDPInfo)));
debug("Reserving ACPI tables...");
uint64_t TableSize = ((hdr->Length - sizeof(ACPI::ACPI::ACPIHeader)) / (XSDT ? 8 : 4));
debug("Table size: %lld", TableSize);
for (uint64_t t = 0; t < TableSize; t++)
{
// TODO: Should I be concerned about unaligned memory access?
ACPI::ACPI::ACPIHeader *SDTHdr = nullptr;
if (XSDT)
SDTHdr = (ACPI::ACPI::ACPIHeader *)(*(uint64_t *)((uint64_t)hdr + sizeof(ACPI::ACPI::ACPIHeader) + (t * 8)));
else
SDTHdr = (ACPI::ACPI::ACPIHeader *)(*(uint32_t *)((uint64_t)hdr + sizeof(ACPI::ACPI::ACPIHeader) + (t * 4)));
this->ReservePages(SDTHdr, TO_PAGES(SDTHdr->Length));
}
debug("Reserving kernel modules...");
for (uint64_t i = 0; i < MAX_MODULES; i++)
{
if (Info->Modules[i].Address == 0x0)
continue;
debug("Reserving module %s (%#lx-%#lx)...", Info->Modules[i].CommandLine,
Info->Modules[i].Address, (void *)((uintptr_t)Info->Modules[i].Address + Info->Modules[i].Size));
this->ReservePages((void *)Info->Modules[i].Address, TO_PAGES(Info->Modules[i].Size));
}
}
Physical::Physical() {}
Physical::~Physical() {}
}

View File

@ -0,0 +1,110 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <debug.h>
namespace Memory
{
StackGuard::StackGuard(bool User, PageTable4 *Table)
{
this->UserMode = User;
this->Table = Table;
if (this->UserMode)
{
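		/* Allocate physical backing for the user stack and map it at USER_STACK_BASE, writable and user-accessible. */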
void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE + 1));
memset(AllocatedStack, 0, USER_STACK_SIZE);
debug("AllocatedStack: %p", AllocatedStack);
Virtual va = Virtual(Table);
for (size_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
{
va.Map((void *)(USER_STACK_BASE + (i * PAGE_SIZE)),
(void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)),
PTFlag::RW | PTFlag::US);
debug("Mapped %p to %p", (void *)(USER_STACK_BASE + (i * PAGE_SIZE)),
(void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)));
}
this->StackBottom = (void *)USER_STACK_BASE;
this->StackTop = (void *)(USER_STACK_BASE + USER_STACK_SIZE);
this->StackPhyiscalBottom = AllocatedStack;
this->StackPhyiscalTop = (void *)((uintptr_t)AllocatedStack + USER_STACK_SIZE);
this->Size = USER_STACK_SIZE;
}
else
{
this->StackBottom = KernelAllocator.RequestPages(TO_PAGES(STACK_SIZE + 1));
memset(this->StackBottom, 0, STACK_SIZE);
debug("StackBottom: %p", this->StackBottom);
this->StackTop = (void *)((uintptr_t)this->StackBottom + STACK_SIZE);
this->StackPhyiscalBottom = this->StackBottom;
this->StackPhyiscalTop = this->StackTop;
this->Size = STACK_SIZE;
}
debug("Allocated stack at %p", this->StackBottom);
}
StackGuard::~StackGuard()
{
fixme("Temporarily disabled stack guard deallocation");
// KernelAllocator.FreePages(this->StackBottom, TO_PAGES(this->Size + 1));
// debug("Freed stack at %p", this->StackBottom);
}
bool StackGuard::Expand(uintptr_t FaultAddress)
{
if (this->UserMode)
{
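		/* Expand only when the fault lies within one stack-size window below the current bottom (or inside the stack itself); anything outside that window is a genuine fault. */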
if (FaultAddress < (uintptr_t)this->StackBottom - USER_STACK_SIZE ||
FaultAddress > (uintptr_t)this->StackTop)
{
return false; /* It's not about the stack. */
}
else
{
void *AllocatedStack = KernelAllocator.RequestPages(TO_PAGES(USER_STACK_SIZE + 1));
debug("AllocatedStack: %p", AllocatedStack);
memset(AllocatedStack, 0, USER_STACK_SIZE);
Virtual va = Virtual(this->Table);
for (uintptr_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
{
va.Map((void *)((uintptr_t)this->StackBottom - (i * PAGE_SIZE)), (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)), PTFlag::RW | PTFlag::US);
debug("Mapped %p to %p", (void *)((uintptr_t)this->StackBottom - (i * PAGE_SIZE)), (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE)));
}
this->StackBottom = (void *)((uintptr_t)this->StackBottom - USER_STACK_SIZE);
this->Size += USER_STACK_SIZE;
info("Stack expanded to %p", this->StackBottom);
return true;
}
}
else
{
fixme("Not implemented and probably not needed");
return false;
}
}
}

View File

@ -0,0 +1,304 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <convert.h>
#include <debug.h>
namespace Memory
{
bool Virtual::Check(void *VirtualAddress, PTFlag Flag, MapType Type)
{
// Mask the address down to its 0x1000 (4 KiB) page boundary before walking the tables
uintptr_t Address = (uintptr_t)VirtualAddress;
Address &= 0xFFFFFFFFFFFFF000;
PageMapIndexer Index = PageMapIndexer(Address);
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTE = nullptr;
PageDirectoryEntryPtr *PDE = nullptr;
PageTableEntryPtr *PTE = nullptr;
if ((PML4.raw & Flag) > 0)
{
PDPTE = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
if (PDPTE)
{
if ((PDPTE->Entries[Index.PDPTEIndex].Present))
{
if (Type == MapType::OneGB && PDPTE->Entries[Index.PDPTEIndex].PageSize)
return true;
PDE = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
if (PDE)
{
if (Type == MapType::TwoMB && PDE->Entries[Index.PDEIndex].PageSize)
return true;
if ((PDE->Entries[Index.PDEIndex].Present))
{
PTE = (PageTableEntryPtr *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
if (PTE)
{
if ((PTE->Entries[Index.PTEIndex].Present))
return true;
}
}
}
}
}
}
return false;
}
void *Virtual::GetPhysical(void *VirtualAddress)
{
// Mask the address down to its 0x1000 (4 KiB) page boundary before walking the tables
uintptr_t Address = (uintptr_t)VirtualAddress;
Address &= 0xFFFFFFFFFFFFF000;
PageMapIndexer Index = PageMapIndexer(Address);
PageMapLevel4 PML4 = this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTE = nullptr;
PageDirectoryEntryPtr *PDE = nullptr;
PageTableEntryPtr *PTE = nullptr;
if (PML4.Present)
{
PDPTE = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4.GetAddress() << 12);
if (PDPTE)
{
if (PDPTE->Entries[Index.PDPTEIndex].Present)
{
if (PDPTE->Entries[Index.PDPTEIndex].PageSize)
return (void *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
PDE = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Entries[Index.PDPTEIndex].GetAddress() << 12);
if (PDE)
{
if (PDE->Entries[Index.PDEIndex].Present)
{
if (PDE->Entries[Index.PDEIndex].PageSize)
return (void *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
PTE = (PageTableEntryPtr *)((uintptr_t)PDE->Entries[Index.PDEIndex].GetAddress() << 12);
if (PTE)
{
if (PTE->Entries[Index.PTEIndex].Present)
return (void *)((uintptr_t)PTE->Entries[Index.PTEIndex].GetAddress() << 12);
}
}
}
}
}
}
return nullptr;
}
void Virtual::Map(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags, MapType Type)
{
SmartLock(this->MemoryLock);
if (unlikely(!this->Table))
{
error("No page table");
return;
}
Flags |= PTFlag::P;
PageMapIndexer Index = PageMapIndexer((uintptr_t)VirtualAddress);
// Keep only bits 0-5 (1 << 0 Present through 1 << 5 Accessed) for directory entries; the remaining flag bits are meaningful only in leaf page table entries
uint64_t DirectoryFlags = Flags & 0x3F;
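	/* Walk PML4 -> PDPT -> PD -> PT, allocating any level that is not yet present. */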
PageMapLevel4 *PML4 = &this->Table->Entries[Index.PMLIndex];
PageDirectoryPointerTableEntryPtr *PDPTEPtr = nullptr;
if (!PML4->Present)
{
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageDirectoryPointerTableEntryPtr) + 1));
memset(PDPTEPtr, 0, sizeof(PageDirectoryPointerTableEntryPtr));
PML4->Present = true;
PML4->SetAddress((uintptr_t)PDPTEPtr >> 12);
}
else
PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)(PML4->GetAddress() << 12);
PML4->raw |= DirectoryFlags;
PageDirectoryPointerTableEntry *PDPTE = &PDPTEPtr->Entries[Index.PDPTEIndex];
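	/* For 1GB mappings the PDPT entry itself is the leaf: set PageSize and point it directly at the physical frame. */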
if (Type == MapType::OneGB)
{
PDPTE->raw |= Flags;
PDPTE->PageSize = true;
PDPTE->SetAddress((uintptr_t)PhysicalAddress >> 12);
debug("Mapped 1GB page at %p to %p", VirtualAddress, PhysicalAddress);
return;
}
PageDirectoryEntryPtr *PDEPtr = nullptr;
if (!PDPTE->Present)
{
PDEPtr = (PageDirectoryEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageDirectoryEntryPtr) + 1));
memset(PDEPtr, 0, sizeof(PageDirectoryEntryPtr));
PDPTE->Present = true;
PDPTE->SetAddress((uintptr_t)PDEPtr >> 12);
}
else
PDEPtr = (PageDirectoryEntryPtr *)(PDPTE->GetAddress() << 12);
PDPTE->raw |= DirectoryFlags;
PageDirectoryEntry *PDE = &PDEPtr->Entries[Index.PDEIndex];
if (Type == MapType::TwoMB)
{
PDE->raw |= Flags;
PDE->PageSize = true;
PDE->SetAddress((uintptr_t)PhysicalAddress >> 12);
debug("Mapped 2MB page at %p to %p", VirtualAddress, PhysicalAddress);
return;
}
PageTableEntryPtr *PTEPtr = nullptr;
if (!PDE->Present)
{
PTEPtr = (PageTableEntryPtr *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageTableEntryPtr) + 1));
memset(PTEPtr, 0, sizeof(PageTableEntryPtr));
PDE->Present = true;
PDE->SetAddress((uintptr_t)PTEPtr >> 12);
}
else
PTEPtr = (PageTableEntryPtr *)(PDE->GetAddress() << 12);
PDE->raw |= DirectoryFlags;
PageTableEntry *PTE = &PTEPtr->Entries[Index.PTEIndex];
PTE->Present = true;
PTE->raw |= Flags;
PTE->SetAddress((uintptr_t)PhysicalAddress >> 12);
#if defined(a64)
CPU::x64::invlpg(VirtualAddress);
#elif defined(a32)
CPU::x32::invlpg(VirtualAddress);
#elif defined(aa64)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
#ifdef DEBUG
/* https://stackoverflow.com/a/3208376/9352057 */
#define BYTE_TO_BINARY_PATTERN "%c%c%c%c%c%c%c%c"
#define BYTE_TO_BINARY(byte) \
(byte & 0x80 ? '1' : '0'), \
(byte & 0x40 ? '1' : '0'), \
(byte & 0x20 ? '1' : '0'), \
(byte & 0x10 ? '1' : '0'), \
(byte & 0x08 ? '1' : '0'), \
(byte & 0x04 ? '1' : '0'), \
(byte & 0x02 ? '1' : '0'), \
(byte & 0x01 ? '1' : '0')
if (!this->Check(VirtualAddress, (PTFlag)Flags, Type)) // quick workaround just to see where it fails
warn("Failed to map v:%#lx p:%#lx with flags: " BYTE_TO_BINARY_PATTERN, VirtualAddress, PhysicalAddress, BYTE_TO_BINARY(Flags));
#endif
}
void Virtual::Unmap(void *VirtualAddress, MapType Type)
{
SmartLock(this->MemoryLock);
if (!this->Table)
{
error("No page table");
return;
}
PageMapIndexer Index = PageMapIndexer((uintptr_t)VirtualAddress);
PageMapLevel4 *PML4 = &this->Table->Entries[Index.PMLIndex];
if (!PML4->Present)
{
error("Page %#lx not present", PML4->GetAddress());
return;
}
PageDirectoryPointerTableEntryPtr *PDPTEPtr = (PageDirectoryPointerTableEntryPtr *)((uintptr_t)PML4->Address << 12);
PageDirectoryPointerTableEntry *PDPTE = &PDPTEPtr->Entries[Index.PDPTEIndex];
if (!PDPTE->Present)
{
error("Page %#lx not present", PDPTE->GetAddress());
return;
}
if (Type == MapType::OneGB && PDPTE->PageSize)
{
PDPTE->Present = false;
return;
}
PageDirectoryEntryPtr *PDEPtr = (PageDirectoryEntryPtr *)((uintptr_t)PDPTE->Address << 12);
PageDirectoryEntry *PDE = &PDEPtr->Entries[Index.PDEIndex];
if (!PDE->Present)
{
error("Page %#lx not present", PDE->GetAddress());
return;
}
if (Type == MapType::TwoMB && PDE->PageSize)
{
PDE->Present = false;
return;
}
PageTableEntryPtr *PTEPtr = (PageTableEntryPtr *)((uintptr_t)PDE->Address << 12);
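	/* Modify a copy of the PTE and write it back so the Present bit is cleared in a single store. */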
PageTableEntry PTE = PTEPtr->Entries[Index.PTEIndex];
if (!PTE.Present)
{
error("Page %#lx not present", PTE.GetAddress());
return;
}
PTE.Present = false;
PTEPtr->Entries[Index.PTEIndex] = PTE;
#if defined(a64)
CPU::x64::invlpg(VirtualAddress);
#elif defined(a32)
CPU::x32::invlpg(VirtualAddress);
#elif defined(aa64)
asmv("dsb sy");
asmv("tlbi vae1is, %0"
:
: "r"(VirtualAddress)
: "memory");
asmv("dsb sy");
asmv("isb");
#endif
}
Virtual::Virtual(PageTable4 *Table)
{
if (Table)
this->Table = Table;
else
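		/* No table supplied; fall back to the page table the CPU is currently using. */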
this->Table = (PageTable4 *)CPU::PageTable();
}
Virtual::~Virtual() {}
}