Merge remote-tracking branch 'Kernel/master'

EnderIce2
2024-11-20 05:00:33 +02:00
468 changed files with 112800 additions and 1 deletion

Kernel/core/memory/brk.cpp (new file, 104 lines)

@@ -0,0 +1,104 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory/brk.hpp>
#include <memory/virtual.hpp>
#include <memory/vma.hpp>
#include <assert.h>
#include <errno.h>
#include <debug.h>
namespace Memory
{
void *ProgramBreak::brk(void *Address)
{
if (HeapStart == 0x0 || Break == 0x0)
{
error("HeapStart or Break is 0x0");
return (void *)-EAGAIN;
}
/* Get the current program break. */
if (Address == nullptr)
return (void *)Break;
/* Check if the address is valid. */
if ((uintptr_t)Address < HeapStart)
{
debug("Address %#lx is less than HeapStart %#lx", Address, HeapStart);
return (void *)-ENOMEM;
}
Virtual vmm(this->Table);
if ((uintptr_t)Address > Break)
{
/* Allocate more memory. */
ssize_t Pages = TO_PAGES(uintptr_t(Address) - Break);
void *Allocated = vma->RequestPages(Pages);
if (Allocated == nullptr)
return (void *)-ENOMEM;
/* Map the allocated pages. */
for (ssize_t i = 0; i < Pages; i++)
{
void *VirtAddr = (void *)(Break + (i * PAGE_SIZE));
void *PhysAddr = (void *)(uintptr_t(Allocated) + (i * PAGE_SIZE));
debug("Mapping %#lx to %#lx", VirtAddr, PhysAddr);
vmm.Map(VirtAddr, PhysAddr, RW | US);
}
Break = ROUND_UP(uintptr_t(Address), PAGE_SIZE);
debug("Round up %#lx to %#lx", Address, Break);
return Address;
}
/* Free memory. */
ssize_t Pages = TO_PAGES(Break - uintptr_t(Address));
vma->FreePages((void *)Break, Pages);
/* Unmap the freed pages. */
for (ssize_t i = 0; i < Pages; i++)
{
uint64_t Page = Break - (i * 0x1000);
vmm.Remap((void *)Page, (void *)Page, RW);
debug("Unmapping %#lx", Page);
}
Break = (uint64_t)Address;
return (void *)Break;
}
ProgramBreak::ProgramBreak(PageTable *Table, VirtualMemoryArea *vma)
{
assert(Table != nullptr);
assert(vma != nullptr);
debug("+ %#lx", this);
this->Table = Table;
this->vma = vma;
}
ProgramBreak::~ProgramBreak()
{
debug("- %#lx", this);
/* Do nothing because VirtualMemoryArea
will be destroyed later. */
}
}
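
The brk() routine above follows the classic program-break contract: a nullptr argument queries the current break, a higher address grows the heap, and a lower one shrinks it, with failures reported as negative errno values cast to a pointer. Below is a minimal sketch of how a syscall handler could drive it; the Process type and its HeapBreak member are hypothetical names used only for illustration.

```cpp
/* Hypothetical sys_brk handler built on ProgramBreak::brk().
   "Process" and its "HeapBreak" member are illustrative names only. */
uintptr_t sys_brk(Process *proc, void *NewEnd)
{
	void *ret = proc->HeapBreak->brk(NewEnd);

	/* On failure brk() returns a negative errno cast to a pointer;
	   report the unchanged break instead, as Linux's brk(2) does. */
	if ((intptr_t)ret < 0)
		return (uintptr_t)proc->HeapBreak->brk(nullptr);

	return (uintptr_t)ret;
}
```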


@@ -0,0 +1,212 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <acpi.hpp>
#include <debug.h>
#include <elf.h>
#ifdef DEBUG
#include <uart.hpp>
#endif
#include "../../kernel.h"
namespace Memory
{
__no_sanitize("alignment") void Physical::FindBitmapRegion(uintptr_t &BitmapAddress,
size_t &BitmapAddressSize)
{
size_t BitmapSize = (size_t)(bInfo.Memory.Size / PAGE_SIZE) / 8 + 1;
uintptr_t KernelStart = (uintptr_t)bInfo.Kernel.PhysicalBase;
uintptr_t KernelEnd = (uintptr_t)bInfo.Kernel.PhysicalBase + bInfo.Kernel.Size;
uintptr_t SectionsStart = 0x0;
uintptr_t SectionsEnd = 0x0;
uintptr_t Symbols = 0x0;
uintptr_t StringAddress = 0x0;
size_t SymbolSize = 0;
size_t StringSize = 0;
uintptr_t RSDPStart = 0x0;
uintptr_t RSDPEnd = 0x0;
if (bInfo.Kernel.Symbols.Num &&
bInfo.Kernel.Symbols.EntSize &&
bInfo.Kernel.Symbols.Shndx)
{
char *sections = r_cst(char *, bInfo.Kernel.Symbols.Sections);
SectionsStart = (uintptr_t)sections;
SectionsEnd = (uintptr_t)sections + bInfo.Kernel.Symbols.EntSize *
bInfo.Kernel.Symbols.Num;
for (size_t i = 0; i < bInfo.Kernel.Symbols.Num; ++i)
{
Elf_Shdr *sym = (Elf_Shdr *)&sections[bInfo.Kernel.Symbols.EntSize * i];
Elf_Shdr *str = (Elf_Shdr *)&sections[bInfo.Kernel.Symbols.EntSize *
sym->sh_link];
if (sym->sh_type == SHT_SYMTAB &&
str->sh_type == SHT_STRTAB)
{
Symbols = (uintptr_t)sym->sh_addr;
StringAddress = (uintptr_t)str->sh_addr;
SymbolSize = (size_t)sym->sh_size;
StringSize = (size_t)str->sh_size;
break;
}
}
}
#if defined(a86)
if (bInfo.RSDP)
{
RSDPStart = (uintptr_t)bInfo.RSDP;
RSDPEnd = (uintptr_t)bInfo.RSDP + sizeof(BootInfo::RSDPInfo);
#ifdef DEBUG
ACPI::ACPI::ACPIHeader *ACPIPtr;
bool XSDT = false;
if (bInfo.RSDP->Revision >= 2 && bInfo.RSDP->XSDTAddress)
{
ACPIPtr = (ACPI::ACPI::ACPIHeader *)bInfo.RSDP->XSDTAddress;
XSDT = true;
}
else
ACPIPtr = (ACPI::ACPI::ACPIHeader *)(uintptr_t)bInfo.RSDP->RSDTAddress;
if (Memory::Virtual().Check(ACPIPtr))
{
size_t TableSize = ((ACPIPtr->Length - sizeof(ACPI::ACPI::ACPIHeader)) /
(XSDT ? 8 : 4));
debug("There are %d ACPI tables", TableSize);
}
#endif
}
#elif defined(aa64)
#endif
for (uint64_t i = 0; i < bInfo.Memory.Entries; i++)
{
if (bInfo.Memory.Entry[i].Type == Usable)
{
uintptr_t RegionAddress = (uintptr_t)bInfo.Memory.Entry[i].BaseAddress;
uintptr_t RegionSize = bInfo.Memory.Entry[i].Length;
/* We don't want to use the first 1MB of memory. */
if (RegionAddress <= 0xFFFFF)
continue;
if ((BitmapSize + 0x100) > RegionSize)
{
debug("Region %p-%p (%d MiB) is too small for bitmap.",
(void *)RegionAddress,
(void *)(RegionAddress + RegionSize),
TO_MiB(RegionSize));
continue;
}
BitmapAddress = RegionAddress;
BitmapAddressSize = RegionSize;
struct AddrRange
{
uintptr_t Start;
uintptr_t End;
};
auto SortAddresses = [](AddrRange *Array, size_t n)
{
size_t MinimumIndex;
for (size_t i = 0; i < n - 1; i++)
{
MinimumIndex = i;
for (size_t j = i + 1; j < n; j++)
if (Array[j].Start < Array[MinimumIndex].Start)
MinimumIndex = j;
AddrRange tmp = Array[MinimumIndex];
Array[MinimumIndex] = Array[i];
Array[i] = tmp;
}
};
AddrRange PtrArray[] =
{
{KernelStart,
KernelEnd},
{SectionsStart,
SectionsEnd},
{Symbols,
Symbols + SymbolSize},
{StringAddress,
StringAddress + StringSize},
{RSDPStart,
RSDPEnd},
{(uintptr_t)bInfo.Kernel.FileBase,
(uintptr_t)bInfo.Kernel.FileBase + bInfo.Kernel.Size},
{(uintptr_t)bInfo.Modules[0].Address,
(uintptr_t)bInfo.Modules[0].Address + bInfo.Modules[0].Size},
{(uintptr_t)bInfo.Modules[1].Address,
(uintptr_t)bInfo.Modules[1].Address + bInfo.Modules[1].Size},
{(uintptr_t)bInfo.Modules[2].Address,
(uintptr_t)bInfo.Modules[2].Address + bInfo.Modules[2].Size},
{(uintptr_t)bInfo.Modules[3].Address,
(uintptr_t)bInfo.Modules[3].Address + bInfo.Modules[3].Size},
/* MAX_MODULES == 4 */
};
SortAddresses(PtrArray, sizeof(PtrArray) / sizeof(PtrArray[0]));
for (size_t i = 0; i < sizeof(PtrArray) / sizeof(PtrArray[0]); i++)
{
if (PtrArray[i].Start == 0x0)
continue;
uintptr_t Start = PtrArray[i].Start;
uintptr_t End = PtrArray[i].End;
debug("%#lx - %#lx", Start, End);
if (RegionAddress >= Start &&
End <= (RegionAddress + RegionSize))
{
BitmapAddress = End;
BitmapAddressSize = RegionSize - (End - RegionAddress);
}
}
if ((BitmapSize + 0x100) > BitmapAddressSize)
{
debug("Region %p-%p (%d MiB) is too small for bitmap.",
(void *)BitmapAddress,
(void *)(BitmapAddress + BitmapAddressSize),
TO_MiB(BitmapAddressSize));
continue;
}
debug("Found free memory for bitmap: %p (%d MiB)",
(void *)BitmapAddress,
TO_MiB(BitmapAddressSize));
break;
}
}
}
}


@@ -0,0 +1,154 @@
# Xalloc
Xalloc is a custom memory allocator designed for hobby operating systems.
It is written in C++ and provides a simple and efficient way to manage memory in your hobby OS.
#### ❗ This project is still in development and is not ready for use in production environments. ❗
---
## Features
- **Simple API** - A small, straightforward interface for allocating and freeing memory.
- **Efficient** - Uses a free-list to manage memory and is designed to be fast.
- **No dependencies** - Has no external dependencies and is designed to be easy to integrate into your OS.
---
## Getting Started
### Implementing missing functions
You will need to implement the following functions in your OS:
##### Wrapper.cpp
```cpp
extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages)
{
// ...
}
extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages)
{
// ...
}
/* Mandatory only if Xalloc_MapPages is set to true */
extern "C" void Xalloc_MAP_MEMORY(void *VirtualAddress, void *PhysicalAddress, Xsize_t Flags)
{
// ...
}
/* Mandatory only if Xalloc_MapPages is set to true */
extern "C" void Xalloc_UNMAP_MEMORY(void *VirtualAddress)
{
// ...
}
```
##### Xalloc.hpp
```cpp
#define Xalloc_StopOnFail <bool> /* Infinite loop on failure */
#define Xalloc_MapPages <bool> /* Map pages on allocation */
#define Xalloc_PAGE_SIZE <page size> /* <-- Replace with your page size */
#define Xalloc_trace(m, ...) <trace function>
#define Xalloc_warn(m, ...) <warning function>
#define Xalloc_err(m, ...) <error function>
#define XallocV1_def <define a lock> /* e.g. std::mutex Xalloc_lock; */
#define XallocV1_lock <lock function>
#define XallocV1_unlock <unlock function>
/* Same as above */
#define XallocV2_def <define a lock>
#define XallocV2_lock <lock function>
#define XallocV2_unlock <unlock function>
```
### Typical usage
```cpp
#include "Xalloc.hpp"
Xalloc::V1 *XallocV1Allocator = nullptr;
int main()
{
/* Virtual Base User SMAP */
XallocV1Allocator = new Xalloc::V1((void *)0xFFFFA00000000000, false, false);
void *p = XallocV1Allocator->malloc(1234);
/* ... */
XallocV1Allocator->free(p);
delete XallocV1Allocator;
return 0;
}
```
or
```cpp
#include "Xalloc.hpp"
int main()
{
/* Virtual Base User SMAP */
Xalloc::V1 XallocV1Allocator((void *)0xFFFFA00000000000, false, false);
void *p = XallocV1Allocator.malloc(1234);
/* ... */
XallocV1Allocator.free(p);
return 0;
}
```
---
## API
### Xalloc::V1
```cpp
void *malloc(Xsize_t Size);
```
Allocates a block of memory of size `Size` bytes.
If `Size` is 0, then `nullptr` is returned.
- `Size` - The size of the block to allocate in bytes.
<br><br>
```cpp
void free(void *Address);
```
Frees the memory block pointed to by `Address`.
If `Address` is `nullptr`, then no operation is performed.
- `Address` - The address of the memory block to free.
<br><br>
```cpp
void *calloc(Xsize_t NumberOfBlocks, Xsize_t Size);
```
Allocates a block of memory for an array of `NumberOfBlocks` elements, each of them `Size` bytes long.
If `NumberOfBlocks` or `Size` is 0, then `nullptr` is returned.
- `NumberOfBlocks` - The number of elements to allocate.
- `Size` - The size of each element in bytes.
<br><br>
```cpp
void *realloc(void *Address, Xsize_t Size);
```
Changes the size of the memory block pointed to by `Address` to `Size` bytes.
If `Address` is `nullptr`, then the call is equivalent to `malloc(Size)`.
If `Size` is equal to zero, and `Address` is not `nullptr`, then the call is equivalent to `free(Address)`.
- `Address` - The address of the memory block to resize.
- `Size` - The new size of the memory block in bytes.
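
A short combined sketch of these calls (the sizes are arbitrary, and the allocator instance is assumed to be constructed as in the usage examples above):
```cpp
#include "Xalloc.hpp"

void Example(Xalloc::V1 &Allocator)
{
	/* 8 elements of 16 bytes each; returns nullptr if either argument is 0. */
	void *Table = Allocator.calloc(8, 16);

	/* Resize the block to 256 bytes. */
	Table = Allocator.realloc(Table, 256);

	/* Release it; free(nullptr) would simply be a no-op. */
	Allocator.free(Table);
}
```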
---
## To-do
- [ ] Multiple free-lists for different block sizes


@@ -0,0 +1,40 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include "Xalloc.hpp"
#include <memory.hpp>
extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages)
{
return KernelAllocator.RequestPages(Pages);
}
extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages)
{
KernelAllocator.FreePages(Address, Pages);
}
extern "C" void Xalloc_MAP_MEMORY(void *VirtualAddress, void *PhysicalAddress, Xsize_t Flags)
{
Memory::Virtual(KernelPageTable).Map(VirtualAddress, PhysicalAddress, Flags);
}
extern "C" void Xalloc_UNMAP_MEMORY(void *VirtualAddress)
{
Memory::Virtual(KernelPageTable).Unmap(VirtualAddress);
}


@@ -0,0 +1,236 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef __FENNIX_KERNEL_Xalloc_H__
#define __FENNIX_KERNEL_Xalloc_H__
#include <memory.hpp>
#include <lock.hpp>
#include <debug.h>
typedef __UINT8_TYPE__ Xuint8_t;
typedef __SIZE_TYPE__ Xsize_t;
typedef __UINTPTR_TYPE__ Xuintptr_t;
#define Xalloc_StopOnFail true
#define Xalloc_MapPages false
#define Xalloc_PAGE_SIZE PAGE_SIZE
#define Xalloc_trace(m, ...) trace(m, ##__VA_ARGS__)
#define Xalloc_warn(m, ...) warn(m, ##__VA_ARGS__)
#define Xalloc_err(m, ...) error(m, ##__VA_ARGS__)
#define XallocV1_def NewLock(XallocV1Lock)
#define XallocV1_lock XallocV1Lock.Lock(__FUNCTION__)
#define XallocV1_unlock XallocV1Lock.Unlock()
#define XallocV2_def NewLock(XallocV2Lock)
#define XallocV2_lock XallocV2Lock.Lock(__FUNCTION__)
#define XallocV2_unlock XallocV2Lock.Unlock()
namespace Xalloc
{
class V1
{
private:
void *BaseVirtualAddress = nullptr;
void *FirstBlock = nullptr;
void *LastBlock = nullptr;
bool UserMapping = false;
bool SMAPUsed = false;
public:
/** @brief Execute "stac" instruction if the kernel has SMAP enabled */
void Xstac();
/** @brief Execute "clac" instruction if the kernel has SMAP enabled */
void Xclac();
/**
* @brief Arrange the blocks to optimize the memory usage
* The allocator is not arranged by default
* to avoid performance issues.
* This function will defragment the memory
* and free the unused blocks.
*
* You should call this function when the
* kernel is idle or when it is not using
* the allocator.
*/
void Arrange();
/**
* @brief Allocate a new memory block
*
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated block.
*/
void *malloc(Xsize_t Size);
/**
* @brief Free a previously allocated block
*
* @param Address Address of the block to free.
*/
void free(void *Address);
/**
* @brief Allocate a new memory block
*
* @param NumberOfBlocks Number of blocks to allocate.
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated block.
*/
void *calloc(Xsize_t NumberOfBlocks, Xsize_t Size);
/**
* @brief Reallocate a previously allocated block
*
* @param Address Address of the block to reallocate.
* @param Size New size of the block.
* @return void* Pointer to the reallocated block.
*/
void *realloc(void *Address, Xsize_t Size);
/**
* @brief Construct a new Allocator object
*
* @param BaseVirtualAddress Virtual address to map the pages.
* @param UserMode Map the new pages with USER flag?
* @param SMAPEnabled Does the kernel have Supervisor Mode Access Prevention enabled?
*/
V1(void *BaseVirtualAddress, bool UserMode, bool SMAPEnabled);
/**
* @brief Destroy the Allocator object
*
*/
~V1();
};
class V2
{
private:
class Block
{
public:
int Sanity = 0xA110C;
Block *Next = nullptr;
bool IsFree = true;
V2 *ctx = nullptr;
Xuint8_t *Data = nullptr;
Xsize_t DataSize = 0;
void Check();
Block(Xsize_t Size, V2 *ctx);
~Block();
void *operator new(Xsize_t);
void operator delete(void *Address);
} __attribute__((packed, aligned((16))));
/* The base address of the virtual memory */
Xuintptr_t BaseVirtualAddress = 0x0;
/* The size of the heap */
Xsize_t HeapSize = 0x0;
/* The used size of the heap */
Xsize_t HeapUsed = 0x0;
Block *FirstBlock = nullptr;
Xuint8_t *AllocateHeap(Xsize_t Size);
void FreeHeap(Xuint8_t *At, Xsize_t Size);
Xsize_t Align(Xsize_t Size);
void *FindFreeBlock(Xsize_t Size,
Block *&CurrentBlock);
public:
/**
* Arrange the blocks to optimize the memory
* usage.
* The allocator is not arranged by default
* to avoid performance issues.
* This function will defragment the memory
* and free the unused blocks.
*
* You should call this function when the
* kernel is idle or when it is not using the
* allocator.
*/
void Arrange();
/**
* Allocate a new memory block
*
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated
* block.
*/
void *malloc(Xsize_t Size);
/**
* Free a previously allocated block
*
* @param Address Address of the block to
* free.
*/
void free(void *Address);
/**
* Allocate a new memory block
*
* @param NumberOfBlocks Number of blocks
* to allocate.
* @param Size Size of the block to allocate.
* @return void* Pointer to the allocated
* block.
*/
void *calloc(Xsize_t NumberOfBlocks,
Xsize_t Size);
/**
* Reallocate a previously allocated block
*
* @param Address Address of the block
* to reallocate.
* @param Size New size of the block.
* @return void* Pointer to the reallocated
* block.
*/
void *realloc(void *Address, Xsize_t Size);
/**
* Construct a new Allocator object
*
* @param VirtualBase Virtual address
* to map the pages.
*/
V2(void *VirtualBase);
/**
* Destroy the Allocator object
*/
~V2();
friend class Block;
};
}
#endif // !__FENNIX_KERNEL_Xalloc_H__


@@ -0,0 +1,290 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include "Xalloc.hpp"
XallocV1_def;
#define XALLOC_CONCAT(x, y) x##y
#define XStoP(d) (((d) + PAGE_SIZE - 1) / PAGE_SIZE)
#define XPtoS(d) ((d)*PAGE_SIZE)
#define Xalloc_BlockSanityKey 0xA110C
extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages);
extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages);
extern "C" void Xalloc_MAP_MEMORY(void *VirtualAddress, void *PhysicalAddress, Xsize_t Flags);
extern "C" void Xalloc_UNMAP_MEMORY(void *VirtualAddress);
// TODO: Change memcpy with an optimized version
void *Xmemcpy(void *__restrict__ Destination, const void *__restrict__ Source, Xsize_t Length)
{
unsigned char *dst = (unsigned char *)Destination;
const unsigned char *src = (const unsigned char *)Source;
for (Xsize_t i = 0; i < Length; i++)
dst[i] = src[i];
return Destination;
}
// TODO: Change memset with an optimized version
void *Xmemset(void *__restrict__ Destination, int Data, Xsize_t Length)
{
unsigned char *Buffer = (unsigned char *)Destination;
for (Xsize_t i = 0; i < Length; i++)
Buffer[i] = (unsigned char)Data;
return Destination;
}
namespace Xalloc
{
class Block
{
public:
void *Address = nullptr;
int Sanity = Xalloc_BlockSanityKey;
Xsize_t Size = 0;
Block *Next = nullptr;
Block *Last = nullptr;
bool IsFree = true;
bool Check()
{
if (this->Sanity != Xalloc_BlockSanityKey)
return false;
return true;
}
Block(Xsize_t Size)
{
this->Address = Xalloc_REQUEST_PAGES(XStoP(Size + 1));
this->Size = Size;
Xmemset(this->Address, 0, Size);
}
~Block()
{
Xalloc_FREE_PAGES(this->Address, XStoP(this->Size + 1));
}
/**
* @brief Overload new operator to allocate memory from the heap
* @param Size Unused
* @return void* Pointer to the allocated memory
*/
void *operator new(Xsize_t Size)
{
void *ptr = Xalloc_REQUEST_PAGES(XStoP(sizeof(Block)));
return ptr;
(void)(Size);
}
/**
* @brief Overload delete operator to free memory from the heap
* @param Address Pointer to the memory to free
*/
void operator delete(void *Address)
{
Xalloc_FREE_PAGES(Address, XStoP(sizeof(Block)));
}
} __attribute__((packed, aligned((16))));
class SmartSMAPClass
{
private:
V1 *allocator = nullptr;
public:
SmartSMAPClass(V1 *allocator)
{
this->allocator = allocator;
this->allocator->Xstac();
}
~SmartSMAPClass() { this->allocator->Xclac(); }
};
#define SmartSMAP SmartSMAPClass XALLOC_CONCAT(SmartSMAP##_, __COUNTER__)(this)
void V1::Xstac()
{
if (this->SMAPUsed)
{
#if defined(a86)
asm volatile("stac" ::
: "cc");
#endif
}
}
void V1::Xclac()
{
if (this->SMAPUsed)
{
#if defined(a86)
asm volatile("clac" ::
: "cc");
#endif
}
}
void V1::Arrange()
{
Xalloc_err("Arrange() is not implemented yet!");
}
void *V1::malloc(Xsize_t Size)
{
if (Size == 0)
{
Xalloc_warn("Attempted to allocate 0 bytes!");
return nullptr;
}
SmartSMAP;
XallocV1_lock;
if (this->FirstBlock == nullptr)
{
this->FirstBlock = new Block(Size);
((Block *)this->FirstBlock)->IsFree = false;
XallocV1_unlock;
return ((Block *)this->FirstBlock)->Address;
}
Block *CurrentBlock = ((Block *)this->FirstBlock);
while (CurrentBlock != nullptr)
{
if (!CurrentBlock->Check())
{
Xalloc_err("Block %#lx has an invalid sanity key! (%#x != %#x)",
(Xsize_t)CurrentBlock, CurrentBlock->Sanity, Xalloc_BlockSanityKey);
while (Xalloc_StopOnFail)
;
}
else if (CurrentBlock->IsFree && CurrentBlock->Size >= Size)
{
CurrentBlock->IsFree = false;
Xmemset(CurrentBlock->Address, 0, Size);
XallocV1_unlock;
return CurrentBlock->Address;
}
CurrentBlock = CurrentBlock->Next;
}
CurrentBlock = ((Block *)this->FirstBlock);
while (CurrentBlock->Next != nullptr)
CurrentBlock = CurrentBlock->Next;
CurrentBlock->Next = new Block(Size);
((Block *)CurrentBlock->Next)->Last = CurrentBlock;
((Block *)CurrentBlock->Next)->IsFree = false;
XallocV1_unlock;
return ((Block *)CurrentBlock->Next)->Address;
}
void V1::free(void *Address)
{
if (Address == nullptr)
{
Xalloc_warn("Attempted to free a null pointer!");
return;
}
SmartSMAP;
XallocV1_lock;
Block *CurrentBlock = ((Block *)this->FirstBlock);
while (CurrentBlock != nullptr)
{
if (!CurrentBlock->Check())
{
Xalloc_err("Block %#lx has an invalid sanity key! (%#x != %#x)",
(Xsize_t)CurrentBlock, CurrentBlock->Sanity, Xalloc_BlockSanityKey);
while (Xalloc_StopOnFail)
;
}
else if (CurrentBlock->Address == Address)
{
if (CurrentBlock->IsFree)
{
Xalloc_warn("Attempted to free an already freed pointer!");
XallocV1_unlock;
return;
}
CurrentBlock->IsFree = true;
XallocV1_unlock;
return;
}
CurrentBlock = CurrentBlock->Next;
}
Xalloc_err("Invalid address %#lx.", Address);
XallocV1_unlock;
}
void *V1::calloc(Xsize_t NumberOfBlocks, Xsize_t Size)
{
if (NumberOfBlocks == 0 || Size == 0)
{
Xalloc_warn("The %s%s%s is 0!",
NumberOfBlocks == 0 ? "NumberOfBlocks" : "",
NumberOfBlocks == 0 && Size == 0 ? " and " : "",
Size == 0 ? "Size" : "");
return nullptr;
}
return this->malloc(NumberOfBlocks * Size);
}
void *V1::realloc(void *Address, Xsize_t Size)
{
if (Address == nullptr)
return this->malloc(Size);
if (Size == 0)
{
this->free(Address);
return nullptr;
}
// SmartSMAP;
// XallocV1_lock;
// ...
// XallocV1_unlock;
// TODO: Implement realloc
this->free(Address);
return this->malloc(Size);
}
V1::V1(void *BaseVirtualAddress, bool UserMode, bool SMAPEnabled)
{
SmartSMAP;
XallocV1_lock;
this->SMAPUsed = SMAPEnabled;
this->UserMapping = UserMode;
this->BaseVirtualAddress = BaseVirtualAddress;
XallocV1_unlock;
}
V1::~V1()
{
SmartSMAP;
XallocV1_lock;
Xalloc_trace("Destructor not implemented yet.");
XallocV1_unlock;
}
}


@@ -0,0 +1,281 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include "Xalloc.hpp"
XallocV2_def;
#define XALLOC_CONCAT(x, y) x##y
#define XStoP(d) (((d) + PAGE_SIZE - 1) / PAGE_SIZE)
#define XPtoS(d) ((d)*PAGE_SIZE)
extern "C" void *Xalloc_REQUEST_PAGES(Xsize_t Pages);
extern "C" void Xalloc_FREE_PAGES(void *Address, Xsize_t Pages);
extern "C" void Xalloc_MAP_MEMORY(void *VirtualAddress,
void *PhysicalAddress,
Xsize_t Flags);
extern "C" void Xalloc_UNMAP_MEMORY(void *VirtualAddress);
#define Xalloc_BlockSanityKey 0xA110C
/*
[ IN DEVELOPMENT ]
*/
namespace Xalloc
{
void V2::Block::Check()
{
if (unlikely(this->Sanity != Xalloc_BlockSanityKey))
{
Xalloc_err("Block %#lx has an invalid sanity key! (%#x != %#x)",
this, this->Sanity, Xalloc_BlockSanityKey);
while (Xalloc_StopOnFail)
;
}
}
V2::Block::Block(Xsize_t Size, V2 *ctx)
{
this->ctx = ctx;
this->Data = ctx->AllocateHeap(Size);
this->DataSize = Size;
}
V2::Block::~Block()
{
}
void *V2::Block::operator new(Xsize_t)
{
constexpr Xsize_t bPgs = XStoP(sizeof(Block));
void *ptr = Xalloc_REQUEST_PAGES(bPgs);
/* TODO: Do something with the rest of
the allocated memory */
return ptr;
}
void V2::Block::operator delete(void *Address)
{
constexpr Xsize_t bPgs = XStoP(sizeof(Block));
Xalloc_FREE_PAGES(Address, bPgs);
}
/* ========================================= */
Xuint8_t *V2::AllocateHeap(Xsize_t Size)
{
Size = this->Align(Size);
Xsize_t Pages = XStoP(Size);
Xuint8_t *FinalAddress = 0x0;
if (this->HeapUsed + Size >= this->HeapSize)
{
void *Address = Xalloc_REQUEST_PAGES(Pages);
void *VirtualAddress = (void *)(this->BaseVirtualAddress + this->HeapSize);
if (Xalloc_MapPages)
{
for (Xsize_t i = 0; i < Pages; i++)
{
Xuintptr_t Page = i * Xalloc_PAGE_SIZE;
void *vAddress = (void *)((Xuintptr_t)VirtualAddress + Page);
Xalloc_MAP_MEMORY(vAddress, (void *)((Xuintptr_t)Address + Page), 0x3);
}
}
this->HeapSize += XPtoS(Pages);
FinalAddress = (Xuint8_t *)VirtualAddress;
}
else
FinalAddress = (Xuint8_t *)(this->BaseVirtualAddress + this->HeapUsed);
this->HeapUsed += Size;
return (uint8_t *)FinalAddress;
}
void V2::FreeHeap(Xuint8_t *At, Xsize_t Size)
{
Xsize_t Pages = XStoP(Size);
if (Xalloc_MapPages)
{
for (Xsize_t i = 0; i < Pages; i++)
{
Xuintptr_t Page = i * Xalloc_PAGE_SIZE;
void *VirtualAddress = (void *)((Xuintptr_t)At + Page);
Xalloc_UNMAP_MEMORY(VirtualAddress);
}
}
Xalloc_FREE_PAGES(At, Pages);
this->HeapUsed -= Size;
}
Xsize_t V2::Align(Xsize_t Size)
{
return (Size + 0xF) & ~0xF;
}
void *V2::FindFreeBlock(Xsize_t Size, Block *&CurrentBlock)
{
if (this->FirstBlock == nullptr)
{
this->FirstBlock = new Block(Size, this);
this->FirstBlock->IsFree = false;
return this->FirstBlock->Data;
}
while (true)
{
CurrentBlock->Check();
/* FIXME: This will waste a lot of space
need better algorithm */
if (CurrentBlock->IsFree &&
CurrentBlock->DataSize >= Size)
{
CurrentBlock->IsFree = false;
return CurrentBlock->Data;
}
if (CurrentBlock->Next == nullptr)
break;
CurrentBlock = CurrentBlock->Next;
}
return nullptr;
}
void V2::Arrange()
{
Xalloc_err("Arrange() is not implemented yet!");
}
void *V2::malloc(Xsize_t Size)
{
if (Size == 0)
{
Xalloc_warn("Attempted to allocate 0 bytes!");
return nullptr;
}
XallocV2_lock;
Block *CurrentBlock = this->FirstBlock;
void *ret = this->FindFreeBlock(Size, CurrentBlock);
if (ret)
{
XallocV2_unlock;
return ret;
}
CurrentBlock->Next = new Block(Size, this);
CurrentBlock->Next->IsFree = false;
XallocV2_unlock;
return CurrentBlock->Next->Data;
}
void V2::free(void *Address)
{
if (Address == nullptr)
{
Xalloc_warn("Attempted to free a null pointer!");
return;
}
XallocV2_lock;
Block *CurrentBlock = ((Block *)this->FirstBlock);
while (CurrentBlock != nullptr)
{
CurrentBlock->Check();
if (CurrentBlock->Data == Address)
{
if (CurrentBlock->IsFree)
Xalloc_warn("Attempted to free an already freed block! %#lx", Address);
CurrentBlock->IsFree = true;
XallocV2_unlock;
return;
}
CurrentBlock = CurrentBlock->Next;
}
Xalloc_err("Invalid address %#lx.", Address);
XallocV2_unlock;
}
void *V2::calloc(Xsize_t NumberOfBlocks, Xsize_t Size)
{
if (NumberOfBlocks == 0 || Size == 0)
{
Xalloc_warn("The %s%s%s is 0!",
NumberOfBlocks == 0 ? "NumberOfBlocks" : "",
NumberOfBlocks == 0 && Size == 0 ? " and " : "",
Size == 0 ? "Size" : "");
return nullptr;
}
return this->malloc(NumberOfBlocks * Size);
}
void *V2::realloc(void *Address, Xsize_t Size)
{
if (Address == nullptr && Size != 0)
return this->malloc(Size);
if (Size == 0)
{
this->free(Address);
return nullptr;
}
// XallocV2_lock;
// ...
// XallocV2_unlock;
// TODO: Implement realloc
static int once = 0;
if (!once++)
Xalloc_trace("realloc is stub!");
this->free(Address);
return this->malloc(Size);
}
V2::V2(void *VirtualBase)
{
if (VirtualBase == 0x0 && Xalloc_MapPages)
{
Xalloc_err("VirtualBase is 0x0 and Xalloc_MapPages is true!");
while (true)
;
}
XallocV2_lock;
this->BaseVirtualAddress = Xuintptr_t(VirtualBase);
XallocV2_unlock;
}
V2::~V2()
{
XallocV2_lock;
Xalloc_trace("Destructor not implemented yet.");
XallocV2_unlock;
}
}


@@ -0,0 +1,796 @@
#include "liballoc_1_1.h"
#include <convert.h>
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wsign-conversion"
/** Durand's Amazing Super Duper Memory functions. */
#define VERSION "1.1"
#define ALIGNMENT 16ul // 4ul ///< This is the byte alignment that memory must be allocated on. IMPORTANT for GTK and other stuff.
#define ALIGN_TYPE char /// unsigned char[16] /// unsigned short
#define ALIGN_INFO sizeof(ALIGN_TYPE) * 16 ///< Alignment information is stored right before the pointer. This is the number of bytes of information stored there.
#define USE_CASE1
#define USE_CASE2
#define USE_CASE3
#define USE_CASE4
#define USE_CASE5
/** This macro will conveniently align our pointer upwards */
#define ALIGN(ptr) \
if (ALIGNMENT > 1) \
{ \
uintptr_t diff; \
ptr = (void *)((uintptr_t)ptr + ALIGN_INFO); \
diff = (uintptr_t)ptr & (ALIGNMENT - 1); \
if (diff != 0) \
{ \
diff = ALIGNMENT - diff; \
ptr = (void *)((uintptr_t)ptr + diff); \
} \
*((ALIGN_TYPE *)((uintptr_t)ptr - ALIGN_INFO)) = \
diff + ALIGN_INFO; \
}
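/* ALIGN stores the total number of bytes the pointer was advanced
(diff + ALIGN_INFO) in the ALIGN_TYPE slot just before the returned pointer;
UNALIGN below reads that value back to recover the original allocation address. */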
#define UNALIGN(ptr) \
if (ALIGNMENT > 1) \
{ \
uintptr_t diff = *((ALIGN_TYPE *)((uintptr_t)ptr - ALIGN_INFO)); \
if (diff < (ALIGNMENT + ALIGN_INFO)) \
{ \
ptr = (void *)((uintptr_t)ptr - diff); \
} \
}
#define LIBALLOC_MAGIC 0xc001c0de
#define LIBALLOC_DEAD 0xdeaddead
// #define LIBALLOCDEBUG 1
#define LIBALLOCINFO 1
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
// #include <stdio.h>
// #include <stdlib.h>
#include <debug.h>
// #define FLUSH() fflush(stdout)
#define FLUSH()
#define atexit(x)
#define printf(m, ...) trace(m, ##__VA_ARGS__)
#endif
/** A structure found at the top of all system allocated
* memory blocks. It details the usage of the memory block.
*/
struct liballoc_major
{
struct liballoc_major *prev; ///< Linked list information.
struct liballoc_major *next; ///< Linked list information.
unsigned int pages; ///< The number of pages in the block.
unsigned int size; ///< The size of the block in bytes.
unsigned int usage; ///< The number of bytes used in the block.
struct liballoc_minor *first; ///< A pointer to the first allocated memory in the block.
};
/** This is a structure found at the beginning of all
* sections in a major block which were allocated by a
* malloc, calloc, realloc call.
*/
struct liballoc_minor
{
struct liballoc_minor *prev; ///< Linked list information.
struct liballoc_minor *next; ///< Linked list information.
struct liballoc_major *block; ///< The owning block. A pointer to the major structure.
unsigned int magic; ///< A magic number to identify correctness.
unsigned int size; ///< The size of the memory allocated. Could be 1 byte or more.
unsigned int req_size; ///< The size of memory requested.
};
static struct liballoc_major *l_memRoot = NULL; ///< The root memory block acquired from the system.
static struct liballoc_major *l_bestBet = NULL; ///< The major with the most free memory.
static unsigned int l_pageSize = 4096; ///< The size of an individual page. Set up in liballoc_init.
static unsigned int l_pageCount = 16; ///< The number of pages to request per chunk. Set up in liballoc_init.
static unsigned long long l_allocated = 0; ///< Running total of allocated memory.
static unsigned long long l_inuse = 0; ///< Running total of used memory.
static long long l_warningCount = 0; ///< Number of warnings encountered
static long long l_errorCount = 0; ///< Number of actual errors
static long long l_possibleOverruns = 0; ///< Number of possible overruns
// *********** HELPER FUNCTIONS *******************************
__no_sanitize("undefined") static void *liballoc_memset(void *s, int c, size_t n)
{
return memset(s, c, n);
unsigned int i;
for (i = 0; i < n; i++)
((char *)s)[i] = c;
return s;
}
__no_sanitize("undefined") static void *liballoc_memcpy(void *s1, const void *s2, size_t n)
{
return memcpy(s1, s2, n);
char *cdest;
char *csrc;
unsigned int *ldest = (unsigned int *)s1;
unsigned int *lsrc = (unsigned int *)s2;
while (n >= sizeof(unsigned int))
{
*ldest++ = *lsrc++;
n -= sizeof(unsigned int);
}
cdest = (char *)ldest;
csrc = (char *)lsrc;
while (n > 0)
{
*cdest++ = *csrc++;
n -= 1;
}
return s1;
}
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
__no_sanitize("undefined") static void liballoc_dump()
{
#ifdef LIBALLOCDEBUG
struct liballoc_major *maj = l_memRoot;
struct liballoc_minor *min = NULL;
#endif
printf("liballoc: ------ Memory data ---------------\n");
printf("liballoc: System memory allocated: %i bytes\n", l_allocated);
printf("liballoc: Memory in used (malloc'ed): %i bytes\n", l_inuse);
printf("liballoc: Warning count: %i\n", l_warningCount);
printf("liballoc: Error count: %i\n", l_errorCount);
printf("liballoc: Possible overruns: %i\n", l_possibleOverruns);
#ifdef LIBALLOCDEBUG
while (maj != NULL)
{
printf("liballoc: %lx: total = %i, used = %i\n",
maj,
maj->size,
maj->usage);
min = maj->first;
while (min != NULL)
{
printf("liballoc: %lx: %i bytes\n",
min,
min->size);
min = min->next;
}
maj = maj->next;
}
#endif
FLUSH();
}
#endif
// ***************************************************************
__no_sanitize("undefined") static struct liballoc_major *allocate_new_page(unsigned int size)
{
unsigned int st;
struct liballoc_major *maj;
// This is how much space is required.
st = size + sizeof(struct liballoc_major);
st += sizeof(struct liballoc_minor);
// Perfect amount of space?
if ((st % l_pageSize) == 0)
st = st / (l_pageSize);
else
st = st / (l_pageSize) + 1;
// No, add the buffer.
// Make sure it's >= the minimum size.
if (st < l_pageCount)
st = l_pageCount;
maj = (struct liballoc_major *)liballoc_alloc(st);
if (maj == NULL)
{
l_warningCount += 1;
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: WARNING: liballoc_alloc( %i ) return NULL\n", st);
FLUSH();
#endif
return NULL; // uh oh, we ran out of memory.
}
maj->prev = NULL;
maj->next = NULL;
maj->pages = st;
maj->size = st * l_pageSize;
maj->usage = sizeof(struct liballoc_major);
maj->first = NULL;
l_allocated += maj->size;
#ifdef LIBALLOCDEBUG
printf("liballoc: Resource allocated %lx of %i pages (%i bytes) for %i size.\n", maj, st, maj->size, size);
printf("liballoc: Total memory usage = %i KB\n", (int)((l_allocated / (1024))));
FLUSH();
#endif
return maj;
}
__no_sanitize("undefined") void *PREFIX(malloc)(size_t req_size)
{
int startedBet = 0;
unsigned long long bestSize = 0;
void *p = NULL;
uintptr_t diff;
struct liballoc_major *maj;
struct liballoc_minor *min;
struct liballoc_minor *new_min;
unsigned long size = req_size;
// For alignment, we adjust size so there's enough space to align.
if (ALIGNMENT > 1)
{
size += ALIGNMENT + ALIGN_INFO;
}
// So, ideally, we really want an alignment of 0 or 1 in order
// to save space.
liballoc_lock();
if (size == 0)
{
l_warningCount += 1;
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: WARNING: alloc( 0 ) called from %lx\n",
__builtin_return_address(0));
FLUSH();
#endif
liballoc_unlock();
return PREFIX(malloc)(1);
}
if (l_memRoot == NULL)
{
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
#ifdef LIBALLOCDEBUG
printf("liballoc: initialization of liballoc " VERSION "\n");
#endif
atexit(liballoc_dump);
FLUSH();
#endif
// This is the first time we are being used.
l_memRoot = allocate_new_page(size);
if (l_memRoot == NULL)
{
liballoc_unlock();
#ifdef LIBALLOCDEBUG
printf("liballoc: initial l_memRoot initialization failed\n", p);
FLUSH();
#endif
return NULL;
}
#ifdef LIBALLOCDEBUG
printf("liballoc: set up first memory major %lx\n", l_memRoot);
FLUSH();
#endif
}
#ifdef LIBALLOCDEBUG
printf("liballoc: %lx PREFIX(malloc)( %i ): ",
__builtin_return_address(0),
size);
FLUSH();
#endif
// Now we need to bounce through every major and find enough space....
maj = l_memRoot;
startedBet = 0;
// Start at the best bet....
if (l_bestBet != NULL)
{
bestSize = l_bestBet->size - l_bestBet->usage;
if (bestSize > (size + sizeof(struct liballoc_minor)))
{
maj = l_bestBet;
startedBet = 1;
}
}
while (maj != NULL)
{
diff = maj->size - maj->usage;
// free memory in the block
if (bestSize < diff)
{
// Hmm.. this one has more memory than our bestBet. Remember!
l_bestBet = maj;
bestSize = diff;
}
#ifdef USE_CASE1
// CASE 1: There is not enough space in this major block.
if (diff < (size + sizeof(struct liballoc_minor)))
{
#ifdef LIBALLOCDEBUG
printf("CASE 1: Insufficient space in block %lx\n", maj);
FLUSH();
#endif
// Another major block next to this one?
if (maj->next != NULL)
{
maj = maj->next; // Hop to that one.
continue;
}
if (startedBet == 1) // If we started at the best bet,
{ // let's start all over again.
maj = l_memRoot;
startedBet = 0;
continue;
}
// Create a new major block next to this one and...
maj->next = allocate_new_page(size); // next one will be okay.
if (maj->next == NULL)
break; // no more memory.
maj->next->prev = maj;
maj = maj->next;
// .. fall through to CASE 2 ..
}
#endif
#ifdef USE_CASE2
// CASE 2: It's a brand new block.
if (maj->first == NULL)
{
maj->first = (struct liballoc_minor *)((uintptr_t)maj + sizeof(struct liballoc_major));
maj->first->magic = LIBALLOC_MAGIC;
maj->first->prev = NULL;
maj->first->next = NULL;
maj->first->block = maj;
maj->first->size = size;
maj->first->req_size = req_size;
maj->usage += size + sizeof(struct liballoc_minor);
l_inuse += size;
p = (void *)((uintptr_t)(maj->first) + sizeof(struct liballoc_minor));
ALIGN(p);
#ifdef LIBALLOCDEBUG
printf("CASE 2: returning %lx\n", p);
FLUSH();
#endif
liballoc_unlock(); // release the lock
return p;
}
#endif
#ifdef USE_CASE3
// CASE 3: Block in use and enough space at the start of the block.
diff = (uintptr_t)(maj->first);
diff -= (uintptr_t)maj;
diff -= sizeof(struct liballoc_major);
if (diff >= (size + sizeof(struct liballoc_minor)))
{
// Yes, space in front. Squeeze in.
maj->first->prev = (struct liballoc_minor *)((uintptr_t)maj + sizeof(struct liballoc_major));
maj->first->prev->next = maj->first;
maj->first = maj->first->prev;
maj->first->magic = LIBALLOC_MAGIC;
maj->first->prev = NULL;
maj->first->block = maj;
maj->first->size = size;
maj->first->req_size = req_size;
maj->usage += size + sizeof(struct liballoc_minor);
l_inuse += size;
p = (void *)((uintptr_t)(maj->first) + sizeof(struct liballoc_minor));
ALIGN(p);
#ifdef LIBALLOCDEBUG
printf("CASE 3: returning %lx\n", p);
FLUSH();
#endif
liballoc_unlock(); // release the lock
return p;
}
#endif
#ifdef USE_CASE4
// CASE 4: There is enough space in this block. But is it contiguous?
min = maj->first;
// Looping within the block now...
while (min != NULL)
{
// CASE 4.1: End of minors in a block. Space from last and end?
if (min->next == NULL)
{
// the rest of this block is free... is it big enough?
diff = (uintptr_t)(maj) + maj->size;
diff -= (uintptr_t)min;
diff -= sizeof(struct liballoc_minor);
diff -= min->size;
// minus already existing usage..
if (diff >= (size + sizeof(struct liballoc_minor)))
{
// yay....
min->next = (struct liballoc_minor *)((uintptr_t)min + sizeof(struct liballoc_minor) + min->size);
min->next->prev = min;
min = min->next;
min->next = NULL;
min->magic = LIBALLOC_MAGIC;
min->block = maj;
min->size = size;
min->req_size = req_size;
maj->usage += size + sizeof(struct liballoc_minor);
l_inuse += size;
p = (void *)((uintptr_t)min + sizeof(struct liballoc_minor));
ALIGN(p);
#ifdef LIBALLOCDEBUG
printf("CASE 4.1: returning %lx\n", p);
FLUSH();
#endif
liballoc_unlock(); // release the lock
return p;
}
}
// CASE 4.2: Is there space between two minors?
if (min->next != NULL)
{
// is the difference between here and next big enough?
diff = (uintptr_t)(min->next);
diff -= (uintptr_t)min;
diff -= sizeof(struct liballoc_minor);
diff -= min->size;
// minus our existing usage.
if (diff >= (size + sizeof(struct liballoc_minor)))
{
// yay......
new_min = (struct liballoc_minor *)((uintptr_t)min + sizeof(struct liballoc_minor) + min->size);
new_min->magic = LIBALLOC_MAGIC;
new_min->next = min->next;
new_min->prev = min;
new_min->size = size;
new_min->req_size = req_size;
new_min->block = maj;
min->next->prev = new_min;
min->next = new_min;
maj->usage += size + sizeof(struct liballoc_minor);
l_inuse += size;
p = (void *)((uintptr_t)new_min + sizeof(struct liballoc_minor));
ALIGN(p);
#ifdef LIBALLOCDEBUG
printf("CASE 4.2: returning %lx\n", p);
FLUSH();
#endif
liballoc_unlock(); // release the lock
return p;
}
} // min->next != NULL
min = min->next;
} // while min != NULL ...
#endif
#ifdef USE_CASE5
// CASE 5: Block full! Ensure next block and loop.
if (maj->next == NULL)
{
#ifdef LIBALLOCDEBUG
printf("CASE 5: block full\n");
FLUSH();
#endif
if (startedBet == 1)
{
maj = l_memRoot;
startedBet = 0;
continue;
}
// we've run out. we need more...
maj->next = allocate_new_page(size); // next one guaranteed to be okay
if (maj->next == NULL)
break; // uh oh, no more memory.....
maj->next->prev = maj;
}
#endif
maj = maj->next;
} // while (maj != NULL)
liballoc_unlock(); // release the lock
#ifdef LIBALLOCDEBUG
printf("All cases exhausted. No memory available.\n");
FLUSH();
#endif
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: WARNING: PREFIX(malloc)( %i ) returning NULL.\n", size);
liballoc_dump();
FLUSH();
#endif
return NULL;
}
__no_sanitize("undefined") void PREFIX(free)(void *ptr)
{
struct liballoc_minor *min;
struct liballoc_major *maj;
if (ptr == NULL)
{
l_warningCount += 1;
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: WARNING: PREFIX(free)( NULL ) called from %lx\n",
__builtin_return_address(0));
FLUSH();
#endif
return;
}
UNALIGN(ptr);
liballoc_lock(); // lockit
min = (struct liballoc_minor *)((uintptr_t)ptr - sizeof(struct liballoc_minor));
if (min->magic != LIBALLOC_MAGIC)
{
l_errorCount += 1;
// Check for overrun errors. For all bytes of LIBALLOC_MAGIC
if (
((min->magic & 0xFFFFFF) == (LIBALLOC_MAGIC & 0xFFFFFF)) ||
((min->magic & 0xFFFF) == (LIBALLOC_MAGIC & 0xFFFF)) ||
((min->magic & 0xFF) == (LIBALLOC_MAGIC & 0xFF)))
{
l_possibleOverruns += 1;
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: ERROR: Possible 1-3 byte overrun for magic %lx != %lx\n",
min->magic,
LIBALLOC_MAGIC);
FLUSH();
#endif
}
if (min->magic == LIBALLOC_DEAD)
{
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: ERROR: multiple PREFIX(free)() attempt on %lx from %lx.\n",
ptr,
__builtin_return_address(0));
FLUSH();
#endif
}
else
{
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: ERROR: Bad PREFIX(free)( %lx ) called from %lx\n",
ptr,
__builtin_return_address(0));
FLUSH();
#endif
}
// being lied to...
liballoc_unlock(); // release the lock
return;
}
#ifdef LIBALLOCDEBUG
printf("liballoc: %lx PREFIX(free)( %lx ): ",
__builtin_return_address(0),
ptr);
FLUSH();
#endif
maj = min->block;
l_inuse -= min->size;
maj->usage -= (min->size + sizeof(struct liballoc_minor));
min->magic = LIBALLOC_DEAD; // No mojo.
if (min->next != NULL)
min->next->prev = min->prev;
if (min->prev != NULL)
min->prev->next = min->next;
if (min->prev == NULL)
maj->first = min->next;
// Might empty the block. This was the first
// minor.
// We need to clean up after the majors now....
if (maj->first == NULL) // Block completely unused.
{
if (l_memRoot == maj)
l_memRoot = maj->next;
if (l_bestBet == maj)
l_bestBet = NULL;
if (maj->prev != NULL)
maj->prev->next = maj->next;
if (maj->next != NULL)
maj->next->prev = maj->prev;
l_allocated -= maj->size;
liballoc_free(maj, maj->pages);
}
else
{
if (l_bestBet != NULL)
{
int bestSize = l_bestBet->size - l_bestBet->usage;
int majSize = maj->size - maj->usage;
if (majSize > bestSize)
l_bestBet = maj;
}
}
#ifdef LIBALLOCDEBUG
printf("OK\n");
FLUSH();
#endif
liballoc_unlock(); // release the lock
}
__no_sanitize("undefined") void *PREFIX(calloc)(size_t nobj, size_t size)
{
int real_size;
void *p;
real_size = nobj * size;
p = PREFIX(malloc)(real_size);
if (p != NULL)
liballoc_memset(p, 0, real_size);
return p;
}
__no_sanitize("undefined") void *PREFIX(realloc)(void *p, size_t size)
{
void *ptr;
struct liballoc_minor *min;
unsigned int real_size;
// Honour the case of size == 0 => free old and return NULL
if (size == 0)
{
PREFIX(free)
(p);
return NULL;
}
// In the case of a NULL pointer, return a simple malloc.
if (p == NULL)
return PREFIX(malloc)(size);
// Unalign the pointer if required.
ptr = p;
UNALIGN(ptr);
liballoc_lock(); // lockit
min = (struct liballoc_minor *)((uintptr_t)ptr - sizeof(struct liballoc_minor));
// Ensure it is a valid structure.
if (min->magic != LIBALLOC_MAGIC)
{
l_errorCount += 1;
// Check for overrun errors. For all bytes of LIBALLOC_MAGIC
if (
((min->magic & 0xFFFFFF) == (LIBALLOC_MAGIC & 0xFFFFFF)) ||
((min->magic & 0xFFFF) == (LIBALLOC_MAGIC & 0xFFFF)) ||
((min->magic & 0xFF) == (LIBALLOC_MAGIC & 0xFF)))
{
l_possibleOverruns += 1;
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: ERROR: Possible 1-3 byte overrun for magic %lx != %lx\n",
min->magic,
LIBALLOC_MAGIC);
FLUSH();
#endif
}
if (min->magic == LIBALLOC_DEAD)
{
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: ERROR: multiple PREFIX(free)() attempt on %lx from %lx.\n",
ptr,
__builtin_return_address(0));
FLUSH();
#endif
}
else
{
#if defined LIBALLOCDEBUG || defined LIBALLOCINFO
printf("liballoc: ERROR: Bad PREFIX(free)( %lx ) called from %lx\n",
ptr,
__builtin_return_address(0));
FLUSH();
#endif
}
// being lied to...
liballoc_unlock(); // release the lock
return NULL;
}
// Definitely a memory block.
real_size = min->req_size;
if (real_size >= size)
{
min->req_size = size;
liballoc_unlock();
return p;
}
liballoc_unlock();
// If we got here then we're reallocating to a block bigger than us.
ptr = PREFIX(malloc)(size); // We need to allocate new memory
liballoc_memcpy(ptr, p, real_size);
PREFIX(free)
(p);
return ptr;
}


@@ -0,0 +1,74 @@
#ifndef _LIBALLOC_H
#define _LIBALLOC_H
#include <types.h>
/** \defgroup ALLOCHOOKS liballoc hooks
*
* These are the OS specific functions which need to
* be implemented on any platform that the library
* is expected to work on.
*/
/** @{ */
// If we are told to not define our own size_t, then we skip the define.
// #define _HAVE_UINTPTR_T
// typedef unsigned long uintptr_t;
// This lets you prefix malloc and friends
#define PREFIX(func) kliballoc_##func
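// e.g. PREFIX(malloc) expands to kliballoc_malloc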
#ifdef __cplusplus
extern "C"
{
#endif
/** This function is supposed to lock the memory data structures. It
* could be as simple as disabling interrupts or acquiring a spinlock.
* It's up to you to decide.
*
* \return 0 if the lock was acquired successfully. Anything else is
* failure.
*/
extern int liballoc_lock();
/** This function unlocks what was previously locked by the liballoc_lock
* function. If it disabled interrupts, it enables interrupts. If it
* had acquired a spinlock, it releases the spinlock, etc.
*
* \return 0 if the lock was successfully released.
*/
extern int liballoc_unlock();
/** This is the hook into the local system which allocates pages. It
* accepts an integer parameter which is the number of pages
* required. The page size was set up in the liballoc_init function.
*
* \return NULL if the pages were not allocated.
* \return A pointer to the allocated memory.
*/
extern void *liballoc_alloc(size_t);
/** This frees previously allocated memory. The void* parameter passed
* to the function is the exact same value returned from a previous
* liballoc_alloc call.
*
* The integer value is the number of pages to free.
*
* \return 0 if the memory was successfully freed.
*/
extern int liballoc_free(void *, size_t);
extern void *PREFIX(malloc)(size_t); ///< The standard function.
extern void *PREFIX(realloc)(void *, size_t); ///< The standard function.
extern void *PREFIX(calloc)(size_t, size_t); ///< The standard function.
extern void PREFIX(free)(void *); ///< The standard function.
#ifdef __cplusplus
}
#endif
/** @} */
#endif


@@ -0,0 +1,29 @@
#include <types.h>
#include <lock.hpp>
#include <memory.hpp>
NewLock(liballocLock);
EXTERNC int liballoc_lock()
{
return liballocLock.Lock(__FUNCTION__);
}
EXTERNC int liballoc_unlock()
{
return liballocLock.Unlock();
}
EXTERNC void *liballoc_alloc(size_t Pages)
{
void *ret = KernelAllocator.RequestPages(Pages);
debug("(%d) = %#lx", Pages, ret);
return ret;
}
EXTERNC int liballoc_free(void *Address, size_t Pages)
{
debug("(%#lx, %d)", Address, Pages);
KernelAllocator.FreePages(Address, Pages);
return 0;
}

(File diff suppressed because it is too large.)


@@ -0,0 +1,371 @@
/* rpmalloc.h - Memory allocator - Public Domain - 2016 Mattias Jansson
*
* This library provides a cross-platform lock free thread caching malloc implementation in C11.
* The latest source code is always available at
*
* https://github.com/mjansson/rpmalloc
*
* This library is put in the public domain; you can redistribute it and/or modify it without any restrictions.
*
*/
#pragma once
#include <stddef.h>
#ifdef __cplusplus
extern "C"
{
#endif
#if defined(__clang__) || defined(__GNUC__)
#define RPMALLOC_EXPORT __attribute__((visibility("default")))
#define RPMALLOC_ALLOCATOR
#if (defined(__clang_major__) && (__clang_major__ < 4)) || (defined(__GNUC__) && defined(ENABLE_PRELOAD) && ENABLE_PRELOAD)
#define RPMALLOC_ATTRIB_MALLOC
#define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size)
#else
#define RPMALLOC_ATTRIB_MALLOC __attribute__((__malloc__))
#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) __attribute__((alloc_size(size)))
#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) __attribute__((alloc_size(count, size)))
#endif
#define RPMALLOC_CDECL
#elif defined(_MSC_VER)
#define RPMALLOC_EXPORT
#define RPMALLOC_ALLOCATOR __declspec(allocator) __declspec(restrict)
#define RPMALLOC_ATTRIB_MALLOC
#define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size)
#define RPMALLOC_CDECL __cdecl
#else
#define RPMALLOC_EXPORT
#define RPMALLOC_ALLOCATOR
#define RPMALLOC_ATTRIB_MALLOC
#define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size)
#define RPMALLOC_CDECL
#endif
//! Define RPMALLOC_CONFIGURABLE to enable configuring sizes. Will introduce
// a very small overhead due to some size calculations not being compile time constants
#ifndef RPMALLOC_CONFIGURABLE
#define RPMALLOC_CONFIGURABLE 1
#endif
//! Define RPMALLOC_FIRST_CLASS_HEAPS to enable heap based API (rpmalloc_heap_* functions).
// Will introduce a very small overhead to track fully allocated spans in heaps
#ifndef RPMALLOC_FIRST_CLASS_HEAPS
#define RPMALLOC_FIRST_CLASS_HEAPS 0
#endif
//! Flag to rpaligned_realloc to not preserve content in reallocation
#define RPMALLOC_NO_PRESERVE 1
//! Flag to rpaligned_realloc to fail and return null pointer if grow cannot be done in-place,
// in which case the original pointer is still valid (just like a call to realloc which fails to allocate
// a new block).
#define RPMALLOC_GROW_OR_FAIL 2
typedef struct rpmalloc_global_statistics_t
{
//! Current amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
size_t mapped;
//! Peak amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
size_t mapped_peak;
//! Current amount of memory in global caches for small and medium sizes (<32KiB)
size_t cached;
//! Current amount of memory allocated in huge allocations, i.e. larger than LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
size_t huge_alloc;
//! Peak amount of memory allocated in huge allocations, i.e. larger than LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
size_t huge_alloc_peak;
//! Total amount of memory mapped since initialization (only if ENABLE_STATISTICS=1)
size_t mapped_total;
//! Total amount of memory unmapped since initialization (only if ENABLE_STATISTICS=1)
size_t unmapped_total;
} rpmalloc_global_statistics_t;
typedef struct rpmalloc_thread_statistics_t
{
//! Current number of bytes available in thread size class caches for small and medium sizes (<32KiB)
size_t sizecache;
//! Current number of bytes available in thread span caches for small and medium sizes (<32KiB)
size_t spancache;
//! Total number of bytes transitioned from thread cache to global cache (only if ENABLE_STATISTICS=1)
size_t thread_to_global;
//! Total number of bytes transitioned from global cache to thread cache (only if ENABLE_STATISTICS=1)
size_t global_to_thread;
//! Per span count statistics (only if ENABLE_STATISTICS=1)
struct
{
//! Currently used number of spans
size_t current;
//! High water mark of spans used
size_t peak;
//! Number of spans transitioned to global cache
size_t to_global;
//! Number of spans transitioned from global cache
size_t from_global;
//! Number of spans transitioned to thread cache
size_t to_cache;
//! Number of spans transitioned from thread cache
size_t from_cache;
//! Number of spans transitioned to reserved state
size_t to_reserved;
//! Number of spans transitioned from reserved state
size_t from_reserved;
//! Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
size_t map_calls;
} span_use[64];
//! Per size class statistics (only if ENABLE_STATISTICS=1)
struct
{
//! Current number of allocations
size_t alloc_current;
//! Peak number of allocations
size_t alloc_peak;
//! Total number of allocations
size_t alloc_total;
//! Total number of frees
size_t free_total;
//! Number of spans transitioned to cache
size_t spans_to_cache;
//! Number of spans transitioned from cache
size_t spans_from_cache;
//! Number of spans transitioned from reserved state
size_t spans_from_reserved;
//! Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
size_t map_calls;
} size_use[128];
} rpmalloc_thread_statistics_t;
typedef struct rpmalloc_config_t
{
//! Map memory pages for the given number of bytes. The returned address MUST be
// aligned to the rpmalloc span size, which will always be a power of two.
// Optionally the function can store an alignment offset in the offset variable
// in case it performs alignment and the returned pointer is offset from the
// actual start of the memory region due to this alignment. The alignment offset
// will be passed to the memory unmap function. The alignment offset MUST NOT be
// larger than 65535 (storable in a uint16_t); if it is, you must use natural
// alignment to shift it into 16 bits. If you set a memory_map function, you
// must also set a memory_unmap function or else the default implementation will
// be used for both. This function must be thread safe, it can be called by
// multiple threads simultaneously.
void *(*memory_map)(size_t size, size_t *offset);
//! Unmap the memory pages starting at address and spanning the given number of bytes.
// If release is set to non-zero, the unmap is for an entire span range as returned by
// a previous call to memory_map and that the entire range should be released. The
// release argument holds the size of the entire span range. If release is set to 0,
// the unmap is a partial decommit of a subset of the mapped memory range.
// If you set a memory_unmap function, you must also set a memory_map function or
// else the default implementation will be used for both. This function must be thread
// safe, it can be called by multiple threads simultaneously.
void (*memory_unmap)(void *address, size_t size, size_t offset, size_t release);
//! Called when an assert fails, if asserts are enabled. Will use the standard assert()
// if this is not set.
void (*error_callback)(const char *message);
//! Called when a call to map memory pages fails (out of memory). If this callback is
// not set or returns zero, the library will return a null pointer in the allocation
// call. If this callback returns non-zero, the map call will be retried. The argument
// passed is the number of bytes that was requested in the map call. Only used if
// the default system memory map function is used (memory_map callback is not set).
int (*map_fail_callback)(size_t size);
//! Size of memory pages. The page size MUST be a power of two. All memory mapping
// requests to memory_map will be made with size set to a multiple of the page size.
// Used if RPMALLOC_CONFIGURABLE is defined to 1, otherwise system page size is used.
size_t page_size;
//! Size of a span of memory blocks. MUST be a power of two, and in [4096,262144]
// range (unless 0 - set to 0 to use the default span size). Used if RPMALLOC_CONFIGURABLE
// is defined to 1.
size_t span_size;
//! Number of spans to map at each request to map new virtual memory blocks. This can
// be used to minimize the system call overhead at the cost of virtual memory address
// space. The extra mapped pages will not be written until actually used, so physical
// committed memory should not be affected in the default implementation. Will be
// aligned to a multiple of spans that match memory page size in case of huge pages.
size_t span_map_count;
//! Enable use of large/huge pages. If this flag is set to non-zero and page size is
// zero, the allocator will try to enable huge pages and auto detect the configuration.
// If this is set to non-zero and page_size is also non-zero, the allocator will
// assume huge pages have been configured and enabled prior to initializing the
// allocator.
// For Windows, see https://docs.microsoft.com/en-us/windows/desktop/memory/large-page-support
// For Linux, see https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
int enable_huge_pages;
//! Names for regular and huge allocated pages, respectively, used on systems
// that support naming anonymous regions so they can be distinguished.
const char *page_name;
const char *huge_page_name;
} rpmalloc_config_t;
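A minimal sketch of how a caller could fill this structure before the first allocation (illustrative only; MyMap, MyUnmap, and the page helpers they call are hypothetical placeholders, not part of this commit):

static void *MyMap(size_t size, size_t *offset)
{
	*offset = 0;                         /* no alignment offset applied here */
	return ObtainSpanAlignedPages(size); /* placeholder: must return span-aligned memory */
}
static void MyUnmap(void *address, size_t size, size_t offset, size_t release)
{
	(void)offset;
	ReleasePages(address, release ? release : size); /* placeholder */
}
static int SetupAllocator(void)
{
	rpmalloc_config_t cfg = {};
	cfg.memory_map = MyMap;   /* map and unmap must be set as a pair */
	cfg.memory_unmap = MyUnmap;
	cfg.page_size = 0;        /* 0 = use the system page size */
	return rpmalloc_initialize_config(&cfg);
}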
//! Initialize allocator with default configuration
RPMALLOC_EXPORT int
rpmalloc_initialize(void);
//! Initialize allocator with given configuration
RPMALLOC_EXPORT int
rpmalloc_initialize_config(const rpmalloc_config_t *config);
//! Get allocator configuration
RPMALLOC_EXPORT const rpmalloc_config_t *
rpmalloc_config(void);
//! Finalize allocator
RPMALLOC_EXPORT void
rpmalloc_finalize(void);
//! Initialize allocator for calling thread
RPMALLOC_EXPORT void
rpmalloc_thread_initialize(void);
//! Finalize allocator for calling thread
RPMALLOC_EXPORT void
rpmalloc_thread_finalize(int release_caches);
//! Perform deferred deallocations pending for the calling thread heap
RPMALLOC_EXPORT void
rpmalloc_thread_collect(void);
//! Query if allocator is initialized for calling thread
RPMALLOC_EXPORT int
rpmalloc_is_thread_initialized(void);
//! Get per-thread statistics
RPMALLOC_EXPORT void
rpmalloc_thread_statistics(rpmalloc_thread_statistics_t *stats);
//! Get global statistics
RPMALLOC_EXPORT void
rpmalloc_global_statistics(rpmalloc_global_statistics_t *stats);
//! Dump all statistics in human readable format to file (should be a FILE*)
RPMALLOC_EXPORT void
rpmalloc_dump_statistics(void *file);
//! Allocate a memory block of at least the given size
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpmalloc(size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1);
//! Free the given memory block
RPMALLOC_EXPORT void
rpfree(void *ptr);
//! Allocate a memory block of at least the given size and zero initialize it
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpcalloc(size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(1, 2);
//! Reallocate the given block to at least the given size
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rprealloc(void *ptr, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
//! Reallocate the given block to at least the given size and alignment,
// with optional control flags (see RPMALLOC_NO_PRESERVE).
// Alignment must be a power of two and a multiple of sizeof(void*),
// and should ideally be less than memory page size. A caveat of rpmalloc
// internals is that this must also be strictly less than the span size (default 64KiB)
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpaligned_realloc(void *ptr, size_t alignment, size_t size, size_t oldsize, unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(3);
//! Allocate a memory block of at least the given size and alignment.
// Alignment must be a power of two and a multiple of sizeof(void*),
// and should ideally be less than memory page size. A caveat of rpmalloc
// internals is that this must also be strictly less than the span size (default 64KiB)
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpaligned_alloc(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
//! Allocate a memory block of at least the given size and alignment, and zero initialize it.
// Alignment must be a power of two and a multiple of sizeof(void*),
// and should ideally be less than memory page size. A caveat of rpmalloc
// internals is that this must also be strictly less than the span size (default 64KiB)
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpaligned_calloc(size_t alignment, size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);
//! Allocate a memory block of at least the given size and alignment.
// Alignment must be a power of two and a multiple of sizeof(void*),
// and should ideally be less than memory page size. A caveat of rpmalloc
// internals is that this must also be strictly less than the span size (default 64KiB)
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpmemalign(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
//! Allocate a memory block of at least the given size and alignment.
// Alignment must be a power of two and a multiple of sizeof(void*),
// and should ideally be less than memory page size. A caveat of rpmalloc
// internals is that this must also be strictly less than the span size (default 64KiB)
RPMALLOC_EXPORT int
rpposix_memalign(void **memptr, size_t alignment, size_t size);
//! Query the usable size of the given memory block (from given pointer to the end of block)
RPMALLOC_EXPORT size_t
rpmalloc_usable_size(void *ptr);
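Taken together, a typical round trip through this API looks roughly like the sketch below (illustrative only; the kernel performs these calls elsewhere):

static void AllocatorRoundTrip(void)
{
	rpmalloc_initialize();               /* once per process, or _initialize_config() */
	rpmalloc_thread_initialize();        /* once per calling thread */
	void *p = rpmalloc(256);             /* at least 256 usable bytes */
	void *q = rpaligned_alloc(64, 1024); /* 64-byte aligned, below the span size */
	size_t usable = rpmalloc_usable_size(p);
	(void)usable;
	rpfree(q);
	rpfree(p);
	rpmalloc_thread_finalize(1);         /* 1 = release thread caches */
	rpmalloc_finalize();
}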
#if RPMALLOC_FIRST_CLASS_HEAPS
//! Heap type
typedef struct heap_t rpmalloc_heap_t;
//! Acquire a new heap. Will reuse existing released heaps or allocate memory for a new heap
// if none available. Heap API is implemented with the strict assumption that only one single
// thread will call heap functions for a given heap at any given time, no functions are thread safe.
RPMALLOC_EXPORT rpmalloc_heap_t *
rpmalloc_heap_acquire(void);
//! Release a heap (does NOT free the memory allocated by the heap, use rpmalloc_heap_free_all before destroying the heap).
// Releasing a heap will enable it to be reused by other threads. Safe to pass a null pointer.
RPMALLOC_EXPORT void
rpmalloc_heap_release(rpmalloc_heap_t *heap);
//! Allocate a memory block of at least the given size using the given heap.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpmalloc_heap_alloc(rpmalloc_heap_t *heap, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
//! Allocate a memory block of at least the given size using the given heap. The returned
// block will have the requested alignment. Alignment must be a power of two and a multiple of sizeof(void*),
// and should ideally be less than memory page size. A caveat of rpmalloc
// internals is that this must also be strictly less than the span size (default 64KiB).
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpmalloc_heap_aligned_alloc(rpmalloc_heap_t *heap, size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(3);
//! Allocate a memory block of at least the given size using the given heap and zero initialize it.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpmalloc_heap_calloc(rpmalloc_heap_t *heap, size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);
//! Allocate a memory block of at least the given size using the given heap and zero initialize it. The returned
// block will have the requested alignment. Alignment must either be zero, or a power of two and a multiple of sizeof(void*),
// and should ideally be less than memory page size. A caveat of rpmalloc
// internals is that this must also be strictly less than the span size (default 64KiB).
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpmalloc_heap_aligned_calloc(rpmalloc_heap_t *heap, size_t alignment, size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);
//! Reallocate the given block to at least the given size. The memory block MUST be allocated
// by the same heap given to this function.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpmalloc_heap_realloc(rpmalloc_heap_t *heap, void *ptr, size_t size, unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(3);
//! Reallocate the given block to at least the given size. The memory block MUST be allocated
// by the same heap given to this function. The returned block will have the requested alignment.
// Alignment must be either zero, or a power of two and a multiple of sizeof(void*), and should ideally be
// less than memory page size. A caveat of rpmalloc internals is that this must also be strictly less than
// the span size (default 64KiB).
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpmalloc_heap_aligned_realloc(rpmalloc_heap_t *heap, void *ptr, size_t alignment, size_t size, unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(4);
//! Free the given memory block from the given heap. The memory block MUST be allocated
// by the same heap given to this function.
RPMALLOC_EXPORT void
rpmalloc_heap_free(rpmalloc_heap_t *heap, void *ptr);
//! Free all memory allocated by the heap
RPMALLOC_EXPORT void
rpmalloc_heap_free_all(rpmalloc_heap_t *heap);
//! Set the given heap as the current heap for the calling thread. A heap MUST only be current heap
// for a single thread, a heap can never be shared between multiple threads. The previous
// current heap for the calling thread is released to be reused by other threads.
RPMALLOC_EXPORT void
rpmalloc_heap_thread_set_current(rpmalloc_heap_t *heap);
#endif
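The first-class heap API above follows an acquire/allocate/free-all/release pattern, with the caveat that a heap must only ever be used from one thread at a time. A minimal sketch, assuming RPMALLOC_FIRST_CLASS_HEAPS is enabled:

#if RPMALLOC_FIRST_CLASS_HEAPS
static void HeapRoundTrip(void)
{
	rpmalloc_heap_t *heap = rpmalloc_heap_acquire();
	void *a = rpmalloc_heap_alloc(heap, 128);
	void *b = rpmalloc_heap_aligned_alloc(heap, 64, 4096);
	rpmalloc_heap_free(heap, a);   /* blocks must be freed on their owning heap */
	(void)b;
	rpmalloc_heap_free_all(heap);  /* bulk-free everything still live */
	rpmalloc_heap_release(heap);   /* make the heap reusable by other threads */
}
#endif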
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,91 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory/macro.hpp>
#include <sys/mman.h>
#include <memory.hpp>
#include <assert.h>
#include <unistd.h>
// #include "rpmalloc.c"
#include "../../../../kernel.h"
struct heap_t
{
char pad[56408];
};
static heap_t *__rpmalloc_tls_heap = nullptr;
EXTERNC heap_t **__memory_thread_heap(void)
{
if (unlikely(!TaskManager || !thisThread))
{
if (unlikely(!__rpmalloc_tls_heap))
{
__rpmalloc_tls_heap = (heap_t *)KernelAllocator.RequestPages(TO_PAGES(sizeof(heap_t)));
assert(__rpmalloc_tls_heap);
debug("rpmalloc TLS heap: %#lx", __rpmalloc_tls_heap);
memset(__rpmalloc_tls_heap, 0, sizeof(heap_t));
}
return &__rpmalloc_tls_heap;
}
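/* FIXME: the per-thread path below is unreachable; every thread currently
   shares the single bootstrap heap until per-thread TLS storage is wired up. */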
return &__rpmalloc_tls_heap;
heap_t *heap = (heap_t *)thisThread->TLS.pBase;
return (heap_t **)heap;
}
EXTERNC uintptr_t __get_tid(void)
{
if (unlikely(!TaskManager || !thisThread))
return (uintptr_t)-1;
return thisThread->ID;
}
EXTERNC long __rpmalloc_sysconf(int name)
{
switch (name)
{
case _SC_PAGESIZE:
return PAGE_SIZE;
default:
return -1;
}
}
EXTERNC void *__rpmalloc_mmap(void *addr, size_t length, int, int, int fd, off_t offset)
{
assert(addr == 0 && fd == -1 && offset == 0);
void *ptr = KernelAllocator.RequestPages(TO_PAGES(length));
debug("Requested %d pages, got %p", TO_PAGES(length), ptr);
if (ptr == nullptr)
return MAP_FAILED;
return ptr;
}
EXTERNC int __rpmalloc_munmap(void *addr, size_t length)
{
KernelAllocator.FreePages(addr, TO_PAGES(length));
debug("Freed %d pages at %p", TO_PAGES(length), addr);
return 0;
}
EXTERNC int __rpmalloc_posix_madvise(void *addr, size_t length, int advice)
{
func("%#lx %d %d", addr, length, advice);
return 0;
}
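These shims give rpmalloc an mmap-shaped surface over KernelAllocator. Purely as an illustration (the real hook-up happens inside the bundled rpmalloc sources, which are not shown here), memory_map/memory_unmap callbacks built on top of them could look like this:

static void *KernelMemoryMap(size_t size, size_t *offset)
{
	*offset = 0;
	void *ptr = __rpmalloc_mmap(nullptr, size, 0, 0, -1, 0);
	return ptr == MAP_FAILED ? nullptr : ptr;
}
static void KernelMemoryUnmap(void *address, size_t size, size_t offset, size_t release)
{
	(void)offset;
	__rpmalloc_munmap(address, release ? release : size);
}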

View File

@ -0,0 +1,75 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include "../../kernel.h"
namespace Memory
{
KernelStackManager::StackAllocation KernelStackManager::DetailedAllocate(size_t Size)
{
SmartLock(StackLock);
Size += 0x10;
size_t pagesNeeded = TO_PAGES(Size);
size_t stackSize = pagesNeeded * PAGE_SIZE;
assert((CurrentStackTop - stackSize) > KERNEL_STACK_BASE);
void *physicalMemory = KernelAllocator.RequestPages(pagesNeeded);
void *virtualAddress = (void *)(CurrentStackTop - stackSize);
Memory::Virtual vmm(KernelPageTable);
vmm.Map(virtualAddress, physicalMemory, stackSize, Memory::RW | Memory::G);
AllocatedStacks.push_back({physicalMemory, virtualAddress, stackSize});
CurrentStackTop -= stackSize;
TotalSize += stackSize;
return {physicalMemory, virtualAddress, stackSize};
}
void *KernelStackManager::Allocate(size_t Size)
{
return this->DetailedAllocate(Size).VirtualAddress;
}
void KernelStackManager::Free(void *Address)
{
SmartLock(StackLock);
auto it = std::find_if(AllocatedStacks.begin(), AllocatedStacks.end(),
[Address](const StackAllocation &stack)
{
return stack.VirtualAddress == Address;
});
if (it == AllocatedStacks.end())
return;
size_t pagesToFree = TO_PAGES(it->Size);
Memory::Virtual vmm(KernelPageTable);
vmm.Unmap(Address, it->Size);
KernelAllocator.FreePages(it->PhysicalAddress, pagesToFree);
TotalSize -= it->Size;
AllocatedStacks.erase(it);
}
KernelStackManager::KernelStackManager() {}
KernelStackManager::~KernelStackManager() {}
}
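As a usage sketch (illustrative only; StackManager is the global instance defined later in this commit in memory.cpp), callers receive the base of a mapped kernel region and hand the same pointer back to Free:

static void StackExample(void)
{
	/* At least 0x4000 bytes, rounded up to whole pages plus a small pad,
	   mapped RW | G below the previous stack top. */
	void *stack = StackManager.Allocate(0x4000);
	/* ... hand the region to a new kernel thread ... */
	StackManager.Free(stack);
}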

View File

@ -0,0 +1,616 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <convert.h>
#include <lock.hpp>
#include <debug.h>
#ifdef DEBUG
#include <uart.hpp>
#endif
#include "heap_allocators/Xalloc/Xalloc.hpp"
#include "heap_allocators/liballoc_1_1/liballoc_1_1.h"
#include "heap_allocators/rpmalloc/rpmalloc.h"
#include "../../kernel.h"
// #define DEBUG_ALLOCATIONS 1
#ifdef DEBUG_ALLOCATIONS
#define memdbg(m, ...) \
debug(m, ##__VA_ARGS__); \
__sync
#else
#define memdbg(m, ...)
#endif
using namespace Memory;
Physical KernelAllocator;
Memory::KernelStackManager StackManager;
PageTable *KernelPageTable = nullptr;
bool Page1GBSupport = false;
bool PSESupport = false;
MemoryAllocatorType AllocatorType = MemoryAllocatorType::Pages;
Xalloc::V1 *XallocV1Allocator = nullptr;
Xalloc::V2 *XallocV2Allocator = nullptr;
#ifdef DEBUG
NIF void tracepagetable(PageTable *pt)
{
for (int i = 0; i < 512; i++)
{
#if defined(a64)
if (pt->Entries[i].Present)
debug("Entry %03d: %x %x %x %x %x %x %x %p-%#llx", i,
pt->Entries[i].Present, pt->Entries[i].ReadWrite,
pt->Entries[i].UserSupervisor, pt->Entries[i].WriteThrough,
pt->Entries[i].CacheDisable, pt->Entries[i].Accessed,
pt->Entries[i].ExecuteDisable, pt->Entries[i].Address << 12,
pt->Entries[i]);
#elif defined(a32)
#elif defined(aa64)
#endif
}
}
#endif
NIF void MapEntries(PageTable *PT)
{
debug("mapping %d memory entries", bInfo.Memory.Entries);
Virtual vmm = Virtual(PT);
for (uint64_t i = 0; i < bInfo.Memory.Entries; i++)
{
uintptr_t Base = r_cst(uintptr_t, bInfo.Memory.Entry[i].BaseAddress);
size_t Length = bInfo.Memory.Entry[i].Length;
debug("mapping %#lx-%#lx", Base, Base + Length);
vmm.Map((void *)Base, (void *)Base, Length, RW);
}
/* Make sure 0x0 is unmapped (so we PF when nullptr is accessed) */
vmm.Unmap((void *)0);
}
NIF void MapFramebuffer(PageTable *PT)
{
debug("Mapping Framebuffer");
Virtual vmm = Virtual(PT);
int itrfb = 0;
while (1)
{
if (!bInfo.Framebuffer[itrfb].BaseAddress)
break;
size_t fbSize = bInfo.Framebuffer[itrfb].Pitch * bInfo.Framebuffer[itrfb].Height;
fbSize = ALIGN_UP(fbSize, PAGE_SIZE);
#ifdef DEBUG
if (DebuggerIsAttached)
fbSize += 16 * PAGE_SIZE;
#endif
if (PSESupport && Page1GBSupport)
{
vmm.OptimizedMap(bInfo.Framebuffer[itrfb].BaseAddress,
bInfo.Framebuffer[itrfb].BaseAddress,
fbSize, RW | G | KRsv);
}
else
{
vmm.Map(bInfo.Framebuffer[itrfb].BaseAddress,
bInfo.Framebuffer[itrfb].BaseAddress,
fbSize, RW | G | KRsv);
}
itrfb++;
}
}
NIF void MapKernel(PageTable *PT)
{
debug("Mapping Kernel");
/* RWX */
uintptr_t BootstrapStart = (uintptr_t)&_bootstrap_start;
uintptr_t BootstrapEnd = (uintptr_t)&_bootstrap_end;
/* RX */
uintptr_t KernelTextStart = (uintptr_t)&_kernel_text_start;
uintptr_t KernelTextEnd = (uintptr_t)&_kernel_text_end;
/* RW */
uintptr_t KernelDataStart = (uintptr_t)&_kernel_data_start;
uintptr_t KernelDataEnd = (uintptr_t)&_kernel_data_end;
/* R */
uintptr_t KernelRoDataStart = (uintptr_t)&_kernel_rodata_start;
uintptr_t KernelRoDataEnd = (uintptr_t)&_kernel_rodata_end;
/* RW */
uintptr_t KernelBssStart = (uintptr_t)&_kernel_bss_start;
uintptr_t KernelBssEnd = (uintptr_t)&_kernel_bss_end;
#ifdef DEBUG
uintptr_t KernelStart = (uintptr_t)&_kernel_start;
uintptr_t KernelEnd = (uintptr_t)&_kernel_end;
#endif
uintptr_t KernelFileStart = (uintptr_t)bInfo.Kernel.FileBase;
uintptr_t KernelFileEnd = KernelFileStart + bInfo.Kernel.Size;
debug("Bootstrap: %#lx-%#lx", BootstrapStart, BootstrapEnd);
debug("Kernel text: %#lx-%#lx", KernelTextStart, KernelTextEnd);
debug("Kernel data: %#lx-%#lx", KernelDataStart, KernelDataEnd);
debug("Kernel rodata: %#lx-%#lx", KernelRoDataStart, KernelRoDataEnd);
debug("Kernel bss: %#lx-%#lx", KernelBssStart, KernelBssEnd);
debug("Kernel: %#lx-%#lx", KernelStart, KernelEnd);
debug("Kernel file: %#lx-%#lx", KernelFileStart, KernelFileEnd);
debug("File size: %ld KiB", TO_KiB(bInfo.Kernel.Size));
debug(".bootstrap size: %ld KiB", TO_KiB(BootstrapEnd - BootstrapStart));
debug(".text size: %ld KiB", TO_KiB(KernelTextEnd - KernelTextStart));
debug(".data size: %ld KiB", TO_KiB(KernelDataEnd - KernelDataStart));
debug(".rodata size: %ld KiB", TO_KiB(KernelRoDataEnd - KernelRoDataStart));
debug(".bss size: %ld KiB", TO_KiB(KernelBssEnd - KernelBssStart));
uintptr_t BaseKernelMapAddress = (uintptr_t)bInfo.Kernel.PhysicalBase;
debug("Base kernel map address: %#lx", BaseKernelMapAddress);
uintptr_t k;
Virtual vmm = Virtual(PT);
/* Bootstrap section */
if (BaseKernelMapAddress == BootstrapStart)
{
for (k = BootstrapStart; k < BootstrapEnd; k += PAGE_SIZE)
{
vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G | KRsv);
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
}
else
{
trace("Ignoring bootstrap section.");
/* Bootstrap section must be mapped at 0x100000. */
}
/* Text section */
for (k = KernelTextStart; k < KernelTextEnd; k += PAGE_SIZE)
{
vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G | KRsv);
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
/* Data section */
for (k = KernelDataStart; k < KernelDataEnd; k += PAGE_SIZE)
{
vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G | KRsv);
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
/* Read only data section */
for (k = KernelRoDataStart; k < KernelRoDataEnd; k += PAGE_SIZE)
{
vmm.Map((void *)k, (void *)BaseKernelMapAddress, G | KRsv);
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
/* Block starting symbol section */
for (k = KernelBssStart; k < KernelBssEnd; k += PAGE_SIZE)
{
vmm.Map((void *)k, (void *)BaseKernelMapAddress, RW | G | KRsv);
KernelAllocator.ReservePage((void *)BaseKernelMapAddress);
BaseKernelMapAddress += PAGE_SIZE;
}
debug("Base kernel map address: %#lx", BaseKernelMapAddress);
/* Kernel file */
if (KernelFileStart != 0)
{
for (k = KernelFileStart; k < KernelFileEnd; k += PAGE_SIZE)
{
vmm.Map((void *)k, (void *)k, G | KRsv);
KernelAllocator.ReservePage((void *)k);
}
}
else
info("Cannot determine kernel file address. Ignoring.");
}
NIF void CreatePageTable(PageTable *pt)
{
static int check_cpuid = 0;
if (!check_cpuid++)
{
if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_AMD) == 0)
{
CPU::x86::AMD::CPUID0x80000001 cpuid;
PSESupport = cpuid.EDX.PSE;
Page1GBSupport = cpuid.EDX.Page1GB;
}
else if (strcmp(CPU::Vendor(), x86_CPUID_VENDOR_INTEL) == 0)
{
CPU::x86::Intel::CPUID0x00000001 cpuid;
PSESupport = cpuid.EDX.PSE;
}
if (PSESupport)
{
#if defined(a64)
CPU::x64::CR4 cr4 = CPU::x64::readcr4();
cr4.PSE = 1;
CPU::x64::writecr4(cr4);
#elif defined(a32)
CPU::x32::CR4 cr4 = CPU::x32::readcr4();
cr4.PSE = 1;
CPU::x32::writecr4(cr4);
#elif defined(aa64)
#endif
trace("PSE Support Enabled");
}
#ifdef DEBUG
if (Page1GBSupport)
debug("1GB Page Support Enabled");
#endif
}
/* TODO: Map faster */
MapEntries(pt);
MapFramebuffer(pt);
MapKernel(pt);
#ifdef DEBUG
tracepagetable(pt);
#endif
}
NIF void InitializeMemoryManagement()
{
#ifdef DEBUG
#ifndef a32
for (uint64_t i = 0; i < bInfo.Memory.Entries; i++)
{
uintptr_t Base = r_cst(uintptr_t, bInfo.Memory.Entry[i].BaseAddress);
size_t Length = bInfo.Memory.Entry[i].Length;
uintptr_t End = Base + Length;
const char *Type = "Unknown";
switch (bInfo.Memory.Entry[i].Type)
{
case likely(Usable):
Type = "Usable";
break;
case Reserved:
Type = "Reserved";
break;
case ACPIReclaimable:
Type = "ACPI Reclaimable";
break;
case ACPINVS:
Type = "ACPI NVS";
break;
case BadMemory:
Type = "Bad Memory";
break;
case BootloaderReclaimable:
Type = "Bootloader Reclaimable";
break;
case KernelAndModules:
Type = "Kernel and Modules";
break;
case Framebuffer:
Type = "Framebuffer";
break;
default:
break;
}
debug("%02ld: %p-%p %s",
i,
Base,
End,
Type);
}
#endif // a32
#endif // DEBUG
trace("Initializing Physical Memory Manager");
// KernelAllocator = Physical(); <- Already called in the constructor
KernelAllocator.Init();
debug("Memory Info:\n\n%lld MiB / %lld MiB (%lld MiB reserved)\n",
TO_MiB(KernelAllocator.GetUsedMemory()),
TO_MiB(KernelAllocator.GetTotalMemory()),
TO_MiB(KernelAllocator.GetReservedMemory()));
/* -- Debugging --
size_t bmap_size = KernelAllocator.GetPageBitmap().Size;
for (size_t i = 0; i < bmap_size; i++)
{
bool idx = KernelAllocator.GetPageBitmap().Get(i);
if (idx == true)
debug("Page %04d: %#lx", i, i * PAGE_SIZE);
}
inf_loop debug("Alloc.: %#lx", KernelAllocator.RequestPage());
*/
trace("Initializing Virtual Memory Manager");
KernelPageTable = (PageTable *)KernelAllocator.RequestPages(TO_PAGES(PAGE_SIZE + 1));
memset(KernelPageTable, 0, PAGE_SIZE);
CreatePageTable(KernelPageTable);
trace("Applying new page table from address %#lx",
KernelPageTable);
CPU::PageTable(KernelPageTable);
debug("Page table updated.");
/* FIXME: Read kernel params */
AllocatorType = Config.AllocatorType;
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
break;
case MemoryAllocatorType::XallocV1:
{
XallocV1Allocator = new Xalloc::V1((void *)nullptr, false, false);
trace("XallocV1 Allocator initialized at %#lx", XallocV1Allocator);
break;
}
case MemoryAllocatorType::XallocV2:
{
XallocV2Allocator = new Xalloc::V2((void *)nullptr);
trace("XallocV2 Allocator initialized at %#lx", XallocV2Allocator);
break;
}
case MemoryAllocatorType::liballoc11:
break;
case MemoryAllocatorType::rpmalloc_:
{
trace("Using rpmalloc allocator");
rpmalloc_initialize();
break;
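/* NOTE: the explicit configuration below is currently unreachable; the
   default rpmalloc_initialize() call above is used instead. */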
rpmalloc_config_t config = {
.memory_map = nullptr,
.memory_unmap = nullptr,
.error_callback = nullptr,
.map_fail_callback = nullptr,
.page_size = PAGE_SIZE,
.span_size = 4 * 1024, /* 4 KiB */
.span_map_count = 1,
.enable_huge_pages = 0,
.page_name = nullptr,
.huge_page_name = nullptr};
rpmalloc_initialize_config(&config);
break;
}
default:
{
error("Unknown allocator type %d", AllocatorType);
CPU::Stop();
}
}
}
void *malloc(size_t Size)
{
if (Size == 0)
{
warn("Attempt to allocate 0 bytes");
Size = 16;
}
memdbg("malloc(%d)->[%s]", Size,
KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
: "Unknown");
void *ret = nullptr;
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
{
ret = KernelAllocator.RequestPages(TO_PAGES(Size + 1));
break;
}
case MemoryAllocatorType::XallocV1:
{
ret = XallocV1Allocator->malloc(Size);
break;
}
case MemoryAllocatorType::XallocV2:
{
ret = XallocV2Allocator->malloc(Size);
break;
}
case MemoryAllocatorType::liballoc11:
{
ret = PREFIX(malloc)(Size);
break;
}
case MemoryAllocatorType::rpmalloc_:
{
ret = rpmalloc(Size);
break;
}
default:
{
error("Unknown allocator type %d", AllocatorType);
CPU::Stop();
}
}
memset(ret, 0, Size);
return ret;
}
void *calloc(size_t n, size_t Size)
{
if (Size == 0)
{
warn("Attempt to allocate 0 bytes");
Size = 16;
}
memdbg("calloc(%d, %d)->[%s]", n, Size,
KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
: "Unknown");
void *ret = nullptr;
switch (AllocatorType)
{
case MemoryAllocatorType::Pages:
{
ret = KernelAllocator.RequestPages(TO_PAGES(n * Size + 1));
break;
}
case MemoryAllocatorType::XallocV1:
{
ret = XallocV1Allocator->calloc(n, Size);
break;
}
case MemoryAllocatorType::XallocV2:
{
ret = XallocV2Allocator->calloc(n, Size);
break;
}
case MemoryAllocatorType::liballoc11:
{
void *ret = PREFIX(calloc)(n, Size);
return ret;
}
case MemoryAllocatorType::rpmalloc_:
{
ret = rpcalloc(n, Size);
break;
}
default:
{
error("Unknown allocator type %d", AllocatorType);
CPU::Stop();
}
}
memset(ret, 0, n * Size);
return ret;
}
void *realloc(void *Address, size_t Size)
{
if (Size == 0)
{
warn("Attempt to allocate 0 bytes");
Size = 16;
}
memdbg("realloc(%#lx, %d)->[%s]", Address, Size,
KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
: "Unknown");
void *ret = nullptr;
switch (AllocatorType)
{
case unlikely(MemoryAllocatorType::Pages):
{
ret = KernelAllocator.RequestPages(TO_PAGES(Size + 1)); // WARNING: Potential memory leak
break;
}
case MemoryAllocatorType::XallocV1:
{
ret = XallocV1Allocator->realloc(Address, Size);
break;
}
case MemoryAllocatorType::XallocV2:
{
ret = XallocV2Allocator->realloc(Address, Size);
break;
}
case MemoryAllocatorType::liballoc11:
{
void *ret = PREFIX(realloc)(Address, Size);
return ret;
}
case MemoryAllocatorType::rpmalloc_:
{
ret = rprealloc(Address, Size);
break;
}
default:
{
error("Unknown allocator type %d", AllocatorType);
CPU::Stop();
}
}
memset(ret, 0, Size);
return ret;
}
void free(void *Address)
{
if (Address == nullptr)
{
warn("Attempt to free a null pointer");
return;
}
memdbg("free(%#lx)->[%s]", Address,
KernelSymbolTable ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
: "Unknown");
switch (AllocatorType)
{
case unlikely(MemoryAllocatorType::Pages):
{
KernelAllocator.FreePage(Address); // WARNING: Potential memory leak
break;
}
case MemoryAllocatorType::XallocV1:
{
XallocV1Allocator->free(Address);
break;
}
case MemoryAllocatorType::XallocV2:
{
XallocV2Allocator->free(Address);
break;
}
case MemoryAllocatorType::liballoc11:
{
PREFIX(free)
(Address);
break;
}
case MemoryAllocatorType::rpmalloc_:
{
rpfree(Address);
break;
}
default:
{
error("Unknown allocator type %d", AllocatorType);
CPU::Stop();
}
}
}

View File

@ -0,0 +1,90 @@
#include <memory.hpp>
#include <filesystem.hpp>
#include <signal.hpp>
#include <utsname.h>
#include <time.h>
namespace Memory
{
void PageTable::Update()
{
#if defined(a86)
asmv("mov %0, %%cr3" ::"r"(this));
#elif defined(aa64)
asmv("msr ttbr0_el1, %0" ::"r"(this));
#endif
}
PageTable *PageTable::Fork()
{
PageTable *NewTable = (PageTable *)KernelAllocator.RequestPages(TO_PAGES(sizeof(PageTable)));
// memset(NewTable, 0, sizeof(PageTable));
// CreatePageTable(NewTable);
memcpy(NewTable, this, sizeof(PageTable));
debug("Forking page table %#lx to %#lx", this, NewTable);
#if defined(a64)
for (size_t i = 0; i < sizeof(Entries) / sizeof(Entries[0]); i++)
{
PageMapLevel4 *PML4 = &Entries[i];
PageMapLevel4 *NewPML4 = &NewTable->Entries[i];
if (!PML4->Present)
continue;
PageDirectoryPointerTableEntryPtr *ptrPDPT = (PageDirectoryPointerTableEntryPtr *)(PML4->GetAddress() << 12);
PageDirectoryPointerTableEntryPtr *ptrNewPDPT = (PageDirectoryPointerTableEntryPtr *)KernelAllocator.RequestPage();
NewPML4->SetAddress((uintptr_t)ptrNewPDPT >> 12);
for (size_t j = 0; j < sizeof(ptrPDPT->Entries) / sizeof(ptrPDPT->Entries[0]); j++)
{
PageDirectoryPointerTableEntry *PDPT = &ptrPDPT->Entries[j];
PageDirectoryPointerTableEntry *NewPDPT = &ptrNewPDPT->Entries[j];
*NewPDPT = *PDPT;
if (!PDPT->Present)
continue;
if (PDPT->PageSize)
continue;
PageDirectoryEntryPtr *ptrPDE = (PageDirectoryEntryPtr *)(PDPT->GetAddress() << 12);
PageDirectoryEntryPtr *ptrNewPDE = (PageDirectoryEntryPtr *)KernelAllocator.RequestPage();
NewPDPT->SetAddress((uintptr_t)ptrNewPDE >> 12);
for (size_t k = 0; k < sizeof(ptrPDE->Entries) / sizeof(ptrPDE->Entries[0]); k++)
{
PageDirectoryEntry *PDE = &ptrPDE->Entries[k];
PageDirectoryEntry *NewPDE = &ptrNewPDE->Entries[k];
*NewPDE = *PDE;
if (!PDE->Present)
continue;
if (PDE->PageSize)
continue;
PageTableEntryPtr *ptrPTE = (PageTableEntryPtr *)(PDE->GetAddress() << 12);
PageTableEntryPtr *ptrNewPTE = (PageTableEntryPtr *)KernelAllocator.RequestPage();
NewPDE->SetAddress((uintptr_t)ptrNewPTE >> 12);
for (size_t l = 0; l < sizeof(ptrPTE->Entries) / sizeof(ptrPTE->Entries[0]); l++)
{
PageTableEntry *PTE = &ptrPTE->Entries[l];
PageTableEntry *NewPTE = &ptrNewPTE->Entries[l];
*NewPTE = *PTE;
}
}
}
}
#else
#error "PageTable::Fork() not implemented for other architectures"
#endif
debug("Forked page table %#lx to %#lx", this, NewTable);
return NewTable;
}
/* We can't have Memory::Virtual in the header */
void *PageTable::__getPhysical(void *Address)
{
Virtual vmm(this);
void *PhysAddr = vmm.GetPhysical((void *)Address);
return PhysAddr;
}
}
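A usage sketch for Fork (illustrative only): the deep copy duplicates every present PML4/PDPT/PD/PT level, so the child table can later be activated independently of the parent:

static PageTable *CloneAddressSpace(PageTable *parent)
{
	PageTable *child = parent->Fork(); /* copies all present paging levels */
	/* child->Update() would switch CR3/TTBR0 to the copy when desired */
	return child;
}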

View File

@ -0,0 +1,54 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
namespace Memory
{
Virtual::PageMapIndexer::PageMapIndexer(uintptr_t VirtualAddress)
{
uintptr_t Address = VirtualAddress;
#if defined(a64)
Address >>= 12;
this->PTEIndex = Address & 0x1FF;
Address >>= 9;
this->PDEIndex = Address & 0x1FF;
Address >>= 9;
this->PDPTEIndex = Address & 0x1FF;
Address >>= 9;
this->PMLIndex = Address & 0x1FF;
#elif defined(a32)
Address >>= 12;
this->PTEIndex = Address & 0x3FF;
Address >>= 10;
this->PDEIndex = Address & 0x3FF;
#elif defined(aa64)
#endif
if (VirtualAddress > PAGE_SIZE)
{
assert(
this->PTEIndex != 0 ||
this->PDEIndex != 0
#if defined(a64)
|| this->PDPTEIndex != 0 ||
this->PMLIndex != 0
#endif
);
}
}
}
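A worked example for the a64 path, decomposing the virtual address 0x4032A000 exactly as the constructor above does:

0x4032A000 >> 12 = 0x4032A  ->  PTEIndex   = 0x4032A & 0x1FF = 0x12A
0x4032A    >> 9  = 0x201    ->  PDEIndex   = 0x201   & 0x1FF = 0x001
0x201      >> 9  = 0x001    ->  PDPTEIndex = 0x001   & 0x1FF = 0x001
0x001      >> 9  = 0x000    ->  PMLIndex   = 0x000

which is consistent with 0x4032A000 = 1 GiB + 2 MiB + 0x12A * 4 KiB.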

356
Kernel/core/memory/pmm.cpp Normal file
View File

@ -0,0 +1,356 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <acpi.hpp>
#include <debug.h>
#include <elf.h>
#ifdef DEBUG
#include <uart.hpp>
#endif
#include "../../kernel.h"
namespace Memory
{
uint64_t Physical::GetTotalMemory()
{
return this->TotalMemory.load();
}
uint64_t Physical::GetFreeMemory()
{
return this->FreeMemory.load();
}
uint64_t Physical::GetReservedMemory()
{
return this->ReservedMemory.load();
}
uint64_t Physical::GetUsedMemory()
{
return this->UsedMemory.load();
}
bool Physical::SwapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::SwapPages(void *Address, size_t PageCount)
{
for (size_t i = 0; i < PageCount; i++)
{
if (!this->SwapPage((void *)((uintptr_t)Address + (i * PAGE_SIZE))))
return false;
}
return false;
}
bool Physical::UnswapPage(void *Address)
{
fixme("%p", Address);
return false;
}
bool Physical::UnswapPages(void *Address, size_t PageCount)
{
for (size_t i = 0; i < PageCount; i++)
{
if (!this->UnswapPage((void *)((uintptr_t)Address + (i * PAGE_SIZE))))
return false;
}
return false;
}
void *Physical::RequestPage()
{
SmartLock(this->MemoryLock);
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
if (this->SwapPage((void *)(PageBitmapIndex * PAGE_SIZE)))
{
this->LockPage((void *)(PageBitmapIndex * PAGE_SIZE));
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
if (TaskManager && !TaskManager->IsPanic())
{
error("Out of memory! Killing current process...");
TaskManager->KillProcess(thisProcess, Tasking::KILL_OOM);
TaskManager->Yield();
}
error("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
TO_MiB(FreeMemory.load()), TO_MiB(UsedMemory.load()), TO_MiB(ReservedMemory.load()));
KPrint("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
TO_MiB(FreeMemory.load()), TO_MiB(UsedMemory.load()), TO_MiB(ReservedMemory.load()));
debug("Raw values: free %#lx used %#lx reserved %#lx",
FreeMemory.load(), UsedMemory.load(), ReservedMemory.load());
CPU::Stop();
__builtin_unreachable();
}
void *Physical::RequestPages(size_t Count)
{
SmartLock(this->MemoryLock);
for (; PageBitmapIndex < PageBitmap.Size * 8; PageBitmapIndex++)
{
if (PageBitmap[PageBitmapIndex] == true)
continue;
for (uint64_t Index = PageBitmapIndex; Index < PageBitmap.Size * 8; Index++)
{
if (PageBitmap[Index] == true)
continue;
for (size_t i = 0; i < Count; i++)
{
if (PageBitmap[Index + i] == true)
goto NextPage;
}
this->LockPages((void *)(Index * PAGE_SIZE), Count);
return (void *)(Index * PAGE_SIZE);
NextPage:
Index += Count;
continue;
}
}
if (this->SwapPages((void *)(PageBitmapIndex * PAGE_SIZE), Count))
{
this->LockPages((void *)(PageBitmapIndex * PAGE_SIZE), Count);
return (void *)(PageBitmapIndex * PAGE_SIZE);
}
if (TaskManager && !TaskManager->IsPanic())
{
error("Out of memory! Killing current process...");
TaskManager->KillProcess(thisProcess, Tasking::KILL_OOM);
TaskManager->Yield();
}
error("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
TO_MiB(FreeMemory.load()), TO_MiB(UsedMemory.load()), TO_MiB(ReservedMemory.load()));
KPrint("Out of memory! (Free: %ld MiB; Used: %ld MiB; Reserved: %ld MiB)",
TO_MiB(FreeMemory.load()), TO_MiB(UsedMemory.load()), TO_MiB(ReservedMemory.load()));
debug("Raw values: free %#lx used %#lx reserved %#lx",
FreeMemory.load(), UsedMemory.load(), ReservedMemory.load());
CPU::Halt(true);
__builtin_unreachable();
}
void Physical::FreePage(void *Address)
{
SmartLock(this->MemoryLock);
if (unlikely(Address == nullptr))
{
warn("Null pointer passed to FreePage.");
return;
}
size_t Index = (size_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
{
warn("Tried to free an already free page. (%p)",
Address);
return;
}
if (PageBitmap.Set(Index, false))
{
FreeMemory.fetch_add(PAGE_SIZE);
UsedMemory.fetch_sub(PAGE_SIZE);
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
void Physical::FreePages(void *Address, size_t Count)
{
if (unlikely(Address == nullptr || Count == 0))
{
warn("%s%s%s passed to FreePages.", Address == nullptr ? "Null pointer " : "", Address == nullptr && Count == 0 ? "and " : "", Count == 0 ? "Zero count" : "");
return;
}
for (size_t t = 0; t < Count; t++)
this->FreePage((void *)((uintptr_t)Address + (t * PAGE_SIZE)));
}
void Physical::LockPage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to lock null address.");
uintptr_t Index = (uintptr_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory.fetch_sub(PAGE_SIZE);
UsedMemory.fetch_add(PAGE_SIZE);
}
}
void Physical::LockPages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to lock %s%s.",
Address ? "null address" : "",
PageCount ? "0 pages" : "");
for (size_t i = 0; i < PageCount; i++)
this->LockPage((void *)((uintptr_t)Address + (i * PAGE_SIZE)));
}
void Physical::ReservePage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to reserve null address.");
uintptr_t Index = (Address == NULL) ? 0 : (uintptr_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory.fetch_sub(PAGE_SIZE);
ReservedMemory.fetch_add(PAGE_SIZE);
}
}
void Physical::ReservePages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to reserve %s%s.",
Address ? "null address" : "",
PageCount ? "0 pages" : "");
for (size_t t = 0; t < PageCount; t++)
{
uintptr_t Index = ((uintptr_t)Address + (t * PAGE_SIZE)) / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == true))
return;
if (PageBitmap.Set(Index, true))
{
FreeMemory.fetch_sub(PAGE_SIZE);
ReservedMemory.fetch_add(PAGE_SIZE);
}
}
}
void Physical::UnreservePage(void *Address)
{
if (unlikely(Address == nullptr))
warn("Trying to unreserve null address.");
uintptr_t Index = (Address == NULL) ? 0 : (uintptr_t)Address / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
return;
if (PageBitmap.Set(Index, false))
{
FreeMemory.fetch_add(PAGE_SIZE);
ReservedMemory.fetch_sub(PAGE_SIZE);
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
void Physical::UnreservePages(void *Address, size_t PageCount)
{
if (unlikely(Address == nullptr || PageCount == 0))
warn("Trying to unreserve %s%s.",
Address ? "null address" : "",
PageCount ? "0 pages" : "");
for (size_t t = 0; t < PageCount; t++)
{
uintptr_t Index = ((uintptr_t)Address + (t * PAGE_SIZE)) / PAGE_SIZE;
if (unlikely(PageBitmap[Index] == false))
return;
if (PageBitmap.Set(Index, false))
{
FreeMemory.fetch_add(PAGE_SIZE);
ReservedMemory.fetch_sub(PAGE_SIZE);
if (PageBitmapIndex > Index)
PageBitmapIndex = Index;
}
}
}
void Physical::Init()
{
SmartLock(this->MemoryLock);
uint64_t MemorySize = bInfo.Memory.Size;
debug("Memory size: %lld bytes (%ld pages)",
MemorySize, TO_PAGES(MemorySize));
TotalMemory.store(MemorySize);
FreeMemory.store(MemorySize);
size_t BitmapSize = (size_t)(MemorySize / PAGE_SIZE) / 8 + 1;
uintptr_t BitmapAddress = 0x0;
size_t BitmapAddressSize = 0;
FindBitmapRegion(BitmapAddress, BitmapAddressSize);
if (BitmapAddress == 0x0)
{
error("No free memory found!");
CPU::Stop();
}
debug("Initializing Bitmap at %p-%p (%d Bytes)",
BitmapAddress,
(void *)(BitmapAddress + BitmapSize),
BitmapSize);
PageBitmap.Size = BitmapSize;
PageBitmap.Buffer = (uint8_t *)BitmapAddress;
for (size_t i = 0; i < BitmapSize; i++)
*(uint8_t *)(PageBitmap.Buffer + i) = 0;
ReserveEssentials();
}
Physical::Physical() {}
Physical::~Physical() {}
}
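As a usage sketch (illustrative only), the global KernelAllocator hands out physical frames whose addresses are simply the bitmap index multiplied by PAGE_SIZE:

static void PhysicalExample(void)
{
	void *frame = KernelAllocator.RequestPage();  /* one 4 KiB frame */
	void *run = KernelAllocator.RequestPages(4);  /* four contiguous frames */
	KernelAllocator.FreePages(run, 4);
	KernelAllocator.FreePage(frame);
	debug("Free memory: %ld MiB", TO_MiB(KernelAllocator.GetFreeMemory()));
}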

View File

@ -0,0 +1,207 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <acpi.hpp>
#include <debug.h>
#include <elf.h>
#ifdef DEBUG
#include <uart.hpp>
#endif
#include "../../kernel.h"
namespace Memory
{
__no_sanitize("alignment") void Physical::ReserveEssentials()
{
debug("Reserving pages...");
/* The bootloader won't give us the entire mapping, so we
reserve everything and then unreserve the usable pages. */
this->ReservePages(0, TO_PAGES(bInfo.Memory.Size));
debug("Unreserving usable pages...");
for (uint64_t i = 0; i < bInfo.Memory.Entries; i++)
{
if (bInfo.Memory.Entry[i].Type == Usable)
{
if (uintptr_t(bInfo.Memory.Entry[i].BaseAddress) <= 0xFFFFF)
continue;
this->UnreservePages(bInfo.Memory.Entry[i].BaseAddress,
TO_PAGES(bInfo.Memory.Entry[i].Length));
}
}
debug("Reserving 0x0-0xFFFFF range...");
// this->ReservePage((void *)0x0); /* Trampoline stack, gdt, idt, etc... */
// this->ReservePages((void *)0x2000, 4); /* TRAMPOLINE_START */
/* Reserve the lower part of memory. (0x0-0xFFFFF)
This includes: BIOS, EBDA, VGA, SMP, etc...
https://wiki.osdev.org/Memory_Map_(x86)
*/
this->ReservePages((void *)0x0, TO_PAGES(0xFFFFF));
debug("Reserving bitmap region %#lx-%#lx...",
PageBitmap.Buffer,
(void *)((uintptr_t)PageBitmap.Buffer + PageBitmap.Size));
this->ReservePages(PageBitmap.Buffer, TO_PAGES(PageBitmap.Size));
debug("Reserving kernel physical region %#lx-%#lx...",
bInfo.Kernel.PhysicalBase,
(void *)((uintptr_t)bInfo.Kernel.PhysicalBase + bInfo.Kernel.Size));
this->ReservePages(bInfo.Kernel.PhysicalBase, TO_PAGES(bInfo.Kernel.Size));
debug("Reserving kernel file and symbols...");
if (bInfo.Kernel.FileBase)
this->ReservePages(bInfo.Kernel.FileBase, TO_PAGES(bInfo.Kernel.Size));
if (bInfo.Kernel.Symbols.Num &&
bInfo.Kernel.Symbols.EntSize &&
bInfo.Kernel.Symbols.Shndx)
{
char *sections = r_cst(char *, bInfo.Kernel.Symbols.Sections);
debug("Reserving sections region %#lx-%#lx...",
sections,
(void *)((uintptr_t)sections + bInfo.Kernel.Symbols.EntSize *
bInfo.Kernel.Symbols.Num));
this->ReservePages(sections, TO_PAGES(bInfo.Kernel.Symbols.EntSize *
bInfo.Kernel.Symbols.Num));
Elf_Sym *Symbols = nullptr;
uint8_t *StringAddress = nullptr;
#if defined(a64) || defined(aa64)
Elf64_Xword SymbolSize = 0;
Elf64_Xword StringSize = 0;
#elif defined(a32)
Elf32_Word SymbolSize = 0;
Elf32_Word StringSize = 0;
#endif
for (size_t i = 0; i < bInfo.Kernel.Symbols.Num; ++i)
{
Elf_Shdr *sym = (Elf_Shdr *)&sections[bInfo.Kernel.Symbols.EntSize * i];
Elf_Shdr *str = (Elf_Shdr *)&sections[bInfo.Kernel.Symbols.EntSize *
sym->sh_link];
if (sym->sh_type == SHT_SYMTAB &&
str->sh_type == SHT_STRTAB)
{
Symbols = (Elf_Sym *)sym->sh_addr;
StringAddress = (uint8_t *)str->sh_addr;
SymbolSize = (int)sym->sh_size;
StringSize = (int)str->sh_size;
debug("Symbol table found, %d entries (%ld KiB)",
SymbolSize / sym->sh_entsize,
TO_KiB(SymbolSize));
this->ReservePages(Symbols, TO_PAGES(SymbolSize));
break;
}
}
if (Symbols)
{
debug("Reserving symbol table region %#lx-%#lx...",
Symbols, (void *)((uintptr_t)Symbols + SymbolSize));
this->ReservePages(Symbols, TO_PAGES(SymbolSize));
}
if (StringAddress)
{
debug("Reserving string table region %#lx-%#lx...",
StringAddress, (void *)((uintptr_t)StringAddress + StringSize));
this->ReservePages(StringAddress, TO_PAGES(StringSize));
}
}
debug("Reserving kernel modules...");
for (uint64_t i = 0; i < MAX_MODULES; i++)
{
if (bInfo.Modules[i].Address == 0x0)
continue;
debug("Reserving module %s (%#lx-%#lx)...", bInfo.Modules[i].CommandLine,
bInfo.Modules[i].Address,
(void *)((uintptr_t)bInfo.Modules[i].Address + bInfo.Modules[i].Size));
this->ReservePages((void *)bInfo.Modules[i].Address,
TO_PAGES(bInfo.Modules[i].Size));
}
#if defined(a86)
if (bInfo.RSDP)
{
debug("Reserving RSDT region %#lx-%#lx...", bInfo.RSDP,
(void *)((uintptr_t)bInfo.RSDP + sizeof(BootInfo::RSDPInfo)));
this->ReservePages(bInfo.RSDP, TO_PAGES(sizeof(BootInfo::RSDPInfo)));
ACPI::ACPI::ACPIHeader *ACPIPtr;
bool XSDT = false;
if (bInfo.RSDP->Revision >= 2 && bInfo.RSDP->XSDTAddress)
{
ACPIPtr = (ACPI::ACPI::ACPIHeader *)bInfo.RSDP->XSDTAddress;
XSDT = true;
}
else
ACPIPtr = (ACPI::ACPI::ACPIHeader *)(uintptr_t)bInfo.RSDP->RSDTAddress;
debug("Reserving RSDT...");
this->ReservePages((void *)bInfo.RSDP, TO_PAGES(sizeof(BootInfo::RSDPInfo)));
if (!Memory::Virtual().Check(ACPIPtr))
{
error("ACPI table is located in an unmapped region.");
return;
}
size_t TableSize = ((ACPIPtr->Length - sizeof(ACPI::ACPI::ACPIHeader)) /
(XSDT ? 8 : 4));
debug("Reserving %d ACPI tables...", TableSize);
for (size_t t = 0; t < TableSize; t++)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wint-to-pointer-cast"
// TODO: Should I be concerned about unaligned memory access?
ACPI::ACPI::ACPIHeader *SDTHdr = nullptr;
if (XSDT)
SDTHdr =
(ACPI::ACPI::ACPIHeader *)(*(uint64_t *)((uint64_t)ACPIPtr +
sizeof(ACPI::ACPI::ACPIHeader) +
(t * 8)));
else
SDTHdr =
(ACPI::ACPI::ACPIHeader *)(*(uint32_t *)((uint64_t)ACPIPtr +
sizeof(ACPI::ACPI::ACPIHeader) +
(t * 4)));
#pragma GCC diagnostic pop
this->ReservePages(SDTHdr, TO_PAGES(SDTHdr->Length));
}
}
#elif defined(aa64)
#endif
}
}

View File

@ -0,0 +1,164 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <debug.h>
namespace Memory
{
bool StackGuard::Expand(uintptr_t FaultAddress)
{
if (!this->UserMode)
assert(!"Kernel mode stack expansion not implemented");
if (FaultAddress < USER_STACK_END ||
FaultAddress > USER_STACK_BASE)
{
info("Fault address %#lx is not in range of stack %#lx - %#lx",
FaultAddress, USER_STACK_END, USER_STACK_BASE);
return false; /* It's not about the stack. */
}
uintptr_t roundFA = ROUND_DOWN(FaultAddress, PAGE_SIZE);
uintptr_t diff = (uintptr_t)this->StackBottom - roundFA;
size_t stackPages = TO_PAGES(diff);
stackPages = stackPages < 1 ? 1 : stackPages;
debug("roundFA: %#lx, sb: %#lx, diff: %#lx, stackPages: %d",
roundFA, this->StackBottom, diff, stackPages);
void *AllocatedStack = vma->RequestPages(stackPages);
debug("AllocatedStack: %#lx", AllocatedStack);
for (size_t i = 0; i < stackPages; i++)
{
void *vAddress = (void *)((uintptr_t)this->StackBottom - (i * PAGE_SIZE));
void *pAddress = (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE));
vma->Map(vAddress, pAddress, PAGE_SIZE, PTFlag::RW | PTFlag::US);
AllocatedPages ap = {
.PhysicalAddress = pAddress,
.VirtualAddress = vAddress,
};
AllocatedPagesList.push_back(ap);
debug("Mapped %#lx to %#lx", pAddress, vAddress);
}
this->StackBottom = (void *)((uintptr_t)this->StackBottom - (stackPages * PAGE_SIZE));
this->Size += stackPages * PAGE_SIZE;
debug("Stack expanded to %#lx", this->StackBottom);
this->Expanded = true;
return true;
}
void StackGuard::Fork(StackGuard *Parent)
{
if (!this->UserMode)
assert(!"Kernel mode stack fork not implemented");
this->UserMode = Parent->UserMode;
this->StackBottom = Parent->StackBottom;
this->StackTop = Parent->StackTop;
this->StackPhysicalBottom = Parent->StackPhysicalBottom;
this->StackPhysicalTop = Parent->StackPhysicalTop;
this->Size = Parent->Size;
this->Expanded = Parent->Expanded;
std::list<AllocatedPages> ParentAllocatedPages = Parent->GetAllocatedPages();
foreach (auto Page in ParentAllocatedPages)
{
void *NewPhysical = vma->RequestPages(1);
debug("Forking address %#lx to %#lx", Page.PhysicalAddress, NewPhysical);
memcpy(NewPhysical, Page.PhysicalAddress, PAGE_SIZE);
vma->Remap(Page.VirtualAddress, NewPhysical, PTFlag::RW | PTFlag::US);
AllocatedPages ap = {
.PhysicalAddress = NewPhysical,
.VirtualAddress = Page.VirtualAddress,
};
AllocatedPagesList.push_back(ap);
debug("Mapped %#lx to %#lx", NewPhysical, Page.VirtualAddress);
}
}
StackGuard::StackGuard(bool User, VirtualMemoryArea *_vma)
{
this->UserMode = User;
this->vma = _vma;
if (this->UserMode)
{
void *AllocatedStack = vma->RequestPages(TO_PAGES(USER_STACK_SIZE));
this->StackBottom = (void *)USER_STACK_BASE;
this->StackTop = (void *)(USER_STACK_BASE + USER_STACK_SIZE);
this->StackPhysicalBottom = AllocatedStack;
this->StackPhysicalTop = (void *)((uintptr_t)AllocatedStack + USER_STACK_SIZE);
this->Size = USER_STACK_SIZE;
debug("AllocatedStack: %#lx", AllocatedStack);
for (size_t i = 0; i < TO_PAGES(USER_STACK_SIZE); i++)
{
void *vAddress = (void *)(USER_STACK_BASE + (i * PAGE_SIZE));
void *pAddress = (void *)((uintptr_t)AllocatedStack + (i * PAGE_SIZE));
vma->Map(vAddress, pAddress, PAGE_SIZE, PTFlag::RW | PTFlag::US);
AllocatedPages ap = {
.PhysicalAddress = pAddress,
.VirtualAddress = vAddress,
};
AllocatedPagesList.push_back(ap);
debug("Mapped %#lx to %#lx", pAddress, vAddress);
}
}
else
{
Memory::KernelStackManager::StackAllocation sa = StackManager.DetailedAllocate(LARGE_STACK_SIZE);
this->StackBottom = sa.VirtualAddress;
this->StackTop = (void *)((uintptr_t)this->StackBottom + LARGE_STACK_SIZE);
this->StackPhysicalBottom = sa.PhysicalAddress;
this->StackPhysicalTop = (void *)((uintptr_t)this->StackPhysicalBottom + LARGE_STACK_SIZE);
this->Size = LARGE_STACK_SIZE;
debug("StackBottom: %#lx", this->StackBottom);
for (size_t i = 0; i < TO_PAGES(LARGE_STACK_SIZE); i++)
{
AllocatedPages pa = {
.PhysicalAddress = (void *)((uintptr_t)this->StackPhysicalBottom + (i * PAGE_SIZE)),
.VirtualAddress = (void *)((uintptr_t)this->StackBottom + (i * PAGE_SIZE)),
};
AllocatedPagesList.push_back(pa);
}
}
debug("Allocated stack at %#lx", this->StackBottom);
debug("Stack Range: %#lx - %#lx", this->StackBottom, this->StackTop);
}
StackGuard::~StackGuard()
{
if (!this->UserMode)
{
for (auto Page : this->AllocatedPagesList)
StackManager.Free(Page.VirtualAddress);
}
/* VMA will free the stack */
}
}
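A sketch of how a page-fault handler might use Expand (illustrative only; constructing the guard itself is shown in the constructor above):

static bool HandleStackFault(StackGuard *guard, uintptr_t FaultAddress)
{
	/* Returns true only when the faulting address lies inside the user stack
	   window and fresh pages were mapped in; false means it was not stack growth. */
	return guard->Expand(FaultAddress);
}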

142
Kernel/core/memory/va.cpp Normal file
View File

@ -0,0 +1,142 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory/table.hpp>
#include <memory/va.hpp>
#include <cpu.hpp>
#include <debug.h>
#include <bitset>
#include "../../kernel.h"
namespace Memory
{
VirtualAllocation::AllocatedPages VirtualAllocation::RequestPages(size_t Count)
{
func("%lld", Count);
void *pAddress = KernelAllocator.RequestPages(Count);
memset(pAddress, 0, FROM_PAGES(Count));
Virtual vmm(this->Table);
SmartLock(MgrLock);
forItr(itr, AllocatedPagesList)
{
if (likely(itr->Free == false))
continue;
if (itr->PageCount == Count)
{
itr->Free = false;
vmm.Map(itr->VirtualAddress, pAddress, FROM_PAGES(Count), RW | KRsv | G);
return *itr;
}
if (itr->PageCount > Count)
{
/* Split the block */
void *vAddress = itr->VirtualAddress;
void *pAddress = itr->PhysicalAddress;
size_t PageCount = itr->PageCount;
AllocatedPagesList.erase(itr);
AllocatedPagesList.push_back({(void *)((uintptr_t)pAddress + FROM_PAGES(Count)),
(void *)((uintptr_t)vAddress + FROM_PAGES(Count)),
PageCount - Count, true});
AllocatedPagesList.push_back({pAddress, vAddress, Count, false});
vmm.Map(vAddress, pAddress, FROM_PAGES(Count), RW | KRsv | G);
debug("Split region %#lx-%#lx", vAddress, (uintptr_t)vAddress + FROM_PAGES(Count));
debug("Free region %#lx-%#lx", (uintptr_t)vAddress + FROM_PAGES(Count), (uintptr_t)vAddress + FROM_PAGES(PageCount - Count));
return AllocatedPagesList.back();
}
}
/* Allocate new region */
void *vAddress = CurrentBase;
vmm.Map(vAddress, pAddress, FROM_PAGES(Count), RW | KRsv | G);
AllocatedPagesList.push_back({pAddress, vAddress, Count, false});
debug("New region %#lx-%#lx", vAddress, (uintptr_t)vAddress + FROM_PAGES(Count));
CurrentBase = (void *)((uintptr_t)CurrentBase + FROM_PAGES(Count));
assert(USER_ALLOC_END > (uintptr_t)CurrentBase);
return AllocatedPagesList.back();
}
void VirtualAllocation::FreePages(void *Address, size_t Count)
{
func("%#lx, %lld", Address, Count);
SmartLock(MgrLock);
foreach (auto &apl in AllocatedPagesList)
{
if (apl.VirtualAddress != Address)
continue;
if (apl.PageCount != Count)
{
error("Page count mismatch! (Allocated: %lld, Requested: %lld)",
apl.PageCount, Count);
return;
}
Virtual vmm(this->Table);
for (size_t i = 0; i < Count; i++)
{
void *AddressToUnmap = (void *)((uintptr_t)Address + FROM_PAGES(i));
vmm.Unmap(AddressToUnmap);
}
KernelAllocator.FreePages(Address, Count);
apl.Free = true;
debug("Freed region %#lx-%#lx", Address, (uintptr_t)Address + FROM_PAGES(Count));
return;
}
}
void VirtualAllocation::MapTo(AllocatedPages ap, PageTable *TargetTable)
{
func("%#lx, %#lx", ap.VirtualAddress, TargetTable);
Virtual vmm(TargetTable);
vmm.Map(ap.VirtualAddress, ap.PhysicalAddress, FROM_PAGES(ap.PageCount), RW | KRsv | G);
}
VirtualAllocation::VirtualAllocation(void *Base)
: BaseAddress(Base), CurrentBase(Base),
Table((PageTable *)CPU::PageTable())
{
func("%#lx", Base);
}
VirtualAllocation::~VirtualAllocation()
{
/* No need to remap pages, the page table will be destroyed */
Virtual vmm(this->Table);
foreach (auto ap in AllocatedPagesList)
{
KernelAllocator.FreePages(ap.PhysicalAddress, ap.PageCount);
for (size_t i = 0; i < ap.PageCount; i++)
{
void *AddressToUnmap = (void *)((uintptr_t)ap.VirtualAddress + FROM_PAGES(i));
vmm.Unmap(AddressToUnmap);
}
}
}
}
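A usage sketch for the arena (illustrative only; the arena reference and the target table are assumed to come from the caller): regions come back zeroed and mapped RW | KRsv | G in the arena's own table, and can be mirrored into another one:

static void ArenaExample(VirtualAllocation &va, PageTable *target)
{
	auto region = va.RequestPages(4);        /* 16 KiB, zeroed and mapped */
	va.MapTo(region, target);                /* mirror the same region into another table */
	va.FreePages(region.VirtualAddress, 4);  /* mark the block free for reuse */
}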

494
Kernel/core/memory/vma.cpp Normal file
View File

@ -0,0 +1,494 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory/vma.hpp>
#include <memory/table.hpp>
#include <cpu.hpp>
#include <debug.h>
#include <bitset>
#include "../../kernel.h"
namespace Memory
{
uint64_t VirtualMemoryArea::GetAllocatedMemorySize()
{
SmartLock(MgrLock);
uint64_t Size = 0;
foreach (auto ap in AllocatedPagesList)
Size += ap.PageCount;
return FROM_PAGES(Size);
}
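/* Allocate Count physical pages, zero them and identity-map them into this VMA's
page table. User makes the pages accessible from user mode; Protect marks them
kernel-reserved so later FreePages/DetachAddress calls refuse to touch them. */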
void *VirtualMemoryArea::RequestPages(size_t Count, bool User, bool Protect)
{
func("%lld, %s, %s", Count,
User ? "true" : "false",
Protect ? "true" : "false");
void *Address = KernelAllocator.RequestPages(Count);
if (Address == nullptr)
return nullptr;
memset(Address, 0, Count * PAGE_SIZE);
int Flags = PTFlag::RW;
if (User)
Flags |= PTFlag::US;
if (Protect)
Flags |= PTFlag::KRsv;
Virtual vmm(this->Table);
SmartLock(MgrLock);
vmm.Map(Address, Address, FROM_PAGES(Count), Flags);
AllocatedPagesList.push_back({Address, Count, Protect});
debug("%#lx +{%#lx, %lld}", this, Address, Count);
return Address;
}
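/* Free an entire region previously returned by RequestPages: the mappings are
restored to plain kernel RW and the pages are handed back to the kernel
allocator. Protected regions and partial frees are rejected. */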
void VirtualMemoryArea::FreePages(void *Address, size_t Count)
{
func("%#lx, %lld", Address, Count);
SmartLock(MgrLock);
forItr(itr, AllocatedPagesList)
{
if (itr->Address != Address)
continue;
if (itr->Protected)
{
error("Address %#lx is protected", Address);
return;
}
/** TODO: Advanced checks. Allow if the page count is less than the requested one.
* This will allow the user to free only a part of the allocated pages.
*
* But this will be in a separate function because we need to specify if we
* want to free from the start or from the end and return the new address.
*/
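/* A possible shape for such a partial free (hypothetical, not implemented here):
void *FreePagesPartial(void *Address, size_t Count, bool FromEnd);
returning the address of the part that stays allocated. */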
if (itr->PageCount != Count)
{
error("Page count mismatch! (Allocated: %lld, Requested: %lld)",
itr->PageCount, Count);
return;
}
Virtual vmm(this->Table);
for (size_t i = 0; i < Count; i++)
{
void *AddressToMap = (void *)((uintptr_t)Address + (i * PAGE_SIZE));
vmm.Remap(AddressToMap, AddressToMap, PTFlag::RW);
}
KernelAllocator.FreePages(Address, Count);
AllocatedPagesList.erase(itr);
debug("%#lx -{%#lx, %lld}", this, Address, Count);
return;
}
}
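/* Remove a region from the tracking list without unmapping or freeing its pages.
Protected regions cannot be detached. */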
void VirtualMemoryArea::DetachAddress(void *Address)
{
func("%#lx", Address);
SmartLock(MgrLock);
forItr(itr, AllocatedPagesList)
{
if (itr->Address == Address)
{
if (itr->Protected)
{
error("Address %#lx is protected", Address);
return;
}
AllocatedPagesList.erase(itr);
return;
}
}
}
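/* Reserve a copy-on-write region. With a null Address a fresh user region is
allocated immediately; otherwise the range is remapped with only the CoW flag
and no backing frames, and the requested permissions are recorded in
SharedRegions so HandleCoW can apply them when the page first faults. */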
void *VirtualMemoryArea::CreateCoWRegion(void *Address,
size_t Length,
bool Read, bool Write, bool Exec,
bool Fixed, bool Shared)
{
func("%#lx, %lld, %s, %s, %s, %s, %s", Address, Length,
Read ? "true" : "false",
Write ? "true" : "false",
Exec ? "true" : "false",
Fixed ? "true" : "false",
Shared ? "true" : "false");
Virtual vmm(this->Table);
// FIXME
// for (uintptr_t j = uintptr_t(Address);
// j < uintptr_t(Address) + Length;
// j += PAGE_SIZE)
// {
// if (vmm.Check((void *)j, G))
// {
// if (Fixed)
// return (void *)-EINVAL;
// Address = (void *)(j + PAGE_SIZE);
// }
// }
bool AnyAddress = Address == nullptr;
debug("AnyAddress: %s", AnyAddress ? "true" : "false");
if (AnyAddress)
{
Address = this->RequestPages(TO_PAGES(Length), true);
debug("Allocated %#lx-%#lx for pt %#lx",
Address, (uintptr_t)Address + Length, this->Table);
return Address;
}
SmartLock(MgrLock);
if (vmm.Check(Address, PTFlag::KRsv))
{
error("Cannot create CoW region at %#lx", Address);
return (void *)-EPERM;
}
debug("unmapping %#lx-%#lx", Address, (uintptr_t)Address + Length);
vmm.Unmap(Address, Length);
debug("mapping cow at %#lx-%#lx", Address, (uintptr_t)Address + Length);
vmm.Map(Address, nullptr, Length, PTFlag::CoW);
debug("CoW region created at range %#lx-%#lx for pt %#lx",
Address, (uintptr_t)Address + Length, this->Table);
SharedRegion sr{
.Address = Address,
.Read = Read,
.Write = Write,
.Exec = Exec,
.Fixed = Fixed,
.Shared = Shared,
.Length = Length,
.ReferenceCount = 0,
};
SharedRegions.push_back(sr);
debug("CoW region created at %#lx for pt %#lx",
Address, this->Table);
return Address;
}
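/* Page-fault helper for CoW pages: if the faulting address lies in a known
CoW region, allocate a fresh zeroed page, apply the stored permissions to the
PTE and clear its CoW flag. Returns true when the fault was resolved. */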
bool VirtualMemoryArea::HandleCoW(uintptr_t PFA)
{
func("%#lx", PFA);
Virtual vmm(this->Table);
PageTableEntry *pte = vmm.GetPTE((void *)PFA);
debug("ctx: %#lx", this);
if (!pte)
{
/* Unmapped page */
debug("PTE is null!");
return false;
}
if (!pte->CopyOnWrite)
{
debug("PFA %#lx is not CoW (pt %#lx) (flags %#lx)",
PFA, this->Table, pte->raw);
return false;
}
foreach (auto sr in SharedRegions)
{
uintptr_t Start = (uintptr_t)sr.Address;
uintptr_t End = (uintptr_t)sr.Address + sr.Length;
debug("Start: %#lx, End: %#lx (PFA: %#lx)",
Start, End, PFA);
if (PFA >= Start && PFA < End)
{
if (sr.Shared)
{
fixme("Shared CoW");
return false;
}
void *pAddr = this->RequestPages(1);
if (pAddr == nullptr)
return false;
memset(pAddr, 0, PAGE_SIZE);
assert(pte->Present == true);
pte->ReadWrite = sr.Write;
pte->UserSupervisor = sr.Read;
pte->ExecuteDisable = !sr.Exec; /* NX is the inverse of the executable permission */
pte->CopyOnWrite = false;
debug("PFA %#lx is CoW (pt %#lx, flags %#lx)",
PFA, this->Table, pte->raw);
#if defined(a64)
CPU::x64::invlpg((void *)PFA);
#elif defined(a32)
CPU::x32::invlpg((void *)PFA);
#endif
return true;
}
}
debug("%#lx not found in CoW regions", PFA);
return false;
}
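/* Release every tracked allocation: restore the mappings to plain kernel RW,
return the pages to the kernel allocator and clear the list. */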
void VirtualMemoryArea::FreeAllPages()
{
SmartLock(MgrLock);
foreach (auto ap in AllocatedPagesList)
{
KernelAllocator.FreePages(ap.Address, ap.PageCount);
Virtual vmm(this->Table);
for (size_t i = 0; i < ap.PageCount; i++)
vmm.Remap((void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
(void *)((uintptr_t)ap.Address + (i * PAGE_SIZE)),
PTFlag::RW);
}
AllocatedPagesList.clear();
}
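/* Duplicate the parent's address space into this VMA: regular allocations are
deep-copied page by page with the parent's PTE flags, CoW regions are
re-created and their contents copied, and protected regions are skipped. */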
void VirtualMemoryArea::Fork(VirtualMemoryArea *Parent)
{
func("%#lx", Parent);
assert(Parent);
debug("parent apl:%d sr:%d [P:%#lx C:%#lx]",
Parent->AllocatedPagesList.size(), Parent->SharedRegions.size(),
Parent->Table, this->Table);
debug("ctx: this: %#lx parent: %#lx", this, Parent);
Virtual vmm(this->Table);
SmartLock(MgrLock);
foreach (auto &ap in Parent->AllocatedPagesList)
{
if (ap.Protected)
{
debug("Protected %#lx-%#lx", ap.Address,
(uintptr_t)ap.Address + (ap.PageCount * PAGE_SIZE));
continue; /* We don't want to modify these pages. */
}
MgrLock.Unlock();
void *Address = this->RequestPages(ap.PageCount);
MgrLock.Lock(__FUNCTION__);
if (Address == nullptr)
return;
memcpy(Address, ap.Address, FROM_PAGES(ap.PageCount));
for (size_t i = 0; i < ap.PageCount; i++)
{
void *AddressToMap = (void *)((uintptr_t)ap.Address + (i * PAGE_SIZE));
void *RealAddress = (void *)((uintptr_t)Address + (i * PAGE_SIZE));
#if defined(a86)
PageTableEntry *pte = vmm.GetPTE(AddressToMap);
uintptr_t Flags = 0;
Flags |= pte->Present ? (uintptr_t)PTFlag::P : 0;
Flags |= pte->ReadWrite ? (uintptr_t)PTFlag::RW : 0;
Flags |= pte->UserSupervisor ? (uintptr_t)PTFlag::US : 0;
Flags |= pte->CopyOnWrite ? (uintptr_t)PTFlag::CoW : 0;
Flags |= pte->KernelReserve ? (uintptr_t)PTFlag::KRsv : 0;
debug("Mapping %#lx to %#lx (flags %s/%s/%s/%s/%s)",
RealAddress, AddressToMap,
Flags & PTFlag::P ? "P" : "-",
Flags & PTFlag::RW ? "RW" : "-",
Flags & PTFlag::US ? "US" : "-",
Flags & PTFlag::CoW ? "CoW" : "-",
Flags & PTFlag::KRsv ? "KRsv" : "-");
MgrLock.Unlock();
this->Map(AddressToMap, RealAddress, PAGE_SIZE, Flags);
MgrLock.Lock(__FUNCTION__);
#else
#error "Not implemented"
#endif
}
debug("Forked %#lx-%#lx", ap.Address,
(uintptr_t)ap.Address + (ap.PageCount * PAGE_SIZE));
}
foreach (auto &sr in Parent->SharedRegions)
{
MgrLock.Unlock();
void *Address = this->CreateCoWRegion(sr.Address, sr.Length,
sr.Read, sr.Write, sr.Exec,
sr.Fixed, sr.Shared);
MgrLock.Lock(__FUNCTION__);
if (Address == nullptr)
return;
memcpy(Address, sr.Address, sr.Length);
debug("Forked CoW region %#lx-%#lx", sr.Address,
(uintptr_t)sr.Address + sr.Length);
}
}
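/* Map a virtual range to a physical range after verifying that no page on
either side is kernel-reserved. */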
int VirtualMemoryArea::Map(void *VirtualAddress, void *PhysicalAddress,
size_t Length, uint64_t Flags)
{
Virtual vmm(this->Table);
SmartLock(MgrLock);
uintptr_t intVirtualAddress = (uintptr_t)VirtualAddress;
uintptr_t intPhysicalAddress = (uintptr_t)PhysicalAddress;
for (uintptr_t va = intVirtualAddress;
va < intVirtualAddress + Length; va += PAGE_SIZE)
{
if (vmm.Check((void *)va, PTFlag::KRsv))
{
error("Virtual address %#lx is reserved", va);
return -EPERM;
}
}
for (uintptr_t va = intPhysicalAddress;
va < intPhysicalAddress + Length; va += PAGE_SIZE)
{
if (vmm.Check((void *)va, PTFlag::KRsv))
{
error("Physical address %#lx is reserved", va);
return -EPERM;
}
}
vmm.Map(VirtualAddress, PhysicalAddress, Length, Flags);
debug("Mapped %#lx-%#lx to %#lx-%#lx (flags %#lx)",
VirtualAddress, intVirtualAddress + Length,
PhysicalAddress, intPhysicalAddress + Length,
Flags);
return 0;
}
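/* Change the mapping of a single page, refusing kernel-reserved addresses. */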
int VirtualMemoryArea::Remap(void *VirtualAddress, void *PhysicalAddress, uint64_t Flags)
{
Virtual vmm(this->Table);
SmartLock(MgrLock);
if (vmm.Check(VirtualAddress, PTFlag::KRsv))
{
error("Virtual address %#lx is reserved", VirtualAddress);
return -EPERM;
}
if (vmm.Check(PhysicalAddress, PTFlag::KRsv))
{
error("Physical address %#lx is reserved", PhysicalAddress);
return -EPERM;
}
vmm.Remap(VirtualAddress, PhysicalAddress, Flags);
debug("Remapped %#lx to %#lx (flags %#lx)",
VirtualAddress, PhysicalAddress, Flags);
return 0;
}
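/* Unmap a virtual range unless any page in it is kernel-reserved. */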
int VirtualMemoryArea::Unmap(void *VirtualAddress, size_t Length)
{
Virtual vmm(this->Table);
SmartLock(MgrLock);
uintptr_t intVirtualAddress = (uintptr_t)VirtualAddress;
for (uintptr_t va = intVirtualAddress;
va < intVirtualAddress + Length; va += PAGE_SIZE)
{
if (vmm.Check((void *)va, PTFlag::KRsv))
{
error("Virtual address %#lx is reserved", va);
return -EPERM;
}
}
vmm.Unmap(VirtualAddress, Length);
debug("Unmapped %#lx-%#lx", VirtualAddress, intVirtualAddress + Length);
return 0;
}
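/* Resolve a user virtual address to its physical address, verifying that every
page in [Address, Address + Length) is user-accessible. Returns nullptr on
failure. */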
void *VirtualMemoryArea::__UserCheckAndGetAddress(void *Address, size_t Length)
{
Virtual vmm(this->Table);
SmartLock(MgrLock);
void *pAddress = this->Table->Get(Address);
if (pAddress == nullptr)
{
debug("Virtual address %#lx returns nullptr", Address);
return nullptr;
}
uintptr_t intAddress = (uintptr_t)Address;
intAddress = ALIGN_DOWN(intAddress, PAGE_SIZE);
for (uintptr_t va = intAddress; va < intAddress + Length; va += PAGE_SIZE)
{
if (vmm.Check((void *)va, PTFlag::US))
continue;
debug("Unable to get address %#lx, page is not user accessible", va);
return nullptr;
}
return pAddress;
}
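/* Check whether Address is user-accessible. Note that only the first page is
tested; Length is currently unused. */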
int VirtualMemoryArea::__UserCheck(void *Address, size_t Length)
{
Virtual vmm(this->Table);
SmartLock(MgrLock);
if (vmm.Check(Address, PTFlag::US))
return 0;
debug("Address %#lx is not user accessible", Address);
return -EFAULT;
}
VirtualMemoryArea::VirtualMemoryArea(PageTable *_Table)
: Table(_Table)
{
SmartLock(MgrLock);
if (_Table == nullptr)
{
if (TaskManager)
{
Tasking::PCB *pcb = thisProcess;
assert(pcb);
this->Table = pcb->PageTable;
}
else
this->Table = (PageTable *)CPU::PageTable();
}
}
VirtualMemoryArea::~VirtualMemoryArea()
{
/* No need to remap pages, the page table will be destroyed */
SmartLock(MgrLock);
foreach (auto ap in AllocatedPagesList)
KernelAllocator.FreePages(ap.Address, ap.PageCount);
}
}


@ -0,0 +1,44 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#include <memory.hpp>
#include <convert.h>
#include <debug.h>
#include "../../kernel.h"
namespace Memory
{
Virtual::Virtual(PageTable *Table)
{
if (Table)
this->pTable = Table;
else
this->pTable = thisPageTable;
// debug("+ %#lx (PT: %#lx) %s", this, this->pTable,
// KernelSymbolTable
// ? KernelSymbolTable->GetSymbol((uintptr_t)__builtin_return_address(0))
// : "Unknown");
}
Virtual::~Virtual()
{
// debug("- %#lx", this);
}
}