Implement std::atomic, remove the old Atomic implementation, and rename vector.hpp to vector

This commit is contained in:
Alex 2023-04-10 05:29:41 +03:00
parent b4dbf2c281
commit 70e11f17e7
Signed by untrusted user who does not match committer: enderice2
GPG Key ID: EACC3AD603BAB4DD
31 changed files with 718 additions and 352 deletions

View File

@ -22,8 +22,8 @@
#include <boot/binfo.h>
#include <ints.hpp>
#include <vector.hpp>
#include <cpu.hpp>
#include <vector>
namespace ACPI
{

View File

@ -18,10 +18,10 @@
#include <smp.hpp>
#include <memory.hpp>
#include <atomic.hpp>
#include <ints.hpp>
#include <assert.h>
#include <cpu.hpp>
#include <atomic>
#include "../../../kernel.h"
#include "../acpi.hpp"
@ -41,7 +41,7 @@ enum SMPTrampolineAddress
TRAMPOLINE_START = 0x2000
};
Atomic<bool> CPUEnabled = false;
std::atomic_bool CPUEnabled = false;
#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
static __aligned(PAGE_SIZE) CPUData CPUs[MAX_CPU] = {0};
@ -79,7 +79,7 @@ extern "C" void StartCPU()
CPU::Interrupts(CPU::Enable);
KPrint("\e058C19CPU \e8888FF%d \e058C19is online", CoreID);
CPUEnabled.Store(true, MemoryOrder::Release);
CPUEnabled.store(true, std::memory_order_release);
CPU::Halt(true);
}
@ -124,9 +124,9 @@ namespace SMP
((APIC::APIC *)Interrupts::apic[0])->SendInitIPI(((ACPI::MADT *)madt)->lapic[i]->APICId);
((APIC::APIC *)Interrupts::apic[0])->SendStartupIPI(((ACPI::MADT *)madt)->lapic[i]->APICId, TRAMPOLINE_START);
while (!CPUEnabled.Load(MemoryOrder::Acquire))
while (!CPUEnabled.load(std::memory_order_acquire))
CPU::Pause();
CPUEnabled.Store(false, MemoryOrder::Release);
CPUEnabled.store(false, std::memory_order_release);
trace("CPU %d loaded.", ((ACPI::MADT *)madt)->lapic[i]->APICId);
}
else

View File

@ -999,8 +999,8 @@ namespace CrashHandler
if (TaskManager && cpudata != nullptr)
{
crashdata.Process = cpudata->CurrentProcess.Load();
crashdata.Thread = cpudata->CurrentThread.Load();
crashdata.Process = cpudata->CurrentProcess.load();
crashdata.Thread = cpudata->CurrentThread.load();
error("Current Process: %s(%ld)",
cpudata->CurrentProcess->Name,

View File

@ -18,8 +18,8 @@
#include <ints.hpp>
#include <syscalls.hpp>
#include <vector.hpp>
#include <smp.hpp>
#include <vector>
#include <io.h>
#if defined(a64)

View File

@ -29,7 +29,7 @@
#endif
bool ForceUnlock = false;
Atomic<size_t> LocksCount = 0;
std::atomic_size_t LocksCount = 0;
size_t GetLocksCount() { return LocksCount; }
@ -75,7 +75,7 @@ void LockClass::DeadLock(SpinLockData Lock)
this->DeadLocks++;
if (Config.UnlockDeadLock && this->DeadLocks.Load() > 10)
if (Config.UnlockDeadLock && this->DeadLocks.load() > 10)
{
warn("Unlocking lock '%s' to prevent deadlock. (this is enabled in the kernel config)", Lock.AttemptingToGet);
this->DeadLocks = 0;
@ -93,7 +93,7 @@ int LockClass::Lock(const char *FunctionName)
Retry:
int i = 0;
while (IsLocked.Exchange(true, MemoryOrder::Acquire) && ++i < (DebuggerIsAttached ? 0x100000 : 0x10000000))
while (IsLocked.exchange(true, std::memory_order_acquire) && ++i < (DebuggerIsAttached ? 0x100000 : 0x10000000))
CPU::Pause();
if (i >= (DebuggerIsAttached ? 0x100000 : 0x10000000))
@ -120,7 +120,7 @@ int LockClass::Unlock()
{
__sync;
IsLocked.Store(false, MemoryOrder::Release);
IsLocked.store(false, std::memory_order_release);
LockData.Count--;
LocksCount--;
@ -178,17 +178,17 @@ int LockClass::TimeoutLock(const char *FunctionName, uint64_t Timeout)
LockData.AttemptingToGet = FunctionName;
LockData.StackPointerAttempt = (uintptr_t)__builtin_frame_address(0);
Atomic<uint64_t> Target = 0;
std::atomic_uint64_t Target = 0;
Retry:
int i = 0;
while (IsLocked.Exchange(true, MemoryOrder::Acquire) && ++i < (DebuggerIsAttached ? 0x100000 : 0x10000000))
while (IsLocked.exchange(true, std::memory_order_acquire) && ++i < (DebuggerIsAttached ? 0x100000 : 0x10000000))
CPU::Pause();
if (i >= (DebuggerIsAttached ? 0x100000 : 0x10000000))
{
if (Target.Load() == 0)
Target.Store(TimeManager->CalculateTarget(Timeout));
TimeoutDeadLock(LockData, Target.Load());
if (Target.load() == 0)
Target.store(TimeManager->CalculateTarget(Timeout));
TimeoutDeadLock(LockData, Target.load());
goto Retry;
}

View File

@ -17,8 +17,8 @@
#include <uart.hpp>
#include <vector.hpp>
#include <debug.h>
#include <vector>
volatile bool serialports[8] = {false, false, false, false, false, false, false, false};
std::vector<UniversalAsynchronousReceiverTransmitter::Events *> RegisteredEvents;

View File

@ -18,12 +18,12 @@
#include "kernel.h"
#include <filesystem/ustar.hpp>
#include <vector.hpp>
#include <power.hpp>
#include <lock.hpp>
#include <printf.h>
#include <exec.hpp>
#include <cwalk.h>
#include <vector>
#define STB_IMAGE_IMPLEMENTATION
#define STBI_NO_STDIO

View File

@ -155,6 +155,10 @@ LockClass mExtTrkLock;
* https://en.wikipedia.org/wiki/Control_register
* https://web.archive.org/web/20160312223150/http://ncsi.com/nsatc11/presentations/wednesday/emerging_technologies/fischer.pdf
* https://en.wikipedia.org/wiki/Supervisor_Mode_Access_Prevention
*
* - Atomic operations:
* https://en.cppreference.com/w/cpp/atomic/atomic
*
*/
#ifdef a64

View File

@ -124,12 +124,12 @@ namespace Tasking
SafeFunction NIF bool Task::FindNewProcess(void *CPUDataPointer)
{
CPUData *CurrentCPU = (CPUData *)CPUDataPointer;
fnp_schedbg("%d processes", ListProcess.size());
fnp_schedbg("%d processes", ProcessList.size());
#ifdef DEBUG_FIND_NEW_PROCESS
foreach (auto process in ListProcess)
foreach (auto process in ProcessList)
fnp_schedbg("Process %d %s", process->ID, process->Name);
#endif
foreach (auto process in ListProcess)
foreach (auto process in ProcessList)
{
if (InvalidPCB(process))
continue;
@ -179,7 +179,7 @@ namespace Tasking
for (size_t i = 0; i < CurrentCPU->CurrentProcess->Threads.size(); i++)
{
if (CurrentCPU->CurrentProcess->Threads[i] == CurrentCPU->CurrentThread.Load())
if (CurrentCPU->CurrentProcess->Threads[i] == CurrentCPU->CurrentThread.load())
{
size_t TempIndex = i;
RetryAnotherThread:
@ -213,7 +213,7 @@ namespace Tasking
CurrentCPU->CurrentThread = nextThread;
gnat_schedbg("[thd 0 -> end] Scheduling thread %d parent of %s->%d Procs %d",
thread->ID, thread->Parent->Name,
CurrentCPU->CurrentProcess->Threads.size(), ListProcess.size());
CurrentCPU->CurrentProcess->Threads.size(), ProcessList.size());
return true;
}
#ifdef DEBUG
@ -232,9 +232,9 @@ namespace Tasking
CPUData *CurrentCPU = (CPUData *)CPUDataPointer;
bool Skip = true;
foreach (auto process in ListProcess)
foreach (auto process in ProcessList)
{
if (process == CurrentCPU->CurrentProcess.Load())
if (process == CurrentCPU->CurrentProcess.load())
{
Skip = false;
gnap_schedbg("Found current process %#lx", process);
@ -279,7 +279,7 @@ namespace Tasking
CurrentCPU->CurrentProcess = process;
CurrentCPU->CurrentThread = thread;
gnap_schedbg("[cur proc+1 -> first thd] Scheduling thread %d %s->%d (Total Procs %d)",
thread->ID, thread->Name, process->Threads.size(), ListProcess.size());
thread->ID, thread->Name, process->Threads.size(), ProcessList.size());
return true;
}
}
@ -291,7 +291,7 @@ namespace Tasking
{
CPUData *CurrentCPU = (CPUData *)CPUDataPointer;
foreach (auto process in ListProcess)
foreach (auto process in ProcessList)
{
if (InvalidPCB(process))
{
@ -325,7 +325,7 @@ namespace Tasking
CurrentCPU->CurrentProcess = process;
CurrentCPU->CurrentThread = thread;
sspt_schedbg("[proc 0 -> end -> first thd] Scheduling thread %d parent of %s->%d (Procs %d)",
thread->ID, thread->Parent->Name, process->Threads.size(), ListProcess.size());
thread->ID, thread->Parent->Name, process->Threads.size(), ProcessList.size());
return true;
}
}
@ -334,7 +334,7 @@ namespace Tasking
SafeFunction NIF void Task::UpdateProcessStatus()
{
foreach (auto process in ListProcess)
foreach (auto process in ProcessList)
{
if (InvalidPCB(process))
continue;
@ -363,7 +363,7 @@ namespace Tasking
SafeFunction NIF void Task::WakeUpThreads(void *CPUDataPointer)
{
CPUData *CurrentCPU = (CPUData *)CPUDataPointer;
foreach (auto process in ListProcess)
foreach (auto process in ProcessList)
{
if (InvalidPCB(process))
continue;
@ -479,7 +479,7 @@ namespace Tasking
}
CPU::x64::writecr3({.raw = (uint64_t)KernelPageTable}); /* Restore kernel page table for safety reasons. */
uint64_t SchedTmpTicks = CPU::Counter();
this->LastTaskTicks.Store(SchedTmpTicks - this->SchedulerTicks.Load());
this->LastTaskTicks.store(SchedTmpTicks - this->SchedulerTicks.load());
CPUData *CurrentCPU = GetCurrentCPU();
schedbg("Scheduler called on CPU %d.", CurrentCPU->ID);
schedbg("%d: %ld%%", CurrentCPU->ID, GetUsage(CurrentCPU->ID));
@ -509,7 +509,7 @@ namespace Tasking
}
#endif
if (unlikely(InvalidPCB(CurrentCPU->CurrentProcess.Load()) || InvalidTCB(CurrentCPU->CurrentThread.Load())))
if (unlikely(InvalidPCB(CurrentCPU->CurrentProcess.load()) || InvalidTCB(CurrentCPU->CurrentThread.load())))
{
schedbg("Invalid process or thread. Finding a new one.");
if (this->FindNewProcess(CurrentCPU))
@ -669,7 +669,7 @@ namespace Tasking
/* RealEnd->[Function Exit] */
RealEnd:
this->SchedulerTicks.Store(CPU::Counter() - SchedTmpTicks);
this->SchedulerTicks.store(CPU::Counter() - SchedTmpTicks);
__sync; /* TODO: Is this really needed? */
}

View File

@ -17,9 +17,9 @@
#include <task.hpp>
#include <vector.hpp>
#include <rand.hpp>
#include <debug.h>
#include <vector>
namespace Tasking
{

View File

@ -130,36 +130,36 @@ namespace Tasking
foreach (PCB *process in Process->Children)
RemoveProcess(process);
for (size_t i = 0; i < ListProcess.size(); i++)
for (size_t i = 0; i < ProcessList.size(); i++)
{
if (ListProcess[i] == Process)
if (ProcessList[i] == Process)
{
trace("Process \"%s\"(%d) removed from the list", Process->Name, Process->ID);
// Free memory
delete ListProcess[i]->IPC, ListProcess[i]->IPC = nullptr;
delete ListProcess[i]->ELFSymbolTable, ListProcess[i]->ELFSymbolTable = nullptr;
SecurityManager.DestroyToken(ListProcess[i]->Security.UniqueToken);
if (ListProcess[i]->Security.TrustLevel == TaskTrustLevel::User)
KernelAllocator.FreePages((void *)ListProcess[i]->PageTable, TO_PAGES(sizeof(Memory::PageTable4) + 1));
delete ProcessList[i]->IPC, ProcessList[i]->IPC = nullptr;
delete ProcessList[i]->ELFSymbolTable, ProcessList[i]->ELFSymbolTable = nullptr;
SecurityManager.DestroyToken(ProcessList[i]->Security.UniqueToken);
if (ProcessList[i]->Security.TrustLevel == TaskTrustLevel::User)
KernelAllocator.FreePages((void *)ProcessList[i]->PageTable, TO_PAGES(sizeof(Memory::PageTable4) + 1));
// Remove the process from parent's children list
if (ListProcess[i]->Parent)
for (size_t j = 0; j < ListProcess[i]->Parent->Children.size(); j++)
if (ProcessList[i]->Parent)
for (size_t j = 0; j < ProcessList[i]->Parent->Children.size(); j++)
{
if (ListProcess[i]->Parent->Children[j] == ListProcess[i])
if (ProcessList[i]->Parent->Children[j] == ProcessList[i])
{
ListProcess[i]->Parent->Children.remove(j);
ProcessList[i]->Parent->Children.remove(j);
break;
}
}
// Delete process directory
vfs->Delete(ListProcess[i]->ProcessDirectory, true);
vfs->Delete(ProcessList[i]->ProcessDirectory, true);
// Free memory
delete ListProcess[i], ListProcess[i] = nullptr;
delete ProcessList[i], ProcessList[i] = nullptr;
// Remove from the list
ListProcess.remove(i);
ProcessList.remove(i);
break;
}
}
@ -217,23 +217,23 @@ namespace Tasking
CPU::Halt(true);
}
PCB *Task::GetCurrentProcess() { return GetCurrentCPU()->CurrentProcess.Load(); }
TCB *Task::GetCurrentThread() { return GetCurrentCPU()->CurrentThread.Load(); }
PCB *Task::GetCurrentProcess() { return GetCurrentCPU()->CurrentProcess.load(); }
TCB *Task::GetCurrentThread() { return GetCurrentCPU()->CurrentThread.load(); }
PCB *Task::GetProcessByID(UPID ID)
{
for (size_t i = 0; i < ListProcess.size(); i++)
if (ListProcess[i]->ID == ID)
return ListProcess[i];
for (size_t i = 0; i < ProcessList.size(); i++)
if (ProcessList[i]->ID == ID)
return ProcessList[i];
return nullptr;
}
TCB *Task::GetThreadByID(UTID ID)
{
for (size_t i = 0; i < ListProcess.size(); i++)
for (size_t j = 0; j < ListProcess[i]->Threads.size(); j++)
if (ListProcess[i]->Threads[j]->ID == ID)
return ListProcess[i]->Threads[j];
for (size_t i = 0; i < ProcessList.size(); i++)
for (size_t j = 0; j < ProcessList[i]->Threads.size(); j++)
if (ProcessList[i]->Threads[j]->ID == ID)
return ProcessList[i]->Threads[j];
return nullptr;
}
@ -312,7 +312,7 @@ namespace Tasking
while (true)
{
this->Sleep(1000);
foreach (auto process in ListProcess)
foreach (auto process in ProcessList)
{
if (InvalidPCB(process))
continue;
@ -324,9 +324,9 @@ namespace Tasking
void Task::RevertProcessCreation(PCB *Process)
{
for (size_t i = 0; i < ListProcess.size(); i++)
for (size_t i = 0; i < ProcessList.size(); i++)
{
if (ListProcess[i] == Process)
if (ProcessList[i] == Process)
{
SecurityManager.DestroyToken(Process->Security.UniqueToken);
if (Process->Security.TrustLevel == TaskTrustLevel::User)
@ -345,7 +345,7 @@ namespace Tasking
delete Process->IPC, Process->IPC = nullptr;
delete Process->ELFSymbolTable, Process->ELFSymbolTable = nullptr;
delete Process, Process = nullptr;
ListProcess.remove(i);
ProcessList.remove(i);
NextPID--;
break;
}
@ -769,7 +769,7 @@ namespace Tasking
if (Parent)
Parent->Children.push_back(Process);
ListProcess.push_back(Process);
ProcessList.push_back(Process);
return Process;
}
@ -859,27 +859,27 @@ namespace Tasking
debug("Destructor called");
{
SmartLock(TaskingLock);
foreach (PCB *Process in ListProcess)
foreach (PCB *Process in ProcessList)
{
foreach (TCB *Thread in Process->Threads)
{
if (Thread == GetCurrentCPU()->CurrentThread.Load() ||
if (Thread == GetCurrentCPU()->CurrentThread.load() ||
Thread == CleanupThread)
continue;
this->KillThread(Thread, 0xFFFF);
}
if (Process == GetCurrentCPU()->CurrentProcess.Load())
if (Process == GetCurrentCPU()->CurrentProcess.load())
continue;
this->KillProcess(Process, 0xFFFF);
}
}
while (ListProcess.size() > 0)
while (ProcessList.size() > 0)
{
trace("Waiting for %d processes to terminate", ListProcess.size());
trace("Waiting for %d processes to terminate", ProcessList.size());
int NotTerminated = 0;
foreach (PCB *Process in ListProcess)
foreach (PCB *Process in ProcessList)
{
debug("Process %s(%d) is still running (or waiting to be removed status %#lx)", Process->Name, Process->ID, Process->Status);
if (Process->Status == TaskStatus::Terminated)

36
Tests/std.cpp Normal file
View File

@ -0,0 +1,36 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#ifdef DEBUG
#include <std.hpp>
#include <assert.h>

/* Boot-time sanity check for the kernel's std::atomic implementation;
   registered as a constructor so it runs automatically in debug builds. */
__constructor void Test_std()
{
	std::atomic_int counter = 0;

	/* post-increment must bump the stored value to 1 */
	counter++;
	assert(counter == 1);

	/* exchange() must return the previous value and store the new one */
	int previous = counter.exchange(2);
	assert(previous == 1);
	assert(counter == 2);

	debug("std: OK");
}
#endif // DEBUG

19
include/atomic Normal file
View File

@ -0,0 +1,19 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#pragma once
#include <std/atomic.hpp>

View File

@ -1,232 +0,0 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef __FENNIX_KERNEL_ATOMIC_H__
#define __FENNIX_KERNEL_ATOMIC_H__

#define _Atomic(T) T

/**
 * Memory ordering constraints for atomic operations.
 *
 * Each enumerator is bound to the corresponding compiler constant so a
 * value can be handed straight to the __atomic_* builtins.
 */
enum MemoryOrder
{
	/** No ordering constraints; only the atomicity of the access itself. */
	Relaxed = __ATOMIC_RELAXED,
	/** No later load/store may be reordered before this operation. */
	Acquire = __ATOMIC_ACQUIRE,
	/** No earlier load/store may be reordered after this operation. */
	Release = __ATOMIC_RELEASE,
	/** Acquire and Release combined (for read-modify-write operations). */
	AcqRel = __ATOMIC_ACQ_REL,
	/** AcqRel plus a single global total order over all SeqCst operations. */
	SeqCst = __ATOMIC_SEQ_CST
};

/**
 * Thin atomic wrapper over the GCC/Clang __atomic builtins.
 *
 * Every operation defaults to sequentially consistent ordering; pass a
 * weaker MemoryOrder explicitly where that synchronization is not needed.
 */
template <typename T>
class Atomic
{
	_Atomic(T) Value; /* wrapped value; only ever accessed atomically */

public:
	Atomic() : Value(0) {}
	Atomic(T Init) : Value(Init) {}

	/** Atomically read and return the current value. */
	T Load(MemoryOrder Order = MemoryOrder::SeqCst)
	{
		return __atomic_load_n(&this->Value, Order);
	}

	/** Atomically replace the current value with @p v. */
	void Store(T v, MemoryOrder Order = MemoryOrder::SeqCst)
	{
		__atomic_store_n(&this->Value, v, Order);
	}

	/** Atomically replace the value with @p v; returns the previous value. */
	T Exchange(T v, MemoryOrder Order = MemoryOrder::SeqCst)
	{
		return __atomic_exchange_n(&this->Value, v, Order);
	}

	/**
	 * Compare-and-swap (weak form: may fail spuriously).
	 *
	 * If the stored value equals @p Expected it is replaced by @p Desired
	 * and true is returned; otherwise @p Expected is updated with the
	 * observed value and false is returned.
	 */
	bool CompareExchange(T &Expected, T Desired, MemoryOrder Order = MemoryOrder::SeqCst)
	{
		return __atomic_compare_exchange_n(&this->Value, &Expected, Desired, true, Order, Order);
	}

	/** Atomically add @p v; returns the value held before the addition. */
	T FetchAdd(T v, MemoryOrder Order = MemoryOrder::SeqCst)
	{
		return __atomic_fetch_add(&this->Value, v, Order);
	}

	/** Atomically subtract @p v; returns the value held before. */
	T FetchSub(T v, MemoryOrder Order = MemoryOrder::SeqCst)
	{
		return __atomic_fetch_sub(&this->Value, v, Order);
	}

	/** Atomically AND with @p v; returns the value held before. */
	T FetchAnd(T v, MemoryOrder Order = MemoryOrder::SeqCst)
	{
		return __atomic_fetch_and(&this->Value, v, Order);
	}

	/** Atomically OR with @p v; returns the value held before. */
	T FetchOr(T v, MemoryOrder Order = MemoryOrder::SeqCst)
	{
		return __atomic_fetch_or(&this->Value, v, Order);
	}

	/** Atomically XOR with @p v; returns the value held before. */
	T FetchXor(T v, MemoryOrder Order = MemoryOrder::SeqCst)
	{
		return __atomic_fetch_xor(&this->Value, v, Order);
	}

	/** Atomically NAND with @p v; returns the value held before. */
	T FetchNand(T v, MemoryOrder Order = MemoryOrder::SeqCst)
	{
		return __atomic_fetch_nand(&this->Value, v, Order);
	}

	/* Convenience operators mirroring the behavior of a plain integer;
	   compound forms return the NEW value (old op v), like builtins do. */
	operator bool() { return this->Load() != 0; }
	T operator->() { return this->Load(); }
	T operator++() { return this->FetchAdd(1) + 1; }
	T operator--() { return this->FetchSub(1) - 1; }
	T operator++(int) { return this->FetchAdd(1); }
	T operator--(int) { return this->FetchSub(1); }
	T operator+=(T v) { return this->FetchAdd(v) + v; }
	T operator-=(T v) { return this->FetchSub(v) - v; }
	T operator&=(T v) { return this->FetchAnd(v) & v; }
	T operator|=(T v) { return this->FetchOr(v) | v; }
	T operator^=(T v) { return this->FetchXor(v) ^ v; }
	T operator~() { return this->FetchNand(-1); }
	T operator=(T v)
	{
		this->Store(v);
		return v;
	}
};

#endif // !__FENNIX_KERNEL_ATOMIC_H__

24
include/cstddef Normal file
View File

@ -0,0 +1,24 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#pragma once

namespace std
{
	/* Freestanding stand-in for <cstddef>: the kernel builds without the
	   hosted C++ standard library, so the two basic size types are
	   declared here. Widths assume an LP64-style target (unsigned
	   long/long are pointer-sized) — verify before porting to 32-bit. */
	using size_t = unsigned long; /* result type of sizeof */
	using ptrdiff_t = long;       /* signed result of pointer subtraction */
}

View File

@ -15,9 +15,5 @@
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef __FENNIX_KERNEL_CSTRING_H__
#define __FENNIX_KERNEL_CSTRING_H__
#pragma once
#include <convert.h>
#endif // !__FENNIX_KERNEL_CSTRING_H__

View File

@ -19,7 +19,7 @@
#define __FENNIX_KERNEL_DISK_H__
#include <types.h>
#include <vector.hpp>
#include <vector>
namespace Disk
{

View File

@ -20,12 +20,12 @@
#include <types.h>
#include <vector.hpp>
#include <memory.hpp>
#include <ints.hpp>
#include <debug.h>
#include <cpu.hpp>
#include <pci.hpp>
#include <vector>
namespace Driver
{

View File

@ -21,7 +21,7 @@
#include <types.h>
#include <smart_ptr.hpp>
#include <vector.hpp>
#include <vector>
namespace VirtualFileSystem
{

View File

@ -21,8 +21,8 @@
#include <types.h>
#include <display.hpp>
#include <memory.hpp>
#include <vector.hpp>
#include <debug.h>
#include <vector>
namespace GraphicalUserInterface
{

View File

@ -20,10 +20,10 @@
#include <types.h>
#include <filesystem.hpp>
#include <vector.hpp>
#include <memory.hpp>
#include <atomic.hpp>
#include <lock.hpp>
#include <vector>
#include <atomic>
namespace InterProcessCommunication
{
@ -59,7 +59,7 @@ namespace InterProcessCommunication
VirtualFileSystem::Node *Node;
void *Buffer;
long Length;
Atomic<bool> Listening;
std::atomic_bool Listening;
};
class IPC

View File

@ -20,8 +20,8 @@
#include <types.h>
#include <atomic.hpp>
#include <cpu.hpp>
#include <atomic>
#ifdef __cplusplus
@ -40,19 +40,19 @@ class LockClass
{
struct SpinLockData
{
Atomic<uint64_t> LockData = 0x0;
Atomic<const char *> CurrentHolder = "(nul)";
Atomic<const char *> AttemptingToGet = "(nul)";
Atomic<uintptr_t> StackPointerHolder = 0;
Atomic<uintptr_t> StackPointerAttempt = 0;
Atomic<size_t> Count = 0;
Atomic<long> Core = 0;
std::atomic_uint64_t LockData = 0x0;
std::atomic<const char *> CurrentHolder = "(nul)";
std::atomic<const char *> AttemptingToGet = "(nul)";
std::atomic_uintptr_t StackPointerHolder = 0;
std::atomic_uintptr_t StackPointerAttempt = 0;
std::atomic_size_t Count = 0;
std::atomic_long Core = 0;
};
private:
SpinLockData LockData;
Atomic<bool> IsLocked = false;
Atomic<unsigned long> DeadLocks = 0;
std::atomic_bool IsLocked = false;
std::atomic_ulong DeadLocks = 0;
void DeadLock(SpinLockData Lock);
void TimeoutDeadLock(SpinLockData Lock, uint64_t Timeout);

View File

@ -20,8 +20,8 @@
#include <net/eth.hpp>
#include <net/nc.hpp>
#include <vector.hpp>
#include <types.h>
#include <vector>
namespace NetworkARP
{

View File

@ -19,11 +19,11 @@
#define __FENNIX_KERNEL_NETWORK_CONTROLLER_H__
#include <net/net.hpp>
#include <vector.hpp>
#include <memory.hpp>
#include <task.hpp>
#include <types.h>
#include <debug.h>
#include <vector>
namespace NetworkInterfaceManager
{

View File

@ -21,7 +21,7 @@
#include <types.h>
#include <debug.h>
#include <vector.hpp>
#include <vector>
namespace PCI
{

View File

@ -18,9 +18,9 @@
#ifndef __FENNIX_KERNEL_SMP_H__
#define __FENNIX_KERNEL_SMP_H__
#include <types.h>
#include <atomic.hpp>
#include <task.hpp>
#include <types.h>
#include <atomic>
/** @brief Maximum supported number of CPU cores by the kernel */
#define MAX_CPU 256
@ -54,10 +54,10 @@ struct CPUData
long ErrorCode;
/** @brief Current running process */
Atomic<Tasking::PCB *> CurrentProcess;
std::atomic<Tasking::PCB *> CurrentProcess;
/** @brief Current running thread */
Atomic<Tasking::TCB *> CurrentThread;
std::atomic<Tasking::TCB *> CurrentThread;
/** @brief Architecture-specific data. */
CPUArchData Data;

View File

@ -22,14 +22,15 @@
#define __FENNIX_KERNEL_STD_H__
#include <types.h>
#include <std/unordered_map.hpp>
#include <std/atomic.hpp>
#include <std/functional.hpp>
#include <std/stdexcept.hpp>
#include <std/list.hpp>
#include <std/smart_ptr.hpp>
#include <std/stdexcept.hpp>
#include <std/string.hpp>
#include <std/unordered_map.hpp>
#include <std/utility.hpp>
#include <std/vector.hpp>
#include <std/string.hpp>
#include <std/list.hpp>
/**
* @brief // stub namespace for std::align_val_t and new operator

521
include/std/atomic.hpp Normal file
View File

@ -0,0 +1,521 @@
/*
This file is part of Fennix Kernel.
Fennix Kernel is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Fennix Kernel is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fennix Kernel. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef __FENNIX_KERNEL_STD_ATOMIC_H__
#define __FENNIX_KERNEL_STD_ATOMIC_H__
#include <types.h>
#include <cstddef>
#include <debug.h>
namespace std
{
#define _atomic(T) T
#define builtin_atomic_n(name) __atomic_##name##_n
#define builtin_atomic(name) __atomic_##name
/**
 * @brief Specifies the memory ordering constraints for atomic operations.
 *
 * This enum specifies the possible values for the memory order parameter of atomic operations.
 *
 * Possible values are:
 *
 * - memory_order_relaxed: There are no synchronization
 * or ordering constraints imposed on other reads or writes,
 * only this operation's atomicity is guaranteed.
 *
 * - memory_order_consume: A load operation with this
 * memory order performs a consume operation on the
 * affected memory location: no reads or writes in the
 * current thread dependent on the value currently loaded
 * can be reordered before this load.
 *
 * - memory_order_acquire: A load operation with this
 * memory order performs the acquire operation on the
 * affected memory location: no reads or writes in the
 * current thread can be reordered before this load.
 *
 * - memory_order_release: A store operation with this
 * memory order performs the release operation: no reads
 * or writes in the current thread can be reordered after
 * this store.
 *
 * - memory_order_acq_rel: A read-modify-write operation
 * with this memory order is both an acquire operation
 * and a release operation.
 *
 * - memory_order_seq_cst: A load operation with this
 * memory order performs an acquire operation, a store
 * performs a release operation, and read-modify-write
 * performs both an acquire operation and a release
 * operation, plus a single total order exists in which
 * all threads observe all modifications in the same order.
 */
enum class memory_order : int
{
	/* Bound explicitly to the compiler's __ATOMIC_* constants: the
	   atomic operations static_cast these enumerators straight into
	   the __atomic_* builtins, so they must match the builtin values
	   exactly rather than relying on declaration order. */
	relaxed = __ATOMIC_RELAXED,
	consume = __ATOMIC_CONSUME,
	acquire = __ATOMIC_ACQUIRE,
	release = __ATOMIC_RELEASE,
	acq_rel = __ATOMIC_ACQ_REL,
	seq_cst = __ATOMIC_SEQ_CST
};

inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
inline constexpr memory_order memory_order_consume = memory_order::consume;
inline constexpr memory_order memory_order_acquire = memory_order::acquire;
inline constexpr memory_order memory_order_release = memory_order::release;
inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
template <typename T>
class atomic
{
_atomic(T) value;
public:
atomic() noexcept : value(0) {}
atomic(T desired) noexcept : value(desired) {}
// atomic(const atomic &) = delete;
/**
 * @brief Load the value of the atomic variable
 *
 * @note Order must be one of memory_order::relaxed, memory_order::consume, memory_order::acquire or memory_order::seq_cst
 *
 * @param order Memory order constraint to use
 * @return The value of the atomic variable
 */
T load(memory_order order = memory_order::seq_cst) const noexcept
{
/* The cast feeds the enumerator's raw int to the builtin; this
   relies on memory_order's values matching the compiler's
   __ATOMIC_* constants. */
return builtin_atomic_n(load)(&this->value, static_cast<int>(order));
}
/**
 * @copydoc load()
 *
 * (volatile-qualified overload so atomics declared volatile
 * can still be read)
 */
T load(memory_order order = memory_order::seq_cst) const volatile noexcept
{
return builtin_atomic_n(load)(&this->value, static_cast<int>(order));
}
/**
 * @brief Store the value of the atomic variable
 *
 * @note Order must be one of memory_order::relaxed, memory_order::release or memory_order::seq_cst
 *
 * @param desired The value to store
 * @param order Memory order constraint to use
 */
void store(T desired, memory_order order = memory_order::seq_cst) noexcept
{
/* memory_order's enumerator values match the compiler's
   __ATOMIC_* constants, so the cast is passed straight through. */
builtin_atomic_n(store)(&this->value, desired, static_cast<int>(order));
}
/**
 * @copydoc store()
 *
 * (volatile-qualified overload so atomics declared volatile
 * can still be written)
 */
void store(T desired, memory_order order = memory_order::seq_cst) volatile noexcept
{
builtin_atomic_n(store)(&this->value, desired, static_cast<int>(order));
}
/**
 * @brief Exchange the value of the atomic variable
 *
 * Atomically replaces the stored value with @p desired.
 *
 * @param desired The value to exchange
 * @param order Memory order constraint to use
 * @return The value of the atomic variable before the exchange
 */
T exchange(T desired, memory_order order = memory_order::seq_cst) noexcept
{
return builtin_atomic_n(exchange)(&this->value, desired, static_cast<int>(order));
}
/**
 * @copydoc exchange()
 *
 * (volatile-qualified overload so atomics declared volatile
 * can still be exchanged)
 */
T exchange(T desired, memory_order order = memory_order::seq_cst) volatile noexcept
{
return builtin_atomic_n(exchange)(&this->value, desired, static_cast<int>(order));
}
/**
 * @brief Compare and exchange the value of the atomic variable (weak form)
 *
 * May fail spuriously (return false even when the comparison would
 * succeed), so it is normally used in a retry loop.
 *
 * @param expected The expected value; updated with the observed value on failure
 * @param desired The desired value
 * @param success Memory order constraint to use if the exchange succeeds
 * @param failure Memory order constraint to use if the exchange fails
 * @return True if the exchange succeeded, false otherwise
 */
bool compare_exchange_weak(T &expected, T desired, std::memory_order success, std::memory_order failure) noexcept
{
/* __atomic_compare_exchange_n is the CAS builtin (there is no
   __atomic_compare_exchange_weak builtin); the 4th argument set to
   true selects the weak, may-fail-spuriously form. Orders must be
   cast to int for the builtin. */
return builtin_atomic_n(compare_exchange)(&this->value, &expected, desired, true,
										  static_cast<int>(success), static_cast<int>(failure));
}
/**
* @copydoc compare_exchange_weak()
*/
bool compare_exchange_weak(T &expected, T desired, std::memory_order success, std::memory_order failure) volatile noexcept
{
return builtin_atomic(compare_exchange_weak)(&this->value, &expected, desired, false, success, failure);
}
/**
 * @brief Compare and exchange the value of the atomic variable (weak form)
 *
 * May fail spuriously, so it should be used inside a retry loop.
 *
 * @param expected The expected value; on failure it is updated with the
 * value actually found
 * @param desired The desired value
 * @param order Memory order constraint to use
 * @return True if the exchange succeeded, false otherwise
 */
bool compare_exchange_weak(T &expected, T desired, std::memory_order order = std::memory_order_seq_cst) noexcept
{
	/* The failure ordering of a compare-exchange may not be release or
	   acq_rel, so derive it from @p order the way the standard single-order
	   overload specifies (previously @p order was reused verbatim). */
	std::memory_order failure = order;
	if (order == std::memory_order_acq_rel)
		failure = std::memory_order_acquire;
	else if (order == std::memory_order_release)
		failure = std::memory_order_relaxed;
	/* true selects the weak (spurious-failure-allowed) builtin semantics. */
	return builtin_atomic(compare_exchange_weak)(&this->value, &expected, desired, true, static_cast<int>(order), static_cast<int>(failure));
}
/**
 * @copydoc compare_exchange_weak()
 */
bool compare_exchange_weak(T &expected, T desired, std::memory_order order = std::memory_order_seq_cst) volatile noexcept
{
	std::memory_order failure = order;
	if (order == std::memory_order_acq_rel)
		failure = std::memory_order_acquire;
	else if (order == std::memory_order_release)
		failure = std::memory_order_relaxed;
	return builtin_atomic(compare_exchange_weak)(&this->value, &expected, desired, true, static_cast<int>(order), static_cast<int>(failure));
}
/**
 * @brief Compare and exchange the value of the atomic variable (strong form)
 *
 * Unlike the weak form, a strong compare-exchange never fails spuriously:
 * it returns false only when the stored value differs from @p expected.
 *
 * @param expected The expected value; on failure it is updated with the
 * value actually found
 * @param desired The desired value
 * @param success Memory order constraint to use if the exchange succeeds
 * @param failure Memory order constraint to use if the exchange fails
 * @return True if the exchange succeeded, false otherwise
 */
bool compare_exchange_strong(T &expected, T desired, std::memory_order success, std::memory_order failure) noexcept
{
	/* false selects the strong (no spurious failure) semantics of the
	   __atomic compare-exchange builtin. Previously true was passed, which
	   made this the weak variant and let a "strong" CAS fail spuriously —
	   breaking callers that do not retry in a loop. */
	return builtin_atomic(compare_exchange_strong)(&this->value, &expected, desired, false, static_cast<int>(success), static_cast<int>(failure));
}
/**
 * @copydoc compare_exchange_strong()
 */
bool compare_exchange_strong(T &expected, T desired, std::memory_order success, std::memory_order failure) volatile noexcept
{
	return builtin_atomic(compare_exchange_strong)(&this->value, &expected, desired, false, static_cast<int>(success), static_cast<int>(failure));
}
/**
 * @brief Compare and exchange the value of the atomic variable (strong form)
 *
 * Never fails spuriously: returns false only when the stored value
 * differs from @p expected.
 *
 * @param expected The expected value; on failure it is updated with the
 * value actually found
 * @param desired The desired value
 * @param order Memory order constraint to use
 * @return True if the exchange succeeded, false otherwise
 */
bool compare_exchange_strong(T &expected, T desired, std::memory_order order = std::memory_order_seq_cst) noexcept
{
	/* The failure ordering may not be release or acq_rel, so derive it
	   from @p order per the single-order CAS contract (previously @p order
	   was reused verbatim). */
	std::memory_order failure = order;
	if (order == std::memory_order_acq_rel)
		failure = std::memory_order_acquire;
	else if (order == std::memory_order_release)
		failure = std::memory_order_relaxed;
	/* false selects strong (no spurious failure) builtin semantics;
	   true was passed before, which is the weak variant. */
	return builtin_atomic(compare_exchange_strong)(&this->value, &expected, desired, false, static_cast<int>(order), static_cast<int>(failure));
}
/**
 * @copydoc compare_exchange_strong()
 */
bool compare_exchange_strong(T &expected, T desired, std::memory_order order = std::memory_order_seq_cst) volatile noexcept
{
	std::memory_order failure = order;
	if (order == std::memory_order_acq_rel)
		failure = std::memory_order_acquire;
	else if (order == std::memory_order_release)
		failure = std::memory_order_relaxed;
	return builtin_atomic(compare_exchange_strong)(&this->value, &expected, desired, false, static_cast<int>(order), static_cast<int>(failure));
}
/**
 * @brief Atomically add @p arg to the stored value
 *
 * @param arg The addend
 * @param order Memory order constraint to use
 * @return The value held immediately before the addition
 */
T fetch_add(T arg, std::memory_order order = std::memory_order_seq_cst) noexcept
{
	const int ord = static_cast<int>(order);
	return builtin_atomic(fetch_add)(&this->value, arg, ord);
}
/**
 * @copydoc fetch_add()
 */
T fetch_add(T arg, std::memory_order order = std::memory_order_seq_cst) volatile noexcept
{
	const int ord = static_cast<int>(order);
	return builtin_atomic(fetch_add)(&this->value, arg, ord);
}
/**
 * @brief Atomically subtract @p arg from the stored value
 *
 * @param arg The subtrahend
 * @param order Memory order constraint to use
 * @return The value held immediately before the subtraction
 */
T fetch_sub(T arg, std::memory_order order = std::memory_order_seq_cst) noexcept
{
	const int ord = static_cast<int>(order);
	return builtin_atomic(fetch_sub)(&this->value, arg, ord);
}
/**
 * @copydoc fetch_sub()
 */
T fetch_sub(T arg, std::memory_order order = std::memory_order_seq_cst) volatile noexcept
{
	const int ord = static_cast<int>(order);
	return builtin_atomic(fetch_sub)(&this->value, arg, ord);
}
/**
 * @brief Atomically AND the stored value with @p arg
 *
 * @param arg The bitmask to AND with
 * @param order Memory order constraint to use
 * @return The value held immediately before the AND
 */
T fetch_and(T arg, std::memory_order order = std::memory_order_seq_cst) noexcept
{
	const int ord = static_cast<int>(order);
	return builtin_atomic(fetch_and)(&this->value, arg, ord);
}
/**
 * @copydoc fetch_and()
 */
T fetch_and(T arg, std::memory_order order = std::memory_order_seq_cst) volatile noexcept
{
	const int ord = static_cast<int>(order);
	return builtin_atomic(fetch_and)(&this->value, arg, ord);
}
/**
 * @brief Atomically OR the stored value with @p arg
 *
 * @param arg The bitmask to OR with
 * @param order Memory order constraint to use
 * @return The value held immediately before the OR
 */
T fetch_or(T arg, std::memory_order order = std::memory_order_seq_cst) noexcept
{
	const int ord = static_cast<int>(order);
	return builtin_atomic(fetch_or)(&this->value, arg, ord);
}
/**
 * @copydoc fetch_or()
 */
T fetch_or(T arg, std::memory_order order = std::memory_order_seq_cst) volatile noexcept
{
	const int ord = static_cast<int>(order);
	return builtin_atomic(fetch_or)(&this->value, arg, ord);
}
/**
 * @brief Atomically XOR the stored value with @p arg
 *
 * @param arg The bitmask to XOR with
 * @param order Memory order constraint to use
 * @return The value held immediately before the XOR
 */
T fetch_xor(T arg, std::memory_order order = std::memory_order_seq_cst) noexcept
{
	const int ord = static_cast<int>(order);
	return builtin_atomic(fetch_xor)(&this->value, arg, ord);
}
/**
 * @copydoc fetch_xor()
 */
T fetch_xor(T arg, std::memory_order order = std::memory_order_seq_cst) volatile noexcept
{
	const int ord = static_cast<int>(order);
	return builtin_atomic(fetch_xor)(&this->value, arg, ord);
}
/**
 * @brief Atomically NAND the stored value with @p arg
 *
 * @param arg The bitmask to NAND with
 * @param order Memory order constraint to use
 * @return The value held immediately before the NAND
 */
T fetch_nand(T arg, std::memory_order order = std::memory_order_seq_cst) noexcept
{
	const int ord = static_cast<int>(order);
	return builtin_atomic(fetch_nand)(&this->value, arg, ord);
}
/**
 * @copydoc fetch_nand()
 */
T fetch_nand(T arg, std::memory_order order = std::memory_order_seq_cst) volatile noexcept
{
	const int ord = static_cast<int>(order);
	return builtin_atomic(fetch_nand)(&this->value, arg, ord);
}
/**
 * @brief Notify all threads waiting on this atomic variable
 *
 * @note Stub: no wait-queue exists yet, so this only logs a fixme and
 * wakes nobody. Callers must not rely on wakeups until wait()/notify
 * are implemented together.
 */
void notify_all() noexcept
{
	fixme("not implemented");
}
/**
 * @copydoc notify_all()
 */
void notify_all() volatile noexcept
{
	fixme("not implemented");
}
/**
 * @brief Notify one thread waiting on this atomic variable
 *
 * @note Stub: no wait-queue exists yet, so this only logs a fixme and
 * wakes nobody.
 */
void notify_one() noexcept
{
	fixme("not implemented");
}
/**
 * @copydoc notify_one()
 */
void notify_one() volatile noexcept
{
	fixme("not implemented");
}
/**
 * @brief Wait for the atomic variable to change
 *
 * @note Stub: logs a fixme and returns immediately — it performs no
 * comparison against @p old and never blocks, so callers must poll.
 *
 * @param old The value to wait for (currently unused)
 * @param order Memory order constraint to use (currently unused)
 */
void wait(T old, std::memory_order order = std::memory_order::seq_cst) const noexcept
{
	fixme("not implemented");
}
/**
 * @copydoc wait()
 */
void wait(T old, std::memory_order order = std::memory_order::seq_cst) const volatile noexcept
{
	fixme("not implemented");
}
/**
 * @brief Check whether this atomic type is lock-free
 *
 * @note Stub: unconditionally reports true without consulting the
 * compiler. NOTE(review): __atomic_is_lock_free(sizeof(T), &value)
 * would give the target's real answer — confirm before relying on
 * this for large or over-aligned T.
 *
 * @return True if this atomic type is lock-free
 */
bool is_lock_free() const noexcept
{
	fixme("not implemented");
	return true;
}
/**
 * @copydoc is_lock_free()
 */
bool is_lock_free() const volatile noexcept
{
	fixme("not implemented");
	return true;
}
/**
 * @brief Equals true if this atomic type is always lock-free
 *
 * NOTE(review): hard-coded constant, not derived from the target's
 * guarantees for sizeof(T) — confirm for large T.
 */
static constexpr bool is_always_lock_free = true;
/* Increment/decrement built on fetch_add/fetch_sub: prefix forms return
   the new value, postfix forms return the previous value. */
T operator++() noexcept { return this->fetch_add(1) + 1; }
T operator--() noexcept { return this->fetch_sub(1) - 1; }
T operator++(int) noexcept { return this->fetch_add(1); }
T operator--(int) noexcept { return this->fetch_sub(1); }
/* Compound assignment: each returns the value the variable holds after
   the atomic read-modify-write (old value combined with the operand). */
T operator+=(T desired) noexcept { return this->fetch_add(desired) + desired; }
T operator-=(T desired) noexcept { return this->fetch_sub(desired) - desired; }
// T operator+=(std::ptrdiff_t desired) noexcept { return this->fetch_add(desired) + desired; }
// T operator-=(std::ptrdiff_t desired) noexcept { return this->fetch_sub(desired) - desired; }
T operator&=(T desired) noexcept { return this->fetch_and(desired) & desired; }
T operator|=(T desired) noexcept { return this->fetch_or(desired) | desired; }
T operator^=(T desired) noexcept { return this->fetch_xor(desired) ^ desired; }
// Returns a loaded copy of the stored value; only meaningful for pointer-like T.
T operator->() noexcept { return this->load(); }
// NOTE(review): unlike a plain bitwise NOT, this *mutates* the variable
// via fetch_nand(-1) and returns the value held *before* the operation,
// not its complement — confirm this is the intended contract.
T operator~() noexcept { return this->fetch_nand(-1); }
// Equality compares snapshots taken with default (seq_cst) loads; this is
// an extension — std::atomic itself is not equality-comparable.
bool operator==(const atomic &other) const noexcept { return this->load() == other.load(); }
bool operator==(T other) const noexcept { return this->load() == other; }
atomic &operator=(const atomic &) = delete; // non-copyable, like std::atomic
/**
 * @brief Atomically store @p desired and return it
 */
T operator=(T desired) noexcept
{
	this->store(desired);
	return desired;
}
// Contextual boolean conversion: true when the stored value is non-zero.
operator bool() noexcept { return this->load() != 0; }
// operator T() noexcept { return this->load(); }
operator T() const noexcept { return this->load(); }
};
/* Convenience aliases mirroring the standard <atomic> typedef set. */
using atomic_bool = atomic<bool>;
using atomic_char = atomic<char>;
using atomic_schar = atomic<signed char>;
using atomic_uchar = atomic<unsigned char>;
using atomic_short = atomic<short>;
using atomic_ushort = atomic<unsigned short>;
using atomic_int = atomic<int>;
using atomic_uint = atomic<unsigned int>;
using atomic_long = atomic<long>;
using atomic_ulong = atomic<unsigned long>;
using atomic_llong = atomic<long long>;
using atomic_ullong = atomic<unsigned long long>;
using atomic_char16_t = atomic<char16_t>;
using atomic_char32_t = atomic<char32_t>;
using atomic_wchar_t = atomic<wchar_t>;
/* Fixed-width integer aliases. */
using atomic_int8_t = atomic<int8_t>;
using atomic_uint8_t = atomic<uint8_t>;
using atomic_int16_t = atomic<int16_t>;
using atomic_uint16_t = atomic<uint16_t>;
using atomic_int32_t = atomic<int32_t>;
using atomic_uint32_t = atomic<uint32_t>;
using atomic_int64_t = atomic<int64_t>;
using atomic_uint64_t = atomic<uint64_t>;
/* "least" and "fast" width aliases. */
using atomic_int_least8_t = atomic<int_least8_t>;
using atomic_uint_least8_t = atomic<uint_least8_t>;
using atomic_int_least16_t = atomic<int_least16_t>;
using atomic_uint_least16_t = atomic<uint_least16_t>;
using atomic_int_least32_t = atomic<int_least32_t>;
using atomic_uint_least32_t = atomic<uint_least32_t>;
using atomic_int_least64_t = atomic<int_least64_t>;
using atomic_uint_least64_t = atomic<uint_least64_t>;
using atomic_int_fast8_t = atomic<int_fast8_t>;
using atomic_uint_fast8_t = atomic<uint_fast8_t>;
using atomic_int_fast16_t = atomic<int_fast16_t>;
using atomic_uint_fast16_t = atomic<uint_fast16_t>;
using atomic_int_fast32_t = atomic<int_fast32_t>;
using atomic_uint_fast32_t = atomic<uint_fast32_t>;
using atomic_int_fast64_t = atomic<int_fast64_t>;
using atomic_uint_fast64_t = atomic<uint_fast64_t>;
/* Pointer-sized and max-width aliases. */
using atomic_intptr_t = atomic<intptr_t>;
using atomic_uintptr_t = atomic<uintptr_t>;
using atomic_size_t = atomic<size_t>;
using atomic_ptrdiff_t = atomic<ptrdiff_t>;
using atomic_intmax_t = atomic<intmax_t>;
using atomic_uintmax_t = atomic<uintmax_t>;
}
#undef builtin_atomic_n
#undef builtin_atomic
#endif // !__FENNIX_KERNEL_STD_ATOMIC_H__

View File

@ -21,6 +21,7 @@
#include <types.h>
#include <std/functional.hpp>
#include <std/utility.hpp>
#include <std/vector.hpp>
#include <std/list.hpp>
namespace std

View File

@ -21,13 +21,13 @@
#include <types.h>
#include <filesystem.hpp>
#include <ints.hpp>
#include <symbols.hpp>
#include <vector.hpp>
#include <memory.hpp>
#include <atomic.hpp>
#include <ints.hpp>
#include <ipc.hpp>
#include <debug.h>
#include <vector>
#include <atomic>
#include <abi.h>
namespace Tasking
@ -136,18 +136,14 @@ namespace Tasking
void Rename(const char *name)
{
CriticalSection cs;
if (!Name[0])
if (strlen(name) > 256 || strlen(name) == 0)
{
warn("Tried to rename thread %d to NULL", ID);
debug("Invalid thread name");
return;
}
trace("Renaming thread %s to %s", Name, name);
for (int i = 0; i < 256; i++)
{
Name[i] = name[i];
if (name[i] == '\0')
break;
}
strncpy(Name, name, 256);
}
void SetPriority(TaskPriority priority)
@ -243,12 +239,12 @@ namespace Tasking
UPID NextPID = 0;
UTID NextTID = 0;
std::vector<PCB *> ListProcess;
std::vector<PCB *> ProcessList;
PCB *IdleProcess = nullptr;
TCB *IdleThread = nullptr;
TCB *CleanupThread = nullptr;
Atomic<uint64_t> SchedulerTicks = 0;
Atomic<uint64_t> LastTaskTicks = 0;
std::atomic_uint64_t SchedulerTicks = 0;
std::atomic_uint64_t LastTaskTicks = 0;
bool StopScheduler = false;
bool InvalidPCB(PCB *pcb);
bool InvalidTCB(TCB *tcb);
@ -280,9 +276,9 @@ namespace Tasking
public:
void SetCleanupThread(TCB *Thread) { CleanupThread = Thread; }
uint64_t GetSchedulerTicks() { return SchedulerTicks.Load(); }
uint64_t GetLastTaskTicks() { return LastTaskTicks.Load(); }
std::vector<PCB *> GetProcessList() { return ListProcess; }
uint64_t GetSchedulerTicks() { return SchedulerTicks.load(); }
uint64_t GetLastTaskTicks() { return LastTaskTicks.load(); }
std::vector<PCB *> GetProcessList() { return ProcessList; }
Security *GetSecurityManager() { return &SecurityManager; }
void CleanupProcessesThread();
void Panic() { StopScheduler = true; }