diff --git a/.gitmodules b/.gitmodules index e9ec8162..84805ac4 100644 --- a/.gitmodules +++ b/.gitmodules @@ -12,3 +12,6 @@ path = 3rdparty/ihk url = https://github.com/ManyThreads/ihk.git ignore = dirty +[submodule "3rdparty/tbb"] + path = 3rdparty/tbb + url = git@github.com:ManyThreads/tbb.git diff --git a/3rdparty/install-tbb.sh b/3rdparty/install-tbb.sh new file mode 100755 index 00000000..0001ea8d --- /dev/null +++ b/3rdparty/install-tbb.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +TBBDIR="${SCRIPTDIR}/tbb" +CXXDIR="${SCRIPTDIR}/cxx-amd64" +TBB_BUILD_PREFIX="my_tbb" + +MYFLAGS=" -std=c++11 -march=native -Wfatal-errors -g" +MYFLAGS+=" -fno-stack-protector" +MYFLAGS+=" -nostdlib -nostdinc -nostdinc++" +MYFLAGS+=" -isystem ${CXXDIR}/usr/include/c++/v1" +MYFLAGS+=" -isystem ${CXXDIR}/usr/include" + +export CXXFLAGS="${MYFLAGS} ${CPLUS_FLAGS}" +export tbb_build_prefix="${TBB_BUILD_PREFIX}" + +echo $CPLUS_FLAGS + +cd $TBBDIR +make clean default +make extra_inc=big_iron.inc +cd - diff --git a/3rdparty/musl b/3rdparty/musl index 790bd394..88986a3b 160000 --- a/3rdparty/musl +++ b/3rdparty/musl @@ -1 +1 @@ -Subproject commit 790bd3943e419933cf56366197b9f6668ae8c53e +Subproject commit 88986a3bd33c6b5b97c8ec4502cc97093155cfe9 diff --git a/3rdparty/tbb b/3rdparty/tbb new file mode 160000 index 00000000..efa9881d --- /dev/null +++ b/3rdparty/tbb @@ -0,0 +1 @@ +Subproject commit efa9881db0d47930f1e713daabc6ac5689efe96c diff --git a/Makefile.user b/Makefile.user index 56127727..c648bc7b 100644 --- a/Makefile.user +++ b/Makefile.user @@ -1,4 +1,4 @@ -CPPFLAGS+= -DMLOG_APP=FilterAny +CPPFLAGS+= -DMLOG_APP=FilterInfo CPPFLAGS+= -DMLOG_ASYNC=FilterError CPPFLAGS+= -DMLOG_BOOT=FilterError CPPFLAGS+= -DMLOG_CPU=FilterError diff --git a/kernel-amd64.config b/kernel-amd64.config index d311f6d9..2408fc08 100644 --- a/kernel-amd64.config +++ b/kernel-amd64.config @@ -18,10 +18,12 @@ "plugin-dump-multiboot", "plugin-rapl-driver-intel", "app-init-example", - "test-synchronous-task", - "plugin-processor-allocator" + #"test-synchronous-task", + "objects-thread-team" ] [config.vars] mythos_root = ".." cxx_path = "../3rdparty/cxx-amd64/usr" + tbb_build_path = "../3rdparty/tbb/build/my_tbb_release" + tbb_inc_path = "../3rdparty/tbb/include" diff --git a/kernel-ihk.config b/kernel-ihk.config index 070dc3f3..4bbf9be7 100644 --- a/kernel-ihk.config +++ b/kernel-ihk.config @@ -23,3 +23,5 @@ [config.vars] mythos_root = ".." cxx_path = "../3rdparty/cxx-amd64/usr" + tbb_build_path = "../3rdparty/tbb/build/my_tbb_release" + tbb_inc_path = "../3rdparty/tbb/include" diff --git a/kernel-knc.config b/kernel-knc.config index 1a7b2262..ec7b4cb3 100644 --- a/kernel-knc.config +++ b/kernel-knc.config @@ -26,3 +26,5 @@ [config.vars] mythos_root = ".." 
cxx_path = "../3rdparty/cxx-knc/usr" + tbb_build_path = "../3rdparty/tbb/build/my_tbb_release" + tbb_inc_path = "../3rdparty/tbb/include" diff --git a/kernel/app/init-example/app/init.cc b/kernel/app/init-example/app/init.cc index e27c8497..3b803976 100644 --- a/kernel/app/init-example/app/init.cc +++ b/kernel/app/init-example/app/init.cc @@ -36,7 +36,7 @@ #include "runtime/Example.hh" #include "runtime/PageMap.hh" #include "runtime/KernelMemory.hh" -#include "runtime/ProcessorAllocator.hh" +#include "runtime/ThreadTeam.hh" #include "runtime/CapAlloc.hh" #include "runtime/tls.hh" #include "runtime/mlog.hh" @@ -47,6 +47,7 @@ #include "runtime/Mutex.hh" #include "runtime/cgaScreen.hh" #include "runtime/process.hh" +#include "tbb/tbb.h" #include #include @@ -68,13 +69,14 @@ char initstack[stacksize]; char* initstack_top = initstack+stacksize; mythos::Portal portal(mythos::init::PORTAL, info_ptr->getInvocationBuf()); +mythos::Frame infoFrame(mythos::init::INFO_FRAME); mythos::CapMap myCS(mythos::init::CSPACE); mythos::PageMap myAS(mythos::init::PML4); mythos::KernelMemory kmem(mythos::init::KM); mythos::KObject device_memory(mythos::init::DEVICE_MEM); cap_alloc_t capAlloc(myCS); mythos::RaplDriverIntel rapl(mythos::init::RAPL_DRIVER_INTEL); -mythos::ProcessorAllocator pa(mythos::init::PROCESSOR_ALLOCATOR); +mythos::ThreadTeam team(mythos::init::THREAD_TEAM); char threadstack[stacksize]; char* thread1stack_top = threadstack+stacksize/2; @@ -187,13 +189,13 @@ void test_tls() auto tls = mythos::setupNewTLS(); MLOG_INFO(mlog::app, "test_EC: create ec1 TLS", DVARhex(tls)); ASSERT(tls != nullptr); - auto sc = pa.alloc(pl).wait(); - TEST(sc); - auto res1 = ec1.create(kmem).as(myAS).cs(myCS).sched(sc->cap) + auto res1 = ec1.create(kmem).as(myAS).cs(myCS) .prepareStack(thread1stack_top).startFun(threadFun, nullptr) .suspended(false).fs(tls) .invokeVia(pl).wait(); TEST(res1); + auto res2 = team.tryRunEC(pl, ec1).wait(); + TEST(res2); TEST(ec1.setFSGS(pl,(uint64_t) tls, 0).wait()); mythos::syscall_signal(ec1.cap()); MLOG_INFO(mlog::app, "End test tls"); @@ -204,7 +206,7 @@ void test_tls() void test_heap() { MLOG_INFO(mlog::app, "Test heap"); mythos::PortalLock pl(portal); - auto size = 4*1024*1024; // 2 MB + auto size = 64*1024*1024; // 2 MB auto align = 2*1024*1024; // 2 MB uintptr_t vaddr = mythos::round_up(info_ptr->getInfoEnd() + align2M, align2M); // allocate a 2MiB frame @@ -327,24 +329,24 @@ void test_ExecutionContext() auto tls1 = mythos::setupNewTLS(); ASSERT(tls1 != nullptr); - auto sc1 = pa.alloc(pl).wait(); - TEST(sc1); - auto res1 = ec1.create(kmem).as(myAS).cs(myCS).sched(sc1->cap) + auto res1 = ec1.create(kmem).as(myAS).cs(myCS) .prepareStack(thread1stack_top).startFun(&thread_main, nullptr) .suspended(false).fs(tls1) .invokeVia(pl).wait(); TEST(res1); + auto tres1 = team.tryRunEC(pl, ec1).wait(); + TEST(tres1); MLOG_INFO(mlog::app, "test_EC: create ec2"); auto tls2 = mythos::setupNewTLS(); ASSERT(tls2 != nullptr); - auto sc2 = pa.alloc(pl).wait(); - TEST(sc2); - auto res2 = ec2.create(kmem).as(myAS).cs(myCS).sched(sc2->cap) + auto res2 = ec2.create(kmem).as(myAS).cs(myCS)/*.sched(sc2->cap)*/ .prepareStack(thread2stack_top).startFun(&thread_main, nullptr) .suspended(false).fs(tls2) .invokeVia(pl).wait(); TEST(res2); + auto tres2 = team.tryRunEC(pl, ec2).wait(); + TEST(tres2); } for (volatile int i=0; i<100000; i++) { @@ -371,19 +373,75 @@ void test_InterruptControl() { mythos::ExecutionContext ec(capAlloc()); auto tls = mythos::setupNewTLS(); ASSERT(tls != nullptr); - auto sc = 
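// A minimal sketch of the creation pattern introduced above: the ExecutionContext is created
// without a SchedulingContext (no .sched(...) anymore) and afterwards handed to the ThreadTeam,
// which picks a hardware thread for it. All names are taken from this file; stackTop stands in
// for a prepared stack pointer such as thread1stack_top.
mythos::ExecutionContext ec(capAlloc());
auto tls = mythos::setupNewTLS();
ASSERT(tls != nullptr);
auto res = ec.create(kmem).as(myAS).cs(myCS)
             .prepareStack(stackTop).startFun(threadFun, nullptr)
             .suspended(false).fs(tls)
             .invokeVia(pl).wait();
TEST(res);
TEST(team.tryRunEC(pl, ec).wait());   // the team assigns and binds the scheduling context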
pa.alloc(pl).wait(); - TEST(sc); - auto res1 = ec.create(kmem).as(myAS).cs(myCS).sched(sc->cap) + auto res1 = ec.create(kmem).as(myAS).cs(myCS) .prepareStack(thread3stack_top).startFun(&thread_main, nullptr) .suspended(false).fs(tls) .invokeVia(pl).wait(); TEST(res1); + auto tres = team.tryRunEC(pl, ec).wait(); + TEST(tres); TEST(ic.registerForInterrupt(pl, ec.cap(), 0x32).wait()); TEST(ic.unregisterInterrupt(pl, 0x32).wait()); TEST(capAlloc.free(ec, pl)); MLOG_INFO(mlog::app, "test_InterruptControl end"); } +long SerialFib( long n ) { + return n<2 ? n :SerialFib(n-1)+SerialFib(n-2); +} + +class FibTask: public tbb::task { +public: + const long n; + long* const sum; + FibTask( long n_, long* sum_ ) : + n(n_), sum(sum_) + {} + tbb::task* execute() { // Overrides virtual function task::execute + if( n<10 ) { + *sum = SerialFib(n); + } else { + long x, y; + FibTask& a = *new( allocate_child() ) FibTask(n-1,&x); + FibTask& b = *new( allocate_child() ) FibTask(n-2,&y); + // Set ref_count to 'two children plus one for the wait". + set_ref_count(3); + // Start b running. + spawn( b ); + // Start a running and wait for all children (a and b). + spawn_and_wait_for_all(a); + // Do the sum + *sum = x+y; + } + return NULL; + } +}; + +long ParallelFib( long n ) { + long sum; + tbb::task_scheduler_init tsi; // manually initialize TBB scheduler + FibTask& a = *new(tbb::task::allocate_root()) FibTask(n,&sum); + tbb::task::spawn_root_and_wait(a); + tsi.blocking_terminate(); // wait until TBB is terminated + return sum; +} + +void test_TBB(){ + MLOG_INFO(mlog::app, "Test TBB"); + + //test thread team limitation + mythos::PortalLock pl(portal); + auto res = team.setLimit(pl, 2).wait(); + TEST(res); + + tbb::enableDynamicThreading(); + + long f = 40; + MLOG_INFO(mlog::app, "fib(", f, ") = ", DVAR(ParallelFib(f))); + + MLOG_INFO(mlog::app, "Test finished"); +} + bool primeTest(uint64_t n){ if(n == 0 | n == 1){ @@ -496,16 +554,6 @@ void test_CgaScreen(){ MLOG_INFO(mlog::app, "Test CGA finished"); } -void test_processor_allocator(){ - MLOG_INFO(mlog::app, "Test processor allocator"); - mythos::PortalLock pl(portal); - auto sc = pa.alloc(pl).wait(); - TEST(sc); - auto res = pa.free(pl, sc->cap).wait(); - TEST(res); - MLOG_INFO(mlog::app, "Test processor allocator finished"); -} - void test_process(){ MLOG_INFO(mlog::app, "Test process"); @@ -513,10 +561,57 @@ void test_process(){ Process p(&process_test_image_start); p.createProcess(pl); + p.join(pl); MLOG_INFO(mlog::app, "Test process finished"); } +void test_scalability(){ + MLOG_INFO(mlog::app, "Test runtime/energy scalability"); + mythos::PortalLock pl(portal); + timeval start_run, end_run; + + tbb::enableDynamicThreading(); + long f = 40; + + for(unsigned nThreads = 2; nThreads <= info_ptr->getNumThreads(); nThreads++){ + auto res = team.setLimit(pl, nThreads).wait(); + ASSERT(res); + + asm volatile ("":::"memory"); + auto start = rapl.getRaplVal(pl).wait().get(); + gettimeofday(&start_run, 0); + asm volatile ("":::"memory"); + + auto result = ParallelFib(f); + + asm volatile ("":::"memory"); + auto end = rapl.getRaplVal(pl).wait().get(); + gettimeofday(&end_run, 0); + asm volatile ("":::"memory"); + + double seconds =(end_run.tv_usec - start_run.tv_usec)/1000000.0 + end_run.tv_sec - start_run.tv_sec; + double pp0 = (end.pp0 - start.pp0) * pow(0.5, start.cpu_energy_units); + double pp1 = (end.pp1 - start.pp1) * pow(0.5, start.cpu_energy_units); + double psys = (end.psys - start.psys) * pow(0.5, start.cpu_energy_units); + double pkg = (end.pkg - start.pkg) * 
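// FibTask/ParallelFib above use TBB's low-level task API (allocate_child, set_ref_count,
// spawn_and_wait_for_all), matching the classic TBB tutorial example. As a sketch only, the
// same recursion can be expressed with tbb::parallel_invoke, which tbb/tbb.h also provides;
// the explicit task version is kept because test_TBB and test_scalability drive it directly.
long ParallelFibInvoke(long n) {
    if (n < 10) return SerialFib(n);            // same serial cutoff as FibTask::execute
    long x = 0, y = 0;
    tbb::parallel_invoke(
        [&] { x = ParallelFibInvoke(n - 1); },
        [&] { y = ParallelFibInvoke(n - 2); });
    return x + y;
}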
pow(0.5, start.cpu_energy_units); + double dram = (end.dram - start.dram) * pow(0.5, start.dram_energy_units); + + std::cout << "Prime test: fib(" << f << ") = " << result << ", "<< seconds << " seonds, " << nThreads + << " threads" << std::endl; + + std::cout << "Energy consumption (energy/avg. power):" + << " PP0:" << pp0 << "J/" << pp0/seconds << "W" + << " PP1:" << pp1 << "J/" << pp1/seconds << "W" + << " Platform:" << psys << "J/" << psys/seconds << "W" + << " Package: " << pkg << "J/ " << pkg/seconds << "W" + << " DRAM:" << dram << "J/" << dram/seconds << "W" + << std::endl; + } + + MLOG_INFO(mlog::app, "Finished scalability test"); +} + int main() { char const str[] = "Hello world!"; @@ -533,13 +628,13 @@ int main() //test_HostChannel(portal, 24*1024*1024, 2*1024*1024); test_ExecutionContext(); test_pthreads(); - test_Rapl(); - test_processor_allocator(); + //test_Rapl(); + //test_TBB(); + test_scalability(); test_process(); //test_CgaScreen(); char const end[] = "bye, cruel world!"; mythos::syscall_debug(end, sizeof(end)-1); - return 0; } diff --git a/kernel/app/process_test/process_test/process_test.cc b/kernel/app/process_test/process_test/process_test.cc index 1a23ffa0..d95e3544 100644 --- a/kernel/app/process_test/process_test/process_test.cc +++ b/kernel/app/process_test/process_test/process_test.cc @@ -34,15 +34,18 @@ #include "runtime/Example.hh" #include "runtime/PageMap.hh" #include "runtime/KernelMemory.hh" +#include "runtime/ThreadTeam.hh" #include "runtime/SimpleCapAlloc.hh" -#include "runtime/ProcessorAllocator.hh" #include "runtime/tls.hh" #include "runtime/mlog.hh" #include "runtime/InterruptControl.hh" #include #include "util/optional.hh" +#include "util/align.hh" #include "runtime/umem.hh" #include "runtime/Mutex.hh" +#include "runtime/thread-extra.hh" +#include "mythos/caps.hh" mythos::InfoFrame* info_ptr asm("info_ptr"); @@ -53,6 +56,7 @@ char initstack[stacksize]; char* initstack_top = initstack+stacksize; mythos::Portal portal(mythos::init::PORTAL, info_ptr->getInvocationBuf()); +mythos::Frame infoFrame(mythos::init::INFO_FRAME); mythos::CapMap myCS(mythos::init::CSPACE); mythos::PageMap myAS(mythos::init::PML4); mythos::KernelMemory kmem(mythos::init::KM); @@ -60,13 +64,30 @@ mythos::KObject device_memory(mythos::init::DEVICE_MEM); mythos::SimpleCapAlloc< mythos::init::APP_CAP_START , mythos::init::SIZE-mythos::init::APP_CAP_START> capAlloc(myCS); mythos::RaplDriverIntel rapl(mythos::init::RAPL_DRIVER_INTEL); -mythos::ProcessorAllocator pa(mythos::init::PROCESSOR_ALLOCATOR); +mythos::ThreadTeam team(mythos::init::THREAD_TEAM); int main() { - MLOG_ERROR(mlog::app, "New process started :)"); + MLOG_ERROR(mlog::app, "New process started :)", DVAR(sizeof(mythos::InfoFrame)), DVAR(mythos_get_pthread_ec_self())); + MLOG_INFO(mlog::app, "info frame", DVARhex(info_ptr), DVAR(info_ptr->getNumThreads()), DVAR(info_ptr->getPsPerTSC())); + mythos::PortalLock pl(portal); + auto size = 32*1024*1024; // 2 MB + auto align = 2*1024*1024; // 2 MB + uintptr_t vaddr = mythos::round_up(info_ptr->getInfoEnd() + mythos::align2M, mythos::align2M); + // allocate a 2MiB frame + mythos::Frame f(capAlloc()); + auto res2 = f.create(pl, kmem, size, align).wait(); + TEST(res2); + // map the frame into our address space + auto res3 = myAS.mmap(pl, f, vaddr, size, 0x1).wait(); + TEST(res3); + MLOG_INFO(mlog::app, "mmap frame", DVAR(res3.state()), + DVARhex(res3->vaddr), DVAR(res3->level)); + mythos::heap.addRange(vaddr, size); + + MLOG_ERROR(mlog::app, "process finished :("); return 0; } diff 
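// The scalability loop above converts raw RAPL counters with the factor 2^-energy_units into
// Joules and divides by the wall-clock time to obtain average power. Two small helpers capturing
// exactly that arithmetic (a sketch; the counter fields are whatever rapl.getRaplVal() returns,
// and <cmath>/<sys/time.h> are assumed to be available):
double counterToJoules(uint64_t endCount, uint64_t startCount, uint64_t energyUnits) {
    return (endCount - startCount) * std::pow(0.5, energyUnits);  // one unit = 2^-energyUnits J
}
double elapsedSeconds(timeval const& start, timeval const& end) {
    return (end.tv_usec - start.tv_usec) / 1000000.0 + (end.tv_sec - start.tv_sec);
}
// average power in Watts is then counterToJoules(...) / elapsedSeconds(...)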
--git a/kernel/boot/apboot-common/boot/DeployHWThread.hh b/kernel/boot/apboot-common/boot/DeployHWThread.hh index 946fc25c..e5be3b3f 100644 --- a/kernel/boot/apboot-common/boot/DeployHWThread.hh +++ b/kernel/boot/apboot-common/boot/DeployHWThread.hh @@ -38,6 +38,7 @@ #include "cpu/idle.hh" #include "async/Place.hh" #include "objects/DeleteBroadcast.hh" +#include "objects/PML4InvalidationBroadcastAmd64.hh" #include "objects/SchedulingContext.hh" #include "objects/InterruptControl.hh" #include "boot/memory-layout.h" @@ -78,6 +79,7 @@ struct DeployHWThread { idt.init(); DeleteBroadcast::init(); // depends on hwthread enumeration + PML4InvalidationBroadcast::init(); // depends on hwthread enumeration } void prepare(cpu::ThreadID threadID, cpu::ApicID apicID) diff --git a/kernel/boot/init-loader-amd64/boot/load_init.cc b/kernel/boot/init-loader-amd64/boot/load_init.cc index 4dbc9c75..7bdd01fd 100644 --- a/kernel/boot/init-loader-amd64/boot/load_init.cc +++ b/kernel/boot/init-loader-amd64/boot/load_init.cc @@ -51,6 +51,7 @@ namespace mythos { Event event::initLoader; Event event::initLoaderEarly; +Event event::initEC; Event event::initInfoFrame; namespace boot { @@ -63,8 +64,7 @@ InitLoader::InitLoader(char* image) init::CAP_ALLOC_END-init::CAP_ALLOC_START) , memMapper(&capAlloc, mythos::init::KM) // default: no processor allocator present - , processorAllocatorPresent(false) - , initSC(init::SCHEDULERS_START) + , mapSchedulingContexts(true) { MLOG_INFO(mlog::boot, "found init application image at", (void*)image); } @@ -167,7 +167,7 @@ optional InitLoader::initCSpace() if (!res) RETHROW(res); } - if(!processorAllocatorPresent){ + if(mapSchedulingContexts){ ASSERT(cpu::getNumThreads() <= init::SCHEDULERS_START - init::APP_CAP_START); MLOG_INFO(mlog::boot, "... 
create scheduling context caps in caps", init::SCHEDULERS_START, "till", init::SCHEDULERS_START+cpu::getNumThreads()-1); @@ -309,11 +309,12 @@ optional InitLoader::createEC(uintptr_t ipc_vaddr) optional res(Error::SUCCESS); if (res) res = ec->setCapSpace(capAlloc.get(init::CSPACE)); if (res) res = ec->setAddressSpace(capAlloc.get(init::PML4)); - if (res) res = ec->setSchedulingContext(capAlloc.get(initSC)); + if (mapSchedulingContexts && res) res = ec->setSchedulingContext(capAlloc.get(init::SCHEDULERS_START)); if (!res) RETHROW(res); ec->getThreadState().rdi = ipc_vaddr; ec->setEntryPoint(_img.header()->entry); ec->setTrapped(false); + event::initEC.emit(*ec); RETURN(Error::SUCCESS); } diff --git a/kernel/boot/init-loader-amd64/boot/load_init.hh b/kernel/boot/init-loader-amd64/boot/load_init.hh index fa180f2f..0a0cd5c8 100644 --- a/kernel/boot/init-loader-amd64/boot/load_init.hh +++ b/kernel/boot/init-loader-amd64/boot/load_init.hh @@ -31,6 +31,7 @@ #include "util/VectorMax.hh" #include "objects/CapEntry.hh" #include "objects/IPageMap.hh" +#include "objects/ExecutionContext.hh" #include "util/events.hh" #include "mythos/InfoFrame.hh" #include "boot/MemMapper.hh" @@ -82,8 +83,7 @@ namespace mythos { Portal* _portal; /* to be manipulated by optional processor allocator */ - bool processorAllocatorPresent; - CapPtr initSC; + bool mapSchedulingContexts; }; } // namespace boot @@ -91,6 +91,7 @@ namespace mythos { namespace event { extern Event initLoader; extern Event initLoaderEarly; + extern Event initEC; extern Event initInfoFrame; } diff --git a/kernel/build/emu-quemu-amd64/mcconf.module b/kernel/build/emu-quemu-amd64/mcconf.module index fa058a05..338781e1 100644 --- a/kernel/build/emu-quemu-amd64/mcconf.module +++ b/kernel/build/emu-quemu-amd64/mcconf.module @@ -6,7 +6,7 @@ makefile_head = ''' QEMU_MEMSIZE ?= 1024M -QEMUFLAGS += -cpu SandyBridge -smp 4 +QEMUFLAGS += -cpu Skylake-Client-v1 -smp 4 # com1: text output to terminal QEMUFLAGS += -serial stdio # com2: object channel for remote procedure calls into mythos @@ -23,7 +23,7 @@ qemu: boot32.elf qemu-system-x86_64 $(QEMUFLAGS) -m $(QEMU_MEMSIZE) -kernel boot32.elf qemu-text: boot32.elf - qemu-system-x86_64 -m $(QEMU_MEMSIZE) -cpu SandyBridge -smp 4 -curses -kernel boot32.elf + qemu-system-x86_64 -m $(QEMU_MEMSIZE) -cpu Skylake-Client-v1 -smp 4 -curses -kernel boot32.elf qemuidbg: boot32.elf qemu-system-x86_64 $(QEMUFLAGS) -kernel boot32.elf -qmp stdio diff --git a/kernel/mythos/infoFrame/mythos/InfoFrame.hh b/kernel/mythos/infoFrame/mythos/InfoFrame.hh index d467105b..1440ded8 100644 --- a/kernel/mythos/infoFrame/mythos/InfoFrame.hh +++ b/kernel/mythos/infoFrame/mythos/InfoFrame.hh @@ -26,26 +26,43 @@ #pragma once #include "mythos/InvocationBuf.hh" - -#define PS_PER_TSC_DEFAULT (0x180) +#include "mythos/init.hh" +#include namespace mythos { +constexpr uint64_t PS_PER_TSC_DEFAULT = 0x180; +constexpr size_t MAX_IB = 4096; + class InfoFrame{ public: InfoFrame() : psPerTsc(PS_PER_TSC_DEFAULT) , numThreads(1) + , parentEC(0) + , running(true) {} - InvocationBuf* getInvocationBuf() {return &ib; } + InvocationBuf* getInvocationBuf() {return &ib[0]; } + InvocationBuf* getInvocationBuf(size_t i) {return &ib[i]; } + size_t getIbOffset(size_t i){ + return reinterpret_cast(&ib[i]) - reinterpret_cast(this); + } + uint64_t getPsPerTSC() { return psPerTsc; } size_t getNumThreads() { return numThreads; } uintptr_t getInfoEnd () { return reinterpret_cast(this) + sizeof(InfoFrame); } + // process synchronization + void setParent(CapPtr ptr) { 
parentEC.store(ptr); } + CapPtr getParent() { return parentEC.load(); } + bool isRunning() { return running.load(); } + void setRunning(bool running) { this->running.store(running); } - InvocationBuf ib; // needs to be the first member (see Initloader::createPortal) + InvocationBuf ib[MAX_IB]; // needs to be the first member (see Initloader::createPortal) uint64_t psPerTsc; // picoseconds per time stamp counter size_t numThreads; // number of hardware threads available in the system + std::atomic parentEC; + std::atomic running; // process not finished }; } // namespace mythos diff --git a/kernel/mythos/init/mythos/init.hh b/kernel/mythos/init/mythos/init.hh index 6da39c12..766ae9fd 100644 --- a/kernel/mythos/init/mythos/init.hh +++ b/kernel/mythos/init/mythos/init.hh @@ -38,6 +38,7 @@ namespace init { CSPACE, PML4, EC, + PARENT_EC, //ec of parent process PORTAL, EXAMPLE_FACTORY, MEMORY_REGION_FACTORY, @@ -46,6 +47,7 @@ namespace init { CAPMAP_FACTORY, PAGEMAP_FACTORY, UNTYPED_MEMORY_FACTORY, + THREADTEAM_FACTORY, CAP_ALLOC_START, CAP_ALLOC_END = CAP_ALLOC_START+200, MSG_FRAME, @@ -54,6 +56,7 @@ namespace init { CPUDRIVER = SCHEDULERS_START+256, RAPL_DRIVER_INTEL, PROCESSOR_ALLOCATOR, + THREAD_TEAM, INFO_FRAME, INTERRUPT_CONTROL_START, INTERRUPT_CONTROL_END = INTERRUPT_CONTROL_START+256, diff --git a/kernel/mythos/invocation/mythos/protocol/CapMap.hh b/kernel/mythos/invocation/mythos/protocol/CapMap.hh index c3fb1689..cb9867d7 100644 --- a/kernel/mythos/invocation/mythos/protocol/CapMap.hh +++ b/kernel/mythos/invocation/mythos/protocol/CapMap.hh @@ -37,7 +37,6 @@ namespace mythos { enum Methods : uint8_t { DERIVE, REFERENCE, - MOVE, DELETE, REVOKE }; @@ -90,16 +89,6 @@ namespace mythos { CapRequest request; }; - struct Move : public BinaryOp { - constexpr static uint16_t label = (proto<<8) + MOVE; - - Move(CapPtr src, uint8_t srcDepth, - CapPtr dstCS, CapPtr dst, uint8_t dstDepth) - : BinaryOp(label, getLength(this), src, srcDepth, dstCS, dst, dstDepth) - {} - - }; - struct Delete : public InvocationBase { constexpr static uint16_t label = (proto<<8) + DELETE; @@ -142,7 +131,6 @@ namespace mythos { switch(Methods(m)) { case REFERENCE: return obj->invokeReference(args...); case DERIVE: return obj->invokeDerive(args...); - case MOVE: return obj->invokeMove(args...); case DELETE: return obj->invokeDelete(args...); case REVOKE: return obj->invokeRevoke(args...); default: return Error::NOT_IMPLEMENTED; diff --git a/kernel/mythos/invocation/mythos/protocol/ExecutionContext.hh b/kernel/mythos/invocation/mythos/protocol/ExecutionContext.hh index c8d21d44..0cd5e742 100644 --- a/kernel/mythos/invocation/mythos/protocol/ExecutionContext.hh +++ b/kernel/mythos/invocation/mythos/protocol/ExecutionContext.hh @@ -119,7 +119,12 @@ namespace mythos { struct Create : public KernelMemory::CreateBase { typedef InvocationBase response_type; Create(CapPtr dst, CapPtr factory) - : CreateBase(dst, factory, getLength(this), 3), start(false) { } + : CreateBase(dst, factory, getLength(this), 3), start(false) + { + as(null_cap); + cs(null_cap); + sched(null_cap); + } Amd64Registers regs; bool start; CapPtr as() const { return this->capPtrs[2]; } diff --git a/kernel/mythos/invocation/mythos/protocol/common.hh b/kernel/mythos/invocation/mythos/protocol/common.hh index 2214f382..d882c2e6 100644 --- a/kernel/mythos/invocation/mythos/protocol/common.hh +++ b/kernel/mythos/invocation/mythos/protocol/common.hh @@ -46,6 +46,7 @@ namespace mythos { CPUDRIVERKNC, RAPLDRIVERINTEL, PROCESSORALLOCATOR, + THREADTEAM, 
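// With the new parentEC/running fields the InfoFrame doubles as a small synchronization area
// between a parent and its child process; Process::createProcess()/join() in runtime/process.hh
// (not part of this diff) are the intended users. A sketch of that idea, under that assumption:
void childExit(mythos::InfoFrame* info) {
    info->setRunning(false);                        // mark this process as finished
    mythos::syscall_signal(info->getParent());      // wake the parent EC stored via setParent()
}
void parentJoin(mythos::InfoFrame* childInfo) {
    while (childInfo->isRunning()) { /* poll or wait for the signal from the child */ }
}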
INTERRUPT_CONTROL, }; diff --git a/kernel/objects/capability-spinning/objects/CapEntry.cc b/kernel/objects/capability-spinning/objects/CapEntry.cc index 52c134ed..36202038 100644 --- a/kernel/objects/capability-spinning/objects/CapEntry.cc +++ b/kernel/objects/capability-spinning/objects/CapEntry.cc @@ -82,25 +82,29 @@ namespace mythos { { ASSERT(other.cap().isAllocated()); ASSERT(!other.isLinked()); + lock_cap(); if (!lock_prev()) { + unlock_cap(); other.reset(); THROW(Error::GENERIC_ERROR); } - lock(); + lock_next(); auto thisCap = cap(); if (isRevoking() || !thisCap.isUsable()) { other.reset(); - unlock(); + unlock_next(); unlock_prev(); + unlock_cap(); THROW(Error::INVALID_CAPABILITY); } + // using these values removes lock auto next= Link(_next).withoutFlags(); auto prev= Link(_prev).withoutFlags(); next->setPrevPreserveFlags(&other); other._next.store(next.value()); - // deleted or revoking can not be set in other._prev + // deletion, deleted or revoking can not be set in other._prev // as we allocated other for moving other._prev.store(prev.value()); prev->_next.store(Link(&other).value()); @@ -125,13 +129,25 @@ namespace mythos { return true; } - optional CapEntry::unlink() + // fails if cap was changed concurrently + bool CapEntry::try_kill(Cap expected) { - auto next = Link(_next).withoutFlags(); - auto prev = Link(_prev).withoutFlags(); - next->_prev.store(prev.value()); - prev->_next.store(next.value()); - _prev.store(Link().value()); + CapValue expectedValue = expected.value(); + MLOG_DETAIL(mlog::cap, this, ".try_kill", DVAR(expected)); + if (!_cap.compare_exchange_strong(expectedValue, expected.asZombie().value())) { + // if the cap was just zombified by sb. else, thats okay + return (Cap(expectedValue).asZombie() == expected.asZombie()); + } else return true; + } + + + optional CapEntry::unlinkAndUnlockLinks() + { + auto next = Link(_next); + auto prev = Link(_prev); + next->setPrevPreserveFlags(prev.ptr()); + prev->_next.store(next.withoutFlags().value()); + _prev.store(Link().withoutPtr().value()); _next.store(Link().value()); RETURN(Error::SUCCESS); } @@ -142,14 +158,9 @@ namespace mythos { if (!prev) { return Error::GENERIC_ERROR; } - if (prev->try_lock()) { - if (Link(_prev.load()).ptr() == prev) { - return Error::SUCCESS; - } else { // my _prev has changed in the mean time - prev->unlock(); - return Error::RETRY; - } - } else return Error::RETRY; + auto success = prev->try_lock_next(this); + ASSERT(Link(_prev.load()).ptr() == prev); + return success ? Error::SUCCESS : Error::RETRY; } bool CapEntry::lock_prev() diff --git a/kernel/objects/capability-spinning/objects/CapEntry.hh b/kernel/objects/capability-spinning/objects/CapEntry.hh index e837ed87..cda75dc2 100644 --- a/kernel/objects/capability-spinning/objects/CapEntry.hh +++ b/kernel/objects/capability-spinning/objects/CapEntry.hh @@ -44,7 +44,15 @@ namespace mythos { * thus prev and next of root are locked independently * * operations that write to the capability - * or flags (zombie) must lock both next and prev + * or call deleteCap on an obejct must lock_cap + * + * lock order: lock_cap, lock_prev, lock_next + * + * acquired status means someone exclusively acquired an unlinked CapEntry + * therefore no races with others trying to insert it. + * + * acquired status must be only hold shortly + * */ class CapEntry { @@ -83,22 +91,59 @@ namespace mythos { * allocated. Returns true if zombified. 
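// try_kill() above replaces the unconditional kill() used by the old traversal: the observed
// capability value is CAS'ed into its zombie form, and a failed CAS still counts as success if
// somebody else already zombified exactly that value. Sketch of the caller's contract
// (mirrors _findLeaf further down; `entry` is a hypothetical child entry):
bool killChild(mythos::CapEntry* entry) {
    mythos::Cap observed = entry->cap();   // snapshot of the child's capability
    return entry->try_kill(observed);      // false => it changed concurrently, restart the scan
}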
*/ bool kill(); - optional unlink(); - bool try_lock() { return !(_next.fetch_or(LOCKED_FLAG) & LOCKED_FLAG); } - void lock() { while (!try_lock()) { hwthread_pause(); } } - void unlock() { auto res = _next.fetch_and(~LOCKED_FLAG); ASSERT(res & LOCKED_FLAG); } + bool try_kill(Cap expected); + + optional unlinkAndUnlockLinks(); + + /* lock next functions protect the link to the next CapEntry */ + + bool try_lock_next(CapEntry* next) + { + Link expected(next); + uintlink_t expectedValue = expected.value(); + return _next.compare_exchange_strong(expectedValue, expected.withFlags(LOCKED_FLAG).value()); + } + + void lock_next(){ while (!try_lock_next()) { hwthread_pause(); } } + + void unlock_next() + { + auto res = _next.fetch_and(~LOCKED_FLAG); + ASSERT(res & LOCKED_FLAG); + } + + /* deletion lock functions protect the deletion of a object */ + + bool try_lock_cap() + { + bool ret = !(_prev.fetch_or(LOCKED_FLAG) & LOCKED_FLAG); + return ret; + } + + void lock_cap() { while (!try_lock_cap()) { hwthread_pause(); } } + + void unlock_cap() + { + auto res = _prev.fetch_and(~LOCKED_FLAG); + ASSERT(res & LOCKED_FLAG); + } /// only for assertions and debugging /// only trust the result if it is false and it should be true - bool is_locked() const { return _next.load() & CapEntry::LOCKED_FLAG; } + bool next_is_locked() const { return _next.load() & CapEntry::LOCKED_FLAG; } + + /* lock prev functions protect the link to the next CapEntry */ Error try_lock_prev(); bool lock_prev(); - void unlock_prev() { Link(_prev)->unlock(); } + + void unlock_prev() + { + Link(_prev)->unlock_next(); + } CapEntry* next() { - ASSERT(is_locked()); return Link(_next).ptr(); } @@ -115,9 +160,18 @@ namespace mythos { // called by move and insertAfter void setPrevPreserveFlags(CapEntry* ptr); + // called by lock_next + bool try_lock_next() { return !(_next.fetch_or(LOCKED_FLAG) & LOCKED_FLAG); } + + // lock flag in _next and _prev + // _next protects the link to the next entry (lock_next) + // _prev protects the capability in the entry from being changed (lock_cap) static constexpr uintlink_t LOCKED_FLAG = 1; - static constexpr uintlink_t REVOKING_FLAG = 1 << 1; - static constexpr uintlink_t DELETED_FLAG = 1 << 2; + + // flags describing the entry in _prev + static constexpr uintlink_t REVOKING_FLAG = 1 << 1; // prevents from moving + static constexpr uintlink_t DELETED_FLAG = 1 << 2; // prevents from inserting in soon-to-be-deleted object + static constexpr uintlink_t FLAG_MASK = 7; static_assert((DELETED_FLAG | REVOKING_FLAG | FLAG_MASK) == FLAG_MASK, "prev flags do not fit"); @@ -134,9 +188,10 @@ namespace mythos { CapEntry* operator->() { ASSERT(ptr()); return ptr(); } Link withFlags(uintlink_t flags) const { return Link(_offset(), flags); } - Link withoutFlags() const { return Link(_offset(), 0); } + Link withoutFlags() const { return withFlags(0); } Link withPtr(CapEntry* ptr) const { return Link(ptr, flags()); } + Link withoutPtr() const { return withPtr(nullptr); } CapEntry* ptr() const { @@ -187,11 +242,15 @@ namespace mythos { { ASSERT(isKernelAddress(this)); ASSERT(targetEntry.cap().isAllocated()); - lock(); // lock the parent entry, the child is already acquired + // lock the parent entry, the child is already acquired + lock_cap(); + lock_next(); auto curCap = cap(); // lazy-locking: check that we still operate on the same parent capability if (!curCap.isUsable() || curCap != parentCap) { - unlock(); // unlock the parent entry + // unlock the parent entry + unlock_next(); + unlock_cap(); targetEntry.reset(); // 
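// The three locks introduced above guard different things: lock_cap (flag stored in _prev)
// protects the capability value and deleteCap(), lock_prev locks the predecessor's _next link,
// and lock_next locks this entry's own _next link. They must be taken in the documented order
// lock_cap -> lock_prev -> lock_next, as moveTo(), resetReference() and the revoke path do.
// A sketch of that discipline (`entry` stands for some CapEntry&) for killing and unlinking:
entry.lock_cap();                 // 1. protect the capability value
if (!entry.lock_prev()) {         //    entry was already unlinked by somebody else
    entry.unlock_cap();
} else {
    entry.lock_next();            // 2./3. both link locks held
    // ... kill the cap, then unlinkAndUnlockLinks() removes the entry and releases both
    //     link locks; reset() finally clears the entry (and with it the cap lock flag)
}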
release exclusive usage and revert to an empty entry THROW(Error::LOST_RACE); } @@ -201,11 +260,14 @@ namespace mythos { auto next = Link(_next.load()).withoutFlags(); next->setPrevPreserveFlags(&targetEntry); + // dest was never locked MLOG_ERROR(mlog::cap, "this unlocks next in child", DVAR(targetEntry)); targetEntry._next.store(next.value()); // deleted or revoking can not be set in target._prev // as we allocated target for being inserted + // dest was never locked MLOG_ERROR(mlog::cap, "this unlocks cap in child", DVAR(targetEntry)); targetEntry._prev.store(Link(this).value()); this->_next.store(Link(&targetEntry).value()); // unlocks the parent entry + unlock_cap(); targetEntry.commit(targetCap); // release the target entry as usable RETURN(Error::SUCCESS); } @@ -219,7 +281,7 @@ namespace mythos { if (entry.isLinked()) out << ":linked"; if (entry.isDeleted()) out << ":deleted"; if (entry.isUnlinked()) out << ":unlinked"; - if (entry.is_locked()) out << ":locked"; + if (entry.next_is_locked()) out << ":next_locked"; if (entry.isRevoking()) out << ":revoking"; return out; } diff --git a/kernel/objects/capability-spinning/objects/RevokeOperation.cc b/kernel/objects/capability-spinning/objects/RevokeOperation.cc index 18e373ca..7f584cb4 100644 --- a/kernel/objects/capability-spinning/objects/RevokeOperation.cc +++ b/kernel/objects/capability-spinning/objects/RevokeOperation.cc @@ -85,11 +85,11 @@ namespace mythos { monitor.requestDone(); return; } - entry.lock(); + entry.lock_cap(); auto rootCap = entry.cap(); if (!rootCap.isUsable()) { // this is not the cap you are locking for ... - entry.unlock(); + entry.unlock_cap(); res->response(t, Error::LOST_RACE); release(); monitor.requestDone(); @@ -99,7 +99,7 @@ namespace mythos { // if some other revoke or delete clears the flag or changes the cap values // all children have been deleted in the mean time and we are done entry.setRevoking(); - entry.unlock(); + entry.unlock_cap(); _result = _delete(&entry, rootCap).state(); _startAsyncDelete(t); } @@ -107,90 +107,81 @@ namespace mythos { optional RevokeOperation::_delete(CapEntry* root, Cap rootCap) { CapEntry* leaf; + Cap leafCap; do { - if (_startTraversal(root, rootCap)) { - leaf = _findLockedLeaf(root); - MLOG_DETAIL(mlog::cap, "_findLockedLeaf returned", DVAR(*leaf), DVAR(rootCap)); - if (leaf == root && !rootCap.isZombie()) { - // this is a revoke, do not delete the root. no more children -> we are done - root->finishRevoke(); - root->unlock(); - root->unlock_prev(); - RETURN(Error::SUCCESS); - } - auto leafCap = leaf->cap(); - ASSERT(leafCap.isZombie()); - if (leafCap.getPtr() == _guarded) { - leaf->unlock(); - leaf->unlock_prev(); - // attempted to delete guarded object - THROW(Error::CYCLIC_DEPENDENCY); - } - auto delRes = leafCap.getPtr()->deleteCap(*leaf, leafCap, *this); - if (delRes) { - leaf->unlink(); - leaf->reset(); - } else { - // Either tried to delete a portal that is currently deleting - // or tried to to delete _guarded via a recursive call. - leaf->unlock(); - leaf->unlock_prev(); - RETHROW(delRes); - } - } else RETURN(Error::SUCCESS); // could not restart, must be done + // compare only value, not the zombie state + if (root->cap().asZombie() != rootCap.asZombie()) { + // start has a new value + // must be the work of another deleter ... success! 
+ RETURN(Error::SUCCESS); // could not restart, must be done + } + if (!_findLeaf(root, rootCap, leaf, leafCap)) continue; + MLOG_DETAIL(mlog::cap, "_findLockedLeaf returned", DVAR(*leaf), DVAR(rootCap)); + if (leaf == root && !rootCap.isZombie()) { + // this is a revoke, do not delete the root. no more children -> we are done + root->finishRevoke(); + RETURN(Error::SUCCESS); + } + ASSERT(leafCap.isZombie()); + if (leafCap.getPtr() == _guarded) { + leaf->unlock_cap(); + // attempted to delete guarded object + THROW(Error::CYCLIC_DEPENDENCY); + } + leaf->lock_cap(); + if (leaf->cap() != leafCap) { + MLOG_DETAIL(mlog::cap, "leaf cap changed concurrently"); + leaf->unlock_cap(); + continue; + } + auto delRes = leafCap.getPtr()->deleteCap(*leaf, leafCap, *this); + auto prevres = leaf->lock_prev(); + ASSERT_MSG(prevres, "somebody unlinked CapEntry currently in unlinking process"); + leaf->lock_next(); + if (delRes) { + leaf->unlinkAndUnlockLinks(); + leaf->reset(); + } else { + // deletion failed in the object specific handler + // this can be also from trying to delete rhw guarded object (currently deleting portal) + leaf->unlock_cap(); + RETHROW(delRes); + } } while (leaf != root); // deleted root RETURN(Error::SUCCESS); } - bool RevokeOperation::_startTraversal(CapEntry* root, Cap rootCap) - { - if (!root->lock_prev()) { - // start is currently unlinked - // must be the work of another deleter ... success! - return false; - } - if (root->prev() == root) { - // this is the actual root of the tree - // and has no children - // avoid deadlocks and finish the operation - root->unlock_prev(); - return false; - } - root->lock(); - // compare only value, not the zombie state - if (root->cap().asZombie() != rootCap.asZombie()) { - // start has a new value - // must be the work of another deleter ... success! 
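// Condensed control flow of the reworked _delete() above (descriptive only): the scan down the
// subtree holds no locks; only right before removal are the leaf's cap lock and the two link
// locks taken, and any concurrency problem simply restarts the loop at the subtree root.
//
//   do {
//     if the root cap changed            -> another deleter finished the job, SUCCESS
//     _findLeaf(root, ...)               -> walk next() children, try_kill() each cap on the
//                                           way down; any concurrent change returns false
//     if leaf == root and not a zombie   -> plain revoke with no children left: finishRevoke()
//     leaf->lock_cap(), re-check the cap -> object->deleteCap(...)
//     lock_prev()/lock_next()            -> unlinkAndUnlockLinks(), reset()
//   } while (leaf != root);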
- root->unlock(); - root->unlock_prev(); - return false; - } - return true; - } - - CapEntry* RevokeOperation::_findLockedLeaf(CapEntry* root) + // not sure if we even need that locking + bool RevokeOperation::_findLeaf(CapEntry* const root, Cap const rootCap, CapEntry*& leafEntry, Cap& leafCap) { - auto curEntry = root; + leafEntry = root; + leafCap = rootCap; while (true) { - auto curCap = curEntry->cap(); - auto nextEntry = curEntry->next(); - // wait for potencially allocated cap to become usable/zombie - Cap nextCap; - for (nextCap = nextEntry->cap(); - nextCap.isAllocated(); - nextCap = nextEntry->cap()) { - ASSERT(!nextEntry->isDeleted()); - hwthread_pause(); + auto nextEntry = leafEntry->next(); + if (nextEntry) { + Cap nextCap; + // wait for potencially allocated cap to become usable/zombie + for (nextCap = nextEntry->cap(); + nextCap.isAllocated(); + nextCap = nextEntry->cap()) { + ASSERT(!nextEntry->isDeleted()); + hwthread_pause(); + } + if (cap::isParentOf(*leafEntry, leafCap, *nextEntry, nextCap)) { + if (!nextEntry->try_kill(nextCap)) { + MLOG_DETAIL(mlog::cap, "cap to be killed changed concurrently"); + return false; + } + // go to next child + leafEntry = nextEntry; + leafCap = nextCap.asZombie(); + continue; + } else return true; + } else { + MLOG_DETAIL(mlog::cap, "found dead end scanning for leaf"); + return false; // restart at root } - if (cap::isParentOf(*curEntry, curCap, *nextEntry, nextCap)) { - // go to next child - curEntry->unlock_prev(); - nextEntry->kill(); - nextEntry->lock(); - curEntry = nextEntry; - continue; - } else return curEntry; } } diff --git a/kernel/objects/capability-spinning/objects/RevokeOperation.hh b/kernel/objects/capability-spinning/objects/RevokeOperation.hh index 6ad3a33c..0ec3c42b 100644 --- a/kernel/objects/capability-spinning/objects/RevokeOperation.hh +++ b/kernel/objects/capability-spinning/objects/RevokeOperation.hh @@ -39,6 +39,29 @@ namespace mythos { +/** + * The revoke operation consists of 2 phases seperated + * by an invalidation broadcast. + * + * 1. synchronous phase removes entries from the capability tree + * 2. delete broadcast ensures there are no more references in stack + * 3. asynchronous phase interacts with a KM to recycle objects + * + * 1. synchronous phase + * + * - Traverses the tree starting from an node (root of that subtree). + * - this traverses the list without locking + * - It finds a leaf, killing all the capabilities inbetween. + * - the leaf is deleted by killing deleteCap of the object (protected by lock_cap). + * if that succeedes, the CapEntry MUST be removed from the tree, as we can't do that twice + * and can't hold lock_cap forever. + * - If there are problems because of concurrent access, the operation + * restarts at the root of the subtree. + * - If we can't fix problems synchronously, we switch to asynch. phase + * without finishing, reporting "Error::RETRY" to the user. 
+ * - a guarded object is the object containing the RevokeOperation, it can't be deleted + * + */ class RevokeOperation : public IResult , protected IDeleter @@ -107,8 +130,7 @@ private: void _deleteObject(Tasklet* t); optional _delete(CapEntry* root, Cap rootCap); - bool _startTraversal(CapEntry* root, Cap rootCap); - CapEntry* _findLockedLeaf(CapEntry* root); + bool _findLeaf(CapEntry* const root, Cap const rootCap, CapEntry*& leaf, Cap& leafCap); void _startAsyncDelete(Tasklet* t); diff --git a/kernel/objects/capability-utils/objects/ops.hh b/kernel/objects/capability-utils/objects/ops.hh index a0cc8a9f..068c3074 100644 --- a/kernel/objects/capability-utils/objects/ops.hh +++ b/kernel/objects/capability-utils/objects/ops.hh @@ -72,10 +72,14 @@ namespace mythos { bool resetReference(CapEntry& dst, const COMMITFUN& fun) { if (!dst.kill()) return false; // not killable (allocated but not usable) - if (!dst.lock_prev()) return true; // was already unlinked, should be empty eventually - dst.lock(); + dst.lock_cap(); + if (!dst.lock_prev()) { + dst.unlock_cap(); + return true; // was already unlinked, should be empty eventually + } + dst.lock_next(); dst.kill(); // kill again because someone might have inserted something usable meanwhile - dst.unlink(); + dst.unlinkAndUnlockLinks(); fun(); // perform some caller-defined action while still in an exclusive state dst.reset(); // this markes the entry as writeable again, leaves exclusive state return true; diff --git a/kernel/objects/capmap/objects/CapMap.cc b/kernel/objects/capmap/objects/CapMap.cc index 3615bd21..1b926f01 100644 --- a/kernel/objects/capmap/objects/CapMap.cc +++ b/kernel/objects/capmap/objects/CapMap.cc @@ -66,15 +66,15 @@ namespace mythos { CapMapFactory::factory(CapEntry* dstEntry, CapEntry* memEntry, Cap memCap, IAllocator* mem, CapPtrDepth indexbits, CapPtrDepth guardbits, CapPtr guard) { - auto obj = initial(memEntry, memCap, mem, indexbits, guardbits, guard); - if (!obj) RETHROW(obj); - auto& root = obj->getRoot(); - auto cap = root.cap(); - // Just a reference is stored in the target capability entry. - // The CapMap contains its original capability internally - // in order to resolve cyclic dependencies during deletion. - auto res = cap::inherit(root, cap, *dstEntry, cap.asReference()); - if (!res) RETHROW(res); // the object was deleted concurrently + auto ptr = mem->alloc(CapMap::size(indexbits), 64); + if (!ptr) RETHROW(ptr); + auto obj = new(*ptr) CapMap(mem, indexbits, guardbits, guard); + auto cap = Cap(obj).withData(CapMapData().writable(true)); + auto res = cap::inherit(*memEntry, memCap, *dstEntry, cap); + if (!res) { + mem->free(*ptr, CapMap::size(indexbits)); + RETHROW(res); + } return obj; } @@ -86,7 +86,9 @@ namespace mythos { optional CapMap::deleteCap(CapEntry&, Cap self, IDeleter& del) { + MLOG_INFO(mlog::cap, "CapMap::deleteCap"); if (self.isOriginal()) { + MLOG_DETAIL(mlog::cap, "delete original"); // delete all entries, leaves them in a locked state (allocated) // in order to prevent concurrent insertion of new caps. 
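// resetReference() below follows the same order (cap lock before the link locks) and still runs
// a caller-supplied action while the entry is held exclusively. A hypothetical usage sketch:
// `slot`/`someEntry` and the lambda are illustrative, and the namespace is assumed to be cap,
// like the neighbouring inherit()/reference() helpers:
mythos::CapEntry& slot = someEntry;
cap::resetReference(slot, [&] {
    // runs after the old cap was killed and unlinked, before the slot becomes writable again,
    // e.g. to drop a cached pointer that still referred to the old object
});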
for (size_t i=0; igetProtocol()) { @@ -186,21 +188,6 @@ namespace mythos { return cap::reference(*srcRef->entry, *dstRef->entry, srcRef->entry->cap(), data.request).state(); } - Error CapMap::invokeMove(Tasklet*, Cap cap, IInvocation* msg) - { - auto data = msg->getMessage()->read(); - // retrieve dst cap entry - auto dstRef = lookupDst(cap, msg, data); - if (!dstRef) return dstRef.state(); - // retrieve source cap entry - auto srcRef = this->lookup(cap, data.srcPtr(), data.srcDepth, true); - if (!srcRef) return srcRef.state(); - // move - auto res = dstRef->entry->acquire(); - if (!res) { return res.state(); } - return srcRef->entry->moveTo(*dstRef->entry).state(); - } - Error CapMap::invokeDelete(Tasklet*, Cap cap, IInvocation* msg) { auto data = msg->getMessage()->read(); diff --git a/kernel/objects/capmap/objects/CapMap.hh b/kernel/objects/capmap/objects/CapMap.hh index 51b7a967..6bc3c757 100644 --- a/kernel/objects/capmap/objects/CapMap.hh +++ b/kernel/objects/capmap/objects/CapMap.hh @@ -75,7 +75,6 @@ public: // IKernelObject interface Error invokeReference(Tasklet*, Cap, IInvocation*); Error invokeDerive(Tasklet*, Cap, IInvocation*); - Error invokeMove(Tasklet*, Cap, IInvocation*); Error invokeDelete(Tasklet*, Cap, IInvocation*); Error invokeRevoke(Tasklet*, Cap, IInvocation*); Error getDebugInfo(Cap, IInvocation*); diff --git a/kernel/objects/execution-context/objects/ExecutionContext.cc b/kernel/objects/execution-context/objects/ExecutionContext.cc index 7c2bd5e4..b6767947 100644 --- a/kernel/objects/execution-context/objects/ExecutionContext.cc +++ b/kernel/objects/execution-context/objects/ExecutionContext.cc @@ -104,6 +104,28 @@ namespace mythos { RETURN(_sched.set(this, *sce, obj.cap())); } + void ExecutionContext::setSchedulingContext( + Tasklet* t, IResult* r, CapEntry* sce) + { + MLOG_INFO(mlog::ec, "setScheduler", DVAR(this), DVAR(sce)); + ASSERT(r); + + monitor.request(t,[=](Tasklet*){ + if(currentPlace.load() != nullptr){ + r->response(t, optional(false)); + }else{ + TypedCap obj(sce); + if (!obj){ + MLOG_ERROR(mlog::ec, "invalid SC cap entry", DVAR(this), DVAR(sce)); + r->response(t, optional()); + } + auto ret = (_sched.set(this, sce, obj.cap())); + r->response(t, ret); + } + monitor.responseAndRequestDone(); + }); + } + optional ExecutionContext::setSchedulingContext( Tasklet* t, IInvocation* msg, optional sce) { @@ -403,8 +425,15 @@ namespace mythos { switch(SyscallCode(code)) { case SYSCALL_EXIT: - MLOG_INFO(mlog::syscall, "exit"); - setFlags(IS_TRAPPED); + { + MLOG_INFO(mlog::syscall, "exit"); + setFlags(IS_TRAPPED); + auto place = currentPlace.load(); + if (place) synchronousAt(place) << [this]() { + this->saveState(); + this->_sched.reset(); + }; + } break; case SYSCALL_POLL: diff --git a/kernel/objects/execution-context/objects/ExecutionContext.hh b/kernel/objects/execution-context/objects/ExecutionContext.hh index d912da87..26d58f19 100644 --- a/kernel/objects/execution-context/objects/ExecutionContext.hh +++ b/kernel/objects/execution-context/objects/ExecutionContext.hh @@ -78,6 +78,8 @@ namespace mythos { /// only for initial setup optional setSchedulingContext(optional sce); + // for ThreadTeam + void setSchedulingContext(Tasklet* t, IResult* r, CapEntry* sce); optional setSchedulingContext(Tasklet* t, IInvocation* msg, optional sce); Error unsetSchedulingContext(); @@ -117,6 +119,7 @@ namespace mythos { if (id == typeId()) return static_cast(this); if (id == typeId()) return static_cast(this); if (id == typeId()) return static_cast(this); + if (id == 
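// The new asynchronous setSchedulingContext(Tasklet*, IResult<bool>*, CapEntry*) above is meant
// to be called by the ThreadTeam (whose implementation is not part of the hunks shown here).
// A sketch of such a caller, assuming the usual mythos IResult callback shape:
class ScAssigner : public mythos::IResult<bool> {
public:
    void assign(mythos::ExecutionContext* ec, mythos::CapEntry* scEntry) {
        ec->setSchedulingContext(&tasklet, this, scEntry);   // reply arrives via response()
    }
    void response(mythos::Tasklet* /*t*/, mythos::optional<bool> res) override {
        // res == false : the EC is currently running somewhere (currentPlace != nullptr)
        // res invalid  : the SC cap entry was not a usable scheduler capability
        // otherwise    : the scheduler was bound to the EC
        (void)res;
    }
private:
    mythos::Tasklet tasklet;
};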
typeId()) return this; THROW(Error::TYPE_MISMATCH); } optional deleteCap(CapEntry&, Cap self, IDeleter& del) override; @@ -180,6 +183,9 @@ namespace mythos { LinkedList::Queueable del_handle = {this}; IAsyncFree* memory; + + public: + Tasklet threadTeamTasklet; }; class ExecutionContextFactory : public FactoryBase diff --git a/kernel/objects/memory-amd64/objects/PML4InvalidationBroadcastAmd64.cc b/kernel/objects/memory-amd64/objects/PML4InvalidationBroadcastAmd64.cc index 9129a0b6..1a0cfcac 100644 --- a/kernel/objects/memory-amd64/objects/PML4InvalidationBroadcastAmd64.cc +++ b/kernel/objects/memory-amd64/objects/PML4InvalidationBroadcastAmd64.cc @@ -58,6 +58,7 @@ namespace mythos { } // propagate PML4InvalidationBroadcast* pnext = this->next; + ASSERT(pnext); while (pnext != start && pnext->home->getCR3() != pml4) pnext = pnext->next; if (pnext != start) { MLOG_DETAIL(mlog::cap, "relay pml4 invalidation"); diff --git a/kernel/objects/memory-amd64/objects/PageMapAmd64.cc b/kernel/objects/memory-amd64/objects/PageMapAmd64.cc index a3d88dbb..ba52a250 100644 --- a/kernel/objects/memory-amd64/objects/PageMapAmd64.cc +++ b/kernel/objects/memory-amd64/objects/PageMapAmd64.cc @@ -383,7 +383,7 @@ namespace mythos { auto res = visitTables(&_pm_table(0), level(), op); *failaddr = op.failaddr; *faillevel = op.current_level; - RETHROW(res.state()); + RETURN(res.state()); } Error PageMap::invokeInstallMap(Tasklet*, Cap self, IInvocation* msg) diff --git a/kernel/objects/processor-allocator/mcconf.module b/kernel/objects/processor-allocator/mcconf.module deleted file mode 100644 index 634a47fb..00000000 --- a/kernel/objects/processor-allocator/mcconf.module +++ /dev/null @@ -1,5 +0,0 @@ -# -*- mode:toml; -*- -[module.plugin-processor-allocator] - incfiles = [ "objects/PluginProcessorAllocator.hh", "objects/ProcessorAllocator.hh", "mythos/protocol/ProcessorAllocator.hh" ] - kernelfiles = [ "objects/ProcessorAllocator.cc", "objects/PluginProcessorAllocator.cc"] - diff --git a/kernel/objects/processor-allocator/mythos/protocol/ProcessorAllocator.hh b/kernel/objects/processor-allocator/mythos/protocol/ProcessorAllocator.hh deleted file mode 100644 index ab7a2571..00000000 --- a/kernel/objects/processor-allocator/mythos/protocol/ProcessorAllocator.hh +++ /dev/null @@ -1,91 +0,0 @@ -/* -*- mode:C++; indent-tabs-mode:nil; -*- */ -/* MIT License -- MyThOS: The Many-Threads Operating System - * - * Permission is hereby granted, free of charge, to any person - * obtaining a copy of this software and associated documentation - * files (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, - * modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - * Copyright 2020 Philipp Gypser and contributors, BTU Cottbus-Senftenberg - */ -#pragma once - -#include "mythos/protocol/common.hh" - -namespace mythos { - namespace protocol { - - struct ProcessorAllocator { - constexpr static uint8_t proto = PROCESSORALLOCATOR; - - enum Methods : uint8_t { - ALLOC, - RETALLOC, - FREE, - RETFREE - }; - - struct Alloc : public InvocationBase { - constexpr static uint16_t label = (proto<<8) + ALLOC; - Alloc(CapPtr dstMap = null_cap) : InvocationBase(label,getLength(this)) { - addExtraCap(dstMap); - } - - // target cap map - CapPtr dstSpace() const { return this->capPtrs[0]; } - }; - - struct RetAlloc : public InvocationBase { - constexpr static uint16_t label = (proto<<8) + RETALLOC; - RetAlloc(CapPtr sc) : InvocationBase(label,getLength(this)) { - addExtraCap(sc); - } - - // allocated scheduling context - CapPtr sc() const { return this->capPtrs[0]; } - }; - - struct Free : public InvocationBase { - constexpr static uint16_t label = (proto<<8) + FREE; - Free(CapPtr sc) : InvocationBase(label,getLength(this)) { - addExtraCap(sc); - } - - // scheduling context to be freed - CapPtr sc() const { return this->capPtrs[0]; } - }; - - struct RetFree : public InvocationBase { - constexpr static uint16_t label = (proto<<8) + RETFREE; - RetFree() : InvocationBase(label,getLength(this)) { - } - }; - - template - static Error dispatchRequest(IMPL* obj, uint8_t m, ARGS const&...args) { - switch(Methods(m)) { - case ALLOC: return obj->invokeAlloc(args...); - case FREE: return obj->invokeFree(args...); - default: return Error::NOT_IMPLEMENTED; - } - } - - }; - - }// namespace protocol -}// namespace mythos diff --git a/kernel/objects/processor-allocator/objects/ProcessorAllocator.cc b/kernel/objects/processor-allocator/objects/ProcessorAllocator.cc deleted file mode 100644 index 7a7868d5..00000000 --- a/kernel/objects/processor-allocator/objects/ProcessorAllocator.cc +++ /dev/null @@ -1,177 +0,0 @@ -/* -*- mode:C++; indent-tabs-mode:nil; -*- */ -/* MIT License -- MyThOS: The Many-Threads Operating System - * - * Permission is hereby granted, free of charge, to any person - * obtaining a copy of this software and associated documentation - * files (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, - * modify, merge, publish, distribute, sublicense, and/or sell copies - * of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- * - * Copyright 2020 Philipp Gypser and contributors, BTU Cottbus-Senftenberg - */ - - -#include "objects/ProcessorAllocator.hh" -#include "objects/mlog.hh" - -namespace mythos { - -/* IKernelObject */ - optional ProcessorAllocator::deleteCap(CapEntry&, Cap, IDeleter&) - { - MLOG_DETAIL(mlog::pm, __func__); - RETURN(Error::SUCCESS); - } - - void ProcessorAllocator::deleteObject(Tasklet*, IResult*) - { - MLOG_DETAIL(mlog::pm, __func__); - } - - void ProcessorAllocator::invoke(Tasklet* t, Cap self, IInvocation* msg) - { - MLOG_DETAIL(mlog::pm, __func__, DVAR(t), DVAR(msg)); - monitor.request(t, [=](Tasklet* t){ - Error err = Error::NOT_IMPLEMENTED; - switch (msg->getProtocol()) { - case protocol::ProcessorAllocator::proto: - err = protocol::ProcessorAllocator::dispatchRequest(this, msg->getMethod(), t, self, msg); - break; - } - if (err != Error::INHIBIT) { - msg->replyResponse(err); - monitor.requestDone(); - } - } ); - } - -/* IResult */ - void ProcessorAllocator::response(Tasklet* /*t*/, optional res){ - MLOG_DETAIL(mlog::pm, "revoke response:", res.state(), DVAR(toBeFreed)); - free(toBeFreed); - toBeFreed = 0; - } - -/* ProcessorAllocator */ - ProcessorAllocator::ProcessorAllocator() - : sc(image2kernel(&mySC[0])) - {} - - void ProcessorAllocator::init(){ - MLOG_DETAIL(mlog::pm, "PM::init"); - for (cpu::ThreadID id = 0; id < cpu::getNumThreads(); ++id) { - sc[id].initRoot(Cap(image2kernel(&boot::getScheduler(id)))); - free(id); - } - } - - Error ProcessorAllocator::invokeAlloc(Tasklet*, Cap, IInvocation* msg){ - MLOG_DETAIL(mlog::pm, __func__); - auto id = alloc(); - - if(id){ - MLOG_DETAIL(mlog::pm, "allocated ", DVAR(*id)); - - auto data = msg->getMessage()->read(); - - optional dstEntry; - if(data.dstSpace() == null_cap){ // direct access - dstEntry = msg->lookupEntry(init::SCHEDULERS_START+*id, 32, true); // lookup for write access - if (!dstEntry){ - MLOG_WARN(mlog::pm, "Warning: cannot find dstEntry!"); - free(*id); - return dstEntry.state(); - } - - }else{ // indirect access - TypedCap dstSpace(msg->lookupEntry(data.dstSpace())); - if (!dstSpace){ - MLOG_WARN(mlog::pm, "Warning: cannot find dstSpace!"); - free(*id); - return dstSpace.state(); - } - - //auto dstEntryRef = dstSpace.lookup(data.dstPtr, data.dstDepth, true); // lookup for write - auto dstEntryRef = dstSpace.lookup(init::SCHEDULERS_START+*id, 32, true); // lookup for write - if (!dstEntryRef){ - MLOG_WARN(mlog::pm, "Warning: cannot find dstEntryRef!"); - free(*id); - return dstEntryRef.state(); - } - - dstEntry = dstEntryRef->entry; - } - - auto res = cap::reference(sc[*id], **dstEntry, sc[*id].cap()); - if(res){ - msg->getMessage()->write(init::SCHEDULERS_START+*id); - MLOG_DETAIL(mlog::pm, "map new sc ", DVAR(*id)); - }else{ - MLOG_WARN(mlog::pm, "Warning: cannot create SC entry!"); - free(*id); - msg->getMessage()->write(null_cap); - } - - }else{ - MLOG_WARN(mlog::pm, "allocation failed: no free cores available!"); - msg->getMessage()->write(null_cap); - } - return Error::SUCCESS; - } - - // todo: implement new revokation mechanism that suits this scenario - Error ProcessorAllocator::invokeFree(Tasklet* /*t*/, Cap, IInvocation* /*msg*/){ - MLOG_ERROR(mlog::pm, __func__, " NYI!"); - //auto data = msg->getMessage()->cast(); - //ASSERT(data->sc() >= init::SCHEDULERS_START); - //cpu::ThreadID id = data->sc() - init::SCHEDULERS_START; - //ASSERT(id < cpu::getNumThreads()); - //MLOG_ERROR(mlog::pm, "free SC", DVAR(data->sc()), DVAR(id)); - //toBeFreed = id; - //revokeOp._revoke(t, this, sc[id], this); - return 
Error::SUCCESS; - } - - void ProcessorAllocator::freeSC(Tasklet* t, cpu::ThreadID id){ - MLOG_DETAIL(mlog::pm, "freeSC", DVAR(id)); - monitor.request(t, [=](Tasklet* t){ - MLOG_DETAIL(mlog::pm, "monitor free", DVAR(id)); - toBeFreed = id; - revokeOp._revoke(t, this, sc[id], this); - } - ); - } - -/* LiFoProcessorAllocator */ - LiFoProcessorAllocator::LiFoProcessorAllocator() - : nFree(0) - {} - - optional LiFoProcessorAllocator::alloc(){ - optional ret; - if(nFree > 0){ - nFree--; - ret = freeList[nFree]; - } - return ret; - } - - void LiFoProcessorAllocator::free(cpu::ThreadID id) { - freeList[nFree] = id; - nFree++; - } -} // namespace mythos diff --git a/kernel/objects/scheduling-context/objects/SchedulingContext.cc b/kernel/objects/scheduling-context/objects/SchedulingContext.cc index 456dcbdc..7637cf90 100644 --- a/kernel/objects/scheduling-context/objects/SchedulingContext.cc +++ b/kernel/objects/scheduling-context/objects/SchedulingContext.cc @@ -47,8 +47,14 @@ namespace mythos { readyQueue.remove(ec); current_handle.store(nullptr); if(readyQueue.empty()){ - MLOG_DETAIL(mlog::sched, "call idleSC"); - event::idleSC.emit(&paTask, home->getThreadID()); + MLOG_INFO(mlog::sched, "call idleSC"); + auto team = myTeam.load(); + if(team != nullptr){ + team->notifyIdle(&paTask, home->getThreadID()); + }else{ + MLOG_WARN(mlog::sched, "No ThreadTeam registered!"); + } + }else{ MLOG_INFO(mlog::sched, "ready queue not empty!"); } diff --git a/kernel/objects/scheduling-context/objects/SchedulingContext.hh b/kernel/objects/scheduling-context/objects/SchedulingContext.hh index 1f82f78e..2f30faae 100644 --- a/kernel/objects/scheduling-context/objects/SchedulingContext.hh +++ b/kernel/objects/scheduling-context/objects/SchedulingContext.hh @@ -35,6 +35,11 @@ namespace mythos { + class INotifyIdle{ + public: + virtual void notifyIdle(Tasklet* t, cpu::ThreadID id) = 0; + }; + /** Scheduler for multiple application threads on a single hardware * thread. It implements an cooperative FIFO strategy that switches only * when the currently selected application thread becomes blocked. 
@@ -73,7 +78,9 @@ namespace mythos { , public IScheduler { public: - SchedulingContext() { } + SchedulingContext() + : myTeam(nullptr) + { } void init(async::Place* home) { this->home = home; } virtual ~SchedulingContext() {} @@ -87,10 +94,20 @@ namespace mythos { void unbind(handle_t* ec_handle) override; void ready(handle_t* ec_handle) override; + //todo: use capref + public: //ThreadTeam + void registerThreadTeam(INotifyIdle* tt){ + myTeam.store(tt); + }; + void resetThreadTeam(){ + myTeam.store(nullptr); + } + public: // IKernelObject interface optional deleteCap(CapEntry&, Cap, IDeleter&) override { RETURN(Error::SUCCESS); } optional vcast(TypeId id) const override { if (id == typeId()) return static_cast(this); + if (id == typeId()) return this; THROW(Error::TYPE_MISMATCH); } @@ -100,6 +117,7 @@ namespace mythos { std::atomic current_handle = {nullptr}; //< the currently selected execution context Tasklet paTask; //task for communication with processor allocator + std::atomic myTeam; }; namespace event { diff --git a/kernel/objects/thread-team/mcconf.module b/kernel/objects/thread-team/mcconf.module new file mode 100644 index 00000000..8e5a7c7a --- /dev/null +++ b/kernel/objects/thread-team/mcconf.module @@ -0,0 +1,5 @@ +# -*- mode:toml; -*- +[module.objects-thread-team] + incfiles = [ "objects/ProcessorAllocator.hh", "objects/ThreadTeam.hh", "mythos/protocol/ThreadTeam.hh", "objects/PluginThreadTeam.hh" ] + kernelfiles = [ "objects/ProcessorAllocator.cc", "objects/ThreadTeam.cc", "objects/PluginThreadTeam.cc"] + diff --git a/kernel/objects/thread-team/mythos/protocol/ThreadTeam.hh b/kernel/objects/thread-team/mythos/protocol/ThreadTeam.hh new file mode 100644 index 00000000..3d6396c6 --- /dev/null +++ b/kernel/objects/thread-team/mythos/protocol/ThreadTeam.hh @@ -0,0 +1,170 @@ +/* -*- mode:C++; indent-tabs-mode:nil; -*- */ +/* MIT License -- MyThOS: The Many-Threads Operating System + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
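// The SchedulingContext now reports an empty ready queue to a registered INotifyIdle instead of
// emitting event::idleSC. A sketch of the consumer side (the real ThreadTeam lives in
// objects/ThreadTeam.cc, which is not shown in this diff):
class IdleCollector : public mythos::INotifyIdle {
public:
    void attach(mythos::SchedulingContext* sc) { sc->registerThreadTeam(this); }
    void detach(mythos::SchedulingContext* sc) { sc->resetThreadTeam(); }
    void notifyIdle(mythos::Tasklet* t, mythos::cpu::ThreadID id) override {
        // called from the SC's hardware thread once its ready queue ran empty; a ThreadTeam
        // would return hwthread `id` to its free pool or dispatch a demanded EC, using the
        // SC-provided tasklet `t` (paTask) for the asynchronous follow-up
        (void)t; (void)id;
    }
};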
+ * + * Copyright 2021 Philipp Gypser, BTU Cottbus-Senftenberg + */ +#pragma once + +#include "mythos/protocol/common.hh" +#include "mythos/protocol/KernelMemory.hh" +#include "util/align.hh" + + +namespace mythos { + namespace protocol { + + struct ThreadTeam { + constexpr static uint8_t proto = THREADTEAM; + + enum Methods : uint8_t { + TRYRUNEC, + RETTRYRUNEC, + REVOKEDEMAND, + RETREVOKEDEMAND, + RUNNEXTTOEC, + RETRUNNEXTTOEC, + SETLIMIT, + }; + + //needs to be equal to pthread_alloc_type_t in pthread.h of musl + enum TeamAllocType{ + FAIL = 0, + FORCE = 1, + DEMAND = 2 + }; + + struct Create : public KernelMemory::CreateBase { + typedef InvocationBase response_type; + //constexpr static uint16_t label = (proto<<8) + CREATE; + Create(CapPtr dst, CapPtr factory, CapPtr pa) + : CreateBase(dst, factory, getLength(this), 0) + { + this->capPtrs[2] = pa; + } + CapPtr pa() const { return this->capPtrs[2]; } + }; + + struct TryRunEC : public InvocationBase { + constexpr static uint16_t label = (proto<<8) + TRYRUNEC; + TryRunEC(CapPtr ec, int allocType) + : InvocationBase(label,getLength(this)) + , allocType(allocType) + { + addExtraCap(ec); + } + + // execution context to be scheduled + CapPtr ec() const { return this->capPtrs[0]; } + int allocType; + }; + + struct RetTryRunEC : public InvocationBase { + constexpr static uint16_t label = (proto<<8) + RETTRYRUNEC; + + enum response{ + FAILED = 0, + ALLOCATED = 1, + DEMANDED = 2, + FORCED = 3 + }; + + RetTryRunEC() + : InvocationBase(label,getLength(this)) + , response(FAILED) + {} + + void setResponse(int response){ this->response = response; } + int getResponse() { return response; } + bool failed() { return response == FAILED; } + bool allocated() { return response == ALLOCATED; } + bool notFailed() { return response == ALLOCATED || response == DEMANDED + || response == FORCED; } + int response; + }; + + struct RevokeDemand : public InvocationBase { + constexpr static uint16_t label = (proto<<8) + REVOKEDEMAND; + RevokeDemand(CapPtr ec) : InvocationBase(label,getLength(this)) + { + addExtraCap(ec); + } + + // execution context to be scheduled + CapPtr ec() const { return this->capPtrs[0]; } + }; + + struct RetRevokeDemand : public InvocationBase { + constexpr static uint16_t label = (proto<<8) + RETREVOKEDEMAND; + RetRevokeDemand() + : InvocationBase(label,getLength(this)) + , revoked(false) + {} + + bool revoked; + }; + + struct RunNextToEC : public InvocationBase { + constexpr static uint16_t label = (proto<<8) + RUNNEXTTOEC; + RunNextToEC(CapPtr ec, CapPtr ec_place) : InvocationBase(label,getLength(this)) { + addExtraCap(ec); + addExtraCap(ec_place); + } + + // execution context to be scheduled + CapPtr ec() const { return this->capPtrs[0]; } + CapPtr ec_place() const { return this->capPtrs[1]; } + }; + + struct RetRunNextToEC : public InvocationBase { + constexpr static uint16_t label = (proto<<8) + RETRUNNEXTTOEC; + RetRunNextToEC() : InvocationBase(label,getLength(this)) { + } + }; + + // restrict to maximum number of threads + struct SetLimit : public InvocationBase { + constexpr static uint16_t label = (proto<<8) + SETLIMIT; + SetLimit(unsigned limit) + : InvocationBase(label,getLength(this)) + , limit(limit) + {} + + // maximum number of threads to be allocated + unsigned limit; + }; + + template + static Error dispatchRequest(IMPL* obj, uint8_t m, ARGS const&...args) { + switch(Methods(m)) { + case TRYRUNEC: return obj->invokeTryRunEC(args...); + case REVOKEDEMAND: return obj->invokeRevokeDemand(args...); + case RUNNEXTTOEC: return 
obj->invokeRunNextToEC(args...); + case SETLIMIT: return obj->invokeSetLimit(args...); + default: return Error::NOT_IMPLEMENTED; + } + } + + }; + + }// namespace protocol +}// namespace mythos diff --git a/kernel/objects/processor-allocator/objects/PluginProcessorAllocator.cc b/kernel/objects/thread-team/objects/PluginThreadTeam.cc similarity index 82% rename from kernel/objects/processor-allocator/objects/PluginProcessorAllocator.cc rename to kernel/objects/thread-team/objects/PluginThreadTeam.cc index 02323d58..bb39cf43 100644 --- a/kernel/objects/processor-allocator/objects/PluginProcessorAllocator.cc +++ b/kernel/objects/thread-team/objects/PluginThreadTeam.cc @@ -21,11 +21,12 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * - * Copyright 2020 Philipp Gypser and contributors, BTU Cottbus-Senftenberg + * Copyright 2021 Philipp Gypser, BTU Cottbus-Senftenberg */ -#include "objects/PluginProcessorAllocator.hh" +#include "objects/PluginThreadTeam.hh" -mythos::PluginProcessorAllocator pluginProcessorAllocator; -mythos::PluginProcessorAllocatorActivator pluginProcessorAllocatorActivator; + +mythos::PluginThreadTeam pluginThreadTeam; +mythos::PluginThreadTeamActivator pluginThreadTeamActivator; diff --git a/kernel/objects/processor-allocator/objects/PluginProcessorAllocator.hh b/kernel/objects/thread-team/objects/PluginThreadTeam.hh similarity index 52% rename from kernel/objects/processor-allocator/objects/PluginProcessorAllocator.hh rename to kernel/objects/thread-team/objects/PluginThreadTeam.hh index edf39256..c71df33a 100644 --- a/kernel/objects/processor-allocator/objects/PluginProcessorAllocator.hh +++ b/kernel/objects/thread-team/objects/PluginThreadTeam.hh @@ -21,12 +21,11 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
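
The protocol header above follows the usual MyThOS convention: each request struct carries a 16-bit label with the protocol id in the high byte and the method id in the low byte, and dispatchRequest() switches on the method byte only. A toy, self-contained illustration of that packing and dispatch (PROTO_THREADTEAM, MiniTeam and the return codes are placeholders, not the real constants):

#include <cstdint>
#include <cstdio>

constexpr uint8_t PROTO_THREADTEAM = 42;             // placeholder protocol id
enum Methods : uint8_t { TRYRUNEC, RETTRYRUNEC, REVOKEDEMAND, RETREVOKEDEMAND,
                         RUNNEXTTOEC, RETRUNNEXTTOEC, SETLIMIT };

constexpr uint16_t makeLabel(uint8_t proto, uint8_t method) {
    return uint16_t((proto << 8) + method);
}

struct MiniTeam {
    int invokeTryRunEC() { std::puts("tryRunEC"); return 0; }
    int invokeSetLimit() { std::puts("setLimit"); return 0; }
};

template<class IMPL>
int dispatchRequest(IMPL* obj, uint8_t m) {
    switch (Methods(m)) {
        case TRYRUNEC: return obj->invokeTryRunEC();
        case SETLIMIT: return obj->invokeSetLimit();
        default:       return -1;                    // NOT_IMPLEMENTED
    }
}

int main() {
    MiniTeam team;
    uint16_t label = makeLabel(PROTO_THREADTEAM, TRYRUNEC);
    dispatchRequest(&team, uint8_t(label & 0xff));   // low byte selects the method
}
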
* - * Copyright 2020 Philipp Gypser and contributors, BTU Cottbus-Senftenberg + * Copyright 2021 Philipp Gypser, BTU Cottbus-Senftenberg */ #pragma once -#include "objects/ProcessorAllocator.hh" -#include "objects/SchedulingContext.hh" +#include "objects/ThreadTeam.hh" #include "util/events.hh" #include "boot/load_init.hh" #include "mythos/init.hh" @@ -34,56 +33,71 @@ #include "util/assert.hh" namespace mythos { - class PluginProcessorAllocatorActivator + +namespace factory { + ThreadTeamFactory threadTeam; +} + + class PluginThreadTeamActivator : public EventHook { public: - PluginProcessorAllocatorActivator() { + PluginThreadTeamActivator() { event::initLoaderEarly.add(this); } - virtual ~PluginProcessorAllocatorActivator() {} + virtual ~PluginThreadTeamActivator() {} void processEvent(boot::InitLoader& loader) override { - MLOG_DETAIL(mlog::pm, "prevent mapping of all scheduling contexts into CSpace"); - loader.processorAllocatorPresent = true; + MLOG_INFO(mlog::pm, "prevent mapping of all scheduling contexts into CSpace"); + loader.mapSchedulingContexts = false; } }; - class PluginProcessorAllocator + class PluginThreadTeam : public EventHook - , public EventHook + , public EventHook { public: - PluginProcessorAllocator() { + PluginThreadTeam() + : tt(nullptr) + { event::initLoader.add(this); - event::idleSC.add(this); + event::initEC.add(this); } - virtual ~PluginProcessorAllocator() {} + virtual ~PluginThreadTeam() {} void processEvent(boot::InitLoader& loader) override { - MLOG_DETAIL(mlog::pm, "Init processor allocator"); + MLOG_DETAIL(mlog::pm, "init processor allocator"); pa.init(); - MLOG_DETAIL(mlog::pm, "CSset processor allocator"); + MLOG_DETAIL(mlog::pm, "map processor allocator"); OOPS(loader.csSet(init::PROCESSOR_ALLOCATOR, pa)); - auto sc = pa.alloc(); - ASSERT(sc); - MLOG_DETAIL(mlog::pm, "allocated SC for init app", DVAR(init::SCHEDULERS_START+*sc)); - loader.initSC = init::SCHEDULERS_START+*sc; - MLOG_DETAIL(mlog::pm, "map SC for init app"); - loader.csSet(init::SCHEDULERS_START+*sc, boot::getScheduler(*sc)); - + MLOG_DETAIL(mlog::pm, "map thread team factory"); + OOPS(loader.csSet(init::THREADTEAM_FACTORY, factory::threadTeam)); + MLOG_DETAIL(mlog::pm, "get processor allocator"); + auto pae = loader.capAlloc.get(init::PROCESSOR_ALLOCATOR); + ASSERT(pae); + MLOG_DETAIL(mlog::pm, "create initial thread team"); + auto obj = loader.create(loader.capAlloc.get(init::THREAD_TEAM), *pae ); + if(obj){ + MLOG_DETAIL(mlog::pm, "thread team created successfully"); + tt = *obj; + }else{ + MLOG_ERROR(mlog::pm, "ERROR: thread team creation failed!"); + } } - void processEvent(Tasklet* t, cpu::ThreadID id) override { - MLOG_DETAIL(mlog::pm, "idleSc -> freeSC", DVAR(id)); - pa.freeSC(t, id); + void processEvent(ExecutionContext* ec) override { + MLOG_DETAIL(mlog::pm, "runEc"); + ASSERT(tt != nullptr); + tt->tryRun(ec); } - LiFoProcessorAllocator pa; + ThreadTeam* tt; + ProcessorAllocator pa; }; +extern mythos::PluginThreadTeam pluginThreadTeam; +extern mythos::PluginThreadTeamActivator pluginThreadTeamActivator; } // namespace mythos -extern mythos::PluginProcessorAllocator pluginProcessorAllocator; -extern mythos::PluginProcessorAllocatorActivator pluginProcessorAllocatorActivator; diff --git a/kernel/objects/thread-team/objects/ProcessorAllocator.cc b/kernel/objects/thread-team/objects/ProcessorAllocator.cc new file mode 100644 index 00000000..445ec382 --- /dev/null +++ b/kernel/objects/thread-team/objects/ProcessorAllocator.cc @@ -0,0 +1,74 @@ +/* -*- mode:C++; 
indent-tabs-mode:nil; -*- */ +/* MIT License -- MyThOS: The Many-Threads Operating System + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright 2020 Philipp Gypser, BTU Cottbus-Senftenberg + */ + + +#include "objects/ProcessorAllocator.hh" +#include "objects/mlog.hh" + +namespace mythos { + +/* IKernelObject */ + optional ProcessorAllocator::deleteCap(CapEntry&, Cap, IDeleter&) + { + MLOG_DETAIL(mlog::pm, __func__); + RETURN(Error::SUCCESS); + } + + void ProcessorAllocator::deleteObject(Tasklet*, IResult*) + { + MLOG_DETAIL(mlog::pm, __func__); + } + +/* ProcessorAllocator */ + ProcessorAllocator::ProcessorAllocator() + : sc(image2kernel(&mySC[0])) + , nTeams(0) + {} + + void ProcessorAllocator::init(){ + MLOG_DETAIL(mlog::pm, "PM::init"); + for (cpu::ThreadID id = 0; id < cpu::getNumThreads(); ++id) { + sc[id].initRoot(Cap(image2kernel(&boot::getScheduler(id)))); + free(id); + } + } + + optional ProcessorAllocator::alloc(){ + MLOG_INFO(mlog::pm, __func__); + optional ret; + if(nFree > 0){ + nFree--; + ret = freeList[nFree]; + } + return ret; + } + + void ProcessorAllocator::free(cpu::ThreadID id) { + MLOG_INFO(mlog::pm, __func__, DVAR(id)); + freeList[nFree] = id; + nFree++; + } +} // namespace mythos diff --git a/kernel/objects/processor-allocator/objects/ProcessorAllocator.hh b/kernel/objects/thread-team/objects/ProcessorAllocator.hh similarity index 50% rename from kernel/objects/processor-allocator/objects/ProcessorAllocator.hh rename to kernel/objects/thread-team/objects/ProcessorAllocator.hh index 4c733818..bbead690 100644 --- a/kernel/objects/processor-allocator/objects/ProcessorAllocator.hh +++ b/kernel/objects/thread-team/objects/ProcessorAllocator.hh @@ -21,7 +21,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
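
For orientation, the new ProcessorAllocator above keeps hardware threads in a fixed-size LIFO free list: init() hands every ThreadID to free(), alloc() pops the most recently freed id, and in the kernel both run under the object's monitor. The standalone model below reproduces only the list semantics; MAX_THREADS and LifoAllocator are illustrative names.

#include <array>
#include <cstdio>
#include <optional>

constexpr unsigned MAX_THREADS = 8;        // stand-in for MYTHOS_MAX_THREADS
using ThreadID = unsigned;

class LifoAllocator {
public:
    // mirrors ProcessorAllocator::init(): every hardware thread starts free
    void init(unsigned numThreads) {
        for (ThreadID id = 0; id < numThreads; ++id) free(id);
    }
    std::optional<ThreadID> alloc() {
        if (nFree == 0) return std::nullopt;   // nothing left to hand out
        return freeList[--nFree];
    }
    void free(ThreadID id) { freeList[nFree++] = id; }
private:
    std::array<ThreadID, MAX_THREADS> freeList{};
    unsigned nFree = 0;
};

int main() {
    LifoAllocator pa;
    pa.init(4);                                // free list: 0,1,2,3
    auto a = pa.alloc();                       // returns 3 (last freed first)
    auto b = pa.alloc();                       // returns 2
    std::printf("%u %u\n", *a, *b);
    pa.free(*a);                               // 3 goes back on top
}
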
* - * Copyright 2020 Philipp Gypser and contributors, BTU Cottbus-Senftenberg + * Copyright 2020 Philipp Gypser, BTU Cottbus-Senftenberg */ #pragma once @@ -29,16 +29,15 @@ #include "objects/IFactory.hh" #include "objects/IKernelObject.hh" #include "cpu/hwthreadid.hh" -#include "mythos/protocol/ProcessorAllocator.hh" #include "boot/mlog.hh" -#include "objects/RevokeOperation.hh" #include "async/IResult.hh" namespace mythos { +class ThreadTeam; + class ProcessorAllocator : public IKernelObject - , public IResult { public: ProcessorAllocator(); @@ -46,42 +45,74 @@ class ProcessorAllocator /* IKernelObject */ optional deleteCap(CapEntry&, Cap self, IDeleter& del) override; void deleteObject(Tasklet* t, IResult* r) override; - void invoke(Tasklet* t, Cap self, IInvocation* msg) override; - - /* IResult */ - void response(Tasklet* /*t*/, optional res) override; + optional vcast(TypeId id) const override { + if (id == typeId()) return this; + THROW(Error::TYPE_MISMATCH); + } void init(); - Error invokeAlloc(Tasklet*, Cap, IInvocation* msg); - Error invokeFree(Tasklet* t, Cap, IInvocation* msg); - void freeSC(Tasklet* t, cpu::ThreadID id); - protected: - friend class PluginProcessorAllocator; - virtual optional alloc() = 0; - virtual void free(cpu::ThreadID id) = 0; - virtual unsigned numFree() = 0; + optional alloc(); + void free(cpu::ThreadID id); - private: - async::NestedMonitorDelegating monitor; - RevokeOperation revokeOp = {monitor}; - cpu::ThreadID toBeFreed = 0; - CapEntry *sc; - CapEntry mySC[MYTHOS_MAX_THREADS]; -}; + void alloc(Tasklet* t, IResult* r){ + monitor.request(t,[=](Tasklet*){ + ASSERT(r); + r->response(t, alloc()); + monitor.responseAndRequestDone(); + }); + } -class LiFoProcessorAllocator : public ProcessorAllocator -{ - public: - LiFoProcessorAllocator(); + void free(Tasklet* t, cpu::ThreadID id){ + monitor.request(t,[=](Tasklet*){ + free(id); + monitor.responseAndRequestDone(); + }); + } + + CapEntry* getSC(cpu::ThreadID id){ + ASSERT(id < cpu::getNumThreads()); + return &sc[id]; + } - unsigned numFree() override { return nFree; } - optional alloc() override; - void free(cpu::ThreadID id) override; + void bind(optional /*tt*/){} + void unbind(optional tt){ + MLOG_INFO(mlog::pm, "unregistered thread team", DVAR(tt)); + ASSERT(tt); + for(unsigned i = 0; i < nTeams; i++){ + auto ti = teamList[i]; + auto t = teamRefs[ti].get(); + if(t && *t == *tt){ + nTeams--; + for(; i < nTeams; i++){ + teamList[i] = teamList[i+1]; + } + } + } + } + + void registerThreadTeam(Tasklet* t, CapEntry* threadTeam){ + monitor.request(t,[=](Tasklet*){ + if(nTeams < MAX_TEAMS){ + teamRefs[nTeams].set(this, threadTeam, threadTeam->cap()); + MLOG_INFO(mlog::pm, "registered thread team on index ", nTeams); + nTeams++; + }else{ + MLOG_INFO(mlog::pm, "ERROR: too many thread teams registered!"); + } + monitor.responseAndRequestDone(); + }); + } private: + static constexpr unsigned MAX_TEAMS = MYTHOS_MAX_THREADS; + async::NestedMonitorDelegating monitor; + CapEntry *sc; + CapEntry mySC[MYTHOS_MAX_THREADS]; unsigned nFree; cpu::ThreadID freeList[MYTHOS_MAX_THREADS]; + CapRef teamRefs[MAX_TEAMS]; + unsigned teamList[MAX_TEAMS]; + unsigned nTeams; }; - } // namespace mythos diff --git a/kernel/objects/thread-team/objects/ThreadTeam.cc b/kernel/objects/thread-team/objects/ThreadTeam.cc new file mode 100644 index 00000000..927e884b --- /dev/null +++ b/kernel/objects/thread-team/objects/ThreadTeam.cc @@ -0,0 +1,516 @@ +/* -*- mode:C++; indent-tabs-mode:nil; -*- */ +/* MIT License -- MyThOS: The Many-Threads 
Operating System + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright 2021 Philipp Gypser, BTU Cottbus-Senftenberg + */ + + +#include "objects/ThreadTeam.hh" +#include "objects/mlog.hh" + +namespace mythos { + +/* IKernelObject */ + optional ThreadTeam::deleteCap(CapEntry&, Cap self, IDeleter& del) + { + MLOG_DETAIL(mlog::pm, __func__); + ASSERT(state == IDLE); + if (self.isOriginal()) { // the object gets deleted, not a capability reference + ASSERT(pa); + + // free used SCs + auto used = popUsed(); + while(used){ + auto sce = pa->getSC(*used); + TypedCap sc(sce->cap()); + sc->resetThreadTeam(); + //todo: synchronize!!!! + pa->free(*used); + numAllocated--; + used = popUsed(); + } + + // free unused SCs + auto free = popFree(); + while(free){ + auto sce = pa->getSC(*free); + TypedCap sc(sce->cap()); + sc->resetThreadTeam(); + //todo:: synchronize!!! 
+ pa->free(*free); + numAllocated--; + free = popFree(); + } + del.deleteObject(del_handle); + } + RETURN(Error::SUCCESS); + } + + void ThreadTeam::deleteObject(Tasklet* t, IResult* r) + { + MLOG_DETAIL(mlog::pm, __func__); + ASSERT(state == IDLE); + monitor.doDelete(t, [=](Tasklet* t) { this->memory->free(t, r, this, sizeof(ThreadTeam)); }); + } + + void ThreadTeam::invoke(Tasklet* t, Cap self, IInvocation* msg) + { + MLOG_DETAIL(mlog::pm, __func__, DVAR(t), DVAR(msg)); + monitor.request(t, [=](Tasklet* t){ + ASSERT(state == IDLE); + state = INVOCATION; + tmp_msg = msg; + Error err = Error::NOT_IMPLEMENTED; + switch (msg->getProtocol()) { + case protocol::ThreadTeam::proto: + err = protocol::ThreadTeam::dispatchRequest(this, msg->getMethod(), t, self, msg); + break; + } + if (err != Error::INHIBIT) { + ASSERT(state == INVOCATION); + state = IDLE; + tmp_msg = nullptr; + msg->replyResponse(err); + monitor.requestDone(); + } + } ); + } + +/* IResult */ + void ThreadTeam::response(Tasklet* t, optional id){ + MLOG_DETAIL(mlog::pm, __PRETTY_FUNCTION__); + ASSERT(pa); + ASSERT(tmp_msg); + ASSERT(tmp_msg->getMethod() == protocol::ThreadTeam::TRYRUNEC); + + auto data = tmp_msg->getMessage()->read(); + auto ece = tmp_msg->lookupEntry(data.ec()); + auto ret = tmp_msg->getMessage()->cast(); + + ret->setResponse(protocol::ThreadTeam::RetTryRunEC::FAILED); + + ASSERT(ece); + + if(id){ + MLOG_DETAIL(mlog::pm, DVAR(*id)); + numAllocated++; + auto sce = pa->getSC(*id); + TypedCap sc(sce->cap()); + sc->registerThreadTeam(this); + + ret->setResponse(protocol::ThreadTeam::RetTryRunEC::DEMANDED); + TypedCap ec(ece); + ASSERT(ec); + tryRunAt(t, *ec, *id); + return; + } + + MLOG_DETAIL(mlog::pm, "SC allocation failed"); + + if(data.allocType == protocol::ThreadTeam::DEMAND){ + if(enqueueDemand(*ece)){ + MLOG_DETAIL(mlog::pm, "enqueued to demand list", DVARhex(*ece)); + ret->setResponse(protocol::ThreadTeam::RetTryRunEC::ALLOCATED); + } + }else if(data.allocType == protocol::ThreadTeam::FORCE){ + if(nUsed > 0){ + MLOG_DETAIL(mlog::pm, "force run"); + //todo: use a more sophisticated mapping scheme + ret->setResponse(protocol::ThreadTeam::RetTryRunEC::FORCED); + TypedCap ec(ece); + ASSERT(ec); + tryRunAt(t, *ec, usedList[0]); + return; + } + } + + state = IDLE; + tmp_msg->replyResponse(Error::SUCCESS); + monitor.responseAndRequestDone(); + + } + + void ThreadTeam::response(Tasklet* /*t*/, optional bound){ + MLOG_DETAIL(mlog::pm, __PRETTY_FUNCTION__); + ASSERT(tmp_id != INV_ID); + + if(state == INVOCATION){ + MLOG_DETAIL(mlog::pm, "state = INVOCATION"); + ASSERT(tmp_msg); + ASSERT(tmp_msg->getMethod() == protocol::ThreadTeam::TRYRUNEC); + + auto ret = tmp_msg->getMessage()->cast(); + + if(bound){ + MLOG_INFO(mlog::pm, "EC successfully bound to SC"); + pushUsed(tmp_id); + //allready set! ret->setResponse(protocol::ThreadTeam::RetTryRunEC::ALLOCATED); + tmp_msg->replyResponse(Error::SUCCESS); + }else{ + MLOG_ERROR(mlog::pm, "ERROR: failed to bind EC to SC!"); + //todo: reuse sc? 
+ ret->setResponse(protocol::ThreadTeam::RetTryRunEC::FAILED); + tmp_msg->replyResponse(Error::GENERIC_ERROR); + } + }else if(state == SC_NOTIFY){ + MLOG_DETAIL(mlog::pm, "state = SC_NOTIFY"); + if(bound){ + MLOG_INFO(mlog::pm, "EC successfully bound to SC"); + // remove ec from demand queue + removeDemand(tmp_ec, true); + }else{ + removeUsed(tmp_id); + pushFree(tmp_id); + } + }else{ + MLOG_ERROR(mlog::pm, "ERROR: Invalid operational state"); + } + + state = IDLE; + tmp_id = INV_ID; + monitor.responseAndRequestDone(); + } + +/* ThreadTeam */ + ThreadTeam::ThreadTeam(IAsyncFree* memory) + : memory(memory) + , tmp_msg(nullptr) + , tmp_id(INV_ID) + , tmp_ec(nullptr) + , rm_ec(nullptr) + , state(IDLE) + , pa(nullptr) + , nFree(0) + , nUsed(0) + , nDemand(0) + , limit(0) + , numAllocated(0) + { + for(unsigned d = 0; d < MYTHOS_MAX_THREADS; d++){ + demandList[d] = d; + } + } + + bool ThreadTeam::tryRun(ExecutionContext* ec){ + ASSERT(state == IDLE); + ASSERT(pa); + ASSERT(ec); + + auto id = popFree(); + if(!id && !limitReached()){ + id = pa->alloc(); + if(id){ numAllocated++; } + } + + if(id){ + if(ec->setSchedulingContext(pa->getSC(*id))){ + pushUsed(*id); + return true; + }else{ + pushFree(*id); + } + } + return false; + } + + void ThreadTeam::bind(optional paPtr) { + MLOG_DETAIL(mlog::pm, "bind processor allocator"); + //todo:: synchronize + if(paPtr){ + pa = *paPtr; + }else{ + MLOG_ERROR(mlog::pm, "ERROR: binding processor allocator failed"); + pa = nullptr; + } + } + + void ThreadTeam::unbind(optional ) { + MLOG_ERROR(mlog::pm, "ERROR: unbind processor allocator"); + //todo:: synchronize + pa = nullptr; + } + + void ThreadTeam::tryRunAt(Tasklet* t, ExecutionContext* ec, cpu::ThreadID id){ + MLOG_DETAIL(mlog::pm, __func__, DVARhex(ec), DVAR(id)); + ASSERT(pa); + ASSERT(tmp_id == INV_ID); + tmp_id = id; + auto sce = pa->getSC(id); + ec->setSchedulingContext(t, this, sce); + } + + Error ThreadTeam::invokeTryRunEC(Tasklet* t, Cap, IInvocation* msg){ + MLOG_INFO(mlog::pm, __func__); + ASSERT(state == INVOCATION); + + auto data = msg->getMessage()->read(); + auto ece = msg->lookupEntry(data.ec()); + auto ret = msg->getMessage()->cast(); + + if(!ece){ + MLOG_ERROR(mlog::pm, "Error: Did not find EC!"); + ret->setResponse(protocol::ThreadTeam::RetTryRunEC::FAILED); + return Error::INVALID_CAPABILITY; + } + + auto id = popFree(); + + if(id){ + MLOG_DETAIL(mlog::pm, "take SC from Team ", DVAR(*id)); + ret->setResponse(protocol::ThreadTeam::RetTryRunEC::DEMANDED); + TypedCap ec(ece); + ASSERT(ec); + tryRunAt(t, *ec, *id); + }else if(limitReached()){ + MLOG_DETAIL(mlog::pm, "limit reached!"); + response(t, optional()); + }else{ + MLOG_DETAIL(mlog::pm, "try alloc SC from PA"); + ASSERT(pa); + pa->alloc(t, this); + } + + return Error::INHIBIT; + } + + Error ThreadTeam::invokeRevokeDemand(Tasklet* /*t*/, Cap, IInvocation* msg){ + MLOG_INFO(mlog::pm, __func__); + ASSERT(state == INVOCATION); + + auto data = msg->getMessage()->read(); + auto ece = msg->lookupEntry(data.ec()); + auto ret = msg->getMessage()->cast(); + + ret->revoked = false; + + if(!ece){ + MLOG_ERROR(mlog::pm, "Error: Did not find EC!"); + return Error::INVALID_CAPABILITY; + } + + TypedCap ec(ece); + if(ec && removeDemand(*ec, true)){ + ret->revoked = true; + }else{ + MLOG_INFO(mlog::pm, "revoke demand failed"); + } + return Error::SUCCESS; + } + + Error ThreadTeam::invokeSetLimit(Tasklet* /*t*/, Cap, IInvocation* msg){ + MLOG_INFO(mlog::pm, __func__); + ASSERT(state == INVOCATION); + + auto data = msg->getMessage()->read(); + auto oldLimit 
= limit; + limit = data.limit; + MLOG_DETAIL(mlog::pm, DVAR(oldLimit), DVAR(limit)); + + return Error::SUCCESS; + } + + Error ThreadTeam::invokeRunNextToEC(Tasklet* /*t*/, Cap, IInvocation* /*msg*/){ + MLOG_ERROR(mlog::pm, __func__, " NYI!"); + return Error::NOT_IMPLEMENTED; + } + + void ThreadTeam::pushFree(cpu::ThreadID id){ + MLOG_DETAIL(mlog::pm, __func__, DVAR(id)); + freeList[nFree] = id; + nFree++; + } + + optional ThreadTeam::popFree(){ + MLOG_DETAIL(mlog::pm, __func__); + optional ret; + if(nFree > 0){ + nFree--; + ret = freeList[nFree]; + } + return ret; + } + + void ThreadTeam::pushUsed(cpu::ThreadID id){ + MLOG_DETAIL(mlog::pm, __func__, DVAR(id)); + usedList[nUsed] = id; + nUsed++; + } + + optional ThreadTeam::popUsed(){ + MLOG_DETAIL(mlog::pm, __func__); + optional ret; + if(nUsed > 0){ + nUsed--; + ret = usedList[nUsed]; + } + return ret; + } + + void ThreadTeam::removeUsed(cpu::ThreadID id){ + MLOG_DETAIL(mlog::pm, __func__, DVAR(id)); + for(unsigned i = 0; i < nUsed; i++){ + if(usedList[i] == id){ + nUsed--; + for(; i < nUsed; i++){ + usedList[i] = usedList[i+1]; + } + return; + } + } + MLOG_ERROR(mlog::pm, "ERROR: did not find used ThreadID ", id); + } + + bool ThreadTeam::enqueueDemand(CapEntry* ec){ + MLOG_DETAIL(mlog::pm, __func__, DVARhex(ec)); + ASSERT(state == INVOCATION); + if(nDemand < MYTHOS_MAX_THREADS){ + demandEC[demandList[nDemand]].set(this, ec, ec->cap()); + nDemand++; + //dumpDemand(); + return true; + } + return false; + } + + bool ThreadTeam::removeDemand(ExecutionContext* ec, bool resetRef){ + MLOG_DETAIL(mlog::pm, __func__, DVARhex(ec), DVAR(resetRef)); + //find entry + for(unsigned i = 0; i < nDemand; i++){ + auto di = demandList[i]; + auto d = demandEC[di].get(); + if(d && *d == ec){ + nDemand--; + //move following entries + for(; i < nDemand; i++){ + demandList[i] = demandList[i+1]; + } + //move demand index behind used indexes + demandList[nDemand] = di; + //reset entry + if(resetRef){ + rm_ec = *d; + demandEC[di].reset(); + } + //dumpDemand(); + return true; + } + } + MLOG_INFO(mlog::pm, "did not find EC in demand list "); + //dumpDemand(); + return false; + } + + void ThreadTeam::notifyIdle(Tasklet* t, cpu::ThreadID id) { + MLOG_INFO(mlog::pm, __func__, DVAR(id)); + monitor.request(t, [=](Tasklet*){ + ASSERT(state == IDLE); + ASSERT(tmp_id == INV_ID); + state = SC_NOTIFY; + + if(numAllocated > limit || !tryRunDemandAt(t, id)) { + removeUsed(id); + //pushFree(id); + ASSERT(pa); + state = IDLE; + pa->free(t, id); + numAllocated--; + monitor.responseAndRequestDone(); + } + }); + } + + bool ThreadTeam::tryRunDemandAt(Tasklet* t, cpu::ThreadID id) { + MLOG_DETAIL(mlog::pm, __func__); + ASSERT(state == SC_NOTIFY); + // demand available? + if(nDemand){ + //take first + auto di = demandList[0]; + TypedCap ec(demandEC[di]); + ASSERT(ec); + MLOG_DETAIL(mlog::pm, DVARhex(*ec), DVAR(id)); + tmp_ec = *ec; + //try to run ec + tryRunAt(t, *ec, id); + return true; + } + //dumpDemand(); + return false; + } + + void ThreadTeam::bind(optional /*ec*/){} + + void ThreadTeam::unbind(optional ec){ + MLOG_DETAIL(mlog::pm, __func__, DVARhex(ec)); + if(ec){ + if(*ec == rm_ec){ + MLOG_DETAIL(mlog::pm, "synchronous unbind"); + rm_ec = nullptr; + }else{ + MLOG_DETAIL(mlog::pm, "asynchronous unbind"); + //todo: tasklet valid after unbind? 
+ auto ecPtr = *ec; + monitor.request(&ec->threadTeamTasklet, [=](Tasklet*){ + ASSERT(state == IDLE); + removeDemand(ecPtr, false); + monitor.responseAndRequestDone(); + }); + } + } + } + + void ThreadTeam::dumpDemand(){ + MLOG_DETAIL(mlog::pm, "demand list:"); + for(unsigned i = 0; i < MYTHOS_MAX_THREADS; i++){ + auto di = demandList[i]; + if(i < nDemand){ + auto d = demandEC[di].get(); + ASSERT(d); + MLOG_DETAIL(mlog::pm, DVAR(i), DVAR(di), DVARhex(*d)); + }else{ + if(i != di) MLOG_DETAIL(mlog::pm, DVAR(i), DVAR(di)," slot free"); + } + } + } + + optional + ThreadTeamFactory::factory(CapEntry* dstEntry, CapEntry* memEntry, Cap memCap, IAllocator* mem, CapEntry* pae){ + auto obj = mem->create(); + if (!obj) { + dstEntry->reset(); + RETHROW(obj); + } + TypedCap pa(pae); + if (!pa) RETHROW(pa); + obj->paRef.set(*obj, pae, pa.cap()); + Cap cap(*obj); + auto res = cap::inherit(*memEntry, memCap, *dstEntry, cap); + if (!res) { + mem->free(*obj); // mem->release(obj) goes throug IKernelObject deletion mechanism + RETHROW(res); + } + pa->registerThreadTeam(&obj->paTasklet, dstEntry); + return *obj; + } + +} // namespace mythos diff --git a/kernel/objects/thread-team/objects/ThreadTeam.hh b/kernel/objects/thread-team/objects/ThreadTeam.hh new file mode 100644 index 00000000..cc2a5b49 --- /dev/null +++ b/kernel/objects/thread-team/objects/ThreadTeam.hh @@ -0,0 +1,155 @@ +/* -*- mode:C++; indent-tabs-mode:nil; -*- */ +/* MIT License -- MyThOS: The Many-Threads Operating System + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
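
The demand-list handling above relies on demandList being a permutation of slot indices into demandEC: positions below nDemand name occupied slots in arrival order, positions at or above nDemand name free slots, enqueueDemand() reuses demandList[nDemand], and removeDemand() compacts the occupied prefix and parks the freed slot index right behind it. The self-contained model below demonstrates that invariant with plain ints standing in for the CapRef'd execution contexts; all names are illustrative.

#include <array>
#include <cstdio>

constexpr unsigned MAX = 4;                  // stand-in for MYTHOS_MAX_THREADS

struct DemandQueue {
    std::array<int, MAX>      demandEC{};    // 0 = empty slot, else an "EC id"
    std::array<unsigned, MAX> demandList{};  // permutation of 0..MAX-1
    unsigned nDemand = 0;

    DemandQueue() { for (unsigned d = 0; d < MAX; ++d) demandList[d] = d; }

    bool enqueue(int ec) {
        if (nDemand >= MAX) return false;
        demandEC[demandList[nDemand]] = ec;  // next free slot
        ++nDemand;
        return true;
    }

    bool remove(int ec) {
        for (unsigned i = 0; i < nDemand; ++i) {
            unsigned di = demandList[i];
            if (demandEC[di] != ec) continue;
            --nDemand;
            for (; i < nDemand; ++i) demandList[i] = demandList[i + 1];
            demandList[nDemand] = di;        // recycle the freed slot index
            demandEC[di] = 0;
            return true;
        }
        return false;                        // EC was not queued
    }
};

int main() {
    DemandQueue q;
    q.enqueue(11); q.enqueue(22); q.enqueue(33);
    q.remove(22);                            // tail compacts, slot of 22 stays reusable
    std::printf("front=%d n=%u\n", q.demandEC[q.demandList[0]], q.nDemand);
}
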
+ * + * Copyright 2021 Philipp Gypser, BTU Cottbus-Senftenberg + */ +#pragma once + +#include "async/NestedMonitorDelegating.hh" +#include "async/IResult.hh" +#include "objects/IFactory.hh" +#include "objects/IKernelObject.hh" +#include "cpu/hwthreadid.hh" +#include "mythos/protocol/ThreadTeam.hh" +#include "boot/mlog.hh" +#include "objects/RevokeOperation.hh" +#include "objects/ProcessorAllocator.hh" +#include "objects/ExecutionContext.hh" +#include "objects/SchedulingContext.hh" + +namespace mythos { + + class ThreadTeam + : public IKernelObject + , public INotifyIdle + , public IResult + , public IResult + { + public: + ThreadTeam(IAsyncFree* memory); + + /* IKernelObject */ + optional deleteCap(CapEntry&, Cap self, IDeleter& del) override; + void deleteObject(Tasklet* t, IResult* r) override; + void invoke(Tasklet* t, Cap self, IInvocation* msg) override; + optional vcast(TypeId id) const override { + if (id == typeId()) return this; + THROW(Error::TYPE_MISMATCH); + } + + /* IResult */ + //called from ProcessorAllocator::alloc + void response(Tasklet* t, optional id); + //called from ExecutionContext::setSchedulingContext + void response(Tasklet* t, optional bound); + + // only for init EC + bool tryRun(ExecutionContext* ec); + + // invocations + Error invokeTryRunEC(Tasklet* t, Cap, IInvocation* msg); + Error invokeRevokeDemand(Tasklet* t, Cap, IInvocation* msg); + Error invokeRunNextToEC(Tasklet* t, Cap, IInvocation* msg); + Error invokeSetLimit(Tasklet* t, Cap, IInvocation* msg); + + void bind(optional paPtr); + void unbind(optional ); + + void bind(optional /*ec*/); + void unbind(optional ec); + + void notifyIdle(Tasklet* t, cpu::ThreadID id) override; + + private: + void tryRunAt(Tasklet* t, ExecutionContext* ec, cpu::ThreadID id); + + void pushFree(cpu::ThreadID id); + optional popFree(); + + void pushUsed(cpu::ThreadID id); + void removeUsed(cpu::ThreadID id); + optional popUsed(); + + bool enqueueDemand(CapEntry* ec); + bool removeDemand(ExecutionContext* ec, bool resetRef); + bool tryRunDemandAt(Tasklet* t, cpu::ThreadID id); + void dumpDemand(); + bool limitReached() { return limit != 0 && numAllocated >= limit; } + + private: + LinkedList::Queueable del_handle = {this}; + IAsyncFree* memory; + async::NestedMonitorDelegating monitor; + + /* state for async operation handling */ + IInvocation* tmp_msg; + static constexpr cpu::ThreadID INV_ID = cpu::ThreadID(-1); + cpu::ThreadID tmp_id; + ExecutionContext* tmp_ec; + ExecutionContext* rm_ec; + enum OperationalState{ + IDLE, + INVOCATION, + SC_NOTIFY + }; + OperationalState state; + + friend class ThreadTeamFactory; + CapRef paRef; + ProcessorAllocator* pa; + Tasklet paTasklet; + cpu::ThreadID freeList[MYTHOS_MAX_THREADS]; + unsigned nFree; + cpu::ThreadID usedList[MYTHOS_MAX_THREADS]; + unsigned nUsed; + CapRef demandEC[MYTHOS_MAX_THREADS]; + // index < nDemand = demandEC slot in use + // index >= nDemand = demandEC slot is free + unsigned demandList[MYTHOS_MAX_THREADS]; // indexes to demandEC + unsigned nDemand; + unsigned limit; + unsigned numAllocated; + }; + + class ThreadTeamFactory : public FactoryBase + { + public: + typedef protocol::ThreadTeam::Create message_type; + + static optional + factory(CapEntry* dstEntry, CapEntry* memEntry, Cap memCap, IAllocator* mem, CapEntry* pae); + + Error factory(CapEntry* dstEntry, CapEntry* memEntry, Cap memCap, + IAllocator* mem, IInvocation* msg) const override { + MLOG_DETAIL(mlog::pm, __PRETTY_FUNCTION__); + auto data = msg->getMessage()->read(); + auto paEntry = 
msg->lookupEntry(data.pa()); + if (!paEntry){ + MLOG_ERROR(mlog::pm, "ERROR: cannot find processor allocator entry reference!"); + return Error::INVALID_CAPABILITY; + } + return factory(dstEntry, memEntry, memCap, mem, *paEntry).state(); + } + }; + +} // namespace mythos diff --git a/kernel/runtime/cxx/mcconf.module b/kernel/runtime/cxx/mcconf.module index 5856f73b..1a070528 100644 --- a/kernel/runtime/cxx/mcconf.module +++ b/kernel/runtime/cxx/mcconf.module @@ -9,5 +9,5 @@ makefile_head = ''' APP_CPPFLAGS += -nostdinc -nostdinc++ -isystem ${vars.cxx_path}/include/c++/v1 -isystem ${vars.cxx_path}/include APP_LDFLAGS += -Wl,--eh-frame-hdr --sysroot=${vars.cxx_path}/../ -APP_LIBS += ${vars.cxx_path}/lib/libomp.a ${vars.cxx_path}/lib/libc++.a ${vars.cxx_path}/lib/libc++abi.a ${vars.cxx_path}/lib/libunwind.a ${vars.cxx_path}/lib/libc.a -lgcc +APP_LIBS += ${vars.cxx_path}/lib/libomp.a ${vars.tbb_build_path}/libtbb.a ${vars.cxx_path}/lib/libc++.a ${vars.cxx_path}/lib/libc++abi.a ${vars.cxx_path}/lib/libunwind.a ${vars.cxx_path}/lib/libc.a -lgcc ''' diff --git a/kernel/runtime/cxx/runtime/cxxsupport.cc b/kernel/runtime/cxx/runtime/cxxsupport.cc index 8316e451..fce6d6c0 100644 --- a/kernel/runtime/cxx/runtime/cxxsupport.cc +++ b/kernel/runtime/cxx/runtime/cxxsupport.cc @@ -48,27 +48,93 @@ #include "runtime/Example.hh" #include "runtime/PageMap.hh" #include "runtime/KernelMemory.hh" -#include "runtime/ProcessorAllocator.hh" #include "runtime/CapAlloc.hh" #include "runtime/tls.hh" #include "runtime/futex.hh" #include "runtime/umem.hh" #include "runtime/thread-extra.hh" +#include "runtime/ThreadTeam.hh" +#include "runtime/Mutex.hh" +#include "runtime/process.hh" +#include "util/optional.hh" +#include "util/events.hh" #include "mythos/InfoFrame.hh" extern mythos::InfoFrame* info_ptr asm("info_ptr"); extern mythos::Portal portal; +extern mythos::Frame infoFrame; extern mythos::CapMap myCS; extern mythos::PageMap myAS; extern mythos::KernelMemory kmem; -extern mythos::ProcessorAllocator pa; +extern mythos::ThreadTeam team; + +static thread_local mythos::CapPtr localPortalPtr; +thread_local mythos::Portal localPortal( + mythos_get_pthread_ec_self() == mythos::init::EC ? mythos::init::PORTAL : localPortalPtr, + mythos_get_pthread_ec_self() == mythos::init::EC ? 
info_ptr->getInvocationBuf() + : info_ptr->getInvocationBuf(localPortalPtr)); + +void setRemotePortalPtr(uintptr_t targetTLS, mythos::CapPtr p){ + auto ptr = reinterpret_cast(targetTLS + - (mythos::getTLS() - reinterpret_cast(&localPortalPtr))); + *ptr = p; +} + +mythos::CapPtr getRemotePortalPtr(pthread_t t){ + auto rp = reinterpret_cast(t - (pthread_self() - reinterpret_cast(&localPortalPtr))); + return *rp; +} + +template +class ThreadPool{ + public: + ThreadPool() + : top(0) + {} + + struct ThreadPoolEntry{ + mythos::CapPtr ec; + mythos::CapPtr portal; + }; + + void push(mythos::CapPtr ec, mythos::CapPtr portal){ + mythos::Mutex::Lock guard(mutex); + if(top < SIZE){ + pool[top] = {ec, portal}; + top++; + MLOG_DETAIL(mlog::app, "pushed to thread pool", DVAR(ec), DVAR(portal)); + }else{ + MLOG_WARN(mlog::app, "Thread pool full!"); + } + } + + mythos::optional pop(){ + mythos::Mutex::Lock guard(mutex); + if(top){ + top--; + auto ec = pool[top].ec; + auto portal = pool[top].portal; + MLOG_DETAIL(mlog::app, "pop from thread pool", DVAR(ec), DVAR(portal)); + return pool[top]; + } + MLOG_DETAIL(mlog::app, "thread pool empty"); + return mythos::optional(); + } + + private: + mythos::Mutex mutex; + ThreadPoolEntry pool[SIZE]; + unsigned top; +}; + +ThreadPool<1024> threadPool; // synchronization for pthread deletion (exit/join) struct PthreadCleaner{ PthreadCleaner() : flag(FREE) { - //MLOG_ERROR(mlog::app, "PthreadCleaner"); + MLOG_DETAIL(mlog::app, "PthreadCleaner"); } enum state{ @@ -118,7 +184,17 @@ extern "C" [[noreturn]] void __assert_fail (const char *expr, const char *file, mythos::syscall_exit(-1); /// @TODO syscall_abort(); to see some stack backtrace etc } +mythos::Event<> groupExit; + void mythosExit(){ + PANIC(mythos_get_pthread_ec_self() == init::EC); + + mythos::PortalLock pl(localPortal); + MLOG_DETAIL(mlog::app, "Free all dynamically allocated Caps"); + capAlloc.freeAll(pl); + + MLOG_DETAIL(mlog::app, "notify parent process"); + groupExit.emit(); MLOG_ERROR(mlog::app, "MYTHOS:PLEASE KILL ME!!!!!!1 elf"); } @@ -147,16 +223,16 @@ int prlimit( return 0; } -int sched_setaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask) +int my_sched_setaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask) { //MLOG_DETAIL(mlog::app, "syscall sched_setaffinity", DVAR(pid), DVAR(cpusetsize), DVARhex(mask)); if(cpusetsize == info_ptr->getNumThreads() && mask == NULL) return -EFAULT; return 0; } -int sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask) +int my_sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask) { - //MLOG_DETAIL(mlog::app, "syscall sched_getaffinity", DVAR(pid), DVAR(cpusetsize), DVARhex(mask)); + MLOG_DETAIL(mlog::app, "syscall sched_getaffinity", DVAR(pid), DVAR(cpusetsize), DVARhex(mask)); if (mask) { //CPU_ZERO(mask); memset(mask, 0, cpusetsize); @@ -210,14 +286,18 @@ extern "C" long mythos_musl_syscall( case 24: // sched_yield //MLOG_ERROR(mlog::app, "syscall sched_yield NYI"); return 0; + case 25: // mremap + //MLOG_ERROR(mlog::app, "syscall sched_yield NYI"); + return 0; case 28: //madvise MLOG_WARN(mlog::app, "syscall madvise NYI"); return 0; case 39: // getpid - MLOG_WARN(mlog::app, "syscall getpid NYI"); - return 0; + MLOG_DETAIL(mlog::app, "syscall getpid NYI"); + //todo: use proper process identification + return 4711; case 60: // exit(exit_code) - //MLOG_ERROR(mlog::app, "syscall exit", DVAR(a1)); + MLOG_DETAIL(mlog::app, "syscall exit", DVAR(a1)); pthreadCleaner.exit(); asm volatile ("syscall" : : "D"(0), "S"(a1) : "memory"); return 0; @@ 
-238,9 +318,9 @@ extern "C" long mythos_musl_syscall( reinterpret_cast(a5) /*uaddr2*/, a4/*val2*/, a6/*val3*/); } case 203: // sched_setaffinity - return sched_setaffinity(a1, a2, reinterpret_cast(a3)); + return my_sched_setaffinity(a1, a2, reinterpret_cast(a3)); case 204: // sched_getaffinity - return sched_getaffinity(a1, a2, reinterpret_cast(a3)); + return my_sched_getaffinity(a1, a2, reinterpret_cast(a3)); case 228: // clock_gettime //MLOG_ERROR(mlog::app, "Error: mythos_musl_syscall clock_gettime", DVAR(num), //DVARhex(a1), DVARhex(a2), DVARhex(a3), @@ -248,8 +328,8 @@ extern "C" long mythos_musl_syscall( clock_gettime(a1, reinterpret_cast(a2)); return 0; case 231: // exit_group for all pthreads - MLOG_WARN(mlog::app, "syscall exit_group NYI"); - mythosExit(); + MLOG_WARN(mlog::app, "syscall exit_group "); + mythosExit(); return 0; case 302: // prlimit64 //MLOG_WARN(mlog::app, "syscall prlimit64 NYI", DVAR(a1), DVAR(a2), DVAR(a3), DVAR(a4), DVAR(a5), DVAR(a6)); @@ -282,14 +362,19 @@ extern "C" void * mmap(void *start, size_t len, int prot, int flags, int fd, off extern "C" int munmap(void *start, size_t len) { // dummy implementation - //MLOG_DETAIL(mlog::app, "munmap", DVAR(start), DVAR(len)); + MLOG_DETAIL(mlog::app, "munmap", DVAR(start), DVAR(len)); mythos::heap.free(reinterpret_cast(start)); return 0; } extern "C" int unmapself(void *start, size_t len) { - PANIC_MSG(false, "unmapself: NYI!"); + // see pthread_exit: another pthread might reuse the memory before unmapped thread exited + MLOG_DETAIL(mlog::app, "unmapself", DVARhex(start), DVAR(len)); + //todo: race condition? + mythos::heap.free(reinterpret_cast(start)); + threadPool.push(mythos_get_pthread_ec_self(), localPortalPtr); + asm volatile ("syscall" : : "D"(0), "S"(0) : "memory"); return 0; } @@ -305,42 +390,71 @@ extern "C" int mprotect(void *addr, size_t len, int prot) int myclone( int (*func)(void *), void *stack, int flags, - void *arg, int* ptid, void* tls, int* ctid) + void *arg, int* ptid, void* tls, int* ctid, + int allocType) { - //MLOG_DETAIL(mlog::app, "myclone"); + MLOG_DETAIL(mlog::app, "myclone", DVAR(allocType)); ASSERT(tls != nullptr); - // The compiler expect a kinda strange alignment coming from clone: // -> rsp % 16 must be 8 // You can see this also in musl/src/thread/x86_64/clone.s (rsi is stack) // We will use the same trick for alignment as musl libc auto rsp = (uintptr_t(stack) & uintptr_t(-16))-8; - mythos::PortalLock pl(portal); // future access will fail if the portal is in use already - mythos::ExecutionContext ec(capAlloc()); - if (ptid && (flags&CLONE_PARENT_SETTID)) *ptid = int(ec.cap()); + mythos::CapPtr ecPtr; + mythos::CapPtr portalPtr; + + auto tpe = threadPool.pop(); + if(tpe){ + ecPtr = tpe->ec; + portalPtr = tpe->portal; + }else{ + ecPtr = capAlloc(); + portalPtr = capAlloc(); + } + + mythos::PortalLock pl(localPortal); + mythos::ExecutionContext ec(ecPtr); + + if (ptid && (flags&CLONE_PARENT_SETTID)) *ptid = int(ecPtr); // @todo store thread-specific ctid pointer, which should set to 0 by the OS on the thread's exit - auto sc = pa.alloc(pl).wait(); - ASSERT(sc); - if(sc->cap == mythos::null_cap){ - MLOG_WARN(mlog::app, "Processor allocation failed!"); - //todo: set errno = EAGAIN - return (-1); + if(tpe){ + // Reuse EC -> configure regs + mythos::ExecutionContext::register_t regs; + regs.rsp = uintptr_t(rsp), // stack + regs.rip = uintptr_t(func); // start function; + regs.rdi = uintptr_t(arg); // user context + regs.fs_base = uintptr_t(tls); // thread local storage + auto res = 
ec.writeRegisters(pl, regs, true).wait(); + }else{ + //create new EC + auto res = ec.create(kmem) + .as(myAS) + .cs(myCS) + .rawStack(rsp) + .rawFun(func, arg) + .suspended(false) + .fs(tls) + .invokeVia(pl) + .wait(); + + // create Portal + ASSERT(ec.cap() < mythos::MAX_IB); + mythos::Portal newPortal(portalPtr, info_ptr->getInvocationBuf(portalPtr)); + newPortal.create(pl, kmem).wait(); + newPortal.bind(pl, infoFrame, info_ptr->getIbOffset(portalPtr), ec.cap()); } - auto res1 = ec.create(kmem) - .as(myAS) - .cs(myCS) - .sched(sc->cap) - .rawStack(rsp) - .rawFun(func, arg) - .suspended(false) - .fs(tls) - .invokeVia(pl) - .wait(); - //MLOG_DETAIL(mlog::app, DVAR(ec.cap())); - return ec.cap(); + setRemotePortalPtr(reinterpret_cast(tls), portalPtr); + + auto tres = team.tryRunEC(pl, ec, allocType).wait(); + if(tres && tres->notFailed()){ + return ecPtr; + } + MLOG_WARN(mlog::app, "Processor allocation failed!"); + //todo: set errno = EAGAIN + return (-1); } extern "C" int clone(int (*func)(void *), void *stack, int flags, void *arg, ...) @@ -351,8 +465,9 @@ extern "C" int clone(int (*func)(void *), void *stack, int flags, void *arg, ... int* ptid = va_arg(args, int*); void* tls = va_arg(args, void*); int* ctid = va_arg(args, int*); + int allocType = va_arg(args, int); va_end(args); - return myclone(func, stack, flags, arg, ptid, tls, ctid); + return myclone(func, stack, flags, arg, ptid, tls, ctid, allocType); } // synchronize and cleanup exited pthread @@ -361,12 +476,27 @@ extern "C" void mythos_pthread_cleanup(pthread_t t){ // wait for target pthread to exit pthreadCleaner.wait(t); // delete EC of target pthread - auto cap = mythos_get_pthread_ec(t); - mythos::PortalLock pl(portal); - capAlloc.free(cap, pl); + auto ec = mythos_get_pthread_ec(t); + auto portal = getRemotePortalPtr(t); + threadPool.push(ec, portal); // memory of target pthread will be free when returning from this function } +extern "C" int mythos_revoke_demand_hook(pthread_t t){ + MLOG_DETAIL(mlog::app, "revoke demand", mythos_get_pthread_ec(t)); + mythos::PortalLock pl(localPortal); + auto ecPtr = mythos_get_pthread_ec(t); + mythos::ExecutionContext ec(ecPtr); + auto res = team.revokeDemand(pl, ec).wait(); + if(res && res->revoked){ + auto portalPtr = getRemotePortalPtr(t); + threadPool.push(ecPtr, portalPtr); + return 0; + } + MLOG_DETAIL(mlog::app, "revoke failed"); + return (-1); +} + struct dl_phdr_info { void* dlpi_addr; /* Base address of object */ @@ -387,7 +517,7 @@ extern char __executable_start; //< provided by the default linker script extern "C" int dl_iterate_phdr( int (*callback) (dl_phdr_info *info, size_t size, void *data), void *data) { - MLOG_ERROR(mlog::app, "dl_iterate_phdr", DVAR((void*)callback), DVAR(&__executable_start)); + MLOG_DETAIL(mlog::app, "dl_iterate_phdr", DVAR((void*)callback), DVAR(&__executable_start)); mythos::elf64::Elf64Image img(&__executable_start); ASSERT(img.isValid()); diff --git a/kernel/runtime/cxx/runtime/pthread.cc b/kernel/runtime/cxx/runtime/pthread.cc index dca229b5..43bbbae4 100644 --- a/kernel/runtime/cxx/runtime/pthread.cc +++ b/kernel/runtime/cxx/runtime/pthread.cc @@ -37,10 +37,12 @@ extern "C" int pthread_create(pthread_t *res, const pthread_attr_t *attrp, void } #endif +#ifdef use_pthreads_stubs extern "C" int pthread_detach(pthread_t){ MLOG_ERROR(mlog::app, __PRETTY_FUNCTION__); return 0; } +#endif #ifdef use_pthreads_stubs extern "C" _Noreturn void pthread_exit(void *){ @@ -451,10 +453,12 @@ extern "C" int pthread_attr_setinheritsched(pthread_attr_t *, int){ } 
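
The clone() wrapper above now pulls one additional trailing argument, the allocation type coming from the patched musl pthread layer, off the va_list before forwarding to myclone(). A small, self-contained sketch of that calling convention; my_clone, worker and the fixed-size stack are illustrative, only the TeamAllocType values mirror the diff.

#include <cstdarg>
#include <cstdio>

enum TeamAllocType { FAIL = 0, FORCE = 1, DEMAND = 2 };

static int myclone(int (*func)(void*), void* stack, int flags, void* arg,
                   int* ptid, void* tls, int* ctid, int allocType) {
    (void)func; (void)stack; (void)flags; (void)arg;
    (void)ptid; (void)tls; (void)ctid;
    std::printf("allocType = %d\n", allocType);
    return 0;
}

// Keeps the documented fixed clone() parameters and reads the extra
// allocation-type value as the fourth variadic argument.
extern "C" int my_clone(int (*func)(void*), void* stack, int flags, void* arg, ...) {
    va_list args;
    va_start(args, arg);
    int*  ptid      = va_arg(args, int*);
    void* tls       = va_arg(args, void*);
    int*  ctid      = va_arg(args, int*);
    int   allocType = va_arg(args, int);     // the new trailing argument
    va_end(args);
    return myclone(func, stack, flags, arg, ptid, tls, ctid, allocType);
}

static int worker(void*) { return 0; }

int main() {
    char stack[256];
    my_clone(worker, stack + sizeof(stack), 0, nullptr,
             nullptr, nullptr, nullptr, DEMAND);   // prints "allocType = 2"
}
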
+#ifdef use_pthreads_stubs extern "C" int pthread_mutexattr_destroy(pthread_mutexattr_t *){ MLOG_ERROR(mlog::app, __PRETTY_FUNCTION__); return 0; } +#endif extern "C" int pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *__restrict, int *__restrict){ MLOG_ERROR(mlog::app, __PRETTY_FUNCTION__); @@ -508,10 +512,12 @@ extern "C" int pthread_mutexattr_setrobust(pthread_mutexattr_t *, int){ return 0; } +#ifdef use_pthreads_stubs extern "C" int pthread_mutexattr_settype(pthread_mutexattr_t *, int){ MLOG_ERROR(mlog::app, __PRETTY_FUNCTION__); return 0; } +#endif #ifdef use_pthreads_stubs @@ -627,15 +633,19 @@ extern "C" void _pthread_cleanup_pop(struct __ptcb *, int){ #ifdef _GNU_SOURCE +#ifdef use_pthreads_stubs extern "C" int pthread_getaffinity_np(pthread_t, size_t, struct cpu_set_t *){ MLOG_ERROR(mlog::app, __PRETTY_FUNCTION__); return 0; } +#endif +#ifdef use_pthreads_stubs extern "C" int pthread_setaffinity_np(pthread_t, size_t, const struct cpu_set_t *){ MLOG_ERROR(mlog::app, __PRETTY_FUNCTION__); return 0; } +#endif #ifdef use_pthreads_stubs extern "C" int pthread_getattr_np(pthread_t, pthread_attr_t *){ diff --git a/kernel/runtime/kobject/mcconf.module b/kernel/runtime/kobject/mcconf.module index 77712dbd..4e7b0589 100644 --- a/kernel/runtime/kobject/mcconf.module +++ b/kernel/runtime/kobject/mcconf.module @@ -11,5 +11,5 @@ incfiles = [ "runtime/CapAlloc.hh", "runtime/InterruptControl.hh", "runtime/RaplDriverIntel.hh", - "runtime/ProcessorAllocator.hh" + "runtime/ThreadTeam.hh" ] diff --git a/kernel/runtime/kobject/runtime/CapMap.hh b/kernel/runtime/kobject/runtime/CapMap.hh index 945e9edf..4b91a7ec 100644 --- a/kernel/runtime/kobject/runtime/CapMap.hh +++ b/kernel/runtime/kobject/runtime/CapMap.hh @@ -47,21 +47,16 @@ namespace mythos { PortalFuture derive(PortalLock pr, CapPtr src, CapPtrDepth srcDepth, CapPtr dstCs, CapPtr dst, CapPtrDepth dstDepth, - CapRequest req) { + CapRequest req = 1) { return pr.invoke(_cap, src, srcDepth, dstCs, dst, dstDepth, req); } PortalFuture reference(PortalLock pr, CapPtr src, CapPtrDepth srcDepth, CapPtr dstCs, CapPtr dst, CapPtrDepth dstDepth, - CapRequest req) { + CapRequest req = 1) { return pr.invoke(_cap, src, srcDepth, dstCs, dst, dstDepth, req); } - PortalFuture move(PortalLock pr, CapPtr src, CapPtrDepth srcDepth, - CapPtr dstCs, CapPtr dst, CapPtrDepth dstDepth) { - return pr.invoke(_cap, src, srcDepth, dstCs, dst, dstDepth); - } - PortalFuture deleteCap(PortalLock pr, CapPtr src, CapPtrDepth srcDepth) { return pr.invoke(_cap, src, srcDepth); } diff --git a/kernel/runtime/kobject/runtime/SimpleCapAlloc.hh b/kernel/runtime/kobject/runtime/SimpleCapAlloc.hh index e5b12ff8..c924f70f 100644 --- a/kernel/runtime/kobject/runtime/SimpleCapAlloc.hh +++ b/kernel/runtime/kobject/runtime/SimpleCapAlloc.hh @@ -86,6 +86,26 @@ namespace mythos { return res; } + void freeAll(PortalLock& pl){ + Mutex::Lock guard(m); + MLOG_DETAIL(mlog::app, __func__); + for(uint32_t i = START; i < next; i++){ + bool isFree = false; + for(uint32_t s = 0; s < top; s++){ + if(caps[s] == i){ + isFree = true; + break; + } + } + + if(!isFree){ + auto res = cs.deleteCap(pl, i).wait(); + } + } + next = START; + top = 0; + } + optional free(KObject p, PortalLock& pl) { return free(p.cap(), pl); } protected: diff --git a/kernel/runtime/kobject/runtime/ThreadTeam.hh b/kernel/runtime/kobject/runtime/ThreadTeam.hh new file mode 100644 index 00000000..2df51f38 --- /dev/null +++ b/kernel/runtime/kobject/runtime/ThreadTeam.hh @@ -0,0 +1,87 @@ +/* -*- mode:C++; indent-tabs-mode:nil; 
-*- */ +/* MIT License -- MyThOS: The Many-Threads Operating System + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Copyright 2021 Philipp Gypser, BTU Cottbus-Senftenberg + */ +#pragma once + +#include "runtime/PortalBase.hh" +#include "mythos/protocol/ThreadTeam.hh" +#include "runtime/ExecutionContext.hh" +#include "mythos/init.hh" + +namespace mythos { + + class ThreadTeam : public KObject + { + public: + + ThreadTeam() {} + ThreadTeam(CapPtr cap) : KObject(cap) {} + + PortalFuture create(PortalLock pr, KernelMemory kmem, CapPtr pa, CapPtr factory = init::THREADTEAM_FACTORY){ + return pr.invoke(kmem.cap(), _cap, factory, pa); + } + + struct RunResult{ + RunResult() {} + RunResult(InvocationBuf* ib) { + auto msg = ib->cast(); + response = msg->response; + } + + bool failed() const { return response == protocol::ThreadTeam::RetTryRunEC::FAILED; } + bool allocated() const { return response == protocol::ThreadTeam::RetTryRunEC::ALLOCATED; } + bool notFailed() const { return response == protocol::ThreadTeam::RetTryRunEC::ALLOCATED + || response == protocol::ThreadTeam::RetTryRunEC::DEMANDED + || response == protocol::ThreadTeam::RetTryRunEC::FORCED; } + int response = 0; + }; + + PortalFuture tryRunEC(PortalLock pr, ExecutionContext ec, int at = protocol::ThreadTeam::FAIL){ + return pr.invoke(_cap, ec.cap(), at); + } + + PortalFuture runNextToEC(PortalLock pr, ExecutionContext ec, ExecutionContext ec_place){ + return pr.invoke(_cap, ec.cap(), ec_place.cap()); + } + + struct RevokeResult{ + RevokeResult() {} + RevokeResult(InvocationBuf* ib) { + auto msg = ib->cast(); + revoked = msg->revoked; + } + + bool revoked = false; + }; + PortalFuture revokeDemand(PortalLock pr, ExecutionContext ec){ + return pr.invoke(_cap, ec.cap()); + } + + PortalFuture setLimit(PortalLock pr, unsigned limit){ + return pr.invoke(_cap, limit); + } + }; + +} // namespace mythos diff --git a/kernel/runtime/memory/runtime/tls.cc b/kernel/runtime/memory/runtime/tls.cc index fb66367b..766d6008 100644 --- a/kernel/runtime/memory/runtime/tls.cc +++ b/kernel/runtime/memory/runtime/tls.cc @@ -133,4 +133,9 @@ void* setupNewTLS() { return tcbAddr; } +uintptr_t getTLS() { + uintptr_t val; + asm volatile ( "movq %%fs:0, %0" : "=r"(val) ); + return val; +} } // namespace mythos diff --git a/kernel/runtime/memory/runtime/tls.hh b/kernel/runtime/memory/runtime/tls.hh index d8638bdf..8789c983 100644 --- a/kernel/runtime/memory/runtime/tls.hh +++ b/kernel/runtime/memory/runtime/tls.hh @@ -35,4 
+35,5 @@ void setupInitialTLS();
 void* setupNewTLS();
+uintptr_t getTLS();
 } // namespace mythos
diff --git a/kernel/runtime/process/mcconf.module b/kernel/runtime/process/mcconf.module
index 5fb45292..1b1e4ef5 100644
--- a/kernel/runtime/process/mcconf.module
+++ b/kernel/runtime/process/mcconf.module
@@ -1,3 +1,4 @@
 # -*- mode:toml; -*-
 [module.process]
     incfiles = [ "runtime/process.hh" ]
+    appfiles = [ "runtime/process.cc" ]
diff --git a/kernel/runtime/process/runtime/process.cc b/kernel/runtime/process/runtime/process.cc
new file mode 100644
index 00000000..189886c0
--- /dev/null
+++ b/kernel/runtime/process/runtime/process.cc
@@ -0,0 +1,3 @@
+#include "runtime/process.hh"
+
+ProcessExitEvent processExitEvent;
diff --git a/kernel/runtime/process/runtime/process.hh b/kernel/runtime/process/runtime/process.hh
index 77c9cd7d..0b2d9b6e 100644
--- a/kernel/runtime/process/runtime/process.hh
+++ b/kernel/runtime/process/runtime/process.hh
@@ -7,25 +7,88 @@
 #include "util/optional.hh"
 #include "util/elf64.hh"
 #include "util/align.hh"
+#include "util/events.hh"
 #include "mythos/InfoFrame.hh"
 #include "runtime/CapAlloc.hh"
+#include "runtime/ThreadTeam.hh"
+#include "runtime/thread-extra.hh"
+#include "mythos/syscall.hh"
+#include <vector>

 extern mythos::InfoFrame* info_ptr asm("info_ptr");
 extern mythos::CapMap myCS;
 extern mythos::PageMap myAS;
 extern mythos::KernelMemory kmem;
-extern mythos::ProcessorAllocator pa;
+extern mythos::ThreadTeam team;
+
+class ProcessExitEvent;
+extern mythos::Event<> groupExit;
+extern ProcessExitEvent processExitEvent;

 using namespace mythos;

+class ProcessExitEvent : public EventHook<> {
+  public:
+    ProcessExitEvent(){
+      groupExit.add(this);
+    }
+
+    void processEvent() override {
+      MLOG_DETAIL(mlog::app, __PRETTY_FUNCTION__);
+      info_ptr->setRunning(false);
+      auto parent = info_ptr->getParent();
+      if(parent != null_cap){
+        mythos::syscall_signal(parent);
+      }
+    }
+};
+
 class Process{
   public:
     Process(char* image)
       : cs(capAlloc())
       , pCapAlloc(cs)
       , img(image)
+      , pInfoFrame(nullptr)
+      , parent(mythos_get_pthread_ec_self())
     {}
+    void wait(){
+      MLOG_DETAIL(mlog::app, __PRETTY_FUNCTION__);
+      if(pInfoFrame){
+        if(pInfoFrame->isRunning()){
+          ASSERT(parent == mythos_get_pthread_ec_self());
+          pInfoFrame->setParent(init::PARENT_EC);
+          while(pInfoFrame->isRunning()){
+            mythos_wait();
+          }
+          pInfoFrame->setParent(null_cap);
+        }
+      }
+      MLOG_DETAIL(mlog::app, "wait return");
+    }
+
+    void remove(PortalLock& pl){
+      MLOG_DETAIL(mlog::app, __PRETTY_FUNCTION__);
+      pInfoFrame = nullptr;
+      MLOG_DETAIL(mlog::app, "free dynamically allocated caps in own CS");
+      for (std::vector<CapPtr>::reverse_iterator i = caps.rbegin();
+           i != caps.rend(); ++i ) {
+        MLOG_DETAIL(mlog::app, DVAR(*i));
+        capAlloc.free(*i, pl);
+      }
+      caps.clear();
+      MLOG_DETAIL(mlog::app, "free capmap");
+      capAlloc.free(cs, pl);
+    }
+
+    void join(PortalLock& pl){
+      wait();
+      remove(pl);
+    }
+
+    ~Process(){}
+
     optional loadProgramHeader(PortalLock& pl, const elf64::PHeader* ph,
                                uintptr_t tmp_vaddr, Frame& f, size_t offset, PageMap& pm) {
@@ -45,7 +108,7 @@ class Process{
       mf.executable = ph->flags&elf64::PF_X;
       MLOG_DETAIL(mlog::app, "... mmap program header", DVARhex(vbegin), DVARhex(vend), DVARhex(offset));
       auto res = pm.mmap(pl, f, vbegin, vend-vbegin, mf, offset);
-      TEST(res);
+      ASSERT(res);
       // zero the pages, then copy the data
       MLOG_DETAIL(mlog::app, " zeroing", DVARhex(tmp_vaddr + offset), DVARhex(vend-vbegin));
@@ -80,28 +143,29 @@ class Process{
       MLOG_DETAIL(mlog::app, "allocate frame for application image ...")
       Frame f(capAlloc());
       auto res = f.create(pl, kmem, 2*size, align2M).wait();
+      caps.push_back(f.cap());
       uintptr_t tmp_vaddr = 42*align512G;
       MLOG_DETAIL(mlog::app, "temporary map frame to own address space ...", DVARhex(tmp_vaddr));
       MLOG_DETAIL(mlog::app, " create PageMap");
       PageMap pm3(capAlloc());
       res = pm3.create(pl, kmem, 3).wait();
-      TEST(res);
+      ASSERT(res);
       PageMap pm2(capAlloc());
       res = pm2.create(pl, kmem, 2).wait();
-      TEST(res);
+      ASSERT(res);
       MLOG_DETAIL(mlog::app, " installMap");
       res = myAS.installMap(pl, pm3, ((tmp_vaddr >> 39) & 0x1FF)<< 39, 4,
                             protocol::PageMap::MapFlags().writable(true).configurable(true)).wait();
-      TEST(res);
+      ASSERT(res);
       res = pm3.installMap(pl, pm2, ((tmp_vaddr >> 30) & 0x1FF) << 30, 3,
                            protocol::PageMap::MapFlags().writable(true).configurable(true)).wait();
-      TEST(res);
+      ASSERT(res);
       MLOG_DETAIL(mlog::app, " mmap");
       res = myAS.mmap(pl, f, tmp_vaddr, size, 0x1).wait();
-      TEST(res);
+      ASSERT(res);
       // 3) process each program header: map to page, copy contents
       size_t offset = 0;
@@ -120,22 +184,21 @@ class Process{
       MLOG_DETAIL(mlog::app, "cleaning up ...");
       MLOG_DETAIL(mlog::app, " unmap frame");
       res = myAS.munmap(pl, tmp_vaddr, size).wait();
-      TEST(res);
+      ASSERT(res);
       MLOG_DETAIL(mlog::app, " remove maps");
       res = myAS.removeMap(pl, tmp_vaddr, 2).wait();
-      TEST(res);
+      ASSERT(res);
       res = myAS.removeMap(pl, tmp_vaddr, 3).wait();
-      TEST(res);
+      ASSERT(res);
       MLOG_DETAIL(mlog::app, " move frame");
-      res = myCS.move(pl, f.cap(), max_cap_depth, cs.cap(), pCapAlloc(), max_cap_depth).wait();
-      TEST(res);
-      capAlloc.freeEmpty(f.cap());
+      res = myCS.reference(pl, f.cap(), max_cap_depth, cs.cap(), pCapAlloc(), max_cap_depth).wait();
+      ASSERT(res);
       MLOG_DETAIL(mlog::app, " delete tables");
-      capAlloc.free(pm3, pl);
-      capAlloc.free(pm2, pl);
+      capAlloc.free(pm3.cap(), pl);
+      capAlloc.free(pm2.cap(), pl);
       return ipc_addr;
     }
@@ -146,34 +209,37 @@ class Process{
       /* create CapMap */
       MLOG_DETAIL(mlog::app, "create CapMap ...");
       auto res = cs.create(pl, kmem, CapPtrDepth(12), CapPtrDepth(20), CapPtr(0)).wait();
-      TEST(res);
+      ASSERT(res);
+      MLOG_DETAIL(mlog::app, "set CapMap reference...");
+      res = myCS.reference(pl, cs.cap(), max_cap_depth, cs.cap(), init::CSPACE, max_cap_depth).wait();
+      ASSERT(res);

-      /* copy relevant caps */
+      //copy relevant caps
       MLOG_DETAIL(mlog::app, "copy relevant Caps ...");
       MLOG_DETAIL(mlog::app, " kernel memory");
-      res = myCS.reference(pl, init::KM, max_cap_depth, cs.cap(), init::KM, max_cap_depth, 0).wait();
-      TEST(res);
+      res = myCS.reference(pl, init::KM, max_cap_depth, cs.cap(), init::KM, max_cap_depth).wait();
+      ASSERT(res);
       MLOG_DETAIL(mlog::app, " factories");
       for(CapPtr ptr = init::EXAMPLE_FACTORY; ptr <= init::UNTYPED_MEMORY_FACTORY; ptr++){
-        res = myCS.reference(pl, ptr, max_cap_depth, cs.cap(), ptr, max_cap_depth, 0).wait();
-        TEST(res);
+        res = myCS.reference(pl, ptr, max_cap_depth, cs.cap(), ptr, max_cap_depth).wait();
+        ASSERT(res);
       }
-
+
       MLOG_DETAIL(mlog::app, " DEVICE_MEMORY");
-      res = myCS.reference(pl, init::DEVICE_MEM, max_cap_depth, cs.cap(), init::DEVICE_MEM, max_cap_depth, 0).wait();
-      TEST(res);
+      res = myCS.reference(pl, init::DEVICE_MEM, max_cap_depth, cs.cap(), init::DEVICE_MEM, max_cap_depth).wait();
+      ASSERT(res);
       MLOG_DETAIL(mlog::app, " RAPL driver");
-      res = myCS.reference(pl, init::RAPL_DRIVER_INTEL, max_cap_depth, cs.cap(), init::RAPL_DRIVER_INTEL, max_cap_depth, 0).wait();
-      TEST(res);
+      res = myCS.reference(pl, init::RAPL_DRIVER_INTEL, max_cap_depth, cs.cap(), init::RAPL_DRIVER_INTEL, max_cap_depth).wait();
+      ASSERT(res);
       MLOG_DETAIL(mlog::app, " Interrupt control");
       MLOG_WARN(mlog::app, "SKIP: Interrupt control caps!");
       //todo: how to check whether cap is existing?
       //for(CapPtr ptr = init::INTERRUPT_CONTROL_START; ptr < init::INTERRUPT_CONTROL_END; ptr++){
-        //res = myCS.reference(pl, ptr, max_cap_depth, cs.cap(), ptr, max_cap_depth, 0).wait();
-        //TEST(res);
+        //res = myCS.reference(pl, ptr, max_cap_depth, cs.cap(), ptr, max_cap_depth).wait();
+        //ASSERT(res);
       //}

       /* create address space */
@@ -183,38 +249,57 @@ class Process{
       // create tables
       PageMap pm4(capAlloc());
+      caps.push_back(pm4.cap());
       auto res_pm = pm4.create(pl, kmem, 4).wait();
-      TEST(res_pm);
+      ASSERT(res_pm);
       PageMap pm3(capAlloc());
+      caps.push_back(pm3.cap());
       res_pm = pm3.create(pl, kmem, 3).wait();
-      TEST(res_pm);
+      ASSERT(res_pm);
       PageMap pm2(capAlloc());
+      caps.push_back(pm2.cap());
       res_pm = pm2.create(pl, kmem, 2).wait();
-      TEST(res_pm);
+      ASSERT(res_pm);
       PageMap pm10(capAlloc());
+      caps.push_back(pm10.cap());
       res_pm = pm10.create(pl, kmem, 1).wait();
-      TEST(res_pm);
+      ASSERT(res_pm);
       PageMap pm11(capAlloc());
+      caps.push_back(pm11.cap());
       res_pm = pm11.create(pl, kmem, 1).wait();
-      TEST(res_pm);
+      ASSERT(res_pm);
       PageMap pm12(capAlloc());
+      caps.push_back(pm12.cap());
       res_pm = pm12.create(pl, kmem, 1).wait();
-      TEST(res_pm);
+      ASSERT(res_pm);
       PageMap pm13(capAlloc());
+      caps.push_back(pm13.cap());
       res_pm = pm13.create(pl, kmem, 1).wait();
-      TEST(res_pm);
+      ASSERT(res_pm);
       PageMap pm14(capAlloc());
+      caps.push_back(pm14.cap());
       res_pm = pm14.create(pl, kmem, 1).wait();
-      TEST(res_pm);
+      ASSERT(res_pm);
       PageMap pm15(capAlloc());
+      caps.push_back(pm15.cap());
       res_pm = pm15.create(pl, kmem, 1).wait();
-      TEST(res_pm);
+      ASSERT(res_pm);
       PageMap pm16(capAlloc());
+      caps.push_back(pm16.cap());
       res_pm = pm16.create(pl, kmem, 1).wait();
-      TEST(res_pm);
+      ASSERT(res_pm);
       PageMap pm17(capAlloc());
+      caps.push_back(pm17.cap());
       res_pm = pm17.create(pl, kmem, 1).wait();
-      TEST(res_pm);
+      ASSERT(res_pm);
+      PageMap pm18(capAlloc());
+      caps.push_back(pm18.cap());
+      res_pm = pm18.create(pl, kmem, 1).wait();
+      ASSERT(res_pm);
+      PageMap pm19(capAlloc());
+      caps.push_back(pm19.cap());
+      res_pm = pm19.create(pl, kmem, 1).wait();
+      ASSERT(res_pm);

       // install tables
       pm4.installMap(pl, pm3, ((elf_vaddr >> 39) & 0x1FF) << 39, 4,
@@ -237,74 +322,82 @@ class Process{
                      mythos::protocol::PageMap::MapFlags().writable(true).configurable(true)).wait();
       pm2.installMap(pl, pm17, ((elf_vaddr >> 21) & 0x1FF) + 7 << 21, 2,
                      mythos::protocol::PageMap::MapFlags().writable(true).configurable(true)).wait();
+      pm2.installMap(pl, pm18, ((elf_vaddr >> 21) & 0x1FF) + 8 << 21, 2,
+                     mythos::protocol::PageMap::MapFlags().writable(true).configurable(true)).wait();
+      pm2.installMap(pl, pm19, ((elf_vaddr >> 21) & 0x1FF) + 9 << 21, 2,
+                     mythos::protocol::PageMap::MapFlags().writable(true).configurable(true)).wait();

       /* load image */
       auto ipc_vaddr = loadImage(pl, pm4);
-      TEST(ipc_vaddr);
+      ASSERT(ipc_vaddr);

       /* create InfoFrame */
       MLOG_DETAIL(mlog::app, "create InfoFrame ...");
       auto size = round_up(sizeof(InfoFrame), align2M);
-      MLOG_DETAIL(mlog::app, " create frame", DVAR(size));
-      Frame infoFrame(capAlloc());
-      res = infoFrame.create(pl, kmem, size, align2M).wait();
-      TEST(res);
+      MLOG_DETAIL(mlog::app, " create frame");
+      Frame iFrame(capAlloc());
+      caps.push_back(iFrame.cap());
+      res = iFrame.create(pl, kmem, size, align2M).wait();
+      ASSERT(res);
+      res = myCS.reference(pl, iFrame.cap(), max_cap_depth, cs.cap(), init::INFO_FRAME, max_cap_depth).wait();
+      ASSERT(res);
       MLOG_DETAIL(mlog::app, " map frame to target page map", DVARhex(size), DVARhex(*ipc_vaddr));
-      res = pm4.mmap(pl, infoFrame, *ipc_vaddr, align2M, 0x1).wait();
-      TEST(res);
+      res = pm4.mmap(pl, iFrame, *ipc_vaddr, size, 0x1).wait();
+      ASSERT(res);
       uintptr_t tmp_vaddr_if = 11*align512G;
       MLOG_DETAIL(mlog::app, " temporally map frame to own page map", DVARhex(tmp_vaddr_if));
       PageMap pm3if(capAlloc());
+      caps.push_back(pm3if.cap());
       res = pm3if.create(pl, kmem, 3).wait();
-      TEST(res);
+      ASSERT(res);
       PageMap pm2if(capAlloc());
+      caps.push_back(pm2if.cap());
       res = pm2if.create(pl, kmem, 2).wait();
-      TEST(res);
+      ASSERT(res);
       MLOG_DETAIL(mlog::app, " installMap");
       res = myAS.installMap(pl, pm3if, ((tmp_vaddr_if >> 39) & 0x1FF)<< 39, 4,
                             protocol::PageMap::MapFlags().writable(true).configurable(true)).wait();
-      TEST(res);
+      ASSERT(res);
       res = pm3if.installMap(pl, pm2if, ((tmp_vaddr_if >> 30) & 0x1FF) << 30, 3,
                              protocol::PageMap::MapFlags().writable(true).configurable(true)).wait();
-      TEST(res);
+      ASSERT(res);
       MLOG_DETAIL(mlog::app, " mmap");
-      res = myAS.mmap(pl, infoFrame, tmp_vaddr_if, size, 0x1).wait();
-      TEST(res);
+      res = myAS.mmap(pl, iFrame, tmp_vaddr_if, size, 0x1).wait();
+      ASSERT(res);
       MLOG_DETAIL(mlog::app, " copy info frame content");
       mythos::memcpy(reinterpret_cast(tmp_vaddr_if), info_ptr, sizeof(InfoFrame));
-      MLOG_DETAIL(mlog::app, " unmap");
-      res = myAS.munmap(pl, tmp_vaddr_if, size).wait();
-      TEST(res);
-
-      MLOG_DETAIL(mlog::app, " remove maps");
-      res = myAS.removeMap(pl, tmp_vaddr_if, 2).wait();
-      TEST(res);
-      res = myAS.removeMap(pl, tmp_vaddr_if, 3).wait();
-      TEST(res);
-
-      MLOG_DETAIL(mlog::app, " delete tables");
-      capAlloc.free(pm3if, pl);
-      capAlloc.free(pm2if, pl);
+      pInfoFrame = reinterpret_cast<InfoFrame*>(tmp_vaddr_if);
+      pInfoFrame->setRunning(true);
+      pInfoFrame->setParent(null_cap);
+      res = myCS.reference(pl, parent, max_cap_depth, cs.cap(), init::PARENT_EC, max_cap_depth).wait();
+      ASSERT(res);

       /* create EC */
       MLOG_DETAIL(mlog::app, "create EC ...", DVARhex(img.header()->entry));
-      auto sc = pa.alloc(pl).wait();
-      TEST(sc);
-      MLOG_DETAIL(mlog::app, "allocated SC", DVAR(sc->cap));
       ExecutionContext ec(capAlloc());
-      res = ec.create(kmem).as(pm4).cs(cs).sched(sc->cap)
+      res = ec.create(kmem).as(pm4).cs(cs)
              .rawFun(reinterpret_cast(img.header()->entry), reinterpret_cast(*ipc_vaddr))
              .suspended(true)
              .invokeVia(pl).wait();
-      TEST(res);
-      MLOG_DETAIL(mlog::app, "move SC");
-      res = myCS.move(pl, sc->cap, max_cap_depth, cs.cap(), sc->cap, max_cap_depth).wait();
-      TEST(res);
+      caps.push_back(ec.cap());
+      ASSERT(res);
+
+      /* create ThreadTeam */
+      MLOG_DETAIL(mlog::app, "create ThreadTeam ...");
+      ThreadTeam tt(capAlloc());
+      res = tt.create(pl, kmem, init::PROCESSOR_ALLOCATOR).wait();
+      caps.push_back(tt.cap());
+      ASSERT(res);
+      MLOG_DETAIL(mlog::app, " register EC in ThreadTeam");
+      res = tt.tryRunEC(pl, ec).wait();
+      ASSERT(res);
+      res = myCS.reference(pl, tt.cap(), max_cap_depth, cs.cap(), init::THREAD_TEAM, max_cap_depth).wait();
+      ASSERT(res);

       /* create portal */
       MLOG_DETAIL(mlog::app, "create Portal ...");
@@ -313,82 +406,28 @@ class Process{
       MLOG_DETAIL(mlog::app, " create");
       Portal port(capAlloc(), reinterpret_cast(*ipc_vaddr));
       res = port.create(pl, kmem).wait();
-      TEST(res);
-      res = port.bind(pl, infoFrame, 0, ec.cap()).wait();
-      TEST(res);
+      caps.push_back(port.cap());
+      ASSERT(res);
+      res = port.bind(pl, iFrame, 0, ec.cap()).wait();
+      ASSERT(res);
       MLOG_DETAIL(mlog::app, " move Portal");
-      res = myCS.move(pl, port.cap(), max_cap_depth, cs.cap(), init::PORTAL, max_cap_depth).wait();
-      TEST(res);
-      capAlloc.freeEmpty(port.cap());
+      res = myCS.reference(pl, port.cap(), max_cap_depth, cs.cap(), init::PORTAL, max_cap_depth).wait();
+      ASSERT(res);
-      MLOG_DETAIL(mlog::app, " move info frame");
-      res = myCS.move(pl, infoFrame.cap(), max_cap_depth, cs.cap(), init::INFO_FRAME, max_cap_depth).wait();
-      TEST(res);
-      capAlloc.freeEmpty(infoFrame.cap());
-      /* move tables */
-      MLOG_DETAIL(mlog::app, "move tables ...");
-      res = myCS.move(pl, pm3.cap(), max_cap_depth, cs.cap(), pCapAlloc(), max_cap_depth).wait();
-      TEST(res);
-      capAlloc.freeEmpty(pm3.cap());
-
-      res = myCS.move(pl, pm2.cap(), max_cap_depth, cs.cap(), pCapAlloc(), max_cap_depth).wait();
-      TEST(res);
-      capAlloc.freeEmpty(pm2.cap());
-
-      res = myCS.move(pl, pm10.cap(), max_cap_depth, cs.cap(), pCapAlloc(), max_cap_depth).wait();
-      TEST(res);
-      capAlloc.freeEmpty(pm10.cap());
-
-      res = myCS.move(pl, pm11.cap(), max_cap_depth, cs.cap(), pCapAlloc(), max_cap_depth).wait();
-      TEST(res);
-      capAlloc.freeEmpty(pm11.cap());
-
-      res = myCS.move(pl, pm12.cap(), max_cap_depth, cs.cap(), pCapAlloc(), max_cap_depth).wait();
-      TEST(res);
-      capAlloc.freeEmpty(pm12.cap());
-
-      res = myCS.move(pl, pm13.cap(), max_cap_depth, cs.cap(), pCapAlloc(), max_cap_depth).wait();
-      TEST(res);
-      capAlloc.freeEmpty(pm13.cap());
-
-      res = myCS.move(pl, pm14.cap(), max_cap_depth, cs.cap(), pCapAlloc(), max_cap_depth).wait();
-      TEST(res);
-      capAlloc.freeEmpty(pm14.cap());
-
-      res = myCS.move(pl, pm15.cap(), max_cap_depth, cs.cap(), pCapAlloc(), max_cap_depth).wait();
-      TEST(res);
-      capAlloc.freeEmpty(pm15.cap());
-
-      res = myCS.move(pl, pm16.cap(), max_cap_depth, cs.cap(), pCapAlloc(), max_cap_depth).wait();
-      TEST(res);
-      capAlloc.freeEmpty(pm16.cap());
-
-      res = myCS.move(pl, pm17.cap(), max_cap_depth, cs.cap(), pCapAlloc(), max_cap_depth).wait();
-      TEST(res);
-      capAlloc.freeEmpty(pm17.cap());
-
-      res = myCS.move(pl, pm4.cap(), max_cap_depth, cs.cap(), init::PML4, max_cap_depth).wait();
-      TEST(res);
-      capAlloc.freeEmpty(pm4.cap());
+      MLOG_DETAIL(mlog::app, "reference pm4 ...");
+      res = myCS.reference(pl, pm4.cap(), max_cap_depth, cs.cap(), init::PML4, max_cap_depth).wait();

       /* wake EC */
       MLOG_DETAIL(mlog::app, "start process ...");
       MLOG_DETAIL(mlog::app, " move EC");
-      res = myCS.move(pl, ec.cap(), max_cap_depth, cs.cap(), init::EC, max_cap_depth).wait();
-      TEST(res);
-
-      MLOG_DETAIL(mlog::app, " create reference");
-      res = cs.reference(pl, init::EC, max_cap_depth, init::CSPACE, ec.cap(), max_cap_depth, 0).wait();
-      TEST(res);
+      res = myCS.reference(pl, ec.cap(), max_cap_depth, cs.cap(), init::EC, max_cap_depth).wait();
+      ASSERT(res);
       MLOG_DETAIL(mlog::app, " wake EC");
       res = ec.resume(pl).wait();
-      TEST(res);
-
-      MLOG_DETAIL(mlog::app, " remove EC");
-      capAlloc.free(ec.cap(), pl);
+      ASSERT(res);
       return cs.cap();
     }
@@ -398,6 +437,9 @@ class Process{
     SimpleCapAlloc pCapAlloc;
     elf64::Elf64Image img;
+    InfoFrame* pInfoFrame;
+    CapPtr parent;
+    std::vector<CapPtr> caps;
 };
diff --git a/kernel/runtime/tbb/mcconf.module b/kernel/runtime/tbb/mcconf.module
new file mode 100644
index 00000000..b612ed74
--- /dev/null
+++ b/kernel/runtime/tbb/mcconf.module
@@ -0,0 +1,8 @@
+# -*- mode:toml; -*-
+[module.tbb-app]
+    provides = [ "tbb/tbb.h" ]
+
+    makefile_head = '''
+APP_CPPFLAGS += -I ${vars.tbb_inc_path}
+INITAPP_CXXFLAGS += -DTBB_USE_EXCEPTIONS -DTBB_PREVIEW_WAITING_FOR_WORKERS
+'''
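
For orientation, a minimal usage sketch of the reworked Process class together with the ThreadTeam-based startup, as it might be driven from the init application. This is an illustration only: the setup method name createProcess() and the image symbol app_image are assumptions, while Process(char*), wait(), remove(PortalLock&), join(PortalLock&) and ThreadTeam::tryRunEC() are taken from the patch above.

    #include "runtime/process.hh"    // declares Process and the extern myCS/myAS/kmem/team objects

    extern mythos::Portal portal;    // the init application's portal object (defined in its init.cc)
    extern char app_image[];         // hypothetical embedded ELF image of the child program

    void spawnAndJoinChild()
    {
        mythos::PortalLock pl(portal);   // serialize invocations on the parent's portal
        Process child(app_image);        // allocates the child's CapMap slot via capAlloc()
        child.createProcess(pl);         // assumed name: builds the child's CSpace, page maps,
                                         // InfoFrame, EC and ThreadTeam as in the diff above,
                                         // then resumes the child's EC
        child.join(pl);                  // wait(): registers init::PARENT_EC in the child's
                                         // InfoFrame and loops on mythos_wait() until
                                         // isRunning() turns false; remove(): frees the caps
                                         // recorded in the caps vector and the child's CapMap
    }

On the child side, the new process.cc instantiates ProcessExitEvent, whose hook fires on groupExit: it clears isRunning() in the shared InfoFrame and signals the EC stored under init::PARENT_EC, which wakes the parent's mythos_wait() loop above.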