diff --git a/kernel/app/init-example/app/init.cc b/kernel/app/init-example/app/init.cc
index 588c53d6..1844d104 100644
--- a/kernel/app/init-example/app/init.cc
+++ b/kernel/app/init-example/app/init.cc
@@ -521,6 +521,53 @@ void test_process(){
   MLOG_INFO(mlog::app, "Test process finished");
 }
 
+void pageMapPageFault(){
+  MLOG_INFO(mlog::app, "Trigger PageMap page fault");
+
+  mythos::PortalLock pl(portal);
+
+  mythos::PageMap pm4(capAlloc());
+
+  auto res = pm4.create(pl, kmem, 4).wait();
+  ASSERT(res);
+
+  MLOG_INFO(mlog::app, "Try to delete pm4 -> page fault");
+
+  capAlloc.free(pm4.cap(), pl);
+
+  MLOG_INFO(mlog::app, "If you can read this, you might have fixed the page fault?!");
+}
+
+void pageMapDeadlock(){
+  MLOG_INFO(mlog::app, "Trigger PageMap deadlock");
+
+  mythos::PortalLock pl(portal);
+
+  mythos::PageMap pm4(capAlloc());
+  mythos::PageMap pm3(capAlloc());
+  mythos::PageMap pm2(capAlloc());
+
+  auto res = pm4.create(pl, kmem, 4).wait();
+  ASSERT(res);
+  res = pm3.create(pl, kmem, 3).wait();
+  ASSERT(res);
+  res = pm2.create(pl, kmem, 2).wait();
+  ASSERT(res);
+
+  uintptr_t vaddr = 0x4000000;
+
+  pm4.installMap(pl, pm3, ((vaddr >> 39) & 0x1FF) << 39, 4,
+    mythos::protocol::PageMap::MapFlags().writable(true).configurable(true)).wait();
+  pm3.installMap(pl, pm2, ((vaddr >> 30) & 0x1FF) << 30, 3,
+    mythos::protocol::PageMap::MapFlags().writable(true).configurable(true)).wait();
+
+  MLOG_INFO(mlog::app, "Try to delete pm3 -> deadlock");
+
+  capAlloc.free(pm3.cap(), pl);
+
+  MLOG_INFO(mlog::app, "If you can read this, you might have fixed the deadlock?!");
+}
+
 void testCapMapDeletion(){
   MLOG_INFO(mlog::app, "Test CapMap deletion");
 
@@ -540,21 +587,23 @@ int main()
   mythos::syscall_debug(str, sizeof(str)-1);
   MLOG_ERROR(mlog::app, "application is starting :)", DVARhex(info_ptr), DVARhex(initstack_top));
 
-  test_float();
-  test_Example();
-  test_Portal();
+  //test_float();
+  //test_Example();
+  //test_Portal();
   test_heap(); // heap must be initialized for tls test
-  test_tls();
-  test_exceptions();
+  //test_tls();
+  //test_exceptions();
   //test_InterruptControl();
   //test_HostChannel(portal, 24*1024*1024, 2*1024*1024);
   test_ExecutionContext();
-  test_pthreads();
-  test_Rapl();
-  test_processor_allocator();
+  //test_pthreads();
+  //test_Rapl();
+  //test_processor_allocator();
   //test_process();
   //test_CgaScreen();
   testCapMapDeletion();
+  pageMapPageFault();
+  pageMapDeadlock();
 
   char const end[] = "bye, cruel world!";
   mythos::syscall_debug(end, sizeof(end)-1);
diff --git a/kernel/boot/apboot-common/boot/DeployHWThread.hh b/kernel/boot/apboot-common/boot/DeployHWThread.hh
index 946fc25c..e5be3b3f 100644
--- a/kernel/boot/apboot-common/boot/DeployHWThread.hh
+++ b/kernel/boot/apboot-common/boot/DeployHWThread.hh
@@ -38,6 +38,7 @@
 #include "cpu/idle.hh"
 #include "async/Place.hh"
 #include "objects/DeleteBroadcast.hh"
+#include "objects/PML4InvalidationBroadcastAmd64.hh"
 #include "objects/SchedulingContext.hh"
 #include "objects/InterruptControl.hh"
 #include "boot/memory-layout.h"
@@ -78,6 +79,7 @@ struct DeployHWThread {
     idt.init();
     DeleteBroadcast::init(); // depends on hwthread enumeration
+    PML4InvalidationBroadcast::init(); // depends on hwthread enumeration
   }
 
   void prepare(cpu::ThreadID threadID, cpu::ApicID apicID)
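The offsets passed to installMap in pageMapDeadlock above are just the 9-bit table index of the target paging level, shifted back to its bit position. A minimal standalone sketch of that arithmetic (illustrative only; slotOffset is a made-up helper name, not part of the MyThOS API):

#include <cstdint>
#include <cstdio>

// Hypothetical helper for illustration: extract the 9-bit table index of a
// paging level (4 = PML4, 3 = PDPT, 2 = PD, 1 = PT) and shift it back to its
// bit position, which is the form the installMap calls above pass as offset.
static uintptr_t slotOffset(uintptr_t vaddr, unsigned level) {
  unsigned shift = 12 + 9 * (level - 1);      // 39 for PML4, 30 for PDPT, 21 for PD, 12 for PT
  return ((vaddr >> shift) & 0x1FF) << shift; // keep only that level's index bits
}

int main() {
  uintptr_t vaddr = 0x4000000; // 64 MiB, the address used by pageMapDeadlock
  std::printf("PML4 offset: %#lx\n", (unsigned long)slotOffset(vaddr, 4)); // 0 -> slot 0 of pm4
  std::printf("PDPT offset: %#lx\n", (unsigned long)slotOffset(vaddr, 3)); // 0 -> slot 0 of pm3
  return 0;
}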
diff --git a/kernel/objects/capability-spinning/objects/CapEntry.cc b/kernel/objects/capability-spinning/objects/CapEntry.cc
index 52c134ed..feb723db 100644
--- a/kernel/objects/capability-spinning/objects/CapEntry.cc
+++ b/kernel/objects/capability-spinning/objects/CapEntry.cc
@@ -32,11 +32,14 @@ namespace mythos {
 
   void CapEntry::initRoot(Cap c)
   {
+    MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(this));
     ASSERT(isKernelAddress(this));
     ASSERT(c.isUsable());
     ASSERT(cap().isEmpty());
     Link loopLink(this);
+    MLOG_ERROR(mlog::cap, "this unlocks _next");
     _next.store(loopLink.value());
+    MLOG_ERROR(mlog::cap, "this unlocks _prev");
     _prev.store(loopLink.value());
     _cap.store(c.value());
   }
@@ -62,6 +65,7 @@ namespace mythos {
 
   void CapEntry::reset()
   {
+    MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(this));
     ASSERT(isUnlinked() || cap().isAllocated());
     _prev.store(Link().value());
     _next.store(Link().value());
@@ -80,32 +84,40 @@ namespace mythos {
 
   optional<void> CapEntry::moveTo(CapEntry& other)
   {
+    MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(this), DVAR(other));
     ASSERT(other.cap().isAllocated());
     ASSERT(!other.isLinked());
+    lock_cap();
     if (!lock_prev()) {
+      unlock_cap();
      other.reset();
       THROW(Error::GENERIC_ERROR);
     }
 
-    lock();
+    lock_next();
     auto thisCap = cap();
     if (isRevoking() || !thisCap.isUsable()) {
       other.reset();
-      unlock();
+      unlock_next();
       unlock_prev();
+      unlock_cap();
       THROW(Error::INVALID_CAPABILITY);
     }
 
+    // using these values removes the lock
     auto next= Link(_next).withoutFlags();
     auto prev= Link(_prev).withoutFlags();
     next->setPrevPreserveFlags(&other);
     other._next.store(next.value());
-    // deleted or revoking can not be set in other._prev
+    // deletion, deleted or revoking can not be set in other._prev
     // as we allocated other for moving
     other._prev.store(prev.value());
+    MLOG_ERROR(mlog::cap, "this unlocks prev");
     prev->_next.store(Link(&other).value());
     other.commit(thisCap);
+    MLOG_ERROR(mlog::cap, "this unlocks cap");
     _prev.store(Link().value());
+    MLOG_ERROR(mlog::cap, "this unlocks next");
     _next.store(Link().value());
     _cap.store(Cap().value());
     RETURN(Error::SUCCESS);
@@ -125,13 +137,28 @@ namespace mythos {
     return true;
   }
 
-  optional<void> CapEntry::unlink()
+  // fails if cap was changed concurrently
+  bool CapEntry::try_kill(Cap expected)
   {
-    auto next = Link(_next).withoutFlags();
-    auto prev = Link(_prev).withoutFlags();
-    next->_prev.store(prev.value());
-    prev->_next.store(next.value());
-    _prev.store(Link().value());
+    CapValue expectedValue = expected.value();
+    MLOG_DETAIL(mlog::cap, this, ".try_kill", DVAR(expected));
+    if (!_cap.compare_exchange_strong(expectedValue, expected.asZombie().value())) {
+      // if the cap was just zombified by somebody else, that's okay
+      return (Cap(expectedValue).asZombie() == expected.asZombie());
+    } else return true;
+  }
+
+
+  optional<void> CapEntry::unlinkAndUnlockLinks()
+  {
+    MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(this));
+    auto next = Link(_next);
+    auto prev = Link(_prev);
+    next->setPrevPreserveFlags(prev.ptr());
+    MLOG_ERROR(mlog::cap, "this unlocks _next of predecessor");
+    prev->_next.store(next.withoutFlags().value());
+    _prev.store(Link().withoutPtr().value());
+    MLOG_ERROR(mlog::cap, "this unlocks _next");
     _next.store(Link().value());
     RETURN(Error::SUCCESS);
   }
@@ -139,17 +166,13 @@
   Error CapEntry::try_lock_prev()
   {
     auto prev = Link(_prev).ptr();
+    MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(this), DVAR(prev));
     if (!prev) {
       return Error::GENERIC_ERROR;
     }
-    if (prev->try_lock()) {
-      if (Link(_prev.load()).ptr() == prev) {
-        return Error::SUCCESS;
-      } else { // my _prev has changed in the mean time
-        prev->unlock();
-        return Error::RETRY;
-      }
-    } else return Error::RETRY;
+    auto success = prev->try_lock_next(this);
+    ASSERT(Link(_prev.load()).ptr() == prev);
+    return success ? Error::SUCCESS : Error::RETRY;
   }
 
   bool CapEntry::lock_prev()
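The CapEntry changes above replace the single lock with three separate ones: lock_cap protects the capability value (and a pending deleteCap), lock_prev locks the predecessor's _next, i.e. the incoming link, and lock_next protects the outgoing link; try_kill zombifies the value with a compare-exchange and tolerates losing the race to an identical zombification. A minimal sketch of the acquisition order, modeled on moveTo() above and resetReference() in ops.hh below (illustrative only; unlinkEntrySketch is not part of the patch, and kill()/try_kill() of the capability value is omitted):

#include "objects/CapEntry.hh" // CapEntry interface as extended by this patch

// Sketch only: the lock order lock_cap -> lock_prev -> lock_next when
// removing an entry from the capability tree.
bool unlinkEntrySketch(mythos::CapEntry& e)
{
  e.lock_cap();               // 1) freeze the capability value / any pending deleteCap
  if (!e.lock_prev()) {       // 2) lock the predecessor's _next, our incoming link
    e.unlock_cap();
    return false;             // already unlinked by somebody else
  }
  e.lock_next();              // 3) lock our own outgoing link
  e.unlinkAndUnlockLinks();   // splice the entry out; releases both link locks
  e.reset();                  // mark the entry empty; clears _prev including the lock flag
  return true;
}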
diff --git a/kernel/objects/capability-spinning/objects/CapEntry.hh b/kernel/objects/capability-spinning/objects/CapEntry.hh
index e837ed87..f8539e9f 100644
--- a/kernel/objects/capability-spinning/objects/CapEntry.hh
+++ b/kernel/objects/capability-spinning/objects/CapEntry.hh
@@ -44,7 +44,15 @@ namespace mythos {
    * thus prev and next of root are locked independently
    *
    * operations that write to the capability
-   * or flags (zombie) must lock both next and prev
+   * or call deleteCap on an object must lock_cap
+   *
+   * lock order: lock_cap, lock_prev, lock_next
+   *
+   * acquired status means someone exclusively acquired an unlinked CapEntry,
+   * therefore no races with others trying to insert it.
+   *
+   * acquired status must only be held briefly
+   *
    */
   class CapEntry
   {
@@ -83,22 +91,84 @@
     * allocated. Returns true if zombified. */
    bool kill();
-    optional<void> unlink();
-    bool try_lock() { return !(_next.fetch_or(LOCKED_FLAG) & LOCKED_FLAG); }
-    void lock() { while (!try_lock()) { hwthread_pause(); } }
-    void unlock() { auto res = _next.fetch_and(~LOCKED_FLAG); ASSERT(res & LOCKED_FLAG); }
+    bool try_kill(Cap expected);
+
+    optional<void> unlinkAndUnlockLinks();
+
+    /* lock next functions protect the link to the next CapEntry */
+
+    bool try_lock_next(CapEntry* next)
+    {
+      Link expected(next);
+      uintlink_t expectedValue = expected.value();
+      auto ret = _next.compare_exchange_strong(expectedValue, expected.withFlags(LOCKED_FLAG).value());
+      MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(this), ret? " locked" : "locking failed!");
+      return ret;
+    }
+
+
+    void lock_next()
+    {
+      int loop = 0;
+      while (!try_lock_next()) {
+        hwthread_pause();
+#warning remove counting for production
+        loop++;
+        PANIC_MSG(loop < 3, "locking next failed too many times");
+      }
+    }
+
+    void unlock_next()
+    {
+      MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(this));
+      auto res = _next.fetch_and(~LOCKED_FLAG);
+      ASSERT(res & LOCKED_FLAG);
+    }
+
+    /* deletion lock functions protect the deletion of an object */
+
+    bool try_lock_cap()
+    {
+      bool ret = !(_prev.fetch_or(LOCKED_FLAG) & LOCKED_FLAG);
+      MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(this), ret? " locked" : "locking failed!");
+      return ret;
+    }
+
+    void lock_cap()
+    {
+      int loop = 0;
+      while (!try_lock_cap()) {
+        hwthread_pause();
+#warning remove counting for production
+        loop++;
+        PANIC_MSG(loop < 3, "locking failed too many times");
+      }
+    }
+
+    void unlock_cap()
+    {
+      MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(this));
+      auto res = _prev.fetch_and(~LOCKED_FLAG);
+      ASSERT(res & LOCKED_FLAG);
+    }
 
     /// only for assertions and debugging
     /// only trust the result if it is false and it should be true
-    bool is_locked() const { return _next.load() & CapEntry::LOCKED_FLAG; }
+    bool next_is_locked() const { return _next.load() & CapEntry::LOCKED_FLAG; }
+
+    /* lock prev functions protect the link from the previous CapEntry to this one */
 
     Error try_lock_prev();
     bool lock_prev();
-    void unlock_prev() { Link(_prev)->unlock(); }
+
+    void unlock_prev()
+    {
+      MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(this));
+      Link(_prev)->unlock_next();
+    }
 
     CapEntry* next() {
-      ASSERT(is_locked());
       return Link(_next).ptr();
     }
@@ -115,9 +185,23 @@
     // called by move and insertAfter
     void setPrevPreserveFlags(CapEntry* ptr);
 
+    // called by lock_next
+    bool try_lock_next()
+    {
+      bool ret = !(_next.fetch_or(LOCKED_FLAG) & LOCKED_FLAG);
+      MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(this), ret? " locked" : "locking failed!");
+      return ret;
+    }
+
+    // lock flag in _next and _prev
+    // _next protects the link to the next entry (lock_next)
+    // _prev protects the capability in the entry from being changed (lock_cap)
     static constexpr uintlink_t LOCKED_FLAG = 1;
-    static constexpr uintlink_t REVOKING_FLAG = 1 << 1;
-    static constexpr uintlink_t DELETED_FLAG = 1 << 2;
+
+    // flags describing the entry, stored in _prev
+    static constexpr uintlink_t REVOKING_FLAG = 1 << 1; // prevents moving
+    static constexpr uintlink_t DELETED_FLAG = 1 << 2; // prevents inserting into a soon-to-be-deleted object
+
     static constexpr uintlink_t FLAG_MASK = 7;
     static_assert((DELETED_FLAG | REVOKING_FLAG | FLAG_MASK) == FLAG_MASK, "prev flags do not fit");
@@ -134,9 +218,10 @@
       CapEntry* operator->() { ASSERT(ptr()); return ptr(); }
 
       Link withFlags(uintlink_t flags) const { return Link(_offset(), flags); }
-      Link withoutFlags() const { return Link(_offset(), 0); }
+      Link withoutFlags() const { return withFlags(0); }
       Link withPtr(CapEntry* ptr) const { return Link(ptr, flags()); }
+      Link withoutPtr() const { return withPtr(nullptr); }
 
       CapEntry* ptr() const
       {
@@ -185,13 +270,18 @@
                          CapEntry& targetEntry, Cap targetCap, COMMITFUN const& commit)
    {
+      MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(this), DVAR(parentCap), DVAR(targetEntry), DVAR(targetCap));
       ASSERT(isKernelAddress(this));
       ASSERT(targetEntry.cap().isAllocated());
-      lock(); // lock the parent entry, the child is already acquired
+      // lock the parent entry, the child is already acquired
+      lock_cap();
+      lock_next();
       auto curCap = cap(); // lazy-locking: check that we still operate on the same parent capability
       if (!curCap.isUsable() || curCap != parentCap) {
-        unlock(); // unlock the parent entry
+        // unlock the parent entry
+        unlock_next();
+        unlock_cap();
         targetEntry.reset(); // release exclusive usage and revert to an empty entry
         THROW(Error::LOST_RACE);
       }
@@ -201,11 +291,15 @@
       auto next = Link(_next.load()).withoutFlags();
       next->setPrevPreserveFlags(&targetEntry);
+      // dest was never locked MLOG_ERROR(mlog::cap, "this unlocks next in child", DVAR(targetEntry));
       targetEntry._next.store(next.value());
       // deleted or revoking can not be set in target._prev
       // as we allocated target for being inserted
+      // dest was never locked MLOG_ERROR(mlog::cap, "this unlocks cap in child", DVAR(targetEntry));
       targetEntry._prev.store(Link(this).value());
+      MLOG_ERROR(mlog::cap, "this unlocks _next ");
       this->_next.store(Link(&targetEntry).value()); // unlocks the parent entry
+      unlock_cap();
       targetEntry.commit(targetCap); // release the target entry as usable
       RETURN(Error::SUCCESS);
     }
@@ -219,7 +313,7 @@
     if (entry.isLinked()) out << ":linked";
     if (entry.isDeleted()) out << ":deleted";
     if (entry.isUnlinked()) out << ":unlinked";
-    if (entry.is_locked()) out << ":locked";
+    if (entry.next_is_locked()) out << ":next_locked";
     if (entry.isRevoking()) out << ":revoking";
     return out;
   }
diff --git a/kernel/objects/capability-spinning/objects/RevokeOperation.cc b/kernel/objects/capability-spinning/objects/RevokeOperation.cc
index 18e373ca..b8580dd9 100644
--- a/kernel/objects/capability-spinning/objects/RevokeOperation.cc
+++ b/kernel/objects/capability-spinning/objects/RevokeOperation.cc
@@ -85,11 +85,11 @@
       monitor.requestDone();
       return;
     }
-    entry.lock();
+    entry.lock_cap();
     auto rootCap = entry.cap();
     if (!rootCap.isUsable()) {
       // this is not the cap you are locking for ...
-      entry.unlock();
+      entry.unlock_cap();
       res->response(t, Error::LOST_RACE);
       release();
       monitor.requestDone();
@@ -99,98 +99,91 @@
     // if some other revoke or delete clears the flag or changes the cap values
     // all children have been deleted in the mean time and we are done
     entry.setRevoking();
-    entry.unlock();
+    entry.unlock_cap();
     _result = _delete(&entry, rootCap).state();
     _startAsyncDelete(t);
   }
 
   optional<void> RevokeOperation::_delete(CapEntry* root, Cap rootCap)
   {
+    MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(root), DVAR(rootCap));
     CapEntry* leaf;
+    Cap leafCap;
     do {
-      if (_startTraversal(root, rootCap)) {
-        leaf = _findLockedLeaf(root);
-        MLOG_DETAIL(mlog::cap, "_findLockedLeaf returned", DVAR(*leaf), DVAR(rootCap));
-        if (leaf == root && !rootCap.isZombie()) {
-          // this is a revoke, do not delete the root. no more children -> we are done
-          root->finishRevoke();
-          root->unlock();
-          root->unlock_prev();
-          RETURN(Error::SUCCESS);
-        }
-        auto leafCap = leaf->cap();
-        ASSERT(leafCap.isZombie());
-        if (leafCap.getPtr() == _guarded) {
-          leaf->unlock();
-          leaf->unlock_prev();
-          // attempted to delete guarded object
-          THROW(Error::CYCLIC_DEPENDENCY);
-        }
-        auto delRes = leafCap.getPtr()->deleteCap(*leaf, leafCap, *this);
-        if (delRes) {
-          leaf->unlink();
-          leaf->reset();
-        } else {
-          // Either tried to delete a portal that is currently deleting
-          // or tried to to delete _guarded via a recursive call.
-          leaf->unlock();
-          leaf->unlock_prev();
-          RETHROW(delRes);
-        }
-      } else RETURN(Error::SUCCESS); // could not restart, must be done
+      // compare only value, not the zombie state
+      if (root->cap().asZombie() != rootCap.asZombie()) {
+        // start has a new value
+        // must be the work of another deleter ... success!
+        RETURN(Error::SUCCESS); // could not restart, must be done
+      }
+      if (!_findLeaf(root, rootCap, leaf, leafCap)) continue;
+      MLOG_ERROR(mlog::cap, "_findLeaf returned", DVAR(*leaf), DVAR(rootCap));
+      if (leaf == root && !rootCap.isZombie()) {
+        // this is a revoke, do not delete the root. no more children -> we are done
+        root->finishRevoke();
+        RETURN(Error::SUCCESS);
+      }
+      ASSERT(leafCap.isZombie());
+      if (leafCap.getPtr() == _guarded) {
+        leaf->unlock_cap();
+        // attempted to delete guarded object
+        THROW(Error::CYCLIC_DEPENDENCY);
+      }
+      leaf->lock_cap();
+      if (leaf->cap() != leafCap) {
+        MLOG_DETAIL(mlog::cap, "leaf cap changed concurrently");
+        leaf->unlock_cap();
+        continue;
+      }
+      auto delRes = leafCap.getPtr()->deleteCap(*leaf, leafCap, *this);
+      auto prevres = leaf->lock_prev();
+      ASSERT_MSG(prevres, "somebody unlinked a CapEntry that is currently being unlinked");
+      leaf->lock_next();
+      if (delRes) {
+        leaf->unlinkAndUnlockLinks();
+        leaf->reset();
+      } else {
+        // deletion failed in the object specific handler
+        // this can also result from trying to delete the guarded object (currently deleting portal)
+        leaf->unlock_cap();
+        RETHROW(delRes);
+      }
     } while (leaf != root); // deleted root
     RETURN(Error::SUCCESS);
   }
 
-  bool RevokeOperation::_startTraversal(CapEntry* root, Cap rootCap)
-  {
-    if (!root->lock_prev()) {
-      // start is currently unlinked
-      // must be the work of another deleter ... success!
-      return false;
-    }
-    if (root->prev() == root) {
-      // this is the actual root of the tree
-      // and has no children
-      // avoid deadlocks and finish the operation
-      root->unlock_prev();
-      return false;
-    }
-    root->lock();
-    // compare only value, not the zombie state
-    if (root->cap().asZombie() != rootCap.asZombie()) {
-      // start has a new value
-      // must be the work of another deleter ... success!
-      root->unlock();
-      root->unlock_prev();
-      return false;
-    }
-    return true;
-  }
-
-  CapEntry* RevokeOperation::_findLockedLeaf(CapEntry* root)
+  // not sure if we even need that locking
+  bool RevokeOperation::_findLeaf(CapEntry* const root, Cap const rootCap, CapEntry*& leafEntry, Cap& leafCap)
   {
-    auto curEntry = root;
+    MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(root) );
+    leafEntry = root;
+    leafCap = rootCap;
     while (true) {
-      auto curCap = curEntry->cap();
-      auto nextEntry = curEntry->next();
-      // wait for potencially allocated cap to become usable/zombie
-      Cap nextCap;
-      for (nextCap = nextEntry->cap();
-           nextCap.isAllocated();
-           nextCap = nextEntry->cap()) {
-        ASSERT(!nextEntry->isDeleted());
-        hwthread_pause();
+      auto nextEntry = leafEntry->next();
+      if (nextEntry) {
+        Cap nextCap;
+        // wait for potentially allocated cap to become usable/zombie
+        for (nextCap = nextEntry->cap();
+             nextCap.isAllocated();
+             nextCap = nextEntry->cap()) {
+          ASSERT(!nextEntry->isDeleted());
+          hwthread_pause();
+        }
+        if (cap::isParentOf(*leafEntry, leafCap, *nextEntry, nextCap)) {
+          if (!nextEntry->try_kill(nextCap)) {
+            MLOG_DETAIL(mlog::cap, "cap to be killed changed concurrently");
+            return false;
+          }
+          // go to next child
+          leafEntry = nextEntry;
+          leafCap = nextCap.asZombie();
+          continue;
+        } else return true;
+      } else {
+        MLOG_DETAIL(mlog::cap, "found dead end scanning for leaf");
+        return false; // restart at root
       }
-      if (cap::isParentOf(*curEntry, curCap, *nextEntry, nextCap)) {
-        // go to next child
-        curEntry->unlock_prev();
-        nextEntry->kill();
-        nextEntry->lock();
-        curEntry = nextEntry;
-        continue;
-      } else return curEntry;
     }
   }
diff --git a/kernel/objects/capability-spinning/objects/RevokeOperation.hh b/kernel/objects/capability-spinning/objects/RevokeOperation.hh
index 6ad3a33c..0ec3c42b 100644
--- a/kernel/objects/capability-spinning/objects/RevokeOperation.hh
+++ b/kernel/objects/capability-spinning/objects/RevokeOperation.hh
@@ -39,6 +39,29 @@ namespace mythos {
 
+/**
+ * The revoke operation consists of 2 phases separated
+ * by an invalidation broadcast.
+ *
+ * 1. synchronous phase removes entries from the capability tree
+ * 2. delete broadcast ensures there are no more references on any stack
+ * 3. asynchronous phase interacts with a KM to recycle objects
+ *
+ * 1. synchronous phase
+ *
+ * - Traverses the tree starting from a node (root of that subtree).
+ * - this traverses the list without locking
+ * - It finds a leaf, killing all the capabilities in between.
+ * - the leaf is deleted by calling deleteCap of the object (protected by lock_cap).
+ *   if that succeeds, the CapEntry MUST be removed from the tree, as we can't do that twice
+ *   and can't hold lock_cap forever.
+ * - If there are problems because of concurrent access, the operation
+ *   restarts at the root of the subtree.
+ * - If we can't fix problems synchronously, we switch to the asynchronous phase
+ *   without finishing, reporting "Error::RETRY" to the user.
+ * - the guarded object is the object containing the RevokeOperation; it can't be deleted
+ *
+ */
 class RevokeOperation
   : public IResult
   , protected IDeleter
@@ -107,8 +130,7 @@
 private:
     void _deleteObject(Tasklet* t);
     optional<void> _delete(CapEntry* root, Cap rootCap);
-    bool _startTraversal(CapEntry* root, Cap rootCap);
-    CapEntry* _findLockedLeaf(CapEntry* root);
+    bool _findLeaf(CapEntry* const root, Cap const rootCap, CapEntry*& leaf, Cap& leafCap);
 
     void _startAsyncDelete(Tasklet* t);
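As a reading aid for the synchronous phase described above, a condensed sketch of the loop implemented by RevokeOperation::_delete() and _findLeaf() in RevokeOperation.cc (simplified restatement of the patch, not compilable as-is; the broadcast, logging and the asynchronous hand-off are omitted, and _deleteSketch is not a real member):

// Sketch only: synchronous phase of revoke/delete, condensed from _delete()/_findLeaf().
optional<void> RevokeOperation::_deleteSketch(CapEntry* root, Cap rootCap)
{
  CapEntry* leaf; Cap leafCap;
  do {
    if (root->cap().asZombie() != rootCap.asZombie())
      RETURN(Error::SUCCESS);                   // another deleter already finished the job
    // _findLeaf walks towards the last descendant, zombifying children via try_kill()
    if (!_findLeaf(root, rootCap, leaf, leafCap)) continue; // concurrent change: restart at root
    leaf->lock_cap();                           // freeze the leaf before object-specific cleanup
    if (leaf->cap() != leafCap) { leaf->unlock_cap(); continue; }
    auto delRes = leafCap.getPtr()->deleteCap(*leaf, leafCap, *this);
    leaf->lock_prev();                          // then take both link locks ...
    leaf->lock_next();
    if (!delRes) { leaf->unlock_cap(); RETHROW(delRes); } // hand over to async phase, report RETRY
    leaf->unlinkAndUnlockLinks();               // ... and splice the entry out of the tree
    leaf->reset();                              // entry becomes reusable
  } while (leaf != root);                       // done once the subtree root itself was removed
  RETURN(Error::SUCCESS);
}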
diff --git a/kernel/objects/capability-utils/objects/ops.hh b/kernel/objects/capability-utils/objects/ops.hh
index a0cc8a9f..068c3074 100644
--- a/kernel/objects/capability-utils/objects/ops.hh
+++ b/kernel/objects/capability-utils/objects/ops.hh
@@ -72,10 +72,14 @@
     bool resetReference(CapEntry& dst, const COMMITFUN& fun)
     {
       if (!dst.kill()) return false; // not killable (allocated but not usable)
-      if (!dst.lock_prev()) return true; // was already unlinked, should be empty eventually
-      dst.lock();
+      dst.lock_cap();
+      if (!dst.lock_prev()) {
+        dst.unlock_cap();
+        return true; // was already unlinked, should be empty eventually
+      }
+      dst.lock_next();
       dst.kill(); // kill again because someone might have inserted something usable meanwhile
-      dst.unlink();
+      dst.unlinkAndUnlockLinks();
       fun(); // perform some caller-defined action while still in an exclusive state
       dst.reset(); // this markes the entry as writeable again, leaves exclusive state
       return true;
diff --git a/kernel/objects/memory-amd64/objects/PML4InvalidationBroadcastAmd64.cc b/kernel/objects/memory-amd64/objects/PML4InvalidationBroadcastAmd64.cc
index 9129a0b6..1a0cfcac 100644
--- a/kernel/objects/memory-amd64/objects/PML4InvalidationBroadcastAmd64.cc
+++ b/kernel/objects/memory-amd64/objects/PML4InvalidationBroadcastAmd64.cc
@@ -58,6 +58,7 @@
     }
     // propagate
     PML4InvalidationBroadcast* pnext = this->next;
+    ASSERT(pnext);
     while (pnext != start && pnext->home->getCR3() != pml4) pnext = pnext->next;
     if (pnext != start) {
       MLOG_DETAIL(mlog::cap, "relay pml4 invalidation");
diff --git a/kernel/objects/memory-amd64/objects/PageMapAmd64.cc b/kernel/objects/memory-amd64/objects/PageMapAmd64.cc
index a3d88dbb..ad337076 100644
--- a/kernel/objects/memory-amd64/objects/PageMapAmd64.cc
+++ b/kernel/objects/memory-amd64/objects/PageMapAmd64.cc
@@ -90,27 +90,31 @@
       for (size_t i = num_caps(); i < TABLE_SIZE; ++i) MLOG_INFO(mlog::cap, i, _pm_table(i));
   }
 
-  optional<void> PageMap::MappedFrame::deleteCap(CapEntry& entry, Cap /*self*/, IDeleter&)
+  optional<void> PageMap::MappedFrame::deleteCap(CapEntry& entry, Cap self, IDeleter&)
   {
+    MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(entry), DVAR(self), DVARhex(this));
     auto idx = &entry - &map->_cap_table(0);
     MLOG_DETAIL(mlog::cap, "delete mapped Frame", DVAR(idx));
     map->_pm_table(idx).reset();
     RETURN(Error::SUCCESS);
   }
 
-  optional<void> PageMap::MappedPageMap::deleteCap(CapEntry& entry, Cap /*self*/, IDeleter&)
+  optional<void> PageMap::MappedPageMap::deleteCap(CapEntry& entry, Cap self, IDeleter&)
   {
+    MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, DVAR(entry), DVAR(self), DVARhex(this));
     auto idx = &entry - &map->_cap_table(0);
-    MLOG_DETAIL(mlog::cap, "delete mapped PageMap", DVAR(idx));
+    MLOG_ERROR(mlog::cap, "delete mapped PageMap", DVAR(idx));
     map->_pm_table(idx).reset();
     RETURN(Error::SUCCESS);
   }
 
   optional<void> PageMap::deleteCap(CapEntry&, Cap self, IDeleter& del)
   {
+    MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, self, DVARhex(this));
     if (self.isOriginal()) {
-      MLOG_DETAIL(mlog::cap, "delete PageMap", self);
+      MLOG_ERROR(mlog::cap, "delete PageMap", self);
       for (size_t i = 0; i < num_caps(); ++i) {
+        if (_cap_table(i).next_is_locked()) MLOG_ERROR(mlog::cap, __PRETTY_FUNCTION__, "table is locked and cannot be deleted! -> deadlock detected!");
        auto res = del.deleteEntry(_cap_table(i));
         ASSERT_MSG(res, "Mapped entries must be deletable.");
         if (!res) RETHROW(res);
@@ -383,7 +387,7 @@
     auto res = visitTables(&_pm_table(0), level(), op);
     *failaddr = op.failaddr;
     *faillevel = op.current_level;
-    RETHROW(res.state());
+    RETURN(res.state());
   }
 
   Error PageMap::invokeInstallMap(Tasklet*, Cap self, IInvocation* msg)