• A fun mutex, part II...

    From Chris M. Thomasson@3:633/280.2 to All on Mon Aug 18 09:51:27 2025
    Fwiw, here is a total experiment of mine. I think it should work; I need
    to verify it in Relacy (no time right now), but it's a way to have a work
    stack and a critical section at the same time. I really need to add in a
    try_lock that pops the stack while trying to acquire the mutex. Anyway,
    can you give it a go in your spare time? Thanks. Show me some output?

    Be sure to take careful notice of the ct_stack_mutex struct...

    C++20:
    ___________________________________
    // A Fun Mutex Pattern? Or, a Nightmare? Humm...
    // By: Chris M. Thomasson
    //___________________________________________________


    #include <iostream>
    #include <random>
    #include <numeric>
    #include <algorithm>
    #include <thread>
    #include <atomic>
    #include <mutex>
    #include <string>
    #include <semaphore>


    #define CT_WORKERS (100)
    #define CT_ITERS (1000000)


    static std::atomic<unsigned long> g_ct_work_alloc = { 0 };
    static std::atomic<unsigned long> g_ct_work_dealloc = { 0 };


    #define CT_UNLOCKED (nullptr)
    #define CT_CONTENTION (reinterpret_cast<ct_work*>(0xDEADBEEF))


    struct ct_work
    {
        std::atomic<ct_work*> m_next;
        std::string m_payload;

        ct_work(std::string const& payload)
            : m_next(nullptr),
              m_payload(payload)
        {
            g_ct_work_alloc.fetch_add(1, std::memory_order_relaxed);
        }

        ~ct_work()
        {
            g_ct_work_dealloc.fetch_add(1, std::memory_order_relaxed);
        }

        void
        dump() const
        {
            std::cout << "(" << this << ")->ct_work::m_payload = " <<
                m_payload << "\n";
        }
    };



    struct ct_stack_mutex
    {
        // m_head doubles as the lock word and the top of the work stack:
        // CT_UNLOCKED (nullptr) means unlocked, CT_CONTENTION means locked
        // with a sleeping waiter, and any other value is a pushed ct_work
        // node (which also implies locked).
        std::atomic<ct_work*> m_head = { CT_UNLOCKED };
        std::binary_semaphore m_wset{ 0 };


        // Push work and acquire the lock. Returns the chain of nodes this
        // thread gathered from m_head while acquiring (nullptr if none),
        // linked through ct_work::m_next.
        ct_work*
        lock(ct_work* work)
        {
            ct_work* self_work = m_head.exchange(work, std::memory_order_acquire);
            ct_work* local_work = nullptr;

            if (self_work != CT_UNLOCKED || self_work == CT_CONTENTION)
            {
                if (self_work != CT_CONTENTION)
                {
                    self_work->m_next.store(local_work, std::memory_order_relaxed);
                    local_work = self_work;
                }

                for (;;)
                {
                    self_work = m_head.exchange(CT_CONTENTION, std::memory_order_acquire);

                    if (self_work != CT_UNLOCKED && self_work != CT_CONTENTION)
                    {
                        self_work->m_next.store(local_work, std::memory_order_relaxed);
                        local_work = self_work;
                    }

                    if (self_work == CT_UNLOCKED)
                    {
                        break;
                    }

                    m_wset.acquire();
                }
            }

            else if (self_work != CT_UNLOCKED)
            {
                self_work->m_next.store(local_work, std::memory_order_relaxed);
                local_work = self_work;
            }

            return local_work;
        }

        // Release the lock. Returns the node sitting in m_head, or nullptr
        // if a contended waiter had to be woken instead.
        ct_work*
        unlock()
        {
            ct_work* self_work = m_head.exchange(CT_UNLOCKED);

            if (self_work == CT_CONTENTION)
            {
                m_wset.release();
                return nullptr;
            }

            return self_work;
        }
    };




    struct ct_shared
    {
        ct_stack_mutex m_mutex;
        unsigned long m_var0 = 0;
    };



    void
    ct_worker_entry(
        ct_shared& shared
    ) {
        //std::cout << "ct_worker_entry" << std::endl; // testing thread race for sure...

        {
            for (unsigned long i = 0; i < CT_ITERS; ++i)
            {
                ct_work* w0 = new ct_work("ct_work");

                ct_work* wlock = shared.m_mutex.lock(w0);

                {
                    shared.m_var0 += 2;
                }

                ct_work* wunlock = shared.m_mutex.unlock();

                while (wlock)
                {
                    ct_work* next = wlock->m_next.load(std::memory_order_relaxed);
                    delete wlock;
                    wlock = next;
                }

                while (wunlock)
                {
                    ct_work* next = wunlock->m_next.load(std::memory_order_relaxed);
                    delete wunlock;
                    wunlock = next;
                }
            }
        }
    }


    int main()
    {
        // Hello... :^)
        {
            std::cout << "Hello ct_fun_mutex... lol? ;^) ver:(0.0.0)\n";
            std::cout << "By: Chris M. Thomasson\n";
            std::cout << "____________________________________________________\n";
            std::cout.flush();
        }

        // Create our fun things... ;^)
        ct_shared shared = { };
        std::thread workers[CT_WORKERS] = { };

        // Launch...
        {
            std::cout << "Launching " << CT_WORKERS << " Threads...\n";
            std::cout.flush();

            for (unsigned long i = 0; i < CT_WORKERS; ++i)
            {
                workers[i] = std::thread(ct_worker_entry, std::ref(shared));
            }
        }

        // Join...
        {
            std::cout << "Joining Threads... (computing :^)\n";
            std::cout.flush();

            for (unsigned long i = 0; i < CT_WORKERS; ++i)
            {
                workers[i].join();
            }
        }

        // Sanity Check...
        {
            std::cout << "shared.m_var0 = " << shared.m_var0 << "\n";
            std::cout << "g_ct_work_alloc = " << g_ct_work_alloc.load(std::memory_order_relaxed) << "\n";
            std::cout << "g_ct_work_dealloc = " << g_ct_work_dealloc.load(std::memory_order_relaxed) << "\n";

            if (g_ct_work_alloc != g_ct_work_dealloc ||
                shared.m_var0 != CT_WORKERS * CT_ITERS * 2)
            {
                std::cout << "\nOh God damn it!!!! ;^o\n";
            }
        }

        // Fin...
        {
            std::cout << "____________________________________________________\n";
            std::cout << "Fin... :^)\n" << std::endl;
        }

        return 0;
    }
    ___________________________________
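
    As a rough illustration of the try_lock idea mentioned above (a hypothetical
    sketch only, not part of the listing and not verified in Relacy), the simplest
    shape would be an extra member of ct_stack_mutex that acquires only when the
    mutex is currently unlocked, publishing the caller's node exactly as lock()
    does; the "pop the stack while trying" behavior would need more machinery
    than this:

    ___________________________________
    // Hypothetical try_lock sketch (unverified): succeed only if m_head
    // is CT_UNLOCKED right now. On success our node becomes the new head,
    // just as in lock(), and unlock() pairs with it as usual. It does not
    // yet pop the stack on failure; a failed CAS pushes nothing, so the
    // caller still owns `work` and there is nothing to clean up.
    bool
    try_lock(ct_work* work)
    {
        ct_work* expected = CT_UNLOCKED;

        return m_head.compare_exchange_strong(
            expected,
            work,
            std::memory_order_acq_rel,
            std::memory_order_relaxed);
    }
    ___________________________________

    A failed try_lock leaves the head untouched, so there is no waiter to wake
    and no node list to free.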



    --- MBSE BBS v1.1.2 (Linux-x86_64)
    * Origin: A noiseless patient Spider (3:633/280.2@fidonet)
  • From Chris M. Thomasson@3:633/280.2 to All on Mon Aug 18 10:07:51 2025
    On 8/17/2025 4:51 PM, Chris M. Thomasson wrote:
    Fwiw, here is a total experiment of mine. I think it should work; I need
    to verify it in Relacy (no time right now), but it's a way to have a work
    stack and a critical section at the same time. I really need to add in a
    try_lock that pops the stack while trying to acquire the mutex. Anyway,
    can you give it a go in your spare time? Thanks. Show me some output?

    Be sure to take careful notice of the ct_stack_mutex struct...

    C++20:
    ___________________________________
    [...]
    ct_work*
    unlock()
    {
    ct_work* self_work = m_head.exchange(CT_UNLOCKED);


    ^^^^^^^^^^^^^^^^^^^^^^^^^^^

    UGGGG! That should be using std::memory_order_acq_rel; the default
    is seq_cst. Shit! GRRRR!!!!!!!!!!

    Sorry. I mean it will still work, but the membar of seq_cst is too
    strong. I think that acq_rel is in order because not only are we
    unlocking the mutex, but we are also flushing the atomic stack.


    if (self_work == CT_CONTENTION)
    {
    m_wset.release();
    return nullptr;
    }

    return self_work;
    }
    [...]
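
    In other words, a minimal sketch of unlock() with the suggested ordering
    (still unverified) would read:

    ___________________________________
    // Same logic as above; only the exchange drops from the default
    // seq_cst down to acq_rel: release for the critical section we are
    // leaving, acquire for the stack nodes we are flushing.
    ct_work*
    unlock()
    {
        ct_work* self_work =
            m_head.exchange(CT_UNLOCKED, std::memory_order_acq_rel);

        if (self_work == CT_CONTENTION)
        {
            m_wset.release();
            return nullptr;
        }

        return self_work;
    }
    ___________________________________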


    --- MBSE BBS v1.1.2 (Linux-x86_64)
    * Origin: A noiseless patient Spider (3:633/280.2@fidonet)
  • From Chris M. Thomasson@3:633/280.2 to All on Mon Aug 18 10:15:35 2025
    On 8/17/2025 4:51 PM, Chris M. Thomasson wrote:
    Fwiw, here is a total experiment of mine. I think it should work; I need
    to verify it in Relacy (no time right now), but it's a way to have a work
    stack and a critical section at the same time. I really need to add in a
    try_lock that pops the stack while trying to acquire the mutex. Anyway,
    can you give it a go in your spare time? Thanks. Show me some output?

    Be sure to take careful notice of the ct_stack_mutex struct...

    C++20:
    ___________________________________
    [...]
    struct ct_stack_mutex
    {
    std::atomic<ct_work*> m_head = { CT_UNLOCKED };
    std::binary_semaphore m_wset{ 0 };


    ct_work*
    lock(ct_work* work)
    {
    ct_work* self_work = m_head.exchange(work, std::memory_order_acquire);

    ^^^^^^^^^^^^

    Actually I think that should be acq_rel as well.


    ct_work* local_work = nullptr;

    if (self_work != CT_UNLOCKED || self_work == CT_CONTENTION)
    {
    if (self_work != CT_CONTENTION)
    {
    self_work->m_next.store(local_work, std::memory_order_relaxed);
    local_work = self_work;
    }

    for (;;)
    {
    self_work = m_head.exchange(CT_CONTENTION, std::memory_order_acquire);

    Ditto. Well, we are pushing ct_work nodes into the atomic stack and
    trying to lock the mutex at the same time, so acq_rel is in order.
    Humm... that is a bit of an expensive membar, so to speak, though it is
    still better than seq_cst. But humm...


    if (self_work != CT_UNLOCKED && self_work != CT_CONTENTION)
    {
    self_work->m_next.store(local_work, std::memory_order_relaxed);
    local_work = self_work;
    }

    if (self_work == CT_UNLOCKED)
    {
    break;
    }

    m_wset.acquire();
    }

    }

    else if (self_work != CT_UNLOCKED)
    {
    self_work->m_next.store(local_work, std::memory_order_relaxed);
    local_work = self_work;
    }

    return local_work;
    }

    Will code it up in Relacy, perhaps tonight.


    [...]
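
    Pulling the two notes together, lock() with the proposed acq_rel ordering
    would read as follows (structure unchanged, only the two memory orders
    differ; a sketch pending the Relacy run):

    ___________________________________
    // lock() as above, with both exchanges switched from acquire to
    // acq_rel: the release half publishes the node being pushed, the
    // acquire half synchronizes with the previous owner's unlock().
    ct_work*
    lock(ct_work* work)
    {
        ct_work* self_work = m_head.exchange(work, std::memory_order_acq_rel);
        ct_work* local_work = nullptr;

        if (self_work != CT_UNLOCKED || self_work == CT_CONTENTION)
        {
            if (self_work != CT_CONTENTION)
            {
                self_work->m_next.store(local_work, std::memory_order_relaxed);
                local_work = self_work;
            }

            for (;;)
            {
                self_work = m_head.exchange(CT_CONTENTION, std::memory_order_acq_rel);

                if (self_work != CT_UNLOCKED && self_work != CT_CONTENTION)
                {
                    self_work->m_next.store(local_work, std::memory_order_relaxed);
                    local_work = self_work;
                }

                if (self_work == CT_UNLOCKED)
                {
                    break;
                }

                m_wset.acquire();
            }
        }

        else if (self_work != CT_UNLOCKED)
        {
            self_work->m_next.store(local_work, std::memory_order_relaxed);
            local_work = self_work;
        }

        return local_work;
    }
    ___________________________________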


    --- MBSE BBS v1.1.2 (Linux-x86_64)
    * Origin: A noiseless patient Spider (3:633/280.2@fidonet)
  • From Bonita Montero@3:633/280.2 to All on Mon Aug 18 17:19:25 2025
    On 18.08.2025 at 01:51, Chris M. Thomasson wrote:
    Fwiw, here is a total experiment of mine. I think it should work; I need
    to verify it in Relacy (no time right now), but it's a way to have a work
    stack and a critical section at the same time. I really need to add in a
    try_lock that pops the stack while trying to acquire the mutex. Anyway,
    can you give it a go in your spare time? Thanks. Show me some output?

    Be sure to take careful notice of the ct_stack_mutex struct...


    Sorry, you still don't understand that your idea is complete nonsense
    because the "otherwise-task" could be completely omitted. And popping
    a stack alone isn't something meaningful.
    Find a paper that describes your idea. I'm pretty sure a lot of people
    had this idea and they've all withdrawn it because of the issues I
    mentioned.
    You seem to be manic. You're focussed on details and you don't see the
    abstraction levels above them, which make ideas like that completely
    gratuitous. Take care that it doesn't get worse.

    C++20:
    ___________________________________
    [...]




    --- MBSE BBS v1.1.2 (Linux-x86_64)
    * Origin: A noiseless patient Spider (3:633/280.2@fidonet)
  • From Chris M. Thomasson@3:633/280.2 to All on Tue Aug 19 05:37:16 2025
    On 8/18/2025 12:19 AM, Bonita Montero wrote:
    On 18.08.2025 at 01:51, Chris M. Thomasson wrote:
    Fwiw, here is a total experiment of mine. I think it should work; I need
    to verify it in Relacy (no time right now), but it's a way to have a work
    stack and a critical section at the same time. I really need to add in
    a try_lock that pops the stack while trying to acquire the mutex.
    Anyway, can you give it a go in your spare time? Thanks. Show me some
    output?

    Be sure to take careful notice of the ct_stack_mutex struct...


    Sorry, you still don't understand that your idea is complete nonsense
    because the "otherwise-task" could be completely omitted. And popping
    a stack alone isn't something meaningful.

    The act of popping from a stack is doing some work, right? I just wanted
    to see if I could combine a mutex and a stack for fun. Somebody might
    find it useful, but I don't know and don't really care because, well, it
    was for fun to begin with. Having discussions about it is also fun.


    Find a paper that describes your idea. I'm pretty sure a lot of people
    had this idea and they've all withdrawn it because of the issues I
    mentioned.

    Oh my. May I remind you that you have been totally wrong many times in
    the past about multiple things in our many discussions. You were so sure
    you were correct, but dead wrong! Can you remember some of them? Sigh.


    You seem to be manic.

    You are a nice guy... ;^o


    You're focussed on details and you don't see the abstraction levels
    above them, which make ideas like that completely gratuitous. Take care
    that it doesn't get worse.

    lol. I made this stack mutex hybrid for fun. Don't project on me.


    [...]


    --- MBSE BBS v1.1.2 (Linux-x86_64)
    * Origin: A noiseless patient Spider (3:633/280.2@fidonet)