2
\$\begingroup\$

Elm is a pure functional language for the front-end. It enforces an architecture that allows programs to stay pure in an event-based setting.

This simple header-only library implements a variant of the Elm Architecture for C++17. Its small footprint and simplicity make it easy to understand and quick to get started with.

Features

The architecture supports running commands in parallel as asynchronous tasks, as well as in an immediate fashion.

Example Program

This example utilizes both direct and deferred action modes. It initializes by immediately increasing a counter, then it manipulates the counter at a later time by deferring commands for later execution. To demonstrate the asynchronicity of the library, the user can also enter a number in order to increase or decrease the counter.

#include "elm-architecture/elm-architecture.hpp"

#include <iostream>

namespace elm = elm_architecture;

// Model

// Application state for the example: a single integer counter.
struct model_type {
    int counter = 0;
};

// Msg

// Bump the counter up by one.
struct increase {};

// Bump the counter down by one.
struct decrease {};

// Add a user-supplied amount to the counter.
struct user_increase {
    int value;
};

// The closed set of messages the update function can receive.
using message_type = std::variant<increase, decrease, user_increase>;

// Deferred command: after sleeping for `delay` on a background thread,
// resolve to an `increase` message.
std::shared_future<message_type>
delayed_increase(std::chrono::milliseconds delay) {
    auto produce = [delay]( ) -> message_type {
        std::this_thread::sleep_for(delay);
        return increase {};
    };
    return std::async(std::launch::async, produce).share( );
}

// Deferred command: after sleeping for `delay` on a background thread,
// resolve to a `decrease` message.
std::shared_future<message_type>
delayed_decrease(std::chrono::milliseconds delay) {
    auto produce = [delay]( ) -> message_type {
        std::this_thread::sleep_for(delay);
        return decrease {};
    };
    return std::async(std::launch::async, produce).share( );
}

// Deferred command: block a background thread on std::cin and resolve
// to a `user_increase` carrying the number the user typed.
std::shared_future<message_type>
ask_user( ) {
    auto prompt = []( ) -> message_type {
        int amount = 0;
        std::cin >> amount;
        return user_increase {amount};
    };
    return std::async(std::launch::async, prompt).share( );
}

// Update

struct update_fn {
    using return_type = elm::return_type<model_type, message_type>;

    // Bump the counter up by one; schedules no follow-up commands.
    static auto
    update(const model_type& mod, const increase&) -> return_type {
        model_type updated = mod;
        updated.counter += 1;
        std::cout << "Increasing counter from " << mod.counter << " to " << updated.counter << std::endl;
        return {updated, {}};
    }

    // Bump the counter down by one; schedules no follow-up commands.
    static auto
    update(const model_type& mod, const decrease&) -> return_type {
        model_type updated = mod;
        updated.counter -= 1;
        std::cout << "Decreasing counter from " << mod.counter << " to " << updated.counter << std::endl;
        return {updated, {}};
    }

    // Apply the user's amount, then immediately re-issue the prompt so
    // the program keeps accepting input.
    static auto
    update(const model_type& mod, const user_increase& msg) -> return_type {
        model_type updated = mod;
        updated.counter += msg.value;
        std::cout << "User increasing counter from " << mod.counter << " to " << updated.counter << std::endl;
        return {updated, {ask_user( )}};
    }
};

// Event Loop

int
main( ) {
    elm::start_eventloop<model_type, message_type, update_fn>({
        increase {},
        delayed_increase(std::chrono::milliseconds {1500}),
        delayed_decrease(std::chrono::milliseconds {1000}),
        delayed_increase(std::chrono::milliseconds {400}),
        ask_user( ),
    });
}

The library

#pragma once

#include <algorithm>
#include <chrono>
#include <deque>
#include <future>
#include <iterator>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>

namespace elm_architecture {

// A command is either a deferred message (a shared_future that will
// eventually yield a Msg) or a Msg to be applied immediately.
// NOTE: `Model` is not used here; it is kept so the alias's interface
// stays unchanged for existing callers.
template <typename Model, typename Msg>
using command_type = std::variant<std::shared_future<Msg>, Msg>;

// The return type for the update functions: the new model and a list
// of follow-up commands to execute afterwards.
template <typename Model, typename Msg>
using return_type = std::tuple<Model, std::vector<command_type<Model, Msg>>>;

// Start the event loop with a given list of initial commands and run
// it until no work remains: immediate messages are folded into the
// model via Update::update, deferred messages are polled until ready
// and then applied. Update must provide
//   static return_type<Model, Msg> update(const Model&, const M&)
// overloads for every alternative M of Msg.
template <typename Model, typename Msg, typename Update>
auto
start_eventloop(const std::vector<command_type<Model, Msg>>& init = {}) {
    auto                                 model = Model {};
    std::deque<command_type<Model, Msg>> pending{init.begin(), init.end()};
    std::vector<std::shared_future<Msg>> in_progress;

    while(!pending.empty( ) || !in_progress.empty( )) {
        // Step One: drain the pending queue. Futures are parked in
        // `in_progress`; immediate messages are applied right away and
        // any commands they produce are queued behind the rest.
        while(!pending.empty( )) {
            // Take ownership of the front command before processing so
            // appending follow-up commands cannot interact with it.
            auto item = std::move(pending.front( ));
            pending.pop_front( );
            if(std::holds_alternative<std::shared_future<Msg>>(item)) {
                in_progress.push_back(std::get<std::shared_future<Msg>>(std::move(item)));
            } else {
                const auto visitor = [&model](const auto& msg) { return Update::update(model, msg); };
                auto [next_model, commands] = std::visit(visitor, std::get<Msg>(item));
                // Move (not copy) the follow-up commands and the new
                // model into place.
                std::move(commands.begin( ), commands.end( ), std::back_inserter(pending));
                model = std::move(next_model);
            }
        }

        // Step Two: nothing is immediately runnable, so poll the
        // in-flight futures. As soon as one is ready, move its message
        // into `pending` and go around again; otherwise the outer loop
        // simply polls once more.
        for(auto future = in_progress.begin( ); future != in_progress.end( ); ++future) {
            if(future->wait_for(std::chrono::milliseconds {1}) == std::future_status::ready) {
                pending.push_back(future->get( ));
                in_progress.erase(future);
                break;
            }
        }
    }
}
} // namespace elm_architecture

This project can also be found on GitHub.

\$\endgroup\$

0

You must log in to answer this question.

Start asking to get answers

Find the answer to your question by asking.

Ask question

Explore related questions

See similar questions with these tags.