// Copyright (c) 2021 RonxBulld
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

#ifndef YAMQ_YAMQ_H
#define YAMQ_YAMQ_H

#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <functional>
#include <future>
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <queue>
#include <set>
#include <stdexcept>
#include <string>
#include <thread>
#include <type_traits>
#include <vector>

namespace yamq {
    /// String->string dictionary with a non-mutating read path: looking a key
    /// up through a const KVdb never inserts; a missing key yields "".
    class KVdb : public std::map<std::string, std::string> {
    private:
        const std::string empty_holder_;  // returned for absent keys on const lookup
    public:
        /// Read-only lookup. Unlike std::map::operator[], this never inserts;
        /// absent keys map to the empty string.
        const std::string &operator[](const std::string &key) const {
            const auto it = this->find(key);
            return (it == this->end()) ? empty_holder_ : it->second;
        }
        /// Mutable access: delegates to std::map, default-inserting on miss.
        std::string &operator[](const std::string &key) {
            return std::map<std::string, std::string>::operator[](key);
        }
    };

    /// Fixed-size (optionally self-growing) worker pool. Tasks are queued via
    /// enqueue() and run FIFO on the first free worker; results are delivered
    /// through std::future. Destruction drains the queue, joins all workers,
    /// and prints a utilization report to stdout.
    class ThreadPool {
    private:
        // Spawn one worker thread. Safe to call from a worker only while
        // queue_mutex_ is held (the auto-extend path does exactly that).
        void IncreaseWorker() {
            workers_.emplace_back([this]{
              while (true) {
                  std::function<void()> task;
                  {
                      std::unique_lock<std::mutex> lock(this->queue_mutex_);
                      cv_.wait(lock, [this] { return this->stop_ || !this->tasks_queue_.empty(); });
                      if (this->stop_ && this->tasks_queue_.empty()) {
                          return;  // queue drained and shutdown requested
                      }
                      task = std::move(this->tasks_queue_.front());
                      this->tasks_queue_.pop();
                      // Auto-extend: grow by 50% (at least 3 workers) when
                      // nearly every worker is busy. Written as `used + 3 >=
                      // size` to avoid the unsigned underflow of `size - 3`
                      // on pools smaller than 3. Never extend once stop_ is
                      // set: NotifyAllExit iterates workers_ without this
                      // mutex, so appending here would race with the joins.
                      if (this->auto_extend_ && !this->stop_ &&
                          (this->worker_used_.load() + 3 >= this->workers_.size())) {
                          const auto grow = unsigned(std::max((float)this->workers_.size() * 0.5f, 3.0f));
                          for (unsigned idx = 0; idx < grow; ++idx) {
                              this->IncreaseWorker();
                          }
                      }
                      // Record peak concurrency (approximate: worker_used_
                      // moves outside this lock, so this is a high-water hint).
                      this->worker_max_used_.store(std::max(worker_max_used_.load(), this->worker_used_.load() + 1));
                  }
                  this->worker_used_++;
                  task();
                  this->worker_used_--;
              }
            });
        }
    public:
        /// @param max_threads initial number of worker threads.
        /// @param auto_extend grow the pool automatically under load.
        inline explicit ThreadPool(size_t max_threads, bool auto_extend = false)
                : stop_(false), auto_extend_(auto_extend) {
            worker_used_.store(0);
            worker_max_used_.store(0);
            for (size_t i = 0; i < max_threads; ++i) {
                IncreaseWorker();
            }
        }

        /// Schedule a callable with its arguments; returns a future for the
        /// result.
        /// @throws std::runtime_error if the pool has been stopped — the old
        /// behavior silently dropped the task and handed back a future that
        /// could only ever fail with broken_promise.
        template <typename F, typename ... Args, typename R = typename std::result_of<F(Args...)>::type>
        inline std::future<R> enqueue(F &&f, Args && ... args) {
            auto task = std::make_shared<std::packaged_task<R()>>
                    (std::bind(std::forward<F>(f), std::forward<Args>(args)...));
            std::future<R> res = task->get_future();
            {
                std::unique_lock<std::mutex> lock(queue_mutex_);
                if (stop_) {
                    throw std::runtime_error("enqueue on stopped ThreadPool");
                }
                tasks_queue_.emplace([task](){ (*task)(); });
            }
            cv_.notify_one();
            return res;
        }

        /// Request shutdown and join every worker. Idempotent: joinable() is
        /// checked so a second call (e.g. explicit call followed by the
        /// destructor) no longer double-joins and throws std::system_error.
        inline void NotifyAllExit() {
            {
                std::unique_lock<std::mutex> lock(queue_mutex_);
                stop_ = true;
            }
            cv_.notify_all();
            for (std::thread &worker : workers_) {
                if (worker.joinable()) {
                    worker.join();
                }
            }
        }

        inline ~ThreadPool() {
            NotifyAllExit();
            // Preserved from the original: utilization report on stdout.
            std::cout << Report() << std::endl;
        }

        /// Peak-concurrency summary, e.g. "Worker used rate: 4 / 8".
        inline std::string Report() const {
            std::string report;
            report.append("Worker used rate: ")
                  .append(std::to_string(worker_max_used_.load()))
                  .append(" / ")
                  .append(std::to_string(workers_.size()));
            return report;
        }

    private:
        std::vector<std::thread> workers_;            // worker threads (joined on exit)
        std::queue<std::function<void()>> tasks_queue_;
        std::mutex queue_mutex_;                      // guards tasks_queue_, stop_, workers_ growth
        std::condition_variable cv_;
        bool stop_;                                   // set once under queue_mutex_
        bool auto_extend_;
        std::atomic_uint worker_used_{}, worker_max_used_{};
    };

    class PubSub;
    class ObserverBase;

    /// Base class for message receivers. Tracks every PubSub hub it is
    /// registered with so destruction can detach itself, and gates callback
    /// execution through `runable_` so a disabled observer is never invoked.
    class ObserverBase {
    private:
        std::set<PubSub *> pubsub_set_;   // hubs this observer is registered with
        std::mutex pssmtx_, rmtx_;        // pssmtx_ guards pubsub_set_; rmtx_ serializes Invoke
        std::atomic_bool runable_{true};  // cleared to permanently disable callbacks
        using lg = std::lock_guard<std::mutex>;
    private:
        // Defined out of line (after PubSub) because PubSub is incomplete here.
        void NotifyStop(PubSub *pub_sub) ;
        // Asks every registered hub to drop us. Caller must hold pssmtx_.
        void NotifyAllStop() {
            for (PubSub *ps : pubsub_set_) {
                NotifyStop(ps);
            }
        }
    public:
        // Entry point used by PubSub's dispatch threads: runs Invoke() under
        // rmtx_ unless the observer has been disabled.
        void InvokeWrapper(const KVdb &kvdb) {
            lg run_lock(rmtx_);
            if (runable_.load()) {
                this->Invoke(kvdb);
            }
        }
        // Message handler supplied by subclasses.
        virtual void Invoke(const KVdb &kvdb) = 0 ;

        void RegisterPubSub(PubSub &pub_sub) {
            lg lock(pssmtx_);
            pubsub_set_.insert(&pub_sub);
        }
        void UnregisterPubSub(PubSub *pub_sub) {
            lg lock(pssmtx_);
            pubsub_set_.erase(pub_sub);
        }

        // Detach from one hub: tell it to drop us, then forget it.
        // NOTE(review): this path locks pssmtx_ and then (via NotifyStop ->
        // PubSub::TerminalOffline) the hub's pmtx_, while ~PubSub holds pmtx_
        // and then calls UnregisterPubSub (pssmtx_). The two lock orders are
        // inverted, so a concurrent Disconnect and PubSub destruction could
        // deadlock — confirm teardown is intended to be single-threaded.
        void Disconnect(PubSub *pub_sub) {
            lg lock(pssmtx_);
            NotifyStop(pub_sub);
            pubsub_set_.erase(pub_sub);
        }
        void DisconnectAll() {
            lg lock(pssmtx_);
            NotifyAllStop();
            pubsub_set_.clear();
        }
        // Blocks until any in-flight Invoke finishes, then disables delivery.
        void WaitAndDisable() {
            lg run_lock(rmtx_);
            runable_.store(false);
        }
        virtual ~ObserverBase() {
            DisconnectAll();
            // Best-effort guard: briefly acquire rmtx_ if free so the disable
            // is ordered after a finished callback; either way clear the flag
            // before the derived vtable is torn down. NOTE(review): if a
            // callback is still running when try_lock fails, destruction
            // proceeds anyway — a racing InvokeWrapper could still touch a
            // partially-destroyed object; verify callers quiesce first.
            bool locked = rmtx_.try_lock();
            runable_.store(false);
            if (locked) rmtx_.unlock();
        }
    };

    /// Adapts an arbitrary callable taking (const KVdb &) to the
    /// ObserverBase interface so plain lambdas/functors can subscribe.
    template <typename Fn>
    class GenericObserver : public ObserverBase {
    public:
        explicit GenericObserver(Fn callable) : callable_(callable) {}
        /// Delivers a message by invoking the wrapped callable.
        void Invoke(const KVdb &kvdb) override {
            callable_(kvdb);
        }
        ~GenericObserver() override = default;
    private:
        Fn callable_;  // the wrapped subscriber callback
    };

    class PubSub {
    private:
        std::set<std::shared_ptr<ObserverBase>> created_inside_;
        std::map<std::string, std::set<ObserverBase *>> pool_;
        std::mutex pmtx_;
        using lg = std::lock_guard<std::mutex>;
        ThreadPool *publish_thp_;
    private:
        void NotifyAllStop() ;
        template <bool IsObserver> struct subscriber ;
    public:
        PubSub() {
            publish_thp_ = new ThreadPool(32);
        }
        ~PubSub() {
            lg lock(pmtx_);

            delete publish_thp_;
            publish_thp_ = nullptr;

            NotifyAllStop();
            pool_.clear();
            created_inside_.clear();
        }
        void TerminalOffline(ObserverBase *terminal) {
            lg lock(pmtx_);
            for (auto &item : pool_) {
                item.second.erase(terminal);
            }
        }
        template <typename T>
        void Subscribe(const std::string &uri, T && pred) {
            lg lock(pmtx_);
            using PredDecay = typename std::decay<T>::type;
            ObserverBase *observer =
                    subscriber<std::is_convertible<PredDecay *, ObserverBase*>::value>::getPtr(*this, pred);
            if (observer) {
                pool_[uri].insert(observer);
                observer->RegisterPubSub(*this);
            }
        }
        void Publish(const std::string &uri, const KVdb &msg) {
            lg lock(pmtx_);
            auto slot_it = pool_.find(uri);
            if (slot_it != pool_.end()) {
                for (ObserverBase *ob : slot_it->second) {
                    publish_thp_->enqueue([ob, msg, this](){
                        ob->InvokeWrapper(msg);
                    });
                }
            }
        }
    };

    // Dispatch helper chosen by Subscribe(): IsObserver == true when the
    // subscribed object is (a pointer-convertible) ObserverBase.

    // Observer case: register the caller-owned observer by address; the
    // PubSub takes no ownership.
    template <> struct PubSub::subscriber<true> {
        static ObserverBase *getPtr(PubSub &pubsub, ObserverBase &observer) {
            return &observer;
        }
    };
    // Plain-callable case: wrap the callable in a GenericObserver kept alive
    // by pubsub.created_inside_ (shared ownership inside the hub).
    // NOTE(review): if Subscribe passes its parameter as an lvalue, Callable
    // deduces to a reference type and GenericObserver<Callable> stores a
    // reference to the caller's callable — dangling when the caller passed a
    // temporary. Verify the callable outlives the subscription.
    template <> struct PubSub::subscriber<false> {
        template <typename Callable>
        static ObserverBase *getPtr(PubSub &pubsub, Callable && pred) {
            std::shared_ptr<ObserverBase> generic_ob =
                    std::make_shared<GenericObserver<Callable>>(std::forward<Callable>(pred));
            pubsub.created_inside_.insert(generic_ob);
            return generic_ob.get();
        }
    };

    // Out of line because PubSub must be complete before calling into it.
    // `inline` added: a non-inline function definition at namespace scope in
    // a header violates the ODR (multiple-definition link error) as soon as
    // the header is included from more than one translation unit.
    inline void ObserverBase::NotifyStop(PubSub *pub_sub) {
        pub_sub->TerminalOffline(this);
    }
    // Detach every observer from this hub and empty all topic slots; the
    // destructor calls this while holding pmtx_.
    // `inline` added: a non-inline function definition at namespace scope in
    // a header violates the ODR (multiple-definition link error) as soon as
    // the header is included from more than one translation unit.
    inline void PubSub::NotifyAllStop() {
        for (auto &slot : pool_) {
            for (ObserverBase *ob : slot.second) {
                ob->UnregisterPubSub(this);
            }
            slot.second.clear();
        }
    }

}

#endif//YAMQ_YAMQ_H
