import threading
from concurrent.futures import ThreadPoolExecutor

import const_define
import sys
from config_reader import Node, Config, DB
import json
import models
import math
from utils import debug_print, gen_record_id
from priority_queue import PriorityQueue
import time
from singleton import singleton
import logging
import const
from sqlalchemy.orm import sessionmaker
import sqlalchemy
from schedule import Worker
import traceback


# every progress from the start node to the final node is represented as a soldier with a node record
class Soldier:
    """Carries a NodeRecord through one execution step of the state machine."""

    def __init__(self, node_record):
        logging.info("[fsm: battle] soldier {} created".format(node_record.record_id))
        self.node_record = node_record

    def battle(self, context):
        """Execute the record's current node once and transition the record.

        Locks the record for the duration of the step, dispatches to the node
        implementation's do()/retry(), then advances to the jump/fail node or
        schedules a retry with exponential backoff, unlocks, and persists via
        sync().

        Raises:
            Exception: on an unknown node status or an unknown node result.
        """
        logging.info("[fsm: battle] soldier {} starts to battle".format(self.node_record.record_id))

        # check if the record is occupied by another executor
        if self.node_record.lock_time != 0:
            logging.warning("[fsm: battle] soldier {} is locked".format(self.node_record.record_id, ))
            return

        # BUG FIX: bail out on a PROCESSING record BEFORE taking the lock.
        # The original bumped exec_times, set lock_time, instantiated the node
        # class, and only then returned — without ever unlocking, leaving the
        # record locked (and re-queued) forever.
        if self.node_record.node_status == const.NODE_PROCESSING_STATUS:
            return

        self.node_record.exec_times += 1
        self.node_record.lock_time = int(time.time())

        logging.info("[fsm: battle] soldier {} battle in node {}".format(self.node_record.record_id,
                                                                         self.node_record.node.name))
        # NOTE(review): sys.modules.get(name) returns a *module*; calling it
        # only works if something callable was registered under that name in
        # sys.modules — confirm against the node-loading code.
        node_class = sys.modules.get(self.node_record.node.name)(context)
        if self.node_record.node_status == const.NODE_INIT_STATUS:
            result = node_class.do()
        elif self.node_record.node_status == const.NODE_RETRY_STATUS:
            result = node_class.retry()
        else:
            raise Exception(
                "[fsm: battle] new_node status error, new_node status is {}".format(
                    self.node_record.node_status))

        node_status_before = self.node_record.node_status

        if result == const.NODE_SUCCESS:
            # advance to the configured jump node and start it fresh
            self.node_record.node_status = const.NODE_INIT_STATUS
            self.node_record.node = Config().node_info[self.node_record.node.jump_node]
        elif result == const.NODE_RETRY:
            # stay on this node; back off before the next attempt
            self.node_record.node_status = const.NODE_RETRY_STATUS
            self.node_record.get_next_exectime()
        elif result == const.NODE_FAIL:
            # route to the configured failure node
            self.node_record.node_status = const.NODE_INIT_STATUS
            self.node_record.node = Config().node_info[self.node_record.node.fail_node]
        else:
            raise Exception(
                "[fsm: battle] new_node result error, new_node result is {}".format(result))

        logging.info("[fsm: battle] soldier {} battle complete, node_status_before: {}, node_status_after: {}"
                     .format(self.node_record.record_id, node_status_before, self.node_record.node_status))

        # unlock the record before syncing
        self.node_record.lock_time = 0
        # execution times also resets when the result becomes success
        if self.node_record.node_status == const.NODE_INIT_STATUS:
            self.node_record.exec_times = 0
        self.node_record.sync()


class NodeRecord:
    """Mutable progress record of one FSM run; mirrored into the DB by sync()."""

    def __init__(self, new_node, node_status):
        # current node in the state machine
        self.node = new_node
        # one of const.NODE_*_STATUS
        self.node_status = node_status
        # 0 means unlocked; otherwise the unix timestamp at which it was locked
        self.lock_time = 0
        # consecutive attempts on the current node (reset on success)
        self.exec_times = 0
        # unix timestamp of the earliest next execution
        self.next_exec_time = int(time.time())
        self.record_id = gen_record_id()

    def get_next_exectime(self):
        """Schedule the next attempt with exponential backoff (2**exec_times seconds)."""
        interval = int(math.pow(2, self.exec_times))
        self.next_exec_time = int(time.time()) + interval
        logging.info("[fsm: get_next_exectime] soldier {} next_exec_time is {}"
                     .format(self.record_id, self.next_exec_time))

    def sync(self):
        """Upsert this record into the DB, keyed by record_id.

        Raises:
            Exception: if more than one row exists for this record_id.
        """
        records = DB().session.query(models.FsmRecord).filter(models.FsmRecord.record_id == self.record_id).all()
        if len(records) == 1:
            # BUG FIX: reuse the row already fetched above — the original
            # issued the identical SELECT a second time via .first()
            record = records[0]
            record.cur_node = self.node.name
            record.lock_time = self.lock_time
            record.exec_times = self.exec_times
            record.next_exec_time = self.next_exec_time
            record.status = self.node_status
            DB().session.commit()
        elif len(records) == 0:
            # no row yet: insert a fresh one in local mode
            record = models.FsmRecord(
                record_id=self.record_id,
                cur_node=self.node.name,
                lock_time=self.lock_time,
                exec_times=self.exec_times,
                next_exec_time=self.next_exec_time,
                status=self.node_status,
                mode=const.LOCAL_MODE
            )
            DB().session.add(record)
            DB().session.commit()
        else:
            raise Exception("[fsm: sync] record {} exists more than one, error".format(self.record_id))


# the General's duty is to init every Soldier's configuration, and send the Soldier to battle.
# You can send one Soldier from the start new_node or from one new_node in the middle somewhere.


@singleton
class General:
    """Loads the FSM node config and starts the local/global push machinery."""

    def __init__(self, path):
        self.config = None
        self.start_node = None
        self.new_node = None
        const_define.Consts()
        self.config_path = path
        self.read_config(self.config_path)
        # enable local push
        start_local_push()
        # enable global push
        Worker()

    def read_config(self, file):
        """Parse the JSON config at *file* into Config() and remember the start node.

        Final nodes carry no jump/retry/fail targets, so they are appended
        and skipped before those keys are read.
        """
        self.config = Config()
        with open(file, 'r') as f:
            loaded_json_data = json.load(f)
            for node_config in loaded_json_data[const.NODES]:
                new_node = Node()
                new_node.name = node_config[const.NAME]
                new_node.type = node_config[const.TYPE]

                if new_node.type == const.FINAL_NODE:
                    self.config.nodes.append(new_node)
                    continue

                if new_node.type == const.START_NODE:
                    self.start_node = new_node

                new_node.jump_node = node_config[const.JUMP_NODE]
                new_node.retry_node = node_config[const.RETRY_NODE]
                new_node.fail_node = node_config[const.FAIL_NODE]
                self.config.nodes.append(new_node)
            self.config.max_workers = loaded_json_data[const.MAX_WORKER]
            self.config.check_config_params()
        # FIX: dropped the redundant f.close() — the with-block already
        # closed the file; closing it a second time was dead code.

    def bootstrap(self):
        """Create a fresh record at the start node, persist it, and queue it.

        NOTE(review): assumes the config declared a START_NODE, otherwise
        self.start_node is None — confirm check_config_params() enforces it.
        """
        new_node_record = NodeRecord(self.start_node, const.NODE_INIT_STATUS)
        new_node_record.sync()
        LocalPusher().local_queue.push(new_node_record, new_node_record.next_exec_time)


def push_soldier(node_record):
    """Run one battle step for *node_record* and reschedule or hand off.

    On success the record is pushed back onto the local queue unless it has
    reached a final node. On any exception the record's DB row is flipped to
    GLOBAL_MODE so the global pusher picks it up.
    """
    veteran_soldier = Soldier(node_record)
    context = {'node_name': node_record.node.name}
    try:
        veteran_soldier.battle(context)
        if veteran_soldier.node_record.node.type == const.FINAL_NODE:
            logging.info("[fsm: battle] soldier {} completed battle in node {}"
                         .format(veteran_soldier.node_record.record_id,
                                 veteran_soldier.node_record.node.name))
        else:
            LocalPusher().local_queue.push(veteran_soldier.node_record, veteran_soldier.node_record.next_exec_time)
    except Exception as e:
        logging.error("[fsm: battle] soldier {} error: {}, throwing to global pusher"
                      .format(veteran_soldier.node_record.record_id, e))
        # if error, update the record's mode into global mode
        DB().session.query(models.FsmRecord).filter(models.FsmRecord.record_id == node_record.record_id) \
            .update({'mode': const.GLOBAL_MODE})
        # BUG FIX: commit the mode flip — every other DB write in this file
        # commits explicitly; without it the hand-off never persisted and the
        # global pusher never saw the record.
        DB().session.commit()


def start_local_push():
    """Spawn the daemon thread that drains the local push queue forever."""
    logging.info("[fsm: local_push] local push thread begins to work in loop")
    pusher_thread = threading.Thread(target=LocalPusher().loop_work, daemon=True)
    pusher_thread.start()


@singleton
class LocalPusher:
    """Singleton owning the in-process priority queue of pending node records."""

    def __init__(self):
        # queue is ordered by next_exec_time; the pusher keeps its own DB session
        self.local_queue = PriorityQueue()
        self.engine = sqlalchemy.create_engine(DB().engine_url, echo=False)
        self.session = sessionmaker(bind=self.engine)()

    def loop_work(self):
        """Poll the queue forever, dispatching each record once its time has come."""
        while True:
            if self.local_queue.len() == 0:
                logging.debug("[fsm: local_push] local push queue is empty, sleep 1 second")
                time.sleep(1)
                continue
            pending = self.local_queue.top()
            if pending.next_exec_time > int(time.time()):
                # head of the queue is the earliest record; nothing else is due either
                logging.info("[fsm: local_push] local push queue top is not ready, sleep 1 second")
                time.sleep(1)
                continue
            self.local_queue.pop()
            logging.info("[fsm: local_push] local push queue is not empty, get the first record {}"
                         .format(pending.record_id))
            push_soldier(pending)
            time.sleep(1)


def start_global_push(sharding_range_start=0, sharding_range=100):
    """Spawn the daemon thread that drives GlobalPusher.loop_work.

    BUG FIX: the original started the thread with no args, but loop_work
    requires (sharding_range_start, sharding_range) positionally, so the
    thread died immediately with a TypeError. The defaults cover the
    single-instance case; TODO confirm the intended shard size.
    """
    logging.info("[fsm: global_push] global push thread begins to work in loop")
    threading.Thread(target=GlobalPusher().loop_work,
                     args=(sharding_range_start, sharding_range),
                     daemon=True).start()


@singleton
class GlobalPusher:
    """Singleton that re-drives GLOBAL_MODE records straight from the DB."""

    def __init__(self):
        # dedicated engine/session so the pusher thread does not share DB()'s session
        self.engine = sqlalchemy.create_engine(DB().engine_url, echo=False)
        self.session = sessionmaker(bind=self.engine)()

    def loop_work(self, sharding_range_start, sharding_range):
        """Poll the DB forever for due, unlocked global-mode records in this shard.

        Args:
            sharding_range_start: row offset where this instance's shard begins.
            sharding_range: number of rows this instance is responsible for.
        """
        logging.info("[fsm: global_push] global push thread begins to work in loop, starting from {} to {}"
                     .format(sharding_range_start, sharding_range_start + sharding_range))
        while True:
            # only take records starting from sharding_range_start with a range of sharding_range
            # BUG FIX: materialize with .all() — len() is undefined on a lazy
            # Query, and iterating twice would re-run the SELECT.
            # BUG FIX: the column is `mode` (see sync()/push_soldier), not `Mode`.
            records = self.session.query(models.FsmRecord).filter(models.FsmRecord.lock_time == 0,
                                                                  models.FsmRecord.next_exec_time < int(time.time()),
                                                                  models.FsmRecord.status != const.NODE_INIT_STATUS,
                                                                  models.FsmRecord.mode == const.GLOBAL_MODE) \
                .limit(sharding_range).offset(sharding_range_start).all()
            # end the transaction so the next poll sees fresh rows
            self.session.commit()
            for record in records:
                debug_print(record.record_id, record.status, record.next_exec_time, record.lock_time)

            if len(records) == 0:
                logging.debug("[fsm: global_push] no records for global pushing, sleep 1 second")
                time.sleep(1)
                continue

            for record in records:
                node = Config().node_info[record.cur_node]
                if node.type == const.FINAL_NODE:
                    # nothing left to execute for a finished run
                    continue
                # rebuild an in-memory NodeRecord from the DB row, preserving
                # its identity and scheduling state
                node_record = NodeRecord(node, record.status)
                node_record.lock_time = record.lock_time
                node_record.exec_times = record.exec_times
                node_record.next_exec_time = record.next_exec_time
                node_record.record_id = record.record_id
                push_soldier(node_record)

            time.sleep(1)