# -*- coding: utf-8 -*-
import json
import os
import random
import subprocess
import time
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple

import numpy as np
from tqdm import trange

from log import Logger
from utils.enums import SystemType, TuningAction

# Tunable kernel parameters grouped by sysctl namespace; the full sysctl name
# is "<namespace>.<param>" (e.g. "vm.swappiness", "net.core.rmem_max").
TUNING_PARAMETER = {
    "vm": ["swappiness", "dirty_background_ratio", "dirty_ratio", "dirty_expire_centisecs",
           "dirty_writeback_centisecs"],
    "kernel": ["shmmax", "shmall", "shmmni"],
    "fs": ["file-max", "aio-max-nr"],
    "net": ["core.rmem_default", "core.rmem_max", "core.wmem_default", "core.wmem_max"],
}
# Action space; the list order fixes the Q-table column index of each action.
ACTIONS = [TuningAction.INCREASE, TuningAction.UNCHANGED, TuningAction.DECREASE]


class Tuner(metaclass=ABCMeta):
    """Abstract base class for OS/kernel tuners.

    Adjusts sysctl parameters (see ``TUNING_PARAMETER``) either with a plain
    strategy or with a tabular Q-learning policy, benchmarks the effect of
    every change, logs each modification, and can roll changes back from that
    log. Subclasses implement the benchmark hooks and the plain strategies.
    """

    def __init__(self):
        self.logger = Logger(__name__).get_logger()
        # Writing sysctl values requires root; bail out early otherwise.
        if os.getuid() != 0:
            # Use error(), not exception(): we are not inside an except block,
            # so exception() would log a spurious "NoneType: None" traceback.
            self.logger.error("若开启调优模式，终端需要开启root权限")
            raise SystemExit(1)
        self.timer = time.time()
        # |performance change| (%) separating minor from major reward tiers.
        self.blunder_threshold = 5
        # Number of tunable parameters per sysctl namespace.
        self.upper_limit = {key: len(params) for key, params in TUNING_PARAMETER.items()}

        cfg_file = "tuning.json"
        path = os.path.join(os.path.abspath("."), "config", cfg_file)
        with open(path, "r", encoding="utf-8") as f:
            cfg = json.load(f)
        self.limit = cfg["limit"]  # per-parameter [lower, upper] bounds
        self.delta = cfg["delta"]  # per-parameter adjustment step size
        self.alpha = cfg["qlearning"]["alpha"]      # learning rate
        self.gamma = cfg["qlearning"]["gamma"]      # discount factor
        self.epsilon = cfg["qlearning"]["epsilon"]  # exploration rate
        self.epochs = cfg["qlearning"]["epochs"]    # number of training iterations
        self.rewards = cfg["qlearning"]["rewards"]  # reward values used during Q-learning

        # Flatten TUNING_PARAMETER: each parameter owns one Q-table row;
        # start_index maps a namespace to the row of its first parameter.
        state_num = 0
        self.start_index = {}
        for key, val in TUNING_PARAMETER.items():
            self.start_index[key] = state_num
            state_num += len(val)

        self.q_table = np.zeros((state_num, len(ACTIONS)))
        # NOTE(review): config is read relative to "." but logs are written
        # under ".." — presumably intentional, but worth confirming.
        self.log_dir_path = os.path.join(os.path.abspath(".."), "log", "tuning")
        os.makedirs(self.log_dir_path, exist_ok=True)

    @staticmethod
    def query(sys_param: str) -> int:
        """Return the current integer value of a sysctl parameter.

        :param sys_param: fully qualified name, e.g. "vm.swappiness"
        """
        out = subprocess.run(["sysctl", sys_param], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out = str(out.stdout, encoding='utf-8')
        # sysctl prints "name = value"; int() tolerates surrounding whitespace.
        return int(out.split("=")[-1])

    @staticmethod
    def modify(sys_param: str, value: int) -> bool:
        """Write ``value`` to a sysctl parameter.

        :return: True when sysctl echoes "name = value" back, i.e. the write
                 was accepted by the kernel.
        """
        complete = "=".join([sys_param, str(value)])
        out = subprocess.run(["sysctl", "-w", complete], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out = str(out.stdout, encoding='utf-8').strip()
        return out == " ".join([sys_param, "=", str(value)])

    @staticmethod
    def evaluate_cpu() -> float:
        """Evaluate the CPU performance change after tuning (not implemented yet)."""
        pass

    @staticmethod
    def evaluate_disk(last: List[float], now: List[float]) -> float:
        """Score the disk-performance change between two iostat samples.

        Throughput columns should grow, latency/queue columns should shrink:
            0:r/s, 1:rkB/s, 6:w/s, 7:wkB/s, 12:d/s, 13:dkB/s  -> bigger is better
            4:r_await, 10:w_await, 16:d_await, 18:aqu-sz      -> smaller is better

        :return: average column delta scaled by 100 (positive = improvement)
        """
        higher_is_better = (0, 1, 6, 7, 12, 13)
        lower_is_better = (4, 10, 16, 18)
        deltas = [now[i] - last[i] for i in higher_is_better]
        deltas += [last[i] - now[i] for i in lower_is_better]
        return sum(deltas) / len(deltas) * 100

    @staticmethod
    def evaluate_mem(last: Tuple[float, float, float], now: Tuple[float, float, float]) -> float:
        """Score the memory change as the relative growth (%) of free swap.

        Index 1 of each sample is the free swap figure. Assumes last[1] != 0;
        a fully exhausted swap sample would raise ZeroDivisionError.
        """
        free_swap_delta = now[1] - last[1]
        return (free_swap_delta / last[1]) * 100

    @staticmethod
    def evaluate_net() -> float:
        """Evaluate the network performance change after tuning (not implemented yet)."""
        pass

    def log(self, is_rollback: bool, sys_param: Optional[str] = None, value: Optional[int] = None,
            file: str = "tuning.log") -> None:
        """Append one "type - param - value" record for a parameter change.

        Rollback records carry no parameter/value and are logged as "blank".
        """
        with open(os.path.join(self.log_dir_path, file), "a", encoding="utf-8") as f:
            log_type = "rollback" if is_rollback else "writein"
            sys_param = "blank" if sys_param is None else sys_param
            value = "blank" if value is None else value
            f.write(" - ".join([log_type, sys_param, str(value)]) + "\n")

    def rollback(self, last: int, file: str = "tuning.log") -> None:
        """Undo the most recent ``last`` parameter writes recorded in the log.

        Walks the log backwards; every "rollback" record already present
        cancels one earlier "writein" record, which is then skipped. For each
        write that is undone, the logged delta is subtracted from the current
        sysctl value and a "rollback" record is appended.

        :param last: number of writes to undo; when larger than the remaining
                     writes, rollback simply stops at the original state
        :raises FileNotFoundError: when the tuning log does not exist
        """
        try:
            with open(os.path.join(self.log_dir_path, file), "r", encoding="utf-8") as f:
                lines = f.readlines()
        except FileNotFoundError:
            raise FileNotFoundError("Tuning log not found.")

        num = 0       # writes undone so far (target: `last`)
        roll_num = 0  # pending cancellations from previously logged rollbacks
        for line in reversed(lines):
            [label, sys_param, value] = line.split(" - ")
            if label == "rollback":
                roll_num += 1
                continue
            if roll_num != 0:
                # This write was already undone by a later rollback record.
                roll_num -= 1
                continue

            now = self.query(sys_param)
            now = now - int(value)  # the logged value is a (possibly negative) delta
            ok = self.modify(sys_param, now)
            if not ok:
                self.logger.error("Modify system parameter failed.")
                raise SystemExit(1)

            num += 1
            self.log(is_rollback=True)
            if num == last:
                break

    def _clamp(self, key: str, param: str, value: int) -> int:
        """Clamp ``value`` into the configured [lower, upper] bounds of key.param."""
        lower = self.limit[key][param][0]
        upper = self.limit[key][param][1]
        return max(lower, min(upper, value))

    def _reward(self, performance_change: float) -> float:
        """Map a benchmarked performance change (%) onto a configured reward tier."""
        if performance_change < -self.blunder_threshold:
            return self.rewards["worsen_super"]
        if performance_change < 0:
            return self.rewards["worsen"]
        if performance_change < self.blunder_threshold:
            return self.rewards["enhance"]
        return self.rewards["enhance_super"]

    def q_learning_system_layer_tune(self) -> None:
        """Tune kernel parameters with tabular Q-learning.

        Training: each epoch samples a random parameter, picks an action
        epsilon-greedily, applies the clamped change, and reinforces the
        Q-table with the benchmarked reward. Application: afterwards, the best
        learned action is applied once for every parameter.
        """
        self.logger.info("Start building q-learning table...")
        for _ in trange(self.epochs):
            # Sample a random (namespace, parameter) pair to explore.
            key = random.choice(list(TUNING_PARAMETER.keys()))
            params = TUNING_PARAMETER[key]
            offset = random.randrange(0, len(params))
            param = params[offset]
            index = self.start_index[key] + offset  # Q-table row of this parameter
            delta = self.delta[key][param]

            # Epsilon-greedy: explore with probability epsilon, else exploit.
            if random.uniform(0, 1) < self.epsilon:
                action: TuningAction = random.choice(ACTIONS)
            else:
                action: TuningAction = ACTIONS[np.argmax(self.q_table[index])]

            full_name = ".".join([key, param])
            cur = self.query(full_name)
            update_val = self._clamp(key, param, cur + action.value * delta)
            self.modify(full_name, update_val)

            time.sleep(5)  # throttle so benchmark threads do not pile up
            reward = self._reward(self.benchmark(None, is_qlearning=True))
            action_index = ACTIONS.index(action)
            # Single-state Q-update: the "next state" is the same row, so the
            # bootstrap term is the row's own maximum.
            self.q_table[index][action_index] += self.alpha * (
                reward + self.gamma * np.max(self.q_table[index]) - self.q_table[index][action_index])

        self.logger.info("q-learning table building is done. Start applying tuning.")
        # Apply the learned greedy policy once for every parameter.
        for key, params in TUNING_PARAMETER.items():
            start_index = self.start_index[key]
            for offset, param in enumerate(params):
                index = start_index + offset
                action: TuningAction = ACTIONS[np.argmax(self.q_table[index])]
                full_name = ".".join([key, param])
                # Bug fix: query the fully qualified "namespace.param" name —
                # the original queried the bare param (e.g. "swappiness"),
                # which sysctl cannot resolve.
                cur = self.query(full_name)
                update_val = self._clamp(key, param, cur + action.value * self.delta[key][param])
                self.modify(full_name, update_val)
        self.logger.info("Applying tuning done.")
        performance_change = self.benchmark(None, is_qlearning=True)
        if performance_change < 0:
            self.logger.info("经调优后，性能降低{}%.".format(performance_change))
        else:
            self.logger.info("经调优后，性能提升{}%.".format(performance_change))

    @abstractmethod
    def plain_system_layer_tune(self, systype: SystemType, num: int) -> None:
        """Tune ``num`` consecutive kernel parameters of type ``systype`` with a naive strategy."""
        pass

    @abstractmethod
    def plain_software_layer_tune(self) -> None:
        """Tune a specific piece of software with a naive strategy."""
        pass

    @abstractmethod
    def benchmark(self, num: Optional[int], is_qlearning: bool) -> Optional[float]:
        """Benchmark the effect of the latest tuning step.

        Note: the original annotation ``int or None`` evaluates to plain
        ``int`` at runtime; ``Optional`` expresses the intended contract.
        """
        pass

    @abstractmethod
    def _init_benchmark(self) -> None:
        """Run the initial baseline benchmark before tuning starts."""
        pass
