import datetime
import os
import pickle
import time
from itertools import *
from urllib.parse import urljoin

import joblib
import requests

from base_funcs import *

# Base URL of the local compute service that runs the remote grid / linear
# searches and holds the model state (see grid_search_exa / init_base_model).
_compute_service_url = "http://localhost:5000/"


def grid_search_exa(iter_grid, func, params: tuple) -> list:
    """Evaluate *func* on every point of *iter_grid* via the compute service.

    Only ``func.__name__`` crosses the wire — the service must know the
    function by name. *params* are the fixed leading arguments; each grid
    point supplies the remaining ones. Returns the service's "results" list.
    """
    # BUG FIX: the original sent pickled bytes inside a `json=` payload,
    # which raises TypeError client-side (bytes are not JSON-serializable);
    # itertools.product objects are also unpicklable. Ship one pickled dict
    # as the raw body instead, mirroring the `init_model` call in
    # init_base_model, and materialize the grid first.
    # NOTE(review): the server endpoint must unpickle the request body —
    # confirm against the service implementation.
    payload = pickle.dumps({
        "func": func.__name__,
        "params": params,
        "iter_grid": list(iter_grid),  # iterators (e.g. product) can't pickle
    })
    # urljoin instead of os.path.join: path joining is OS-dependent and
    # produces backslashes on Windows.
    res = requests.post(url=urljoin(_compute_service_url, "grid_search_exa"),
                        data=payload)
    return res.json()["results"]


def linear_search_exa(iter_arr, func, params: tuple) -> list:
    """Evaluate *func* on every element of *iter_arr* via the compute service.

    Only ``func.__name__`` crosses the wire — the service must know the
    function by name. *params* are the fixed leading arguments. Returns the
    service's "results" list.
    """
    # BUG FIX: as in grid_search_exa, pickled bytes inside a `json=` payload
    # raise TypeError before the request is sent. Send one pickled dict as
    # the raw body, consistent with the `init_model` call in init_base_model.
    # NOTE(review): the server endpoint must unpickle the request body —
    # confirm against the service implementation.
    payload = pickle.dumps({
        "func": func.__name__,
        "params": params,
        "iter_arr": list(iter_arr),  # materialize so any iterator pickles
    })
    res = requests.post(url=urljoin(_compute_service_url, "linear_search_exa"),
                        data=payload)
    return res.json()["results"]


# Basic operators: terminal-stage (T) cost functions
def psi_1T(v_1, v_2, v_3, e, z):
    """Terminal-stage cost operator for branch 1.

    Sum of demand-expectation terms and linear terms in the decisions.
    All coefficients (h_e, b, c_e, c_m, h_m, theta) and the demand support
    D_t / p_D_t are module globals bound by init_base_model; E and I come
    from base_funcs. Note: v_2 is accepted but not used here.
    """
    net = v_1 - e  # post-decision level shared by the expectation terms
    terms = (
        h_e * E(I(net - D_t), p_D_t),
        b * E(I(D_t - net), p_D_t),
        c_e * net,
        c_m * (z - v_3),
        h_m * (v_3 - v_1),
        theta * e,
    )
    return sum(terms)


def psi_2T(v_2, v_3, y, z):
    """Terminal-stage cost operator for branch 2.

    Same structure as psi_1T with decision y in place of v_1 - e.
    Coefficients and D_t / p_D_t are module globals set by init_base_model.
    Note: v_2 is accepted but not used (kept for call-site symmetry).
    """
    terms = (
        h_e * E(I(y - D_t), p_D_t),
        b * E(I(D_t - y), p_D_t),
        c_e * y,
        c_m * (z - v_3),
        h_m * (v_3 - y),
    )
    return sum(terms)


def psi_3T(v_3, y, z):
    """Terminal-stage cost operator for branch 3.

    Identical expression to psi_2T (minus the unused v_2 parameter);
    coefficients and D_t / p_D_t are module globals set by init_base_model.
    """
    terms = (
        h_e * E(I(y - D_t), p_D_t),
        b * E(I(D_t - y), p_D_t),
        c_e * y,
        c_m * (z - v_3),
        h_m * (v_3 - y),
    )
    return sum(terms)


# psi_1t
def psi_1t(v_1, v_2, v_3, e, z):
    """Stage-t operator for branch 1: terminal cost plus the discounted
    expected next-stage value looked up in the global cache.

    Cache keys encode the post-demand state for each demand realisation in
    D_t; global_cache, epoch and alpha are module globals — presumably the
    cache holds f_{t+1} values keyed by state (TODO confirm schema).
    """
    immediate = psi_1T(v_1, v_2, v_3, e, z)
    stage_cache = global_cache[epoch]
    future = []
    for d in D_t:
        key = f"psi_1t_f_{v_1 - e - d}_{v_2 - e - d}_{v_3 - e - d}_{z - e - d}"
        future.append(stage_cache[key]["value"])
    return immediate + alpha * E(future, p_D_t)


# psi_2t
def psi_2t(v_2, v_3, y, z):
    """Stage-t operator for branch 2: terminal cost plus the discounted
    expected next-stage value from the global cache (same scheme as psi_1t)."""
    immediate = psi_2T(v_2, v_3, y, z)
    stage_cache = global_cache[epoch]
    future = []
    for d in D_t:
        key = f"psi_2t_f_{y - d}_{v_2 - d}_{v_3 - d}_{z - d}"
        future.append(stage_cache[key]["value"])
    return immediate + alpha * E(future, p_D_t)


# psi_3t
def psi_3t(v_3, y, z):
    """Stage-t operator for branch 3: terminal cost plus the discounted
    expected next-stage value from the global cache (same scheme as psi_1t)."""
    immediate = psi_3T(v_3, y, z)
    stage_cache = global_cache[epoch]
    future = []
    for d in D_t:
        key = f"psi_3t_f_{y - d}_{y - d}_{v_3 - d}_{z - d}"
        future.append(stage_cache[key]["value"])
    return immediate + alpha * E(future, p_D_t)


def phi_1t(v_0, v_1, v_2, v_3, K_t_i):
    """Best branch-1 value for capacity realisation K_t_i: minimise psi_1t
    over the feasible (e, z) grid on the compute service, then subtract the
    c_e * v_0 term. c_e is a module global set by init_base_model."""
    feasible_e = range(0, v_1 - v_0 + 1)
    feasible_z = range(v_3, v_3 + K_t_i)
    values = grid_search_exa(product(feasible_e, feasible_z),
                             psi_1t, (v_1, v_2, v_3))
    return np.min(values) - c_e * v_0


def phi_2t(v_0, v_1, v_2, v_3, K_t_i):
    """Best branch-2 value for capacity realisation K_t_i: minimise psi_2t
    over the feasible (y, z) grid on the compute service, then subtract the
    c_e * v_0 term."""
    feasible_y = range(v_1, v_2 + 1)
    feasible_z = range(v_3, v_3 + K_t_i)
    values = grid_search_exa(product(feasible_y, feasible_z),
                             psi_2t, (v_2, v_3))
    return np.min(values) - c_e * v_0


def phi_3t(v_0, v_1, v_2, v_3, K_t_i):
    """Best branch-3 value for capacity realisation K_t_i: minimise psi_3t
    over the feasible (y, z) grid on the compute service, then subtract the
    c_e * v_0 term."""
    feasible_y = range(v_2, v_3 + 1)
    feasible_z = range(v_3, v_3 + K_t_i)
    values = grid_search_exa(product(feasible_y, feasible_z),
                             psi_3t, (v_3,))
    return np.min(values) - c_e * v_0


def f_t(v_0, v_1, v_2, v_3):
    """Stage value at state (v_0..v_3): for each capacity realisation in K_t
    take the cheapest of the three phi branches, then average the results
    with weights p_K_t. K_t / p_K_t and E are module-level names bound by
    init_base_model / base_funcs."""
    best_per_capacity = [
        np.min([phi_1t(v_0, v_1, v_2, v_3, k),
                phi_2t(v_0, v_1, v_2, v_3, k),
                phi_3t(v_0, v_1, v_2, v_3, k)])
        for k in K_t
    ]
    return E(best_per_capacity, p_K_t)



def init_base_model(_T=3, _v_0=1, _v_1=4, _v_2=6, _v_3=8, _p=19, _b=19, _c_e=10, _c_m=2, _h_e=1, _h_m=0.2):
    """Bind model parameters to module globals and initialise the compute
    service.

    _T is the horizon length, _v_0.._v_3 the initial state levels, the rest
    cost coefficients. The globals set here are read by the psi_* / phi_*
    functions above. Returns the response of the service's ``init_model``
    call.

    NOTE(review): ``global_cache`` is declared global but never assigned in
    this file — presumably populated elsewhere (base_funcs or the service);
    confirm before enabling the recursion below.
    """
    global alpha, K_t, p_K_t, D_t, p_D_t, p, b, c_e, c_m, h_e, h_m, theta
    global epoch, max_epoch, global_cache
    # Variable
    alpha = 0.95  # discount factor
    # Kt: discretised capacity distribution
    mu_K_t = 6  # mean
    cv_K_t = 0.3  # coefficient of variation
    sigma_K_t = cv_K_t * mu_K_t  # standard deviation
    K_t = np.array([4, 5, 6, 7, 8])
    p_K_t = p_norm(K_t, mu_K_t, sigma_K_t)
    # Dt: discretised demand distribution
    mu_t = 5
    sigma_t = 0.5
    D_t = np.array(list(range(0, 10)))
    p_D_t = p_norm(D_t, mu_t, sigma_t)
    p = _p
    b = _b
    c_e = _c_e
    c_m = _c_m
    h_e = _h_e
    h_m = _h_m
    theta = h_m / (1 - alpha)  # presumably the discounted-horizon h_m penalty — TODO confirm
    # theta = 12
    print(f"c_m: {c_m}, h_e: {h_e}, h_m: {h_m}, theta: {theta}")

    epoch = 1
    max_epoch = _T - 1
    # Local copies of the initial state; only used by the commented-out
    # recursion below.
    v_0 = _v_0
    v_1 = _v_1
    v_2 = _v_2
    v_3 = _v_3

    # Initialise the service's worker pool
    res = requests.get(url=os.path.join(_compute_service_url, "init_service"))
    print(res.json()["message"])

    # Initialise global_cache on the service side. The payload is pickled
    # raw (not JSON) so the numpy arrays survive the trip.
    res = requests.post(url=os.path.join(_compute_service_url, "init_model"),
                        data=pickle.dumps({
                            "T": _T,
                            "v_0": _v_0,
                            "v_1": _v_1,
                            "v_2": _v_2,
                            "v_3": _v_3,
                            "alpha": alpha,
                            "p": p,
                            "b": b,
                            "theta": theta,
                            "c_e": c_e,
                            "c_m": c_m,
                            "h_e": h_e,
                            "h_m": h_m,
                            "K_t": K_t,
                            "p_K_t": p_K_t,
                            "D_t": D_t,
                            "p_D_t": p_D_t
                        }))
    print(res.json()["message"])

    # After model initialisation the per-stage caches must be filled one by
    # one; the backward recursion below is currently disabled.
    calc_begin_time = time.perf_counter()

    epoch = max_epoch
    # NOTE(review): this dead code references ThreadPoolExecutor and
    # _max_worker, neither of which is imported/defined in this file — fix
    # before re-enabling.
    # while True:
    #     if epoch == 1:
    #         res = f_t(v_0, v_1, v_2, v_3)
    #         print(res)
    #         break
    #
    #     def func(key):
    #         params = global_cache[epoch - 1][key]["params"]
    #         return {"key": key, "value": f_t(*params)}
    #
    #     pool = ThreadPoolExecutor(max_workers=_max_worker)
    #     tasks = [pool.submit(func, key) for key in global_cache[epoch - 1].keys()]
    #     results = [t.result() for t in as_completed(tasks)]
    #     for result in results:
    #         global_cache[epoch - 1][result["key"]]["value"] = result["value"]
    #
    #     epoch = epoch - 1
    #
    # calc_end_time = time.perf_counter()
    # print(f"计算完成, 耗时 {calc_end_time - calc_begin_time: .2f} 秒")
    # joblib.dump(global_cache,
    #             f"caches/global_cache_T={max_epoch + 1}_time={datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')}.pkl")
    return res


if __name__ == '__main__':
    # Initialise the model/service and show the last service response.
    result = init_base_model(_T=3, _v_0=1, _v_1=8, _v_2=12, _v_3=15)
    print(result)
    # Observed wall-clock times for the full recursion (when enabled):
    #   T = 3: ~30 s
    #   T = 4: ~970 s
    #   T = 5: (not recorded)
    #   T = 6: (not recorded)
    #   T = 7: ~20 h
