import numpy as np
from itertools import *
from functools import *
import operator
import joblib
import datetime
from scipy.stats import norm
import time


# Elementwise positive part: max(x, 0).
def I(x: np.ndarray):
    """Return max(x, 0) elementwise as a float array.

    Replaces the original two-column zeros + ``np.max(axis=1)`` construction
    with ``np.maximum``; the float literal 0.0 keeps the float64 result dtype
    the original produced (its buffer came from ``np.zeros``).
    """
    return np.maximum(x, 0.0)


# Expectation operator.
def E(x: np.ndarray, p: np.ndarray):
    """Return the expected value sum_i x[i] * p[i] of outcomes *x* under
    probability weights *p* (both 1-D, same length; lists also accepted)."""
    return np.asarray(x) @ np.asarray(p)


# Discretise a normal distribution onto the support grid `x`.
def p_norm(x: np.ndarray, mu, sigma):
    """Return a probability vector over the grid points `x` that sums to 1.

    p[0]  = P(X <= x[0])                  (absorbs the lower tail)
    p[i]  = P(x[i-1] < X <= x[i])         for 0 < i < n-1
    p[-1] = P(X > x[-2])                  (absorbs the upper tail)

    Fix: the original set ``res_arr[-1] = 1 - cdf(x[-1])``, which drops the
    mass on (x[-2], x[-1]] entirely, so the vector summed to less than 1.
    The upper-tail bucket must start at x[-2].
    """
    if x.shape[0] == 1:
        # Degenerate single-point grid: it carries all the mass.
        return np.ones(1)
    cdf_vals = norm.cdf(x, mu, sigma)
    res_arr = np.zeros(x.shape[0])
    res_arr[0] = cdf_vals[0]
    res_arr[1:] = np.diff(cdf_vals)
    res_arr[-1] = 1 - cdf_vals[-2]
    return res_arr


# Model parameters
T = 3  # planning horizon (number of stages)
alpha = 0.95  # discount factor (used in psi_*t and theta below)
# K_t: random per-stage capacity, discretised via p_norm
mu_K_t = 6  # mean
cv_K_t = 0.3  # coefficient of variation
sigma_K_t = cv_K_t * mu_K_t  # standard deviation
K_t = np.array([4, 5, 6, 7, 8])  # discrete support of K_t
p_K_t = p_norm(K_t, mu_K_t, sigma_K_t)  # probability weight of each support point
# D_t: random per-stage demand, discretised via p_norm
mu_t = 5
sigma_t = 0.5
D_t = np.array(list(range(0, 10)))  # demand support 0..9
p_D_t = p_norm(D_t, mu_t, sigma_t)
# Cost coefficients. NOTE(review): meanings inferred from usage in psi_*T —
# presumably b = shortage/backorder penalty, c_e/c_m = unit costs,
# h_e/h_m = holding costs; `p` is not referenced in this file. TODO confirm.
p = 19
b = 19
c_e = 10
c_m = 2
h_e = 1
h_m = 0.2
theta = h_m / (1 - alpha)  # coefficient of e in psi_1T

# Multi-threaded optimisation helpers (thread-pool based)
from concurrent.futures import ThreadPoolExecutor, as_completed
import multiprocessing
from pathos.multiprocessing import ProcessPool

# Worker cap shared by the two fan-out helpers below.
_max_worker = 10


def grid_search_exa(iter_grid, func, params: tuple) -> list:
    """Evaluate ``func(*params, x, y)`` for every (x, y) pair in *iter_grid*
    on a thread pool and return the list of results.

    Fix: the original created a ``ThreadPoolExecutor`` per call and never
    shut it down, leaking worker threads; the ``with`` block guarantees
    shutdown even if a task raises.  Results arrive in completion order
    (``as_completed``), not submission order — callers here only take
    ``np.min`` of the list, so ordering is irrelevant.
    """
    with ThreadPoolExecutor(max_workers=_max_worker) as pool:
        tasks = [pool.submit(func, *params, x, y) for x, y in iter_grid]
        return [t.result() for t in as_completed(tasks)]


def linear_search_exa(iter_arr, func, params: tuple) -> list:
    """Evaluate ``func(*params, x)`` for every x in *iter_arr* on a thread
    pool and return the list of results (completion order).

    Same executor-shutdown fix as ``grid_search_exa``.
    """
    with ThreadPoolExecutor(max_workers=_max_worker) as pool:
        tasks = [pool.submit(func, *params, x) for x in iter_arr]
        return [t.result() for t in as_completed(tasks)]


# Base operators: single-period cost for each of the three ordering regimes.
def psi_1T(v_1, v_2, v_3, e, z):
    """Terminal-stage cost for regime 1 at decision (e, z).

    v_2 is accepted but unused, keeping the three psi_*T signatures parallel.
    Terms are accumulated in the original left-to-right order so floating-
    point results are bit-identical.
    """
    total = h_e * E(I(v_1 - e - D_t), p_D_t)   # expected overage holding
    total += b * E(I(D_t - v_1 + e), p_D_t)    # expected shortage penalty
    total += c_e * (v_1 - e)
    total += c_m * (z - v_3)
    total += h_m * (v_3 - v_1)
    total += theta * e
    return total


def psi_2T(v_2, v_3, y, z):
    """Terminal-stage cost for regime 2 at decision (y, z).

    Terms accumulate in the original left-to-right order (bit-identical
    floating-point result).
    """
    total = h_e * E(I(y - D_t), p_D_t)   # expected overage holding
    total += b * E(I(D_t - y), p_D_t)    # expected shortage penalty
    total += c_e * y
    total += c_m * (z - v_3)
    total += h_m * (v_3 - y)
    return total


def psi_3T(v_3, y, z):
    """Terminal-stage cost for regime 3 at decision (y, z).

    Same expression as psi_2T (which additionally receives an unused v_2);
    terms accumulate in the original left-to-right order.
    """
    total = h_e * E(I(y - D_t), p_D_t)   # expected overage holding
    total += b * E(I(D_t - y), p_D_t)    # expected shortage penalty
    total += c_e * y
    total += c_m * (z - v_3)
    total += h_m * (v_3 - y)
    return total


# Stage-t cost for regime 1.
def psi_1t(v_1, v_2, v_3, e, z):
    """Immediate regime-1 cost plus the discounted expected next-stage value.

    The future values are read from the pre-built cache for the current
    `epoch` (module global, set in __main__); the key format must match the
    one written by psi_1t_init exactly.
    """
    immediate = psi_1T(v_1, v_2, v_3, e, z)
    stage_cache = global_cache[epoch]
    future_values = []
    for d in D_t:
        cache_key = f"psi_1t_f_{v_1 - e - d}_{v_2 - e - d}_{v_3 - e - d}_{z - e - d}"
        future_values.append(stage_cache[cache_key]["value"])
    return immediate + alpha * E(future_values, p_D_t)


# Stage-t cost for regime 2.
def psi_2t(v_2, v_3, y, z):
    """Immediate regime-2 cost plus the discounted expected next-stage value,
    read from the epoch cache (key format matches psi_2t_init)."""
    immediate = psi_2T(v_2, v_3, y, z)
    stage_cache = global_cache[epoch]
    future_values = []
    for d in D_t:
        cache_key = f"psi_2t_f_{y - d}_{v_2 - d}_{v_3 - d}_{z - d}"
        future_values.append(stage_cache[cache_key]["value"])
    return immediate + alpha * E(future_values, p_D_t)


# Stage-t cost for regime 3.
def psi_3t(v_3, y, z):
    """Immediate regime-3 cost plus the discounted expected next-stage value,
    read from the epoch cache (key format matches psi_3t_init; the first two
    state components are both y - d)."""
    immediate = psi_3T(v_3, y, z)
    stage_cache = global_cache[epoch]
    future_values = []
    for d in D_t:
        cache_key = f"psi_3t_f_{y - d}_{y - d}_{v_3 - d}_{z - d}"
        future_values.append(stage_cache[cache_key]["value"])
    return immediate + alpha * E(future_values, p_D_t)


# psi_1t(1, 2, 3, 0, 4)
# psi_2t(3, 4, 2, 4)
# psi_3t(4, 3, 4)

def phi_1t(v_0, v_1, v_2, v_3, K_t_i):
    """Regime-1 optimisation: minimise psi_1t over the (e, z) grid, minus the
    fixed c_e * v_0 credit.

    NOTE(review): z runs to v_3 + K_t_i EXCLUSIVE here, while psi_1t_init
    pre-caches keys up to v_3 + K_t_i inclusive — confirm which bound is
    intended (the cache is a superset, so no KeyError either way).
    """
    candidates = product(range(v_1 - v_0 + 1), range(v_3, v_3 + K_t_i))
    costs = grid_search_exa(candidates, psi_1t, (v_1, v_2, v_3))
    return np.min(costs) - c_e * v_0


def phi_2t(v_0, v_1, v_2, v_3, K_t_i):
    """Regime-2 optimisation: minimise psi_2t over the (y, z) grid with
    y in [v_1, v_2], minus the fixed c_e * v_0 credit.

    NOTE(review): z range is exclusive of v_3 + K_t_i here but inclusive in
    psi_2t_init — confirm the intended bound.
    """
    candidates = product(range(v_1, v_2 + 1), range(v_3, v_3 + K_t_i))
    costs = grid_search_exa(candidates, psi_2t, (v_2, v_3))
    return np.min(costs) - c_e * v_0


def phi_3t(v_0, v_1, v_2, v_3, K_t_i):
    """Regime-3 optimisation: minimise psi_3t over the (y, z) grid with
    y in [v_2, v_3], minus the fixed c_e * v_0 credit.

    NOTE(review): z range is exclusive of v_3 + K_t_i here but inclusive in
    psi_3t_init — confirm the intended bound.
    """
    candidates = product(range(v_2, v_3 + 1), range(v_3, v_3 + K_t_i))
    costs = grid_search_exa(candidates, psi_3t, (v_3,))
    return np.min(costs) - c_e * v_0


def f_t(v_0, v_1, v_2, v_3):
    """Stage value at state (v_0, v_1, v_2, v_3): for each capacity draw
    K_t_i take the cheapest of the three regimes, then average over the
    capacity distribution p_K_t."""
    best_by_capacity = []
    for capacity in K_t:
        regime_costs = [
            phi_1t(v_0, v_1, v_2, v_3, capacity),
            phi_2t(v_0, v_1, v_2, v_3, capacity),
            phi_3t(v_0, v_1, v_2, v_3, capacity),
        ]
        best_by_capacity.append(np.min(regime_costs))
    return E(best_by_capacity, p_K_t)


# Pre-register every f_t cache entry that psi_1t may look up across all
# capacity draws, decisions and demand realisations.
def psi_1t_init(v_0, v_1, v_2, v_3):
    """Return a dict of zero-initialised cache entries keyed exactly as
    psi_1t builds its lookup keys; "params" stores the successor state so a
    later pass can compute the real value."""
    entries = {}
    for capacity in K_t:
        e_vals = range(v_1 - v_0 + 1)
        z_vals = range(v_3, v_3 + capacity + 1)
        for d in D_t:
            for _e, _z in product(e_vals, z_vals):
                state = (v_1 - _e - d, v_2 - _e - d, v_3 - _e - d, _z - _e - d)
                entries[f"psi_1t_f_{state[0]}_{state[1]}_{state[2]}_{state[3]}"] = {
                    "value": 0,
                    "params": state,
                }
    return entries


def psi_2t_init(v_0, v_1, v_2, v_3):
    """Return zero-initialised cache entries for every key psi_2t may look up
    (v_0 is unused; kept so all three init functions share a signature)."""
    entries = {}
    for capacity in K_t:
        y_vals = range(v_1, v_2 + 1)
        z_vals = range(v_3, v_3 + capacity + 1)
        for d in D_t:
            for _y, _z in product(y_vals, z_vals):
                state = (_y - d, v_2 - d, v_3 - d, _z - d)
                entries[f"psi_2t_f_{state[0]}_{state[1]}_{state[2]}_{state[3]}"] = {
                    "value": 0,
                    "params": state,
                }
    return entries


def psi_3t_init(v_0, v_1, v_2, v_3):
    """Return zero-initialised cache entries for every key psi_3t may look up
    (v_0 and v_1 are unused; signature kept parallel to the other inits).
    The first two state components are both y - d, matching psi_3t's key."""
    entries = {}
    for capacity in K_t:
        y_vals = range(v_2, v_3 + 1)
        z_vals = range(v_3, v_3 + capacity + 1)
        for d in D_t:
            for _y, _z in product(y_vals, z_vals):
                state = (_y - d, _y - d, v_3 - d, _z - d)
                entries[f"psi_3t_f_{state[0]}_{state[1]}_{state[2]}_{state[3]}"] = {
                    "value": 0,
                    "params": state,
                }
    return entries




if __name__ == "__main__":

    init_begin_time = time.perf_counter()

    global_cache = {}
    epoch = 1
    max_epoch = 2
    v_0 = 1
    v_1 = 2
    v_2 = 3
    v_3 = 4
    # Before iterating, seed every expected-value sub-expression of the
    # terminal condition with 0.
    # Hint: the initial v0..v3 are fixed, so the full set of reachable
    # e, z, y values can be enumerated up front.
    # Since f_t is derived back to epoch = 1, the expressions for
    # epoch = 2, 3, ... must be initialised to 0 first.

    global_cache[epoch] = {}
    global_cache[epoch].update(psi_1t_init(v_0, v_1, v_2, v_3))
    global_cache[epoch].update(psi_2t_init(v_0, v_1, v_2, v_3))
    global_cache[epoch].update(psi_3t_init(v_0, v_1, v_2, v_3))

    # Forward pass: expand the cache one stage deeper per epoch.
    while True:
        if epoch == max_epoch:
            break

        global_cache[epoch + 1] = {}
        # Walk every f_t entry required by the previous round and register
        # the successor states it will need.
        for key in global_cache[epoch].keys():
            params = global_cache[epoch][key]["params"]
            global_cache[epoch + 1].update(psi_1t_init(*params))
            global_cache[epoch + 1].update(psi_2t_init(*params))
            global_cache[epoch + 1].update(psi_3t_init(*params))
        epoch = epoch + 1

    init_end_time = time.perf_counter()
    print(f"模型初始化完成. 耗时 {init_end_time - init_begin_time: .2f} 秒")

    # After initialisation, fill each cache entry by backward induction from
    # the last stage down to epoch 1.
    calc_begin_time = time.perf_counter()

    epoch = max_epoch

    while True:
        if epoch == 1:
            print(f_t(v_0, v_1, v_2, v_3))
            break
        print(f"当前时间步: {epoch}")

        # NOTE(review): this closure reads the loop variable `epoch` late;
        # safe only because all task results are collected below before
        # `epoch` is decremented.
        def f_t_func(key):
            return {"key": key, "value": f_t(*global_cache[epoch - 1][key]["params"])}

        # pool = ProcessPool()
        #
        # def f_t_func(key, f_t, epoch, global_cache):
        #     return {"key": key, "value": f_t(*global_cache[epoch - 1][key]["params"])}
        #
        # length = len(global_cache[epoch - 1].keys())
        # results = pool.map(f_t_func, global_cache[epoch - 1].keys(),
        #                    [f_t] * length, [epoch] * length, [global_cache] * length)
        # pool.close()
        # pool.join()

        # NOTE(review): this executor is never shut down; a `with` block
        # would release its worker threads each iteration.
        pool = ThreadPoolExecutor(max_workers=10)
        tasks = [pool.submit(f_t_func, key) for key in global_cache[epoch - 1].keys()]
        results = [x.result() for x in as_completed(tasks)]

        # Write the computed values back into the previous stage's cache.
        for result in results:
            global_cache[epoch - 1][result["key"]]["value"] = result["value"]

        # global_cache[epoch - 1][key]["value"] = f_t(*params)

        epoch = epoch - 1

    calc_end_time = time.perf_counter()
    print(f"计算完成, 耗时 {calc_end_time - calc_begin_time: .2f} 秒")

    # Observed runtimes:
    # Epoch = 3, T = 4, Seconds = 536.44 s
    # Epoch = 4, T = 5, Seconds = 3747.08 s
    # Epoch = 5, T = 6, Seconds = 60426 s
    joblib.dump(global_cache,
                f"global_cache_T={max_epoch + 1}_time={datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')}.pkl")
