import argparse
import json
import time
import os

import matplotlib.pyplot as plt
import numpy as np
from load4sk import load_reservoir_4sk_data
from src.util import (load_knapsack_problem, make_directory, moving_average,
                      save_results)
import itertools

class FourSKQLearning:
    """Tabular Q-learning agent for a four-reservoir ("4SK") operation problem.

    The state vector is ``[t, s1, s2, s3, s4, y1, y2, y3, y4]`` where ``t`` is
    the month index, ``s*`` the four storages and ``y*`` the inflows.  An
    action is a 4-tuple of releases, one per reservoir.  The reservoirs are
    coupled (see ``apply_action``): reservoir 3 receives reservoir 2's release
    and reservoir 4 receives the releases of reservoirs 1 and 3 within the
    same month.  The Q-table is a nested dict
    ``{str(state): {str(action): q}}`` loaded from / saved to
    ``test_ltm.npy`` across runs.
    """

    def __init__(
        self,
        y,
        a0,
        aN,
        u_max, u_min, s_max, s_min, b,
        lr=0.1,
        gamma=0.95,
        episodes=50,
        eps=1.0,
        eps_decay=0.995,
        eps_min=0.01,
        summary_freq=500,
        nb_mon=None,
        action_space=None,
    ):
        """Store the problem data and hyper-parameters and load the Q-table.

        Parameters
        ----------
        y : per-reservoir inflow (length-4 sequence), constant over time.
        a0 : initial storages (length-4).
        aN : target final storages (stored for reference; not enforced here).
        u_max, u_min : (4, nb_mon) arrays of release bounds per month.
        s_max, s_min : (4, nb_mon) arrays of storage bounds per month.
        b : (5, nb_mon) benefit coefficients; row 4 is an extra bonus applied
            to reservoir 4's release (see ``calculate_reward``).
        lr, gamma, episodes, eps, eps_decay, eps_min : Q-learning
            hyper-parameters (learning rate, discount, episode count and the
            epsilon-greedy exploration schedule).
        summary_freq : kept for interface compatibility; unused in this class.
        nb_mon : number of months in the planning horizon.
        action_space : iterable of candidate release 4-tuples.

        BUGFIX: ``nb_mon`` and ``action_space`` were silently read from
        module-level globals; they are now explicit parameters that fall back
        to those globals when omitted, preserving the original call sites.
        """
        if nb_mon is None:
            nb_mon = globals()["nb_mon"]
        if action_space is None:
            action_space = globals()["action_space"]

        self.y = y
        self.a0 = a0
        self.aN = aN
        self.u_max = u_max
        self.u_min = u_min
        self.s_max = s_max
        self.s_min = s_min
        self.b = b
        self.lr = lr
        self.gamma = gamma
        self.episodes = episodes
        self.eps = eps
        self.eps_decay = eps_decay
        self.eps_min = eps_min
        self.summary_freq = summary_freq
        self.nb_mon = nb_mon
        self.action_space = action_space

        self.n_sk = len(y)

        # Q-table: state (stringified) -> {action (stringified) -> Q-value}.
        self.Q = self.initialize_memory("test_ltm.npy")

        self.n_actions = len(action_space)

    def initialize_memory(self, filename):
        """Return the Q-table saved in ``filename``, or a fresh empty dict.

        NOTE(security): ``allow_pickle=True`` runs the pickle machinery on
        load — only ever point this at files written by this program itself.
        """
        if os.path.exists(filename):
            return np.load(filename, allow_pickle=True).item()
        return {}

    # Within-month coupling used by the feasibility bounds: the releases of
    # the reservoirs listed here arrive as extra inflow to reservoir i.
    # Mirrors the mass balance in ``apply_action``.
    _UPSTREAM = {0: (), 1: (), 2: (1,), 3: (0, 2)}

    def get_possible(self, state):
        """Return the list of feasible actions in ``state``.

        An action is feasible when, for every reservoir i, the release
        act[i] respects its bounds u_min/u_max and keeps the resulting
        storage within s_min/s_max, accounting for upstream releases
        arriving in the same month (see ``_UPSTREAM``).
        """
        t = state[0]
        s = state[1:5]
        y = state[5:]
        smin_t = self.s_min[:, t]
        smax_t = self.s_max[:, t]
        umin_t = self.u_min[:, t]
        umax_t = self.u_max[:, t]

        feasible = []
        for act in self.action_space:
            ok = True
            for i in range(4):
                inflow = sum(act[j] for j in self._UPSTREAM[i])
                # Tightest lower/upper bounds combining release limits and
                # next-month storage limits.
                lo = max(umin_t[i], s[i] + y[i] - smax_t[i] + inflow)
                hi = min(umax_t[i], s[i] + y[i] - smin_t[i] + inflow)
                if not (lo <= act[i] <= hi):
                    ok = False
                    break
            if ok:
                feasible.append(act)
        return feasible

    def get_q(self, state, action):
        """Return Q(state, action), lazily initialising unknown entries.

        Unknown states / state-action pairs are seeded with a small uniform
        random value in [0, 0.1) and stored in the associative memory.

        Parameters
        ----------
        state : current state vector.
        action : action to evaluate.

        Returns
        -------
        float : the Q-value of the state-action pair.
        """
        initial_value = np.random.uniform(high=0.1)

        actions = self.Q.get(str(state))
        if actions is None:
            # First visit to this state: create its action dict.
            self.Q[str(state)] = {str(action): initial_value}
            return initial_value

        q = actions.get(str(action))
        if q is None:
            # Known state, unseen action: seed it.
            actions[str(action)] = initial_value
            return initial_value

        return q

    def select_action(self, state, epsilon):
        """Epsilon-greedy selection among the feasible actions.

        Returns None when no feasible action exists (dead-end state).
        """
        actions = self.get_possible(state)
        if not actions:
            return None
        if np.random.rand() < epsilon:
            # Explore: uniform random feasible action.
            return actions[np.random.randint(0, len(actions))]
        # Exploit: highest Q-value among feasible actions.
        q_values = [self.get_q(state, a) for a in actions]
        return actions[np.argmax(q_values)]

    def update_q_value(self, item, action, reward):
        """Legacy knapsack-mode update.

        NOTE(review): incompatible with the dict-based Q-table used by this
        class and references ``self.n_items``, which is never set — dead
        code kept only for interface compatibility; do not call.
        """
        max_next_q = np.max(self.Q[item + 1]) if item + 1 < self.n_items else 0
        self.Q[item, action] += self.lr * (reward + self.gamma * max_next_q - self.Q[item, action])

    def apply_action(self, s1, a1):
        """Return the successor state after taking releases ``a1`` in ``s1``.

        Mass balance per reservoir: storage + inflow - own release, plus any
        upstream releases arriving the same month.
        """
        s2 = s1.copy()
        y = s1[5:]
        s2[0] = s1[0] + 1  # advance the month counter
        s2[1] = s1[1] + y[0] - a1[0]
        s2[2] = s1[2] + y[1] - a1[1]
        s2[3] = s1[3] + y[2] - a1[2] + a1[1]
        s2[4] = s1[4] + y[3] - a1[3] + a1[2] + a1[0]
        # s2[5:] already equals y via the copy; inflows are constant.
        return s2

    def calculate_reward(self, s1, a1):
        """Immediate benefit of taking releases ``a1`` in month ``s1[0]``.

        Reward = sum_i b[i][t] * a1[i], plus an extra bonus b[4][t] on
        reservoir 4's release.
        """
        t = s1[0]
        r = sum(self.b[i][t] * a1[i] for i in range(4))
        r += self.b[4][t] * a1[3]
        return r

    # Backward-compatible alias preserving the original misspelled name.
    calcualte_reward = calculate_reward

    def get_maxq(self, state):
        """Return max_a Q(state, a); a small random value for unseen states.

        BUGFIX: uses ``is None`` instead of ``== None``.
        """
        actions = self.Q.get(str(state))
        if actions is None:
            return np.random.uniform(high=0.1)
        return np.max(list(actions.values()))

    def train(self):
        """Run epsilon-greedy Q-learning for ``self.episodes`` episodes.

        Every 10 episodes the current greedy policy is rolled out and its
        release schedule, final storages and objective are printed.

        Returns
        -------
        (epsilons, cumulative_rewards) : the per-step epsilon trace and the
        per-episode cumulative reward.  BUGFIX: the original returned an
        always-empty ``cumulative_rewards`` list.
        """
        epsilons = []
        cumulative_rewards = []

        for episode in range(self.episodes):
            cumulative_reward = 0
            # Reset: month 0, initial storages, constant inflows.
            s1 = np.concatenate(([0], self.a0, self.y))

            for t in range(self.nb_mon):
                a1 = self.select_action(s1, self.eps)
                if a1 is None:
                    # No feasible action: abort this episode.
                    break
                s2 = self.apply_action(s1, a1)
                r = self.calculate_reward(s1, a1)
                q1 = self.get_q(s1, a1)
                q2 = self.get_maxq(s2)
                # One-step TD update, written back into the dict table.
                q1 = q1 + self.lr * (r + self.gamma * q2 - q1)
                self.Q[str(s1)][str(a1)] = q1
                s1 = s2
                cumulative_reward += r
                epsilons.append(self.eps)
                # Epsilon decay per step, floored at eps_min.
                self.eps = max(self.eps * self.eps_decay, self.eps_min)

            # BUGFIX: record the episode return (was never appended).
            cumulative_rewards.append(cumulative_reward)

            # Periodically roll out the greedy policy for inspection.
            if episode % 10 == 0:
                s1 = np.concatenate(([0], self.a0, self.y))
                u = np.zeros((4, self.nb_mon))
                sN = np.zeros((1, 4))
                obj = 0
                for t in range(self.nb_mon):
                    # epsilon = 0: act greedily w.r.t. the Q-table.
                    a1 = self.select_action(s1, 0)
                    # BUGFIX: check None before use (the original assigned
                    # u[:, t] = a1 first, crashing instead of breaking).
                    if a1 is None:
                        break
                    u[:, t] = a1
                    s1 = self.apply_action(s1, a1)
                    if t == self.nb_mon - 1:
                        # Record the end-of-horizon storages.
                        sN[0, :] = s1[1:5]
                    for i in range(4):
                        obj += self.b[i][t] * a1[i]
                        if i == 3:
                            obj += self.b[4][t] * a1[i]
                print("episode" + str(episode))
                print("u:", u)
                print("sN:", sN)
                print("obj:", obj)

        print("\nProposed Solution 训练完毕\n")
        return epsilons, cumulative_rewards

    def get_policy(self):
        """Legacy knapsack-mode policy extraction.

        NOTE(review): references ``initial_capacities``, ``n_items`` and
        ``weights``, none of which this class sets — dead code kept only for
        interface compatibility; calling it raises AttributeError.
        """
        policy = []
        current_capacities = self.initial_capacities.copy()
        for item in range(self.n_items):
            action = np.argmax(self.Q[item])  # best action for each item
            if action > 0 and self.weights[item] <= current_capacities[action - 1]:
                policy.append(action)
                current_capacities[action - 1] -= self.weights[item]
            else:
                policy.append(0)  # infeasible -> do nothing
        return policy

    def get_total_value(self):
        """Legacy knapsack-mode objective; see ``get_policy`` (dead code)."""
        policy = self.get_policy()
        return sum(self.values[i] for i, a in enumerate(policy) if a > 0)

    
if __name__ == '__main__':
    # Load the four-reservoir problem instance from the project loader.
    y, a0, aN, u_max, u_min, s_max, s_min, b, nb_mon, action_space = load_reservoir_4sk_data()
    print("Initial capacity a0:", a0)
    print("Final capacity aN:", aN)
    print("Maximum discharge u_max:", u_max)
    print("Minimum discharge u_min:", u_min)
    print("Minimum storage s_min:", s_min)
    print("Maximum storage s_max:", s_max)
    print("Benefit coefficients b:", b)

    # Hyper-parameters from the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument("--episodes", type=int, default=20000)
    parser.add_argument("--gamma", type=float, default=1.0)
    parser.add_argument("--eps", type=float, default=1.0)
    parser.add_argument("--eps_decay", type=float, default=0.9999)
    parser.add_argument("--lr", type=float, default=0.1)
    parser.add_argument("--eps_min", type=float, default=0.01)
    parser.add_argument("--summary_freq", type=int, default=500)
    args = parser.parse_args()

    # Build the learner.  BUGFIX: --summary_freq was parsed but never
    # forwarded to the constructor.
    learner = FourSKQLearning(y, a0, aN, u_max, u_min, s_max, s_min, b,
                              lr=args.lr, gamma=args.gamma,
                              episodes=args.episodes, eps=args.eps,
                              eps_decay=args.eps_decay, eps_min=args.eps_min,
                              summary_freq=args.summary_freq)

    print("\nSolving with Q-Learning...")

    start_time = time.time()
    epsilons, cumulative_rewards = learner.train()
    train_time = time.time() - start_time  # wall-clock training time

    print("\n状态空间长度:", len(learner.Q))

    # Persist the learned Q-table so subsequent runs resume from it
    # (the constructor reloads this file).
    memory_file = "test_ltm.npy"
    np.save(memory_file, learner.Q)

    # NOTE(review): the original commented-out plotting / result-saving
    # scaffolding (epsilon decay plot, reward curve, knapsack inference)
    # was removed as dead code; restore from history if needed.
