import copy
import math
import random

import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from torch import  optim

from model import Model
from model import device



class DQN():
    """DQN-style agent for routing-scheme selection.

    Holds an evaluation network and a target network (deep copies of the
    project-defined ``Model``), an experience-replay buffer of transition
    dicts, and a string-keyed ``q_table`` that ``learn`` updates with
    one-step Q-learning targets (the network-based update was disabled in
    the original and has been removed here).
    """

    def __init__(self, n_actions, n_features, node_size, learning_rate, reward_decay, e_greedy,
                 replace_target_iter, memory_size, batch_size, e_greedy_increment=None):
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        self.gamma = reward_decay          # discount factor
        self.epsilon_max = e_greedy        # ceiling for the greedy probability
        self.replace_target_iter = replace_target_iter
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.epsilon_increment = e_greedy_increment
        # Anneal epsilon from 0 when an increment is given; otherwise act at the ceiling.
        self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
        self.learn_step_counter = 0
        self.memory = []                   # replay buffer (list of transition dicts)
        self.memory_counter = 0            # fix: initialise eagerly instead of lazily via hasattr()
        self.eval_net = Model(out=n_actions).to(device)
        self.msg_next = dict()
        self.eval_net.init_weight()
        self.target_net = copy.deepcopy(self.eval_net).to(device)
        self.loss_function = nn.MSELoss().to(device)
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=self.lr)
        self.cost_his = []                 # training-loss history for plot_cost()
        self.q_table = dict()              # str(state) -> np.ndarray of per-action values

    def target_copy(self):
        """Replace the target network with a deep copy of the eval network."""
        self.target_net = copy.deepcopy(self.eval_net).to(device)

    def frozen_gat(self):
        """Freeze the GAT layers ("gat1" and "gat2") of both networks.

        Fix: ``named_parameters()`` yields dotted names such as
        ``"gat1.weight"``, so the original exact-match test
        (``param[0] in ["gat1"]``) never matched and nothing was frozen;
        compare on the top-level module prefix instead.
        """
        for net in (self.eval_net, self.target_net):
            for name, param in net.named_parameters():
                if name.split('.')[0] in ("gat1", "gat2"):
                    param.requires_grad = False

    def unfrozen(self):
        """Unfreeze the "gat1" layer of both networks (prefix match, see frozen_gat)."""
        for net in (self.eval_net, self.target_net):
            for name, param in net.named_parameters():
                if name.split('.')[0] == "gat1":
                    param.requires_grad = True

    def store_exp(self, str):  # parameter name kept for caller compatibility (shadows builtin)
        """Save the eval network's state dict to the file path given by ``str``."""
        torch.save(self.eval_net.state_dict(), str)

    # Load parameters from a saved file.
    def construct_GAT_RNN(self, node_size, str):  # parameter names kept for caller compatibility
        """Restore the eval network from checkpoint file ``str``, reset its hidden
        state, and rebuild the target network as a deep copy.

        ``node_size`` is currently unused (kept for interface compatibility).
        """
        checkpoint = torch.load(str, map_location=device)
        self.eval_net.load_state_dict(checkpoint)
        self.eval_net.init_hidden()
        self.target_net = copy.deepcopy(self.eval_net).to(device)

    def clear_transition(self):
        """Empty the replay buffer and reset its write counter."""
        self.memory = []
        self.memory_counter = 0

    def store_transition(self, cur, reward, action, state_next, state_next_next, done):
        """Append one transition to the replay buffer.

        Once ``memory_size`` entries exist, the oldest slot is overwritten
        ring-buffer style.
        """
        if not hasattr(self, 'memory_counter'):
            # Kept for instances created before memory_counter existed in __init__.
            self.memory_counter = 0
        transition = {
            'state': cur,
            'action': action,
            'reward': reward,
            'state_next': state_next,
            'state_next_next': state_next_next,
            'done': done,
        }
        if self.memory_counter < self.memory_size:
            self.memory.append(transition)
        else:
            # Replace the oldest memory with the new one.
            self.memory[self.memory_counter % self.memory_size] = transition
        self.memory_counter += 1

    # Choose among the K pre-selected routing schemes for the current state.
    # Returns (action index, True) on success; (-1, False) / (n_actions, False) on failure.
    def choose_actions(self, link_msg, observations, alloc_flag, adj, eval):
        """Epsilon-greedy choice over K candidate schemes plus one "reject" option.

        With probability ``epsilon`` (or always when ``eval`` is truthy) pick the
        candidate with the highest eval-net score, masking candidates whose
        ``alloc_flag`` is falsy; otherwise pick a random legal action.
        """
        observations = torch.FloatTensor(observations).to(device)
        link_msg = torch.FloatTensor(link_msg).to(device)
        adj = torch.IntTensor(adj).to(device)
        if True not in alloc_flag:
            return -1, False
        if (np.random.uniform() < self.epsilon) or eval:
            k_values = []
            for idx in range(len(observations)):
                if alloc_flag[idx] == 0:
                    k_values.append(-np.inf)  # mask illegal candidates
                else:
                    k_values.append(self.eval_net(observations[idx], adj).item())
            # Extra entry at index n_actions: the "reject" option scored on link_msg.
            k_values.append(self.eval_net(link_msg, adj).item())
            action = np.argmax(k_values)
        else:
            # Randomly pick a legal action. Fix: draw from n_actions + 1 values so the
            # "reject" index is reachable here too (the original randint(0, n_actions)
            # could never produce it, leaving its break unreachable); this also matches
            # choose_action_prob.
            while True:
                action = np.random.randint(0, self.n_actions + 1)
                if action == self.n_actions:
                    break
                if alloc_flag[action]:
                    break
        if action == self.n_actions:
            return action, False
        return action, True

    def sigm(self, list):  # parameter name kept for caller compatibility (shadows builtin)
        """Softmax over ``list`` that treats ``-inf`` entries as masked (probability 0)."""
        total = 0
        for num in list:
            if num == -np.inf:
                continue
            total += math.pow(math.e, num)
        res = np.zeros(len(list))
        for i in range(len(list)):
            if list[i] == -np.inf:
                continue
            res[i] = math.pow(math.e, list[i]) / total
        return res

    def choose_action_code(self, state, state_next, eval_flag):
        """Epsilon-greedy, stochastic variant: with probability ``epsilon`` (or when
        ``eval_flag``) sample an action from a masked softmax of the eval net's
        scores; otherwise pick a random legal action.

        A candidate ``i`` is illegal when ``state_next[i][0][0] == -1``.
        """
        state = torch.FloatTensor(state).to(device)
        if np.random.uniform() < self.epsilon or eval_flag:
            action_score = self.eval_net(state.view(-1))
            for i in range(len(action_score)):
                if state_next[i][0][0] == -1:
                    action_score[i] = -np.inf
            action = action_score.cpu().detach_()
            action = np.random.choice([j for j in range(len(action))], size=1, p=self.sigm(action))[0]
        else:
            # Randomly pick a legal action.
            while True:
                action = np.random.randint(0, self.n_actions)
                if action == self.n_actions - 1:
                    break
                if state_next[action][0][0] != -1:
                    break
        return action

    def choose_action_eval(self, state, state_next, eval_flag):
        """Like choose_action_code, but the greedy branch takes the argmax of the
        masked scores instead of sampling from the softmax."""
        state = torch.FloatTensor(state).to(device)
        if np.random.uniform() < self.epsilon or eval_flag:
            action_score = self.eval_net(state.view(-1))
            for i in range(len(action_score)):
                if state_next[i][0][0] == -1:
                    action_score[i] = -np.inf
            action = action_score.cpu().detach_()
            action = np.argmax(action)
        else:
            # Randomly pick a legal action.
            while True:
                action = np.random.randint(0, self.n_actions)
                if action == self.n_actions - 1:
                    break
                if state_next[action][0][0] != -1:
                    break
        return action

    def choose_action_prob(self, state_next, adj, eval_flag):
        """Epsilon-greedy choice that scores each successor state with the eval net
        and samples from the masked softmax; the random branch may also yield the
        sentinel index ``n_actions``."""
        state_next = torch.FloatTensor(state_next).to(device)
        adj = torch.IntTensor(adj).to(device)
        if np.random.uniform() < self.epsilon or eval_flag:
            k_values = []
            for idx in range(len(state_next)):
                if state_next[idx][0][0] == -1:
                    k_values.append(-np.inf)  # mask illegal candidates
                else:
                    k_values.append(self.eval_net(state_next[idx].view(-1), adj).item())
            action = np.random.choice([j for j in range(len(state_next))], size=1, p=self.sigm(k_values))[0]
        else:
            # Randomly pick a legal action (n_actions is the "reject" sentinel).
            while True:
                action = np.random.randint(0, self.n_actions + 1)
                if action == self.n_actions:
                    break
                if state_next[action][0][0] != -1:
                    break
        return action

    def choose_actions_K1(self, alloc_flag):
        """K=1 baseline: take candidate 0 iff it is legal; otherwise (-1, False)."""
        if not alloc_flag[0]:
            return -1, False
        return 0, True

    def choose_actions_KFF(self, state_next):
        """First-fit baseline: index of the first legal candidate.

        NOTE(review): returns None when every candidate is illegal — confirm
        callers handle that (behavior kept from the original).
        """
        for i in range(len(state_next)):
            if state_next[i][0][0] != -1:
                return i

    def choose_action_random(self, alloc_flag):
        """Uniformly pick a legal action; (-1, False) when no action is legal."""
        if True not in alloc_flag:
            return -1, False
        # Randomly pick a legal action.
        while True:
            action = np.random.randint(0, self.n_actions)
            if alloc_flag[action]:
                break
        return action, True

    def _replace_target_params(self):
        # Copy the eval network's parameters into the target network.
        self.target_net.load_state_dict(self.eval_net.state_dict())

    def learn(self, adj):
        """One learning step: sample ``batch_size`` transitions (with replacement)
        and apply tabular one-step Q-learning updates to ``q_table``.

        ``adj`` and the stored ``done`` flags are currently unused — the
        network-based TD update was disabled in the original.
        """
        sample_idx = np.random.choice(len(self.memory), self.batch_size)
        batch_memory = [self.memory[k] for k in sample_idx]
        # fix: np.stack requires a sequence, not a generator, in current NumPy.
        state = np.stack([p['state'] for p in batch_memory], axis=0)
        state = torch.FloatTensor(state).to(device)
        state = state.view(self.batch_size, -1)
        reward = np.stack([p['reward'] for p in batch_memory], axis=0).reshape(self.batch_size, 1)
        action = np.stack([int(p['action']) for p in batch_memory], axis=0).reshape(self.batch_size, 1)
        state_next = np.stack([p['state_next'] for p in batch_memory], axis=0)
        state_next_next = np.stack([p['state_next_next'] for p in batch_memory], axis=0)

        for i in range(self.batch_size):  # fix: was range(len(self.batch_size)) -> TypeError
            s_key = str(state[i])
            sn_key = str(state_next[i])
            if s_key not in self.q_table:
                self.q_table[s_key] = np.zeros(self.n_actions)
            if sn_key not in self.q_table:
                self.q_table[sn_key] = np.zeros(self.n_actions)
            # Mask actions whose successor state carries the illegal sentinel (-1).
            for j in range(len(state_next[i])):
                if state_next_next[i][j][0][0] == -1:
                    self.q_table[sn_key][j] = -np.inf
            # fix: the original indexed q_table with str(state)/str(state_next)
            # (whole-batch keys) instead of the per-sample rows built above.
            self.q_table[s_key][int(action[i])] = float(reward[i]) + self.gamma * self.q_table[sn_key].max()
        self.learn_step_counter += 1

    def plot_cost(self):
        """Plot the recorded training-loss history."""
        plt.figure()
        plt.plot(np.arange(len(self.cost_his)), self.cost_his)
        plt.show()
