import copy
import math
import random

import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from torch import  optim




class DQN():
    """Tabular Q-learning agent for K-candidate action/route selection.

    Despite the name, every code path visible in this file is tabular:
    Q-values live in ``self.q_table``, a dict keyed by ``str(state)`` with an
    ``n_actions``-long numpy row per state.

    A candidate next-state ``state_next[i]`` is treated as *invalid* when
    ``state_next[i][0][0] == -1``; invalid candidates are masked out of
    action selection.

    NOTE(review): ``choose_action_prob`` and ``_replace_target_params``
    reference ``self.eval_net`` / ``self.target_net`` and a module-level
    ``device`` that are not defined in this file — presumably attached
    elsewhere; confirm before using those methods.
    """

    def __init__(self, n_actions, n_features, node_size, learning_rate, reward_decay, e_greedy,
                 replace_target_iter, memory_size, batch_size, e_greedy_increment=None):
        """
        Args:
            n_actions: number of discrete actions (candidate routes).
            n_features: state feature size (stored; unused in this file).
            node_size: stored by caller convention (unused in this file).
            learning_rate: learning rate (stored; unused in the tabular path).
            reward_decay: discount factor gamma.
            e_greedy: maximum epsilon for epsilon-greedy selection.
            replace_target_iter: steps between target-net syncs (unused here).
            memory_size: capacity of the replay ring buffer.
            batch_size: transitions sampled per ``learn()`` call.
            e_greedy_increment: per-``learn()`` epsilon increase; when None,
                epsilon is fixed at ``e_greedy``.
        """
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon_max = e_greedy
        self.replace_target_iter = replace_target_iter
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.epsilon_increment = e_greedy_increment
        # Explore fully at first when an annealing schedule is configured.
        self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
        self.learn_step_counter = 0
        self.memory_counter = 0  # eagerly initialized (was lazily created via hasattr)
        self.memory = []
        self.msg_next = dict()
        self.q_table = dict()

    def clear_transition(self):
        """Drop all stored transitions and reset the ring-buffer counter."""
        self.memory = []
        self.memory_counter = 0

    def store_transition(self, cur, reward, action, state_next, state_next_next, done):
        """Store one transition in the replay memory (ring buffer).

        While fewer than ``memory_size`` transitions have been seen, new
        entries are appended; afterwards the oldest slot is overwritten.
        """
        if not hasattr(self, 'memory_counter'):
            # Defensive: keeps old callers working if the attribute was deleted.
            self.memory_counter = 0
        transition = {
            'state': cur,
            'action': action,
            'reward': reward,
            'state_next': state_next,
            'state_next_next': state_next_next,
            'done': done,
        }
        if self.memory_counter < self.memory_size:
            self.memory.append(transition)
        else:
            # Overwrite the oldest entry once the buffer is full.
            self.memory[self.memory_counter % self.memory_size] = transition
        self.memory_counter += 1

    def sigm(self, scores):
        """Softmax over ``scores``; entries equal to ``-np.inf`` get probability 0.

        Returns an ndarray of the same length that sums to 1, or all zeros
        when every entry is ``-np.inf`` (the original divided by zero there).
        """
        total = 0.0
        for v in scores:
            if v == -np.inf:
                continue
            total += math.exp(v)
        probs = np.zeros(len(scores))
        if total == 0.0:
            # All actions masked; callers should not reach this state.
            return probs
        for i, v in enumerate(scores):
            if v != -np.inf:
                probs[i] = math.exp(v) / total
        return probs

    def choose_action_code(self, state, state_next, eval_flag):
        """Epsilon-greedy softmax selection over the K candidate next states.

        With probability ``epsilon`` (or always when ``eval_flag``), samples an
        action from a softmax over the state's Q-row with invalid candidates
        masked to ``-inf``; otherwise picks a uniformly random legal action.

        Bug fix: the Q-row is copied before masking — the original wrote
        ``-inf`` straight into ``self.q_table``, permanently corrupting it.
        """
        if (np.random.uniform() < self.epsilon or eval_flag) and str(state) in self.q_table:
            action_score = self.q_table[str(state)].copy()  # copy: never mutate the table
            for i in range(len(action_score)):
                if state_next[i][0][0] == -1:
                    action_score[i] = -np.inf
            action = np.random.choice(len(action_score), size=1, p=self.sigm(action_score))[0]
        else:
            # Random *legal* action; the last index is always accepted
            # (presumably a reject/no-op action — TODO confirm with callers).
            while True:
                action = np.random.randint(0, self.n_actions)
                if action == self.n_actions - 1:
                    break
                if state_next[action][0][0] != -1:
                    break
        return action

    def choose_action_eval(self, state, state_next, eval_flag):
        """Greedy (argmax) selection over the state's Q-row with invalid
        candidates masked out; random legal action for unseen states.

        Bug fix: masks a *copy* of the Q-row instead of the stored row.
        """
        if str(state) in self.q_table:
            action_score = self.q_table[str(state)].copy()  # copy: never mutate the table
            for i in range(len(action_score)):
                if state_next[i][0][0] == -1:
                    action_score[i] = -np.inf
            action = np.argmax(action_score)
        else:
            # Random legal action; last index always accepted (see above).
            while True:
                action = np.random.randint(0, self.n_actions)
                if action == self.n_actions - 1:
                    break
                if state_next[action][0][0] != -1:
                    break
        return action

    def choose_action_prob(self, state_next, adj, eval_flag):
        """Softmax selection scored by the evaluation network.

        NOTE(review): relies on a module-level ``device`` and ``self.eval_net``
        that are not defined in this file — confirm they exist before calling.
        Unlike the tabular selectors, the random branch samples in
        ``[0, n_actions]`` where index ``n_actions`` is the always-legal action.
        """
        state_next = torch.FloatTensor(state_next).to(device)
        adj = torch.IntTensor(adj).to(device)
        if np.random.uniform() < self.epsilon or eval_flag:
            k_values = []
            for idx in range(len(state_next)):
                if state_next[idx][0][0] == -1:
                    k_values.append(-np.inf)
                else:
                    k_values.append(self.eval_net(state_next[idx].view(-1), adj).item())
            action = np.random.choice(len(state_next), size=1, p=self.sigm(k_values))[0]
        else:
            while True:
                action = np.random.randint(0, self.n_actions + 1)
                if action == self.n_actions:
                    break
                if state_next[action][0][0] != -1:
                    break
        return action

    def choose_actions_K1(self, alloc_flag):
        """K=1 baseline: take action 0 if allocatable, else ``(-1, False)``."""
        if not alloc_flag[0]:
            return -1, False
        return 0, True

    def choose_actions_KFF(self, state_next):
        """First-fit baseline: index of the first valid candidate.

        NOTE(review): implicitly returns None when every candidate is
        invalid — confirm callers handle that case.
        """
        for i in range(len(state_next)):
            if state_next[i][0][0] != -1:
                return i

    def choose_action_random(self, alloc_flag):
        """Uniformly random allocatable action; ``(-1, False)`` if none exists."""
        if True not in alloc_flag:
            return -1, False
        while True:
            action = np.random.randint(0, self.n_actions)
            if alloc_flag[action]:
                break
        return action, True

    def _replace_target_params(self):
        """Copy the online network's weights into the target network.

        NOTE(review): neither network is created in this file — presumably
        attached elsewhere; confirm before use.
        """
        self.target_net.load_state_dict(self.eval_net.state_dict())

    def learn(self, adj):
        """One tabular Q-learning update over a random minibatch.

        Samples ``batch_size`` transitions *with replacement* (matching the
        original ``np.random.choice``) and applies
        ``Q(s, a) = r + gamma * max_a' Q(s', a')``.

        ``adj`` is unused in this tabular path; kept for interface
        compatibility with a network-based learner.

        Bug fixes:
        - ``np.stack`` now receives list comprehensions; recent NumPy rejects
          generators outright.
        - ``epsilon`` is annealed toward ``epsilon_max`` when an increment was
          configured (the stored increment was never applied before, leaving
          epsilon stuck at 0).
        """
        sample_idx = np.random.randint(0, len(self.memory), size=self.batch_size)
        batch_memory = [self.memory[i] for i in sample_idx]

        state = np.stack([p['state'] for p in batch_memory], axis=0)
        reward = np.stack([p['reward'] for p in batch_memory], axis=0).reshape(self.batch_size, 1)
        action = np.stack([int(p['action']) for p in batch_memory], axis=0).reshape(self.batch_size, 1)
        state_next = np.stack([p['state_next'] for p in batch_memory], axis=0)

        for i in range(self.batch_size):
            s_key = str(state[i])
            sn_key = str(state_next[i])
            if s_key not in self.q_table:
                self.q_table[s_key] = np.zeros(self.n_actions)
            if sn_key not in self.q_table:
                self.q_table[sn_key] = np.zeros(self.n_actions)
            a = int(action[i][0])
            self.q_table[s_key][a] = reward[i][0] + self.gamma * self.q_table[sn_key].max()

        if self.epsilon_increment is not None:
            self.epsilon = min(self.epsilon + self.epsilon_increment, self.epsilon_max)
        self.learn_step_counter += 1
