import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
from logger import Logger
from config.config import config


class ReplayMemory(object):
    """Fixed-capacity ring buffer of flattened (state, action, reward, next_state) rows.

    Each stored transition is a single 1-D numpy array laid out as
    [state | action | reward | next_state], so downstream code can slice
    columns by `states_dim` / `action_dim`.
    """

    def __init__(self, capacity, N_STATES, N_ACTIONS):
        self.states_dim = N_STATES    # width of the state segment in a stored row
        self.action_dim = N_ACTIONS   # width of the action segment in a stored row
        self.capacity = capacity
        self.memory = []
        self.position = 0             # next slot to (over)write — ring-buffer cursor

    def push(self, state, action, reward, next_state):
        """Append a transition; once full, overwrite the oldest entry."""
        if len(self.memory) < self.capacity:
            self.memory.append(None)
        # Flatten everything into one row: [state | action | reward | next_state].
        transition = np.append(state, action)
        transition = np.append(transition, reward)
        transition = np.append(transition, next_state)
        self.memory[self.position] = transition
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Return `batch_size` transitions sampled without replacement.

        Raises ValueError (from random.sample) if fewer than `batch_size`
        transitions are stored — callers must pre-fill the memory.
        """
        return random.sample(self.memory, batch_size)

    def IsInMemory(self, state, action):
        """Return True if the exact (state, action) prefix already exists in memory."""
        if len(self.memory) != 0:
            MemoryList = np.array(self.memory)[:, :self.states_dim + self.action_dim].tolist()
            transition = np.append(state, action).tolist()
            if transition in MemoryList:
                return True
        return False

    def __len__(self):
        # Fixed: was `__len` (name-mangled, not the dunder), so len(obj) raised TypeError.
        return len(self.memory)


class Net(nn.Module):
    """Three-layer MLP mapping a state vector to one Q-value per action.

    forward() only consumes the first `states_dim` columns of its input, so
    rows wider than the state (e.g. full stored transitions) are accepted.
    """

    def __init__(self, N_STATES, N_ACTIONS):
        super(Net, self).__init__()
        self.states_dim = N_STATES
        self.action_dim = N_ACTIONS
        # state -> 512 -> 256 -> one output per action
        self.fc1 = nn.Linear(self.states_dim, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, self.action_dim)

    def forward(self, x):
        state_part = x[:, :self.states_dim]  # ignore any trailing columns
        hidden = F.relu(self.fc1(state_part))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)


class DQN(object):
    """DQN agent: eval/target networks, replay memory, and an epsilon-greedy
    policy whose exploration branch is either uniformly random or a
    deterministic "pseudorandom" register-bit sweep driven by `random_time`.
    """

    def __init__(self, N_STATES, N_ACTIONS):
        cf = config()
        self.loss_log = Logger(cf.getConfig('path', 'log_path') + 'loss.log')
        self.learn_step_counter = 0  # counts learn() calls; drives target-net sync
        self.EvalNet, self.TargetNet = Net(N_STATES, N_ACTIONS), Net(N_STATES, N_ACTIONS)
        self.memory = ReplayMemory(cf.getConfig('dqn', 'MEMORY_CAPACITY', 'int'), N_STATES, N_ACTIONS)
        self.EPSILON = cf.getConfig('dqn', 'EPSILON', 'float')
        self.GAMMA = cf.getConfig('dqn', 'GAMMA', 'float')
        self.SAMPLE_BATCH_SIZE = cf.getConfig('dqn', 'SAMPLE_BATCH_SIZE', 'int')
        self.TARGET_REPLACE_ITER = cf.getConfig('dqn', 'TARGET_REPLACE_ITER', 'int')
        self.optimizer = torch.optim.Adam(self.EvalNet.parameters(), lr=cf.getConfig('dqn', 'LR', 'float'))
        self.loss_func = nn.MSELoss()
        self.LossList = []
        self.action_dim = N_ACTIONS
        self.states_dim = N_STATES
        # The random action is initialized to all zeros.
        self.last_random_action = [0 for i in range(N_ACTIONS)]
        self.random_time = 0  # how many pseudorandom actions have been issued so far
        self.num = 1          # bits set per pseudorandom action (kept at 1 currently)

    def choose_action(self, x, env=None, epoch=-1):
        """Epsilon-greedy action selection.

        With probability EPSILON, exploit: pick the argmax action of EvalNet.
        Otherwise explore: uniformly random when epoch == -1 (warm-up phase),
        or the deterministic pseudorandom sweep when epoch >= 0.

        Returns (one-hot action list, random_flag) where random_flag marks the
        pseudorandom branch.
        """
        random_flag = False
        if np.random.uniform() < self.EPSILON:
            with torch.no_grad():
                # A single-row batch is sufficient: Net has no batch-dependent
                # layers, so duplicating the state (as before) changed nothing.
                state = torch.unsqueeze(torch.FloatTensor(x), 0)
                action_value = self.EvalNet(state)
                # softmax is monotonic, so the argmax of probs == argmax of Q.
                action_prob = F.softmax(action_value[0], dim=0).tolist()
                action = [0] * self.action_dim
                action[action_prob.index(max(action_prob))] = 1
        else:
            if epoch == -1:  # the first warm-up steps still choose randomly
                action = self.RandomChooseAction()
            elif epoch >= 0:
                action = self.pseudorandom(env, epoch)
                random_flag = True
            else:
                raise ValueError("epoch 设置非法 : %d"%epoch)
        return action, random_flag

    def learn(self):    # 'TODO' change the loss function
        """Run one TD-learning step on a sampled minibatch."""
        # Periodically copy eval-net weights into the target net.
        if self.learn_step_counter % self.TARGET_REPLACE_ITER == 0:
            self.TargetNet.load_state_dict(self.EvalNet.state_dict())
        self.learn_step_counter += 1

        # NOTE(review): raises ValueError if memory holds fewer than
        # SAMPLE_BATCH_SIZE transitions — callers must pre-fill the memory.
        sample_batch = np.array(self.memory.sample(self.SAMPLE_BATCH_SIZE))
        # Stored row layout: [state | action(one-hot) | reward | next_state].
        b_s = torch.FloatTensor(sample_batch[:, :self.states_dim])
        b_a = torch.LongTensor(sample_batch[:, self.states_dim:self.states_dim + self.action_dim].astype(int))
        b_r = torch.FloatTensor(sample_batch[:, self.states_dim + self.action_dim:self.states_dim + self.action_dim + 1])
        b_s_ = torch.FloatTensor(sample_batch[:, self.states_dim + self.action_dim + 1:])

        self.optimizer.zero_grad()
        # Q(s, a): mask Q-values with the one-hot action and sum per row.
        q_eval = self.EvalNet(b_s)
        q_eval = q_eval.mul(b_a)
        q_eval = torch.sum(q_eval, dim=1, keepdim=True)

        q_next = self.TargetNet(b_s_).detach()  # detach from graph, don't backpropagate
        q_next = torch.max(q_next, dim=1)[0].unsqueeze(-1)
        q_target = b_r + self.GAMMA * q_next

        loss = self.loss_func(q_eval, q_target)
        self.LossList.append(loss.item())
        loss.backward()
        self.optimizer.step()
        self.loss_log.write_tensor(q_eval, q_target)
        self.loss_log.write("计算出来的loss:",loss)

    def RandomChooseAction(self):
        """Return a uniformly random one-hot action vector."""
        action = [0] * self.action_dim
        action[random.randrange(self.action_dim)] = 1
        return action

    def pseudorandom(self, env, epoch):
        """Pseudorandom choice: each call targets one register (chosen
        round-robin by `random_time`) and sets bits in it via register_set.
        """
        # Round-robin over registers: index of the register to modify this call.
        reg_changed = self.random_time % env.getRegisterNum()
        # Start offset of that register in the flat action vector.
        start = 0 if reg_changed == 0 else sum(env.register_num[:reg_changed])

        reg_next_action = self.register_set(self.num, env.getRegisterNum(), env.register_num[reg_changed])
        # Zero-pad around the chosen register's bit pattern.
        next_action = [0 for i in range(start)] + reg_next_action + [0 for i in range(sum(env.register_num) - start - env.register_num[reg_changed])]

        # Bump the counter after the action has been built.
        self.random_time += 1
        return next_action

    def register_set(self, num, regs_num, regs_bit_num):
        """Return a bit vector of length `regs_bit_num` with `num` bits set.

        @ num : number of bits to set (must be > 0)
          regs_num : total number of registers being cycled over
          regs_bit_num : bit width of the current register

        Raises ValueError if num <= 0.
        """
        # Fixed: `reg` was read (len(reg)) before it was created -> NameError
        # on every call. The length is the register's bit width.
        reg_len = regs_bit_num
        reg = [0] * reg_len  # random choice starts from an all-zero action
        if num <= 0:
            raise ValueError("num can not <= 0")
        if num == 1:
            # Deterministic sweep: the k-th visit to this register sets bit k
            # (wrapping around), guaranteeing a different bit than last time.
            site = self.random_time // regs_num
            reg[site % reg_len] = 1
        else:
            if num > reg_len:
                # NOTE(review): num % reg_len can be 0, yielding an all-zero
                # vector; preserved as-is — confirm intended.
                num = num % reg_len
            for i in random.sample(range(reg_len), num):
                reg[i] = 1
        return reg

    def store_transition(self, state, action, reward, next_state):
        """Push one transition into replay memory."""
        self.memory.push(state, action, reward, next_state)

    def SaveModel(self, PATH):
        """Serialize the whole eval network (architecture + weights) to PATH."""
        torch.save(self.EvalNet, PATH)

    def LoadModel(self, PATH):
        """Load both networks from PATH; eval net in train mode, target in eval.

        NOTE(review): torch.load unpickles arbitrary objects — only load
        trusted checkpoint files.
        """
        self.EvalNet = torch.load(PATH)
        self.EvalNet.train()
        self.TargetNet = torch.load(PATH)
        self.TargetNet.eval()

