import numpy as np
from environment import Env
from requirement import get_requirement
from dqn import DQN, ReplayMemory, Transition
import torch
import torch.optim as optim
import torch.nn.functional as F
import random
import copy

# Compute device: prefer the first CUDA GPU, but fall back to CPU so the
# module is importable/runnable on hosts without CUDA (the previous
# hard-coded 'cuda:0' fails at first .to(device) on CPU-only machines).
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Replay-memory minibatch size used by QLearningAgent.learn().
BATCH_SIZE = 32

class QLearningAgent:
    """Multi-head (bootstrapped) Double-DQN agent over a per-state action dictionary.

    `model` is the online network; `model_` is a deep-copied target network
    kept in eval mode and refreshed externally via `save()` + `load()`.
    Each network emits `n_heads` concatenated Q-value heads of equal width.
    """
    def __init__(self, actions, discount_factor=0.9, epsilon=0.8, model=None, action_code_qos=None, n_actions=0):
        # actions: dict mapping a state code -> list of candidate actions.
        self.actions_dict = actions
        self.discount_factor = discount_factor
        # NOTE(review): epsilon is stored and toggled by callers but never
        # read inside this class -- no epsilon-greedy branch exists here.
        self.epsilon = epsilon
        self.model = model.to(device)
        # Target network: independent deep copy of the online model.
        self.model_ = copy.deepcopy(model).to(device)
        self.model_.eval()
        self.action_code_qos = action_code_qos # service: [code_state, *qos]
        # Maximum number of actions any state offers; used to pad inputs.
        self.n_actions = n_actions
        # Largest state code; presumably the terminal state -- TODO confirm.
        self.end = max(actions.keys())
    # Sample a minibatch of <s, a, r, s'> transitions and take one SGD step.
    def learn(self, state, index, action, reward, next_state, memory, optimizer, k, n_heads):
        """Double-DQN update over a replay minibatch.

        The online model selects argmax next-actions per head; the target
        model evaluates them. Only the heads enabled in each transition's
        bootstrap mask contribute to the loss. The `state`/`index`/`action`/
        `reward`/`next_state`/`k` parameters are unused here (everything
        comes from `memory`); they are kept for the caller's signature.
        No-op until the memory holds at least BATCH_SIZE transitions.
        """
        def get_max(outputs, n_actions):
            # Row-wise argmax restricted to the first n_actions[i] columns:
            # padded/invalid action slots are masked to -10000 beforehand.
            row, col = outputs.shape
            mask = torch.ones((row, 1)) * torch.arange(col)
            background = torch.full((row, col), -10000, dtype=torch.float32)
            res = torch.where(mask < n_actions, outputs, background)
            return res.argmax(dim=1).reshape((-1, 1))
        if len(memory) < BATCH_SIZE:
            return
        transitions = memory.sample(BATCH_SIZE)
        # Transpose list-of-Transition into a Transition of parallel lists.
        batch = Transition(*zip(*transitions))
        batch_done = torch.Tensor(batch.done)
        # Per-sample list of head indices whose bootstrap-mask bit is '1'.
        batch_mask = list(map(lambda x: list(filter(lambda y: x[y]=='1', range(n_heads))), batch.mask))
        
        # Online model picks the best next action per head (Double DQN).
        inputs = torch.cat(list(map(lambda x: self.get_inputs(x), batch.next_state)))
        self.model.eval()
        outputs = self.model(inputs.to(device)).cpu()
        len_action = outputs.shape[1] // n_heads
        # Column vector of valid-action counts for each next state.
        n_actions = torch.Tensor(list(map(lambda x: [self.get_n_actions(x)], batch.next_state)))
        
        # Per-head argmax, then offset into the flat concatenated-head layout.
        next_state_arg = list(map(lambda x: get_max(outputs[:, x*len_action:(x+1)*len_action], n_actions), range(n_heads)))
        next_state_arg = torch.cat(next_state_arg, dim=1) + torch.arange(0, n_heads*len_action, len_action).reshape(1, -1)
        
        # Target model evaluates the selected actions; keep only masked heads.
        inputs = torch.cat(list(map(lambda x: self.get_inputs(x), batch.next_state)))
        self.model_.eval()
        outputs = self.model_(inputs.to(device)).cpu()
        next_state_value = torch.cat(list(map(lambda x: outputs[x][next_state_arg[x]][batch_mask[x]].reshape(-1, 1), range(BATCH_SIZE))))
       
        # Repeat each sample's reward/done flag once per active head so the
        # shapes line up with next_state_value.
        cur_reward = torch.Tensor(batch.reward)
        cur_reward = torch.cat(list(map(lambda x: torch.tensor([cur_reward[x]]*len(batch_mask[x])).reshape(-1, 1), range(BATCH_SIZE))))
        comp = torch.zeros_like(next_state_value)
        mask_mod = torch.cat(list(map(lambda x: torch.tensor([batch_done[x]]*len(batch_mask[x])).reshape(-1, 1), range(BATCH_SIZE))))
        # Terminal transitions bootstrap from zero.
        next_state_value = torch.where(mask_mod == 1, comp, next_state_value)
        expected_state_action_value = (next_state_value * self.discount_factor + cur_reward)
        
        # Q(s, a) from the online model for the taken actions, masked heads only.
        self.model.train()
        inputs = torch.cat(list(map(lambda x: self.get_inputs(x), batch.state)))
        outputs = self.model(inputs.to(device)).cpu().reshape(BATCH_SIZE, n_heads, -1)
        index_batch = torch.LongTensor(batch.index).detach()
        state_action_values = torch.cat(list(map(lambda x: outputs[x][batch_mask[x]].gather(1, torch.tensor([index_batch[x]]*len(batch_mask[x])).reshape(-1, 1)), range(BATCH_SIZE))))
        
        # Huber loss between predicted and bootstrapped target Q-values.
        loss = F.smooth_l1_loss(state_action_values, expected_state_action_value.detach())
        
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    
    def get_n_actions(self, state):
        """Return the number of actions available in `state` (keyed by state[0])."""
        actions = self.actions_dict[state[0]]
        return len(actions)

    def get_inputs(self, state):
        """Build a (1, D) model input: state features followed by each
        candidate action's [code, *qos] triple, padded with random filler
        up to `self.n_actions` action slots."""
        actions = self.actions_dict[state[0]]
        len_actions = len(actions)
        values = sum(map(lambda x: self.action_code_qos[x], actions), [])
        pad = []
        for i in range(self.n_actions - len_actions):
            # Random padding for unused slots -- presumably to keep the net
            # from keying on constant pad values; TODO confirm intent.
            pad += [0, random.random(), random.random()]
        values = list(state) + values + pad
        inputs = torch.Tensor([values])
        return inputs

    # Select the greedy action for head k using the target network.
    def get_action(self, state, k, n_heads):
        """Return (index, action): argmax of head k's Q-values over the
        actions valid in `state`, evaluated with the target network."""
        actions = self.actions_dict[state[0]]
        len_actions = len(actions)

        values = sum(map(lambda x: self.action_code_qos[x], actions), [])
        pad = sum([[0, random.random(), random.random()] for _ in range(self.n_actions - len_actions)], [])
        values = list(state) + values + pad
        inputs = torch.Tensor([values])
        self.model_.eval()
        outputs = self.model_(inputs.to(device)).cpu()
        len_action = outputs.shape[1] // n_heads
        # Slice out head k, then argmax over the valid (unpadded) actions only.
        outputs = outputs[:, k*len_action:(k+1)*len_action]
        index = outputs[0, :len_actions].argmax()
        action = actions[index]
        return index, action
    
    def save(self):
        """Persist the online model's weights to disk."""
        torch.save(self.model.state_dict(), 'dqn_param.pth')
    
    def load(self):
        """Load the saved weights into the target network (target sync)."""
        self.model_.load_state_dict(torch.load('dqn_param.pth'))
        self.model_.eval()


def get_action(agent, state, k, n_heads):
    """Sample an action stochastically from head k of the agent's online model.

    Builds the same padded input as QLearningAgent.get_inputs (but with
    zero padding, so inference inputs are deterministic), standardizes
    head k's Q-values, softmaxes them into sampling weights, and draws
    one action with `random.choices`.

    Fix: `outputs.std()` is NaN for a single action (unbiased std of one
    element) and 0 when all Q-values are equal; either way the softmax
    weights were corrupted. Fall back to a uniform draw in those cases.
    """
    agent.model.eval()
    actions = agent.actions_dict[state[0]]
    len_actions = len(actions)
    values = sum(map(lambda x: agent.action_code_qos[x], actions), [])
    values = list(state) + values + [0] * (3 * agent.n_actions - 3 * len_actions)
    inputs = torch.Tensor([values])
    outputs = agent.model(inputs.to(device)).cpu()
    len_action = outputs.shape[1] // n_heads
    outputs = outputs[0, k * len_action:k * len_action + len_actions]
    std = outputs.std()
    if len_actions > 1 and torch.isfinite(std) and std > 0:
        # Standardize before softmax so the temperature is scale-invariant.
        weights = torch.softmax((outputs - outputs.mean()) / std, 0)
    else:
        # Degenerate case (one action, or all Q-values equal): sample uniformly.
        weights = torch.full((len_actions,), 1.0 / len_actions)
    index = random.choices(range(len_actions), weights)[0] % len_actions
    action = actions[index]
    return index, action

def run(code_service_dict, service_code_dict, qos_needed, service_qos_dict, weights = [0.5, 0.5]):
    """Train a bootstrapped multi-head DQN on the service-composition
    environment, then sample SAMPLE rollouts and return the deduplicated,
    QoS-feasible compositions sorted by combined QoS (descending).

    NOTE(review): `weights` has a mutable default; it is only read here,
    and the signature is kept unchanged for callers.
    """
    EPISODE = 5000
    SAMPLE = 10000
    env = Env(code_service_dict, service_code_dict, qos_needed, service_qos_dict)
    actions = env.get_actions()
    action_code_qos = env.get_action_code_qos()
    n_actions = max(map(len, actions.values()))
    
    n_heads = 100
    model = DQN(n_actions, n_heads)
    optimizer = optim.RMSprop(model.parameters())
    memory = ReplayMemory(1024)
    agent = QLearningAgent(actions=actions, discount_factor=0.6, epsilon=0.4, model=model, action_code_qos=action_code_qos, n_actions=n_actions)
    p = 0.3  # probability that each head's bootstrap-mask bit is on
    # training
    for episode in range(EPISODE):
        state = env.reset()
        k = random.randrange(n_heads)  # head used for acting this episode
        while True:
            # Agent picks a greedy action from head k of the target net.
            index, action = agent.get_action(state, k, n_heads)
            next_state, reward, done = env.step(state, action, weights)
            
            # real: target network's best Q-value for this state/head.
            inputs = torch.Tensor(agent.get_inputs(state)).reshape(1, -1)
            agent.model_.eval()
            outputs = agent.model_(inputs.to(device)).cpu()
            len_action = outputs.shape[1] // n_heads
            outputs = outputs[:, k*len_action:(k+1)*len_action]

            n_actions = agent.get_n_actions(state)
            real = max(outputs[0][:n_actions]).data
            
            # pred: online network's Q-value for the chosen action.
            inputs = torch.Tensor(agent.get_inputs(state)).reshape(1, -1)
            agent.model.eval()
            outputs = agent.model(inputs.to(device)).cpu()
            len_action = outputs.shape[1] // n_heads
            outputs = outputs[:, k*len_action:(k+1)*len_action]
            pred = outputs[0][index].data
            
            # TD-error-style priority weight for the replay sample.
            weight = abs(real - pred)+0.1

            # Bernoulli(p) bootstrap mask over heads, encoded as a '0'/'1' string.
            m = [random.choices([0, 1], [1-p, p])[0] for _ in range(n_heads)]
            m = ''.join(map(str, m))
            memory.push(weight, state, [index], [action], next_state, [reward], m, [done])
            state = next_state
            if done:
                break
        # Update the Q-network: one replay-minibatch step per episode.
        agent.learn(state, index, action, reward, next_state, memory, optimizer, k, n_heads)

        # Periodic evaluation: majority vote across all heads, then sync target.
        if episode % 100 == 99:
            print(episode//100+1, end=':')
                
            agent.epsilon = 0
            state = env.reset()
            actions = []
            rewards = 0
            while True:
                candidates = {}
                for k in range(n_heads):
                    index, action = agent.get_action(state, k, n_heads)
                    candidates[action] = candidates.get(action, 0) + 1
                # BUGFIX: compute the majority-vote action first, THEN log it.
                # Previously the last head's action was appended before the
                # vote, so the printed trace did not match the executed action.
                action = max(candidates.keys(), key=lambda x: candidates[x])
                actions.append(action)
                next_state, reward, done = env.step(state, action, weights)
                rewards += reward
                state = next_state
                if done:
                    break
            print(actions, state, rewards)
            # Sync the target network with the online one via a disk round-trip.
            agent.save()
            agent.load()
            agent.epsilon = 0.4
    # inference
    # NOTE(review): `k` here retains the value left by the last loop
    # (n_heads-1 after an evaluation pass), so sampling always reads the
    # last head -- confirm this is intended rather than an oversight.
    res = []
    for loop in range(SAMPLE):
        test_state = tuple([0] + qos_needed)
        actions = []
        while True:
            index, action = get_action(agent, test_state, k, n_heads)
            actions.append(action)
            next_test_state, reward, done = env.step(test_state, action, weights)
            test_state = next_test_state
            if done:
                break
        res.append((tuple(sorted(actions)), test_state))
    res = set(res)
    # Keep only rollouts meeting the QoS feasibility thresholds.
    res = list(filter(lambda x: x[1][1] >= -0.001 and x[1][2] >= 0, res))
    res.sort(key=lambda x: x[1][1]+x[1][2], reverse=True)
    return res
    