import numpy as np
import random
import math

import torch
import torch.nn as nn
import torch.nn.functional as F

# Default compute device: first CUDA GPU when available, otherwise the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

class Model(nn.Module):
    """Transformer-based policy network over a 17x17 feature grid.

    A 1x1 convolution embeds per-cell features, a learned positional
    embedding is added, a Transformer encoder mixes the grid cells, and
    mean pooling followed by a linear head produces per-action scores.
    """

    def __init__(self, features_size, hidden_size, action_size) -> None:
        super(Model, self).__init__()
        # 1x1 conv acts as a per-position linear embedding:
        # [B, features_size, 17, 17] -> [B, hidden_size, 17, 17]
        self.embed = nn.Conv2d(features_size, hidden_size, kernel_size=1, stride=1)
        self.seq_len = 17 * 17  # number of grid cells fed to the transformer
        self.pos_embed = nn.Parameter(torch.randn(1, self.seq_len, hidden_size))
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=hidden_size,
            nhead=8,  # number of attention heads (hidden_size must be divisible by 8)
            dim_feedforward=hidden_size,
            dropout=0.1,
            activation='gelu',
            # BUGFIX: forward() feeds [B, seq, hidden]; the default
            # batch_first=False would attend across the batch dimension.
            batch_first=True,
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=3)
        self.fc = nn.Linear(hidden_size, action_size)

    def forward(self, observation):
        """Map observations [B, features_size, 17, 17] to scores [B, action_size]."""
        x = self.embed(observation)        # [B, hidden_size, 17, 17]
        x = x.flatten(2).permute(0, 2, 1)  # [B, seq_len, hidden_size]
        x = x + self.pos_embed             # broadcast learned positional embedding
        x = self.transformer(x)            # [B, seq_len, hidden_size]
        x = x.mean(dim=1)                  # pool over positions -> [B, hidden_size]
        return self.fc(x)                  # [B, action_size]

class Memory:
    """Fixed-capacity on-policy buffer holding `group_size` complete episodes.

    Episodes are stored row by row; `mask` marks the valid steps of each row.
    Per-episode advantages are the normalized total episode rewards.
    """

    def __init__(self, group_size=12, max_steps=401, obs_shape=(12, 17, 17)):
        # Generalized: the original hard-coded 12 episodes of up to 401 steps
        # with (12, 17, 17) observations; defaults keep backward compatibility.
        self.group_size = group_size
        self.max_steps = max_steps
        self.observation = np.zeros((group_size, max_steps, *obs_shape), dtype=np.float32)
        self.actions = np.zeros((group_size, max_steps), dtype=np.int32)
        self.rewards = np.zeros((group_size, max_steps), dtype=np.float32)
        self.mask = np.zeros((group_size, max_steps), dtype=np.float32)
        self.round_id = 0  # index of the episode currently being filled
        self.step_id = 0   # index of the next step within that episode

    def push(self, observation, action, reward, done=False):
        """Append one transition; `done=True` closes the current episode."""
        self.observation[self.round_id, self.step_id, :] = observation
        self.actions[self.round_id, self.step_id] = action
        self.rewards[self.round_id, self.step_id] = reward
        self.mask[self.round_id, self.step_id] = 1.0
        if done:
            self.round_id += 1
            self.step_id = 0
        else:
            self.step_id += 1

    def _advantages(self):
        """Normalized total episode rewards, shape [group_size]."""
        rewards = self.rewards.sum(axis=1)
        # 0.001 guards against zero std when all episodes score the same.
        return (rewards - rewards.mean()) / (rewards.std() + 0.001)

    def load(self):
        """Return padded per-episode batches trimmed to the longest episode.

        Returns (observation, actions, advantages, mask) where the first,
        second and fourth are trimmed to [group_size, max_len, ...].
        """
        advantages = self._advantages()
        max_len = int(self.mask.sum(axis=1).max())
        return (self.observation[:, :max_len], self.actions[:, :max_len],
                advantages, self.mask[:, :max_len])

    def load2(self):
        """Return all valid steps flattened across episodes.

        Each step carries its episode's advantage, so trajectory boundaries
        are not needed by the consumer.
        """
        advantages = self._advantages()
        observations, actions, _advantages = [], [], []
        for i in range(self.group_size):
            seq_len = int(self.mask[i].sum())
            observations.append(self.observation[i, :seq_len])
            actions.append(self.actions[i, :seq_len])
            _advantages.append(np.full(seq_len, advantages[i], dtype=np.float32))
        return (np.concatenate(observations), np.concatenate(actions),
                np.concatenate(_advantages))

    def clear(self):
        """Reset write cursors and invalidate stored data."""
        self.round_id = 0
        self.step_id = 0
        self.mask[:] = 0
        # BUGFIX: rewards must be zeroed too -- load()/load2() sum whole rows,
        # so stale rewards from longer past episodes would corrupt advantages.
        self.rewards[:] = 0

    def full(self):
        """True once `group_size` episodes have been completed."""
        return self.round_id >= self.group_size

class Framework(object):
    """REINFORCE-style trainer wrapping the policy `Model` and a `Memory` buffer."""

    def __init__(self, ) -> None:
        self.lr = 1e-3
        features_size, action_size = 12, 6
        self.actor = Model(features_size, 64, action_size).to(device)
        self.optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr)
        self.memory = Memory()

    def push(self, observation, action, reward, done=False):
        """Forward one transition into the episode memory."""
        self.memory.push(observation, action, reward, done)

    def prepare_inputs(self, observation, actions, advantages, mask):
        """Convert padded per-trajectory numpy batches to tensors on `device`."""
        observation = torch.as_tensor(observation, dtype=torch.float32, device=device)
        actions = torch.as_tensor(actions, dtype=torch.long, device=device)
        advantages = torch.as_tensor(advantages, dtype=torch.float32, device=device)
        mask = torch.as_tensor(mask, dtype=torch.float32, device=device)
        return observation, actions, advantages, mask

    def prepare_inputs2(self, observation, actions, advantages):
        """Convert flattened (trajectory-agnostic) numpy batches to tensors on `device`."""
        observation = torch.as_tensor(observation, dtype=torch.float32, device=device)
        actions = torch.as_tensor(actions, dtype=torch.long, device=device)
        advantages = torch.as_tensor(advantages, dtype=torch.float32, device=device)
        return observation, actions, advantages

    def train(self):
        """Run one policy-gradient update; no-op until the memory is full."""
        if not self.memory.full():
            return
        self.actor.train()

        # Trajectory-agnostic update: every step is weighted by the advantage
        # of the episode it came from.
        obs, act, adv = self.memory.load2()
        obs, act, adv = self.prepare_inputs2(obs, act, adv)

        # BUGFIX: the actor emits raw logits; normalize to log-probabilities
        # before gathering, otherwise the "log-prob" gradient is wrong.
        act_logp = F.log_softmax(self.actor(obs), dim=-1)  # [B, action_size]
        # BUGFIX: squeeze keeps this [B]; the previous [B, 1] result broadcast
        # against adv [B] into a [B, B] loss matrix.
        per_act_logp = torch.gather(act_logp, dim=1, index=act.reshape(-1, 1)).squeeze(1)
        # exp(x - x.detach()) equals 1 in value but d/dx == 1, so this surrogate's
        # gradient is the REINFORCE gradient adv * d(log pi)/d(theta).
        loss = (-torch.exp(per_act_logp - per_act_logp.detach()) * adv).mean()

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.memory.clear()

        print('loss: ', loss.detach().item())

    def predict(self, observation):
        """Return the policy scores for a single observation as a Python list."""
        self.actor.eval()
        observation = torch.as_tensor(observation, dtype=torch.float32, device=device).unsqueeze(0)

        with torch.no_grad():
            action = self.actor(observation).squeeze(0)

        return action.cpu().tolist()

    def save_model(self, model_path):
        """Save a CPU copy of the actor's weights to `model_path`."""
        state_dict = self.actor.state_dict()
        for key in state_dict:
            state_dict[key] = state_dict[key].data.cpu()

        torch.save(state_dict, model_path)

    def load_model(self, model_path):
        """Load actor weights; map_location makes GPU checkpoints loadable on CPU."""
        state_dict = torch.load(model_path, map_location=device)
        self.actor.load_state_dict(state_dict)
