import itertools

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal


class TransitionDistribution(nn.Module):
    """MLP head that maps an input vector to a 1-D Gaussian distribution.

    The network is ``num_layers + 1`` hidden LeakyReLU layers followed by two
    separate linear heads for the mean and the (sigmoid-squashed) scale.

    :param input_dim: size of the input feature vector
    :param hidden_dim: width of every hidden layer
    :param output_dim: size of the shared feature fed to the mean/std heads
    :param num_layers: number of *additional* hidden layers after the first
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super(TransitionDistribution, self).__init__()
        stack = [nn.Linear(input_dim, hidden_dim), nn.LeakyReLU()]
        for _ in range(num_layers):
            stack.extend([nn.Linear(hidden_dim, hidden_dim), nn.LeakyReLU()])
        stack.append(nn.Linear(hidden_dim, output_dim))
        self.net = nn.Sequential(*stack)
        self.mean = nn.Linear(output_dim, 1)
        self.std = nn.Linear(output_dim, 1)

    def forward(self, data):
        """Return a ``Normal`` over one scalar per input row.

        The scale is passed through a sigmoid, so it always lies in (0, 1).
        """
        features = self.net(data)
        loc = self.mean(features)
        scale = torch.sigmoid(self.std(features))
        return Normal(loc, scale=scale)

class DynamicModel(nn.Module):
    """Causal dynamics model for model-based RL.

    One :class:`TransitionDistribution` per state dimension, each fed the
    (state, action) vector masked by a column of a causal graph, plus a
    separate MLP that predicts the reward.

    The causal graph is assumed to be a (state_dim + action_dim, state_dim)
    mask whose column ``i`` gates the inputs of the i-th transition network
    — TODO confirm against the caller that supplies it.
    """

    def __init__(self, state_dim, action_dim, learning_rate=1e-3, device=None):
        super(DynamicModel, self).__init__()
        self.state_dim = state_dim
        self.action_dim = action_dim
        # One independent transition distribution per state dimension.
        self.net = nn.ModuleList()
        for _ in range(state_dim):
            self.net.append(TransitionDistribution(state_dim + action_dim, 256, 10, 3).to(device))

        self.reward_net = nn.Sequential(
            nn.Linear(state_dim + action_dim, 64),
            nn.LeakyReLU(),
            nn.Linear(64, 1)
        )
        self.device = device
        # BUG FIX: both optimizers previously wrapped *all* parameters, so every
        # parameter was stepped twice (and weight-decayed twice) per batch.
        # Each optimizer now owns only its own sub-network.
        self.dynamic_optimizer = torch.optim.Adam(self.net.parameters(), lr=learning_rate, weight_decay=0.01)
        self.reward_optimizer = torch.optim.Adam(self.reward_net.parameters(), lr=learning_rate, weight_decay=0.01)
        self.causal_graph = None
        # Early-stopping bookkeeping: (best epoch, best holdout loss) and the
        # number of consecutive epochs without improvement.  BUG FIX: the
        # counter was previously read in _save_best before ever being assigned.
        self._snapshots = (None, 1e10)
        self._epoch_since_last_update = 0

    def forward(self, state_action, causal_graph=None):
        """Predict reward and next state for a batch of (state, action) pairs.

        :param state_action: tensor of shape (batch, state_dim + action_dim)
        :param causal_graph: mask with one column per state dimension; numpy
            arrays are converted on the fly.
        :return: (reward, next_state) with shapes (batch, 1) and (batch, state_dim)
        :raises ValueError: if ``causal_graph`` is None
        """
        # BUG FIX: was `assert ..., print(...)` — stripped under -O and the
        # message expression was a print() returning None.
        if causal_graph is None:
            raise ValueError("causal_graph must not be None")
        if isinstance(causal_graph, np.ndarray):
            causal_graph = torch.from_numpy(causal_graph).float().to(state_action.device)
        columns = []
        for i in range(self.state_dim):
            # Column i of the graph zeroes out the inputs that do not
            # causally influence state dimension i.
            masked = torch.mul(state_action, causal_graph[:, i])
            dist = self.net[i](masked)
            # rsample keeps the reparameterized gradient path through the sample.
            columns.append(dist.rsample().flatten())
        next_state = torch.stack(columns, dim=1)

        reward = self.reward_net(state_action)

        return reward, next_state

    def loss(self, pre_state, real_state, pre_reward, real_reward):
        """Sum of MSE losses on state prediction and reward prediction.

        :param pre_state: predicted difference between consecutive states
        :param real_state: true difference between consecutive states
        :param pre_reward: predicted reward at the next step
        :param real_reward: true reward at the next step
        :return: scalar total loss
        """
        return F.mse_loss(pre_state, real_state) + F.mse_loss(pre_reward, real_reward)

    def update(self, loss):
        """One gradient step on both optimizers for an externally computed loss."""
        # BUG FIX: previously referenced a nonexistent ``self.optimizer``,
        # which raised AttributeError on every call.
        self.dynamic_optimizer.zero_grad()
        self.reward_optimizer.zero_grad()
        loss.backward()
        self.dynamic_optimizer.step()
        self.reward_optimizer.step()

    def learn(self, state, action, next_state, reward, batch_size=128, holdout_ratio=0.1, max_iter=20,
              causal_graph=None):
        """Fit the dynamics and reward networks on a dataset of transitions.

        Shuffles the data, splits off a holdout set, and trains in real
        mini-batches until the holdout loss stops improving for 5 consecutive
        epochs (see ``_save_best``) or ``max_iter`` epochs have run.

        :param state, action, next_state, reward: numpy arrays, first axis = sample
        :param causal_graph: numpy causal mask forwarded to :meth:`forward`
        """
        if self.causal_graph is None and causal_graph is not None:
            self.causal_graph = torch.from_numpy(causal_graph).to(self.device)

        # Shuffle the dataset.
        permutation = np.random.permutation(state.shape[0])
        state, action, next_state, reward = state[permutation], action[permutation], next_state[permutation], reward[
            permutation]
        # Holdout (validation) set size.
        num_holdout = int(state.shape[0] * holdout_ratio)

        def _to_tensor(arr):
            # Move a numpy array onto the model device as float32.
            return torch.from_numpy(arr).float().to(self.device)

        # Training split.
        train_state = _to_tensor(state[num_holdout:])
        train_action = _to_tensor(action[num_holdout:])
        train_next_state = _to_tensor(next_state[num_holdout:])
        train_reward = _to_tensor(reward[num_holdout:])
        # Holdout split (state/action pre-concatenated once).
        holdout_sa = torch.cat((_to_tensor(state[:num_holdout]), _to_tensor(action[:num_holdout])), dim=-1)
        holdout_next_state = _to_tensor(next_state[:num_holdout])
        holdout_reward = _to_tensor(reward[:num_holdout])

        # Track the best holdout loss seen during this call.
        self._snapshots = (None, 1e10)
        self._epoch_since_last_update = 0

        for epoch in itertools.count():
            # BUG FIX: the original loop ignored its batch index and trained on
            # the *entire* training set every iteration; slice real mini-batches.
            for start in range(0, train_state.shape[0], batch_size):
                end = start + batch_size
                batch_sa = torch.cat((train_state[start:end], train_action[start:end]), dim=-1)
                pred_reward, pred_next_state = self.forward(batch_sa, causal_graph)
                reward_loss = F.mse_loss(pred_reward, train_reward[start:end])
                state_loss = F.mse_loss(pred_next_state, train_next_state[start:end])
                self.dynamic_optimizer.zero_grad()
                self.reward_optimizer.zero_grad()
                # The two losses touch disjoint parameter sets, so a single
                # combined backward pass is equivalent and cheaper.
                (reward_loss + state_loss).backward()
                self.dynamic_optimizer.step()
                self.reward_optimizer.step()

            # Evaluate on the holdout set; stop once improvement stalls.
            with torch.no_grad():
                pred_reward, pred_next_state = self.forward(holdout_sa, causal_graph)
                holdout_losses = F.mse_loss(pred_next_state, holdout_next_state) + F.mse_loss(pred_reward, holdout_reward)
                holdout_losses = holdout_losses.cpu()
                if self._save_best(epoch, holdout_losses) or epoch > max_iter:
                    break

    def _save_best(self, epoch, losses, threshold=0.2):
        """Record ``losses`` if it beats the best so far by ``threshold`` (relative).

        :return: True once more than 5 consecutive epochs pass without improvement.
        """
        _, best = self._snapshots
        improvement = (best - losses) / best
        if improvement > threshold:
            self._snapshots = (epoch, losses)
            self._epoch_since_last_update = 0
        else:
            self._epoch_since_last_update += 1
        return self._epoch_since_last_update > 5

    def predict(self, state, action, causal_graph=None):
        """Predict (reward, next_state) for tensor or numpy state/action inputs."""
        # BUG FIX: numpy inputs were previously passed to torch.cat *before*
        # the numpy check, which raised; convert each argument first.
        if isinstance(state, np.ndarray):
            state = torch.from_numpy(state)
        if isinstance(action, np.ndarray):
            action = torch.from_numpy(action)
        inputs = torch.cat((state, action), dim=-1).float().to(self.device)
        return self.forward(inputs, causal_graph)
