import fitlog
import torch.nn as nn
from .BaseRumorFramework import SubjEnhancedFramework
import gym
from gym import spaces
import random
import numpy as np
import nltk
from nltk import WordNetLemmatizer
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt
from .model_utils import mean_std_groups
import torch.nn.functional as Fnn
from tensorboardX import SummaryWriter


class SeqRumorDetecEnv(gym.Env):
    """Gym environment that streams the reposts of one event per worker.

    state  = (repost_texts, hidden, label, event_type)
    action = Discrete(2): 0 = continue observing, 1 = raise rumor alarm
    reward = {-epsilon, -c_i*N, c_i*M} per the original design note; see the
             NOTE(review) inside ``step`` for what the code actually does.
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 2
    }

    def __init__(self, dataset, num_worker=1,
                 hidden_size=300, epsilon=0.1, N=1.0, M=1.0):
        """
        :param dataset: object exposing ``data`` (per-event dict with keys
            'text' and 'topic_label'), ``data_ID``, ``data_y`` (one-hot
            labels) and ``data_len`` (number of reposts per event).
        :param num_worker: number of parallel episodes sampled per reset.
        :param hidden_size: width of the recurrent hidden state carried
            inside the observation tuple.
        :param epsilon: per-step observation cost (reward is ``-epsilon``).
        :param N: scale of the alarm reward (stored as ``N_reward``).
        :param M: scale of the alarm punishment (stored as ``M_punishment``).
        """
        self.lemmatizer = WordNetLemmatizer()
        self.data = dataset.data
        self.data_ID = dataset.data_ID
        # one-hot labels -> integer class indices
        self.data_y = np.array(dataset.data_y).argmax(axis=1)
        self.data_len = dataset.data_len
        self.event_indexs = list(range(len(self.data_ID)))

        self.num_worker = num_worker
        self.hidden_size = hidden_size
        self.epsilon = epsilon
        self.N_reward = N
        self.M_punishment = M

        self.action_space = spaces.Discrete(2)
        self.observation_space = None

        # Sample the first batch of episodes (this code was previously
        # duplicated verbatim between __init__ and reset()).
        self._sample_episodes()

    def _sample_episodes(self):
        """Sample ``num_worker`` random events and (re)initialize all
        per-episode state: repost texts, labels, step cursors, reward
        coefficients, the hidden state and the composite observation."""
        idxs = random.sample(self.event_indexs, self.num_worker)
        self.event_ID = [self.data_ID[idx] for idx in idxs]
        # Lemmatized repost texts: one list of strings per sampled event.
        self.reposts = [
            [" ".join(self.lemma(self.data[event_ID]['text'][j]))
             for j in range(self.data_len[idx])]
            for idx, event_ID in zip(idxs, self.event_ID)
        ]
        self.seq_length = [len(rep) for rep in self.reposts]
        self.weak_label = [int(self.data_y[idx]) for idx in idxs]
        self.event_type = [self.data[event_ID]['topic_label'] for event_ID in self.event_ID]
        self.cur_idx = [0] * self.num_worker
        # c_arr[i][t] = 1 / (seq_len - t): the coefficient grows as more of
        # the sequence has been consumed (1/L at t=0 up to 1 at the last post).
        self.c_arr = [1.0 / (self.data_len[idx] -
                             np.arange(self.data_len[idx], dtype=np.float32))
                      for idx in idxs]
        self.hidden = torch.zeros([1, self.num_worker, self.hidden_size])
        self.state = ([self.reposts[ID_idx][cur_idx]
                       for ID_idx, cur_idx in enumerate(self.cur_idx)],
                      self.hidden,
                      self.weak_label,
                      self.event_type)

    def lemma(self, word_tokens):
        """POS-tag ``word_tokens`` and lemmatize each token with its WordNet
        POS class (adjective/verb/noun/adverb); other tags pass through."""
        tags = nltk.pos_tag(word_tokens)
        new_words = []
        for word, tag in tags:
            if tag.startswith('J'):
                new_words.append(self.lemmatizer.lemmatize(word, 'a'))
            elif tag.startswith('V'):
                new_words.append(self.lemmatizer.lemmatize(word, 'v'))
            elif tag.startswith('N'):
                new_words.append(self.lemmatizer.lemmatize(word, 'n'))
            elif tag.startswith('R'):
                new_words.append(self.lemmatizer.lemmatize(word, 'r'))
            else:
                new_words.append(word)
        return new_words

    def step(self, action, hidden):
        """Advance every worker by one repost.

        :param action: per-worker actions (0 = continue, 1 = alarm).
        :param hidden: recurrent hidden state produced by the agent; stored
            and echoed back inside the next observation.
        :return: (state, reward, done, info) in the usual gym layout;
            ``reward`` is a numpy array of shape [num_worker].

        step process:
            1. generate rewards based on the action at t-1
            2. jump to the state t
            3. generate text at t
        """
        # Default: every worker pays the per-step observation cost.
        reward = np.ones([self.num_worker])*-1*self.epsilon
        done = [False]*self.num_worker
        self.hidden = hidden
        for i in range(self.num_worker):
            if int(action[i]) == 0:  # keep observing; reward stays -epsilon
                pass
            elif int(action[i]) == self.weak_label[i] and self.weak_label[i]==1:
                # correct alarm: reward scaled by the time coefficient
                reward[i] = self.c_arr[i][self.cur_idx[i]]*self.N_reward
            elif int(action[i]) != self.weak_label[i] and self.weak_label[i]==0:
                # NOTE(review): a false alarm earns the same *positive*
                # c*N reward as a correct one, and M_punishment is never
                # used anywhere — the class docstring suggests this branch
                # should be a (negative) penalty. Preserved as-is; confirm
                # against the intended reward scheme before changing.
                reward[i] = self.c_arr[i][self.cur_idx[i]] * self.N_reward
            else:
                # unreachable for binary actions; kept as a safety net
                print("\n ######Warning!!! action:%d, label:%d"%(int(action[i]),
                                                                 self.weak_label[i]))
            if self.cur_idx[i] >= self.seq_length[i] - 1:
                done[i] = True
            else:
                self.cur_idx[i] += 1
        self.state = ([self.reposts[ID_idx][cur_idx]
                       for ID_idx, cur_idx in enumerate(self.cur_idx)],
                      self.hidden,
                      self.weak_label,
                      self.event_type)
        return self.state, reward, done, {}

    def reset(self):
        """Sample a fresh batch of episodes and return the initial state."""
        self._sample_episodes()
        return self.state

    def render(self, mode='human'):
        # No visual rendering is implemented for this environment.
        return None

    def close(self):
        # Nothing to release.
        return None


class ReinforceRumorDetection(SubjEnhancedFramework):
    """Actor-critic trainer (A2C-style with GAE) for early rumor detection,
    built on top of ``SubjEnhancedFramework``'s encoders and classifier."""

    def __init__(self, sentvec1, sentvec2, prop_model, actor, critic
                 ):
        super(ReinforceRumorDetection, self).__init__(sentvec1, sentvec2, prop_model, actor)
        # value head, moved onto the framework's device
        self.critic = critic.to(self.device)

    def ActAndCritic(self, sents, hiddens):
        """Encode ``sents`` with both sentence encoders, run one recurrent
        propagation step and return (policy logits, state values, detached
        new hidden state)."""
        sent_vecs_1 = self.sent2vec(sents)  # sents: [s_1, s'_1, ..., s''_1 ]
        sent_vecs_2 = self.subj_trainer.sent2vec(sents)
        sent_vecs = sent_vecs_1 + sent_vecs_2  # [num_workers, sent_vec_dim]
        _, df_last_state = self.prop_model.prop_cell(sent_vecs.unsqueeze(1), hiddens.to(self.device))
        preds = self.rdm_cls(df_last_state[-1, :, :])
        values = self.critic(df_last_state[-1, :, :])
        # detach so the environment can carry the hidden state across steps
        # without growing the autograd graph
        return preds, values, df_last_state.detach()

    def sample_trajectories(self, num_worker, rollout_steps, cuda):
        """Placeholder — trajectory sampling is done inline in ACTrain."""
        pass

    def RewardErrorBar(self, total_steps_plt, ep_reward_plt, plot_group_size):
        """Group episode rewards into buckets of ``plot_group_size`` and save
        a mean/stddev error-bar plot to ``ep_reward.png``."""
        x_means, _, y_means, y_stds = mean_std_groups(np.array(total_steps_plt),
                                                      np.array(ep_reward_plt), plot_group_size)
        plt.clf()
        plt.errorbar(x_means, y_means, yerr=y_stds, ecolor='xkcd:blue', fmt='xkcd:black', capsize=5,
                     elinewidth=1.5, mew=1.5, linewidth=1.5)
        plt.title('Training progress (Rumor Detection)')
        plt.xlabel('Total steps')
        plt.ylabel('Episode reward')
        plt.savefig('ep_reward.png', dpi=200)

    def Log_Validation(self, writer, val_rst, te_rst, step_cur):
        """Print and tensorboard-log validation/test metrics.

        :param writer: tensorboard writer
        :type writer: SummaryWriter
        :param val_rst: val_acc, val_loss, val_prec, val_recall, val_f1
        :type val_rst: list
        :param te_rst: te_acc, te_loss, te_prec, te_recall, te_f1
        :type te_rst: list
        :param step_cur: current step
        :type step_cur: int
        """
        print("========step_cur:%3d=============="%step_cur)
        # BUGFIX: %-formatting with multiple fields requires a tuple; the
        # original passed the metric lists directly, raising TypeError.
        print("val_acc:%3.4f, val_loss:%3.4f, val_prec:%3.4f, val_recall:%3.4f, val_f1:%3.4f" % tuple(val_rst))
        print("te_acc:%3.4f, te_loss:%3.4f, te_prec:%3.4f, te_recall:%3.4f, te_f1:%3.4f" % tuple(te_rst))
        # BUGFIX: the original logged val_rst[0]/te_rst[0] (accuracy) under
        # every tag; index each metric per the docstring's ordering.
        writer.add_scalar("val_acc", val_rst[0], step_cur)
        writer.add_scalar("val_prec", val_rst[2], step_cur)
        writer.add_scalar("val_recall", val_rst[3], step_cur)
        writer.add_scalar("val_f1", val_rst[4], step_cur)
        writer.add_scalar("te_acc", te_rst[0], step_cur)
        writer.add_scalar("te_prec", te_rst[2], step_cur)
        writer.add_scalar("te_recall", te_rst[3], step_cur)
        writer.add_scalar("te_f1", te_rst[4], step_cur)

    def ACTrain(self, env, dev_loader, te_loader,  plot_reward, total_step,
                gamma=0.5, lambd=1.0, lr_discount=1.0, plot_group_size=80,
                value_coeff=0.5, entropy_coeff=0.01, grad_norm_limit=40,
                log_dir="../reinforce_logs/", log_suffix="_ReinforceRD", model_file=""
                ):
        """Run actor-critic training in ``env`` for ``total_step`` rollouts.

        :param env: SeqRumorDetecEnv-like environment (needs ``num_worker``,
            ``seq_length``, ``reset``, ``step``, ``close``).
        :param dev_loader: validation loader for the periodic evaluation.
        :param te_loader: test loader for the periodic evaluation.
        :param plot_reward: if True, log rewards and validate every 100 steps.
        :param total_step: number of rollouts (episodes) to run.
        :param gamma: discount factor; ``lambd`` is the GAE lambda.
        :param lr_discount: multiplier on the base 3e-4 learning rate.
        :param value_coeff: weight of the critic (value) loss.
        :param entropy_coeff: weight of the entropy regularizer.
        :param grad_norm_limit: gradient-norm clipping threshold.
        :param model_file: unused here; kept for interface compatibility.
        """
        writer = SummaryWriter(log_dir, filename_suffix=log_suffix)
        if plot_reward:
            total_steps_plt = []
            ep_reward_plt = []
            plt.ion()

        num_worker = env.num_worker
        step_cur = 0
        # BUGFIX: the original bound ``cuda = torch`` (the module object is
        # always truthy), so ``.cuda()`` ran unconditionally and crashed on
        # CPU-only machines. On GPU hosts behavior is unchanged.
        cuda = torch.cuda.is_available()

        optimizer = torch.optim.Adam([
            {'params': self.sent2vec.parameters(), 'lr': 3e-4 * lr_discount / self.grad_accum_cnt},
            {'params': self.prop_model.parameters(), 'lr': 3e-4 * lr_discount / self.grad_accum_cnt},
            {'params': self.rdm_cls.parameters(), 'lr': 3e-4 * lr_discount / self.grad_accum_cnt},
            {'params': self.critic.parameters(), 'lr': 3e-4 * lr_discount / self.grad_accum_cnt},
            {'params': self.subj_trainer.sent2vec.parameters(), 'lr': 3e-4 * lr_discount / self.grad_accum_cnt},
            {'params': self.subj_trainer.senti_cls.parameters(), 'lr': 3e-4 * lr_discount / self.grad_accum_cnt},
        ]
        )
        while step_cur < total_step:
            steps = []
            ep_rewards = np.zeros([num_worker])
            obs = env.reset()
            # roll out only as far as the shortest sampled sequence
            rollout_steps = min(env.seq_length)
            for _ in range(rollout_steps):
                policies, values, hiddens = self.ActAndCritic(obs[0], obs[1])
                probs = policies.softmax(dim=1)
                actions = probs.multinomial(1).data
                obs, rewards, dones, _ = env.step(actions.cpu().numpy(), hiddens.detach())
                ep_rewards += rewards
                masks = (1. - torch.from_numpy(np.array(dones, dtype=np.float32))).unsqueeze(1)
                if cuda: masks = masks.cuda()
                rewards = torch.from_numpy(rewards).float().unsqueeze(1)
                if cuda: rewards = rewards.cuda()
                steps.append((rewards, masks, actions, policies, values))
            step_cur += 1  # total reward of the traces
            if plot_reward:
                total_steps_plt.append(step_cur)
                ep_reward_plt.append(ep_rewards.mean())
                writer.add_scalar("train_reward:", ep_reward_plt[-1], step_cur)
                if step_cur%100 == 0 :
                    val_rst = self.valid(dev_loader, all_metrics=True) # all_metrics: val_acc, val_loss, val_prec, val_recall, val_f1
                    te_rst = self.valid(te_loader, all_metrics=True)
                    self.Log_Validation(writer, val_rst, te_rst, step_cur)
                    print("ep_reward_plt:", ep_reward_plt[-1])
                    self.RewardErrorBar(total_steps_plt, ep_reward_plt, plot_group_size)

            actions, policies, values, returns, advantages = self.process_rollout(steps, gamma, num_worker, lambd, cuda)
            # dim pinned explicitly: implicit-dim softmax is deprecated and
            # dim=1 matches the legacy behavior for 2-D logits
            probs = Fnn.softmax(policies, dim=1)
            log_probs = Fnn.log_softmax(policies, dim=1)
            log_action_probs = log_probs.gather(1, Variable(actions))
            policy_loss = (-log_action_probs * Variable(advantages)).sum()
            value_loss = (.5 * (values - Variable(returns)) ** 2.).sum()
            entropy_loss = (log_probs * probs).sum()

            loss = policy_loss + value_loss * value_coeff + entropy_loss * entropy_coeff
            loss.backward()

            # BUGFIX: clip_grad_norm (no underscore) was deprecated and later
            # removed from torch; the in-place variant is the supported API.
            nn.utils.clip_grad_norm_(self.parameters(), grad_norm_limit)
            optimizer.step()
            optimizer.zero_grad()
        env.close()

    def process_rollout(self, steps, gamma, num_workers, lambd, cuda):
        """Compute discounted returns and GAE advantages over a rollout.

        :param steps: list of (rewards, masks, actions, policies, values)
            tuples, one per environment step.
        :param gamma: discount factor.
        :param num_workers: number of parallel workers (batch dimension).
        :param lambd: GAE lambda.
        :param cuda: if truthy, keep the advantage accumulator on GPU.
        :return: iterator of batched (actions, policies, values, returns,
            advantages) tensors concatenated over time.
        """
        # bootstrap discounted returns with final value estimates
        _, _, _, _, last_values = steps[-1]
        returns = last_values.data

        advantages = torch.zeros(num_workers, 1)
        if cuda: advantages = advantages.cuda()

        out = [None] * (len(steps) - 1)

        # run Generalized Advantage Estimation, calculate returns, advantages
        for t in reversed(range(len(steps) - 1)):
            rewards, masks, actions, policies, values = steps[t]
            _, _, _, _, next_values = steps[t + 1]

            returns = rewards + returns * gamma * masks

            deltas = rewards + next_values.data * gamma * masks - values.data
            advantages = advantages * gamma * lambd * masks + deltas

            out[t] = actions, policies, values, returns, advantages

        # return data as batched Tensors, Variables
        return map(lambda x: torch.cat(x, 0), zip(*out))
