import transformers
from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup
import torch
import unicodedata
import numpy as np
import pandas as pd
import seaborn as sns
from pylab import rcParams
import matplotlib.pyplot as plt
from matplotlib import rc
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from collections import defaultdict
from textwrap import wrap

from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from sklearn.metrics import f1_score
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from collections import Counter
import ast
import random
import json
from math import isnan

from transformers import BertTokenizer, BertModel
from configs import Config
import matplotlib
matplotlib.use('TkAgg')

# Model / feature configuration pulled from the project-level Config object.
PRE_TRAINED_MODEL_NAME=Config.PRE_TRAINED_MODEL_NAME  # HuggingFace model id loaded by BertModel.from_pretrained
NUMBER_FACTOR = Config.NUMBER_FACTOR  # number of numeric stock factors appended to each event vector
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # use the first GPU when available

def mask(df):
    """Attach per-event verb/A0/A1 weight masks and per-row metadata to *df*.

    For every row, ``df['verbA0A1']`` is expected to hold a list of event
    frames; frame ``j`` is ``(verb_spans, A0_spans, A1_spans)`` where each
    span is a ``(start, length)`` pair of token positions.  For each role a
    ``(10, 300)`` array is built: row ``j`` carries weight
    ``1 / <number of unique covered positions>`` at every covered position
    (shifted right by one — position 0 is reserved, presumably for [CLS];
    TODO confirm against the tokenizer), and rows beyond the number of
    events are zero.  Also records ``AV_num`` (event count, clipped to 10)
    and converts ``stock_factors`` to float numpy arrays.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain 'verbA0A1' and 'stock_factors' columns.

    Returns
    -------
    pandas.DataFrame
        The same frame (re-indexed) with 'verb_mask', 'A0_mask', 'A1_mask'
        and 'AV_num' columns added.
    """
    SEQ_LEN = 300   # mask width: 299 token slots + 1 leading reserved slot
    MAX_AV = 10     # fixed number of event slots per sample

    df = df.reset_index(drop=True)
    # Pre-create the mask columns as object dtype so each cell can hold an array.
    for name in ('verb_mask', 'A0_mask', 'A1_mask'):
        df[name] = 0
        df[name] = df[name].astype('object')

    for index, row in df.iterrows():
        df.at[index, 'stock_factors'] = [float(x) for x in df.loc[index, 'stock_factors']]
        av_num = 0
        for k, col in enumerate(('verb', 'A0', 'A1')):
            weight_rows = []
            for frame in row['verbA0A1']:
                # Collect every token index covered by this role's spans
                # (duplicates possible when spans overlap).
                covered = [i for start, length in frame[k]
                           for i in range(start, start + length)]
                counter = Counter(covered)
                # Uniform weight over the unique covered positions.
                weights = [0 if counter[i] == 0 else 1 / len(counter)
                           for i in range(SEQ_LEN - 1)]
                weights.insert(0, 0)  # shift right: slot 0 stays empty
                weight_rows.append(weights)
            av_num = len(weight_rows)
            # Truncate, then zero-pad, to exactly MAX_AV rows.
            weight_rows = weight_rows[:MAX_AV]
            weight_rows.extend(np.zeros(SEQ_LEN) for _ in range(MAX_AV - len(weight_rows)))
            df.at[index, col + '_mask'] = np.array(weight_rows)
        df.loc[index, 'AV_num'] = int(min(av_num, MAX_AV))

    df.AV_num = df.AV_num.astype('int')
    df.stock_factors = df.stock_factors.apply(np.array)
    return df


class GPReviewDataset(Dataset):
    """Torch ``Dataset`` pairing review texts with labels, role masks and factors.

    All constructor arguments are parallel, index-aligned containers:
    element ``i`` of each one describes sample ``i``.  Tokenization happens
    lazily in ``__getitem__`` via the supplied HuggingFace tokenizer.
    """

    def __init__(self, reviews, targets, verb, A0, A1, AV_num, tokenizer, stock_factors, max_len):
        self.reviews = reviews
        self.targets = targets
        self.stock_factors = stock_factors
        self.verb = verb
        self.A0 = A0
        self.A1 = A1
        self.AV_num = AV_num
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.reviews)

    def __getitem__(self, item):
        """Tokenize sample *item* and return it as a dict of tensors."""
        text = str(self.reviews[item])

        # Pad/truncate to self.max_len and return PyTorch tensors.
        tokenized = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=self.max_len,
            return_token_type_ids=False,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt',
        )

        sample = {
            'review_text': text,
            'input_ids': tokenized['input_ids'].flatten(),
            'attention_mask': tokenized['attention_mask'].flatten(),
            'targets': torch.tensor(self.targets[item], dtype=torch.long),
            'stock_factors': torch.tensor(self.stock_factors[item]),
            'verb': torch.tensor(self.verb[item]),
            'A0': torch.tensor(self.A0[item]),
            'A1': torch.tensor(self.A1[item]),
            'AV_num': torch.tensor(self.AV_num[item]),
        }
        return sample

def create_data_loader(df, tokenizer, max_len, batch_size):
    """Wrap *df* in a ``GPReviewDataset`` and return a shuffling ``DataLoader``.

    Expects the frame to carry the columns produced by ``mask`` plus
    ``text_a`` and ``label``.
    """
    dataset = GPReviewDataset(
        reviews=df.text_a.to_numpy(),
        targets=df.label.to_numpy(),
        stock_factors=df.stock_factors,
        verb=df.verb_mask,
        A0=df.A0_mask,
        A1=df.A1_mask,
        AV_num=df.AV_num,
        tokenizer=tokenizer,
        max_len=max_len,
    )
    # num_workers deliberately left at the default (single-process loading).
    return DataLoader(dataset, batch_size=batch_size, shuffle=True)


class SentimentClassifier(nn.Module):
    """Event-based classifier over BERT embeddings of (verb, A0, A1) tuples.

    Each sample carries up to 10 semantic-role events; for every event the
    verb/A0/A1 masks are applied to the BERT token embeddings and the three
    resulting vectors are concatenated (3 * 768 = 2304) together with
    NUMBER_FACTOR stock factors.  The per-event vectors go through a
    Transformer encoder and a two-layer classification head.  ``forward``
    additionally computes a self-supervised prediction of which event had
    its A0/A1 arguments zeroed out, used as an auxiliary training signal.
    """

    def __init__(self, n_classes):
        # n_classes: size of the final classification output.
        super(SentimentClassifier, self).__init__()
        self.bert = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME)
        # d_model = one event vector: verb+A0+A1 embeddings plus stock factors.
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=2304 + NUMBER_FACTOR, nhead=1)
        self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=2)
        self.drop = nn.Dropout(p=0.1)
        # self.L1 = nn.Linear(self.bert.config.hidden_size*30, self.bert.config.hidden_size*3)
        # Classification head over all 10 flattened event vectors.
        self.out1 = nn.Linear((self.bert.config.hidden_size * 3 + NUMBER_FACTOR) * 10,
                              (self.bert.config.hidden_size * 3 + NUMBER_FACTOR) * 3)
        self.out = nn.Linear((self.bert.config.hidden_size * 3 + NUMBER_FACTOR) * 3, n_classes)
        self.linear_for_stock_factors = nn.Linear(NUMBER_FACTOR, NUMBER_FACTOR)
        self.flatten2 = nn.Flatten(2, -1)  # (batch, event, role, dim) -> (batch, event, role*dim)
        self.flatten = nn.Flatten(1, -1)   # (batch, event, dim) -> (batch, event*dim)
        self.relu = nn.ReLU()
        self.sig = nn.Sigmoid()
        # Query/Key projections for the "which event was masked" head; both
        # act on the concatenated A0+A1 part (2 * 768) of an event vector.
        self.Querry = nn.Linear(self.bert.config.hidden_size * 2, self.bert.config.hidden_size * 2)
        self.Key = nn.Linear(self.bert.config.hidden_size * 2, self.bert.config.hidden_size * 2)
        # self.Self_supervised = nn.Linear(self.bert.config.hidden_size,self.bert.config.hidden_size)

    def forward(self, input_ids, attention_mask, verb, A0, A1, stock_factors, AV_num):
        """Run classification plus the auxiliary masked-event prediction.

        Shape comments below use an example batch of 8 and assume the role
        masks are (batch, 10, seq_len) weight matrices produced by ``mask``
        — TODO confirm against the data pipeline.

        Returns
        -------
        (output, self_pred, AV_idx):
            output    — class logits, (batch, n_classes)
            self_pred — per-sample similarity scores over the 10 events
                        for the masked-event objective, (batch, 10);
                        NOTE(review): stays on CPU, callers move it to device
            AV_idx    — index of the event masked in each sample, (batch,)
        """
        # get bert embedding: last hidden state, (batch, seq_len, 768)
        hidden_state = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask
        )[0]
        batch = hidden_state.shape[0]
        # NOTE(review): verb_tmp is never used below — looks like dead code.
        verb_tmp = verb.clone()

        # Project and replicate the stock factors across the 10 event slots:
        # (batch, NUMBER_FACTOR) -> (batch, 10, NUMBER_FACTOR)
        stock_factors_batch = stock_factors[:, :NUMBER_FACTOR]
        stock_factors_batch = self.linear_for_stock_factors(stock_factors_batch.float())
        stock_factors_batch = self.relu(stock_factors_batch)
        stock_factors_batch = torch.unsqueeze(stock_factors_batch, 1)  # 8*1*2304
        stock_factors_batch = torch.cat(10 * [stock_factors_batch], 1)

        A0_tmp = A0.clone()
        A1_tmp = A1.clone()
        # For each sample pick one random event and zero its A0/A1 masks
        # (the verb mask is kept).  NOTE(review): raises if AV_num is 0 —
        # torch.randint(0, 0, ...) is invalid; confirm AV_num >= 1 upstream.
        AV_idx = []
        for idx, num in enumerate(AV_num):
            self_label = torch.randint(0, num, (1,))[0]
            AV_idx.append(self_label)
            A0_tmp[idx, self_label, :] = 0
            A1_tmp[idx, self_label, :] = 0

        # Ground-truth masked-event indices, (batch,)
        AV_idx = torch.tensor(AV_idx).to(device)

        # Keys for the masked-event head: unmasked A0+A1 embeddings per event.
        # Each role embedding is a mask-weighted mean over tokens. (8*10*1536)
        A0_mask = torch.unsqueeze(A0, 3)
        A0_mask = torch.cat(768 * [A0_mask], 3)
        A0_mask = torch.mean(A0_mask * torch.unsqueeze(hidden_state, 1), 2, True)

        A1_mask = torch.unsqueeze(A1, 3)
        A1_mask = torch.cat(768 * [A1_mask], 3)
        A1_mask = torch.mean(A1_mask * torch.unsqueeze(hidden_state, 1), 2, True)
        K = self.Key(torch.squeeze(torch.cat([A0_mask, A1_mask], 3)).float())

        # Verb embedding per event (8*10*1*768)
        V_mask = torch.unsqueeze(verb, 3)
        V_mask = torch.cat(768 * [V_mask], 3)
        V_mask = torch.mean(V_mask * torch.unsqueeze(hidden_state, 1), 2, True)
        transformer_input = V_mask

        # A0 embedding with the selected event zeroed (8*10*2*768 after cat)
        A0_mask_mask = torch.unsqueeze(A0_tmp, 3)
        A0_mask_mask = torch.cat(768 * [A0_mask_mask], 3)
        A0_mask_mask = torch.mean(A0_mask_mask * torch.unsqueeze(hidden_state, 1), 2, True)
        transformer_input = torch.cat([transformer_input, A0_mask_mask], 2)

        # A1 embedding with the selected event zeroed (8*10*3*768 after cat)
        A1_mask_mask = torch.unsqueeze(A1_tmp, 3)
        A1_mask_mask = torch.cat(768 * [A1_mask_mask], 3)
        A1_mask_mask = torch.mean(A1_mask_mask * torch.unsqueeze(hidden_state, 1), 2, True)
        transformer_input = torch.cat([transformer_input, A1_mask_mask], 2)

        # Flatten roles and append stock factors: (8, 10, 2304 + NUMBER_FACTOR)
        transformer_input = self.flatten2(transformer_input.float())
        transformer_input = torch.cat([transformer_input, stock_factors_batch], 2)

        # Transpose to (event, batch, dim) — nn.TransformerEncoder here uses
        # the default seq-first layout.
        transformer_input = torch.stack([transformer_input[:, i, :] for i in range(0, len(verb[0]))])

        # Encode the masked event sequence.
        transformer_output = self.transformer_encoder(transformer_input)

        # Transpose back to (batch, event, dim).
        transformer_output = torch.stack([torch.squeeze(transformer_output[:, i, :]) for i in range(0, batch)])
        transformer_output = torch.squeeze(transformer_output)

        # self_pred is built on CPU; see Returns note above.
        self_pred = torch.zeros((batch, 10))
        if transformer_output.dim() == 2:
            # A batch of 1 gets squeezed to 2-D above — restore the batch dim.
            transformer_output = torch.unsqueeze(transformer_output, 0)

        # Query = encoded A0+A1 slice of the masked event; score it against
        # every event's unmasked key to predict which event was masked.
        for idx, i in enumerate(AV_idx):
            Q = torch.unsqueeze(self.Querry(transformer_output[idx, i, 768:2304]), 0)
            self_pred[idx] = (Q @ K[idx].T)

        # Second pass, unmasked, for the actual classification (8*10*3*768).
        transformer_input = torch.cat([V_mask, A0_mask, A1_mask], 2)

        # Flatten roles + stock factors again: (8, 10, 2304 + NUMBER_FACTOR)
        transformer_input = self.flatten2(transformer_input.float())
        transformer_input = torch.cat([transformer_input, stock_factors_batch], 2)

        # Transpose to (event, batch, dim) and encode.
        transformer_input = torch.stack([transformer_input[:, i, :] for i in range(0, len(transformer_input[0]))])
        transformer_output = self.transformer_encoder(transformer_input)

        # Transpose back to (batch, event, dim).
        transformer_output = torch.stack([torch.squeeze(transformer_output[:, i, :]) for i in range(0, batch)])
        transformer_output = torch.squeeze(transformer_output)

        # Restore the batch dimension for a batch of 1.
        if transformer_output.dim() == 2:
            transformer_output = torch.unsqueeze(transformer_output, 0)

        # Classification head: flatten events, sigmoid activations, dropout.
        output = self.flatten(transformer_output.float())
        output = self.sig(output)
        #     output = self.drop(output)
        output = self.out1(output)
        output = self.sig(output)
        output = self.drop(output)
        output = self.out(output)

        return output, self_pred, AV_idx

def train_epoch(
  model,
  data_loader,
  loss_fn,
  optimizer,
  device,
  scheduler,
  n_examples
):
    """Train *model* for one epoch over *data_loader*.

    The loss is a weighted sum of the classification loss (0.8) and the
    self-supervised masked-event loss (0.2).  Gradients are clipped to a
    max norm of 1.0 and the LR scheduler is stepped once per batch.

    Returns
    -------
    (label_accuracy, mean_loss, masked_event_accuracy) — both accuracies
    are normalized by *n_examples*.
    """
    model = model.train()

    batch_losses = []
    label_hits = 0
    event_hits = 0

    for batch in data_loader:
        input_ids = batch["input_ids"].to(device)
        attention_mask = batch["attention_mask"].to(device)
        targets = batch["targets"].to(device)
        stock_factors = batch['stock_factors'].to(device)
        verb = batch["verb"].to(device)
        A0 = batch["A0"].to(device)
        A1 = batch["A1"].to(device)
        AV_num = batch["AV_num"].to(device)

        outputs, self_outputs, self_labels = model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            verb=verb,
            A0=A0,
            A1=A1,
            stock_factors=stock_factors,
            AV_num=AV_num
        )
        # The auxiliary outputs come back on CPU — move them over.
        self_outputs = self_outputs.to(device)
        self_labels = self_labels.to(device)

        # A single-sample batch can come back squeezed to 1-D.
        if outputs.dim() == 1:
            outputs = torch.unsqueeze(outputs, 0)
        preds = torch.max(outputs, dim=1)[1]
        self_preds = torch.max(self_outputs, dim=1)[1]

        # Joint objective: classification + masked-event prediction.
        loss = 0.8 * loss_fn(outputs, targets) + 0.2 * loss_fn(self_outputs, self_labels)

        label_hits += torch.sum(preds == targets)
        event_hits += torch.sum(self_preds == self_labels)
        batch_losses.append(loss.item())

        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()

    return label_hits.double() / n_examples, np.mean(batch_losses), event_hits.double() / n_examples


def eval_model(model, data_loader, loss_fn, device, n_examples):
    """Evaluate *model* over *data_loader* without gradient tracking.

    Only the classification head is scored; the auxiliary masked-event
    outputs are discarded.

    Returns
    -------
    (accuracy, mean_loss) — accuracy is normalized by *n_examples*.
    """
    model = model.eval()

    batch_losses = []
    hits = 0

    with torch.no_grad():
        for batch in data_loader:
            targets = batch["targets"].to(device)

            # Keep only the class logits; drop self_pred / AV_idx.
            outputs = model(
                input_ids=batch["input_ids"].to(device),
                attention_mask=batch["attention_mask"].to(device),
                verb=batch["verb"].to(device),
                A0=batch["A0"].to(device),
                A1=batch["A1"].to(device),
                stock_factors=batch['stock_factors'].to(device),
                AV_num=batch["AV_num"].to(device)
            )[0]

            # A single-sample batch can come back squeezed to 1-D.
            if outputs.dim() == 1:
                outputs = torch.unsqueeze(outputs, 0)
            preds = torch.max(outputs, dim=1)[1]

            batch_losses.append(loss_fn(outputs, targets).item())
            hits += torch.sum(preds == targets)

    return hits.double() / n_examples, np.mean(batch_losses)

