import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence
from collections import defaultdict
from tqdm.auto import tqdm
import pandas as pd
from gensim.models import KeyedVectors
import gensim
import numpy
import jieba
import os
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from word2vec import Word2Vec
from log import Logger
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
import matplotlib.pyplot as plt

# Only bind a CUDA device when one is actually present: calling
# torch.cuda.set_device(0) on a CPU-only machine raises a RuntimeError
# at import time, even though train()/predict() support CPU fallback.
if torch.cuda.is_available():
    torch.cuda.set_device(0)
logger = Logger(filename="./lstm.log").get_logger()


def encode_label(data_list=None):
    """
    Encode string emotion labels as integer indices.
        Example : [a,b,c,a] => [0,1,2,0]
    :param data_list: iterable of label strings (list, numpy array, pd.Series, ...)
    :return: (encoded label array, fitted LabelEncoder)
    :raises ValueError: if data_list is None
    """
    if data_list is None:
        raise ValueError("Error : data_list is null")
    encoder = LabelEncoder()
    # Fit on a fixed label set so the index mapping is stable across runs.
    # encoder.fit(["surprise", "neutral", "happy", "angry", "sad", "fear"])
    encoder.fit(["happy", "sad"])
    # Bug fix: the original only called transform() when data_list was NOT a
    # list, so passing a plain list silently returned (None, encoder).
    # Encode unconditionally for any iterable of labels.
    label_index = encoder.transform(list(data_list))
    return label_index, encoder


def load_data(root, data_type) -> tuple:
    """
    Load one labeled CSV split and convert it to model inputs.
        Example: load_data( root="./haodf", data_type="train" )
        reads raw data from "./haodf/two_health_train_labeled.csv"
    :param root: data dir
    :param data_type: ( "train","eval","test")
    :return: (token-id sequences as a pd.Series, encoded label array)
    """
    # Project-local helper that maps raw text to word-id sequences.
    word2vec = Word2Vec()

    pth = os.path.join(root, "two_health_" + data_type + "_labeled.csv")
    _df = pd.read_csv(pth, encoding='utf-8')
    # '文本' = text column; each row becomes a list of vocabulary ids.
    _content = _df['文本'].astype(str).map(word2vec.get_ids)
    # '情绪标签' = emotion label column, encoded to ints via LabelEncoder.
    label_index, encoder = encode_label(_df['情绪标签'].values)

    return _content, label_index


class LSTMDataset(Dataset):
    """Dataset of (token-id sequence, integer label) pairs for one CSV split."""

    def __init__(self, root, data_type):
        # load_data returns aligned sequences and encoded labels.
        contents, labels = load_data(root=root, data_type=data_type)
        self.contents = contents
        self.labels = labels

    def __len__(self):
        """Number of samples in this split."""
        return len(self.contents)

    def __getitem__(self, i):
        """Return the (sequence, label) pair at position ``i``."""
        return (self.contents[i], self.labels[i])


def collate_fn(examples):
    """
    Collate a list of (token_ids, label) pairs into one padded batch.

    :param examples: list of (sequence, label) tuples from LSTMDataset
    :return: (padded input tensor [B, T], per-sample lengths, label tensor)
    """
    lengths = torch.tensor([len(seq) for seq, _ in examples], dtype=torch.int64)
    targets = torch.tensor([label for _, label in examples], dtype=torch.long)
    # Pad every sequence in the batch up to the longest one (zeros at the end).
    padded = pad_sequence([torch.tensor(seq) for seq, _ in examples],
                          batch_first=True)
    return padded, lengths, targets


class LSTM(nn.Module):
    """Single-layer LSTM text classifier on top of pretrained word embeddings."""

    def __init__(self, embedding_dim, hidden_dim, num_class):
        super(LSTM, self).__init__()
        # Pretrained embedding matrix from the project's Word2Vec wrapper;
        # freeze=False keeps the embeddings trainable.
        w2v = Word2Vec()
        _vocab, weight = w2v.get_vocab_weight()
        self.embeddings = nn.Embedding.from_pretrained(weight, freeze=False)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
        self.output = nn.Linear(hidden_dim, num_class)

    def forward(self, inputs, lengths):
        """
        :param inputs: padded token-id batch [B, T]
        :param lengths: true (unpadded) sequence lengths [B]
        :return: (log-probabilities, probabilities), each of shape [B, num_class]
        """
        embedded = self.embeddings(inputs)
        # Pack so the LSTM skips pad positions; lengths must live on the CPU.
        packed = pack_padded_sequence(embedded, lengths.cpu(),
                                      batch_first=True, enforce_sorted=False)
        _, (hn, _) = self.lstm(packed)
        # Classify from the final hidden state of the last LSTM layer.
        logits = self.output(hn[-1])
        return F.log_softmax(logits, dim=-1), F.softmax(logits, dim=-1)


def train(cfg):
    """
    Train the LSTM classifier on the train split, periodically evaluate on
    the eval split, and persist the final model to "lstm_model.pth".

    :param cfg: dict with keys "using_gpu", "epochs", "embedding_dim",
                "hidden_dim", "num_class", "batch_size"
    """
    # setting device
    device = "cpu"
    if cfg["using_gpu"] >= 0 and torch.cuda.is_available():
        device = "cuda"
    device = torch.device(device)

    # prepare dataset
    train_dataset = LSTMDataset(root="./haodf", data_type="train")
    train_data_loader = DataLoader(train_dataset,
                                   batch_size=cfg["batch_size"],
                                   collate_fn=collate_fn,
                                   shuffle=True)

    eval_dataset = LSTMDataset(root="./haodf", data_type="eval")
    eval_data_loader = DataLoader(eval_dataset,
                                  batch_size=1,
                                  collate_fn=collate_fn,
                                  shuffle=False)

    # prepare LSTM model and optim
    model = LSTM(cfg["embedding_dim"], cfg["hidden_dim"], cfg["num_class"])
    model.to(device)

    nll_loss = nn.NLLLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    for epoch in range(cfg["epochs"]):
        total_loss = 0
        model.train()
        for batch in tqdm(train_data_loader, desc=f"Training Epoch {epoch}"):
            inputs, lengths, targets = [x.to(device) for x in batch]
            # Bug fix: forward() returns a (log_probs, probs) tuple; the
            # original passed the whole tuple to NLLLoss, which raises.
            log_probs, _ = model(inputs, lengths)
            loss = nll_loss(log_probs, targets)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        print(f"Loss: {total_loss:.2f}")
        logger.info(f"Loss: {total_loss:.2f}")

        # Evaluate every 5 epochs.
        if (epoch + 1) % 5 == 0:
            model.eval()
            acc = 0
            for batch in tqdm(eval_data_loader, desc=f"Testing"):
                inputs, lengths, targets = [x.to(device) for x in batch]
                with torch.no_grad():
                    # Bug fix: unpack the (log_probs, probs) tuple here too;
                    # the original called .argmax on the tuple itself.
                    log_probs, _ = model(inputs, lengths)
                    acc += (log_probs.argmax(dim=1) == targets).sum().item()
            # batch_size=1, so len(eval_data_loader) == number of eval samples.
            print(f"Acc: {acc / len(eval_data_loader):.2f}")
            logger.info(f"Acc: {acc / len(eval_data_loader):.2f}")

    torch.save(model, "lstm_model.pth")

    # Final eval with the reloaded checkpoint.
    model = torch.load("lstm_model.pth", torch.device(device))
    model.eval()
    acc = 0
    for batch in tqdm(eval_data_loader, desc=f"Testing"):
        inputs, lengths, targets = [x.to(device) for x in batch]
        with torch.no_grad():
            # Same tuple-unpacking fix as above.
            log_probs, _ = model(inputs, lengths)
            acc += (log_probs.argmax(dim=1) == targets).sum().item()
    print(f"Acc: {acc / len(eval_data_loader):.2f}")


# Evaluate the saved model on the eval split.
def predict(cfg):
    """
    Run the saved LSTM checkpoint over the eval split, report accuracy,
    plot a confusion matrix, and dump per-sample results to a CSV file.

    :param cfg: dict with at least the "using_gpu" key
    """
    # Pick GPU when requested and available, otherwise fall back to CPU.
    use_cuda = cfg["using_gpu"] >= 0 and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    test_dataset = LSTMDataset(root="./haodf", data_type="eval")
    test_data_loader = DataLoader(test_dataset,
                                  batch_size=1,
                                  collate_fn=collate_fn,
                                  shuffle=False)

    # Load the whole pickled model saved by train().
    model = torch.load("lstm_model.pth", torch.device(device))
    model.eval()

    acc = 0
    all_targets = []
    all_output = []
    all_predicts = []
    for batch in tqdm(test_data_loader, desc=f"Testing"):
        inputs, lengths, targets = [x.to(device) for x in batch]
        with torch.no_grad():
            log_probs, probs = model(inputs, lengths)
            predictions = log_probs.argmax(dim=1)
            acc += (predictions == targets).sum().item()
            all_targets.extend(targets.cpu().numpy().tolist())
            all_predicts.extend(predictions.cpu().numpy().tolist())
            all_output.extend(probs.cpu().numpy().tolist())

    # batch_size=1, so the loader length equals the sample count.
    print(f"Acc: {acc / len(test_data_loader):.2f}")

    # Confusion matrix of true vs. predicted labels.
    ConfusionMatrixDisplay(confusion_matrix(all_targets, all_predicts)).plot()
    plt.show()

    # Append predictions / targets / class probabilities to the raw eval CSV.
    pth = os.path.join("./haodf", "two_health_" + "eval" + "_labeled.csv")
    _df = pd.read_csv(pth, encoding='utf-8')
    _df["predict"] = all_predicts
    _df["target"] = all_targets
    _df["detail"] = all_output
    _df.to_csv("./lstm_predict.csv")
    print("保存成功")


if __name__ == '__main__':
    # Hyper-parameters / runtime configuration shared by train() and predict().
    config = dict(
        using_gpu=True,
        epochs=10,
        embedding_dim=300,
        hidden_dim=100,
        num_class=2,
        batch_size=64,
    )
    # train(config)
    predict(config)
