import random
import unittest

import pandas
import torch
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.utilities.types import (
    OptimizerLRScheduler,
    STEP_OUTPUT,
    TRAIN_DATALOADERS,
    EVAL_DATALOADERS,
)
from sklearn.metrics.pairwise import cosine_similarity
from torch.utils.data import Dataset, DataLoader
from torchmetrics.functional import f1_score, precision, recall, accuracy
from transformers import BertTokenizer, BertModel, BertForSequenceClassification

from log import LOGGER, WORK_DIR
from text_classifier import EmbeddingGenerator


class EmbeddingGeneratorByBert(EmbeddingGenerator):
    """Sentence-embedding generator backed by a local bert-base-uncased model."""

    BERT_MODEL_PATH = "/home/uniquelip/ai_models/bert-base-uncased"

    def __init__(self):
        # Fix all RNG seeds so generated embeddings are reproducible.
        seed = 42
        random.seed(seed)
        torch.manual_seed(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(seed)

        self.tokenizer = BertTokenizer.from_pretrained(self.BERT_MODEL_PATH)
        self.model = BertModel.from_pretrained(self.BERT_MODEL_PATH)

    def gen_sentence_embedding(self, text_or_list, debug=False):
        """Return pooled BERT embeddings for one sentence or a list of sentences.

        Accepts a single string or a list of strings and returns the model's
        pooler output, a (batch, hidden) tensor.
        """
        texts = [text_or_list] if isinstance(text_or_list, str) else text_or_list
        encoding = self.tokenizer.batch_encode_plus(
            texts,
            max_length=50,
            padding=True,
            truncation=True,
            return_tensors="pt",
            add_special_tokens=True,
        )
        input_ids = encoding["input_ids"]
        attention_mask = encoding["attention_mask"]

        if debug:
            self.print_tokens(input_ids)

        # Inference only: no gradient bookkeeping needed.
        with torch.no_grad():
            outputs = self.model(input_ids, attention_mask=attention_mask)
            pooled = outputs.pooler_output
            LOGGER.info(pooled.shape)
            return pooled

    def print_tokens(self, input_ids):
        """Log decoded text and its token pieces for every sequence in the batch."""
        for seq_ids in input_ids:
            # skip_special_tokens drops the [CLS]/[SEP] markers from the decode.
            decoded = self.tokenizer.decode(seq_ids, skip_special_tokens=True)
            LOGGER.info(decoded)
            pieces = self.tokenizer.tokenize(decoded)
            LOGGER.info("total %d tokens, tokens:%s" % (len(pieces), pieces))


EMBEDDING_GENERATOR = EmbeddingGeneratorByBert()


def load_raw_data(path):
    """Read a colon-delimited "text:label" file into a list of (text, label) tuples.

    Each line of the file is expected to hold one example in the form
    ``some text:label``; the first field becomes ``text``, the second ``label``.
    """
    frame = pandas.read_table(
        path, delimiter=":", engine="python", header=None, names=["text", "label"]
    )
    return [tuple(row) for row in frame.values]


def load_train_data():
    """Return the labelled training split as (text, label) tuples."""
    return load_raw_data(WORK_DIR + "/resources/training/train.txt")


def load_test_data():
    """Return the labelled test split as (text, label) tuples."""
    return load_raw_data(WORK_DIR + "/resources/training/test.txt")


class CustomDataset(Dataset):
    """torch Dataset that lazily tokenizes (text, label) pairs with BERT."""

    def __init__(self, docs, tokenizer, max_len):
        # docs: sequence of (text, label) pairs; max_len: fixed sequence length.
        self.tokenizer = tokenizer
        self.docs = docs
        self.max_len = max_len

    def __len__(self):
        return len(self.docs)

    def __getitem__(self, index):
        text, label = self.docs[index]
        token_ids, attention = self.tokenize_text(text)
        return {"ids": token_ids, "masks": attention, "labels": label}

    def tokenize_text(self, text: str):
        """Encode one sentence; return (input_ids, attention_mask) 1-D tensors."""
        encoded = self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            truncation=True,
            max_length=self.max_len,
            padding="max_length",
            return_attention_mask=True,
            return_tensors="pt",
        )
        # encode_plus yields (1, max_len) tensors; drop the batch axis so the
        # DataLoader can stack items into a batch itself.
        return encoded["input_ids"].squeeze(0), encoded["attention_mask"].squeeze(0)


class ClassifyModule(LightningModule):
    """LightningModule that fine-tunes BERT for binary sequence classification."""

    def __init__(self, train_params):
        super().__init__()
        seq_len = 50
        tokenizer = BertTokenizer.from_pretrained(
            EmbeddingGeneratorByBert.BERT_MODEL_PATH
        )
        # Datasets are built eagerly so the dataloader hooks stay trivial.
        self.train_dataset = CustomDataset(load_train_data(), tokenizer, seq_len)
        self.test_dataset = CustomDataset(load_test_data(), tokenizer, seq_len)
        self.model = BertForSequenceClassification.from_pretrained(
            EmbeddingGeneratorByBert.BERT_MODEL_PATH,
            num_labels=2,
            output_attentions=False,
            output_hidden_states=False,
        )

        self.batch_size = train_params["batch_size"]
        self.learning_rate = train_params["lr"]
        self.gamma = train_params["gamma"]

    def forward(self, batch):
        """Run one tokenized batch through BERT; return (loss, logits)."""
        loss, logits = self.model(
            batch["ids"],
            attention_mask=batch["masks"],
            labels=batch["labels"],
            token_type_ids=None,
            return_dict=False,
        )
        return loss, logits

    def configure_optimizers(self) -> OptimizerLRScheduler:
        """AdamW with an exponentially decaying learning rate (factor gamma)."""
        opt = torch.optim.AdamW(self.model.parameters(), lr=self.learning_rate)
        sched = torch.optim.lr_scheduler.ExponentialLR(opt, self.gamma)
        return [opt], [sched]

    def training_step(self, batch, batch_idx) -> STEP_OUTPUT:
        """Compute loss and macro-F1 for one batch and log both."""
        loss, logits = self.forward(batch)
        predictions = torch.argmax(logits, dim=1).flatten()
        targets = batch["labels"].flatten()
        score = f1_score(predictions, targets, task="binary", average="macro")
        self.log("loss", loss)
        self.log("train_f1", score)
        return loss

    def train_dataloader(self) -> TRAIN_DATALOADERS:
        return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=True)

    def test_dataloader(self) -> EVAL_DATALOADERS:
        return DataLoader(self.test_dataset, batch_size=self.batch_size, shuffle=False)


# Hyper-parameters consumed by ClassifyModule.__init__.
TRAIN_PARAMS = {"batch_size": 32, "lr": 3e-5, "gamma": 0.8}

# Checkpoint filename stem shared by train() (write) and test() (read).
TRAIN_MODEL_NAME = "sample_model"


def train():
    """Fine-tune the classifier for two epochs, checkpointing the best train F1."""
    module = ClassifyModule(TRAIN_PARAMS)
    best_ckpt = ModelCheckpoint(
        monitor="train_f1",
        dirpath="%s/resources/bert_model" % WORK_DIR,
        mode="max",
        filename=TRAIN_MODEL_NAME,
    )
    runner = Trainer(max_epochs=2, log_every_n_steps=5, callbacks=[best_ckpt])
    runner.fit(module)


def test():
    """Evaluate the checkpointed model on the test split and print metrics.

    Loads the best checkpoint written by train(), runs the whole test
    dataloader, prints f1/precision/recall/accuracy, and returns the list
    of predicted class ids.
    """
    mod = ClassifyModule.load_from_checkpoint(
        "%s/resources/bert_model/%s.ckpt" % (WORK_DIR, TRAIN_MODEL_NAME),
        train_params=TRAIN_PARAMS,
    )
    # Fix: switch to eval mode so dropout is disabled — otherwise the
    # reported metrics are nondeterministic across runs.
    mod.eval()

    total_preds = []
    total_labels = []
    # Fix: disable autograd during inference; the original loop tracked
    # gradients for every forward pass, wasting time and memory.
    with torch.no_grad():
        for batch in mod.test_dataloader():
            y = mod.model(
                batch["ids"], token_type_ids=None, attention_mask=batch["masks"]
            )
            logits = y[0]
            preds = torch.argmax(logits, dim=1)
            total_preds.extend(preds.tolist())
            total_labels.extend(batch["labels"].tolist())

    t_preds = torch.tensor(total_preds)
    t_labels = torch.tensor(total_labels)
    print("f1:%.2f" % f1_score(t_preds, t_labels, task="binary"))
    print("precision:%.2f" % precision(t_preds, t_labels, task="binary"))
    print("recall:%.2f" % recall(t_preds, t_labels, task="binary"))
    print("accuracy:%.2f" % accuracy(t_preds, t_labels, task="binary"))
    return total_preds


class BertTest(unittest.TestCase):
    """Manual entry points for embedding, training and evaluation runs."""

    @unittest.skip
    def test(self):
        # Two semantically similar sentences should produce a high cosine score.
        first = EMBEDDING_GENERATOR.gen_sentence_embedding(
            "GeeksForGeeks is a computer science portal", True
        )
        second = EMBEDDING_GENERATOR.gen_sentence_embedding(
            "GeeksForGeeks is a technology website", True
        )
        print(cosine_similarity(first, second))

    @unittest.skip
    def test_train(self):
        train()

    def test_test(self):
        test()


# Allow running this module directly as a unittest script.
if __name__ == "__main__":
    unittest.main()
