import os.path
from typing import Optional, List
from torch import Tensor, nn, optim, LongTensor
from torch.utils.data import DataLoader, TensorDataset
import pytorch_lightning as pl
from transformers import BatchEncoding, BertConfig

from transformers.models.bert import BertForSequenceClassification, BertTokenizer
from transformers.modeling_outputs import SequenceClassifierOutput

from src.data_reader import NLUCsvDataReader
from src.schema import TextClassificationInputExample
from src.config import Config, root_dir
from src.metric import MetricsReport


class ATISDataModule(pl.LightningDataModule):
    """Lightning data module for the ATIS intent-classification CSVs.

    Reads train/dev/test splits with ``NLUCsvDataReader``, tokenizes the raw
    text with a BERT tokenizer, and serves ``TensorDataset``s of
    ``(input_ids, attention_mask, token_type_ids, label)`` tuples — callers
    (the LightningModule) rely on exactly that order.
    """

    def __init__(self, config: Config):
        super(ATISDataModule, self).__init__()

        self.config = config
        self.data_reader = NLUCsvDataReader()
        self.tokenizer = BertTokenizer.from_pretrained(config.bert_name)

        self.train, self.val, self.test = [], [], []

        # Build the label-string -> index map from labels.txt (one label per
        # line); n_labels is derived from it so the model head matches.
        self.label2idx = {}
        label_file = os.path.join(root_dir, config.data_dir, 'labels.txt')
        if os.path.exists(label_file):
            with open(label_file, 'r', encoding='utf-8') as f:
                for index, line in enumerate(f):
                    self.label2idx[line.strip()] = index
            self.config.n_labels = len(self.label2idx)

    def setup(self, stage: Optional[str] = None) -> None:
        # NOTE(review): unlike labels.txt above, these paths are NOT joined
        # with root_dir — confirm NLUCsvDataReader resolves relative paths
        # the same way, or the two lookups may disagree.
        self.train = self.data_reader.read_intents(
            os.path.join(self.config.data_dir, 'train.csv')
        )
        self.val = self.data_reader.read_intents(
            os.path.join(self.config.data_dir, 'dev.csv')
        )
        self.test = self.data_reader.read_intents(
            os.path.join(self.config.data_dir, 'test.csv')
        )

    def convert_examples_to_features(self, examples: List[TextClassificationInputExample]):
        """Tokenize *examples* and return the tensors
        ``(input_ids, attention_mask, token_type_ids, labels)`` in that order.

        Raises KeyError if an example's category is missing from labels.txt.
        """
        # 1. batch encoding sentence
        sentences = [example.raw_text for example in examples]
        result: BatchEncoding = self.tokenizer.batch_encode_plus(
            sentences,
            padding=True,
            # FIX: without truncation=True, max_length is ignored and
            # over-long sentences were passed through untrimmed.
            truncation=True,
            max_length=self.config.max_length,
            return_tensors='pt',
            return_token_type_ids=True,
            return_attention_mask=True
        )

        # 2. map category strings to integer class ids.
        category_ids = [self.label2idx[example.category] for example in examples]
        category_tensor = LongTensor(category_ids)

        return result['input_ids'], result['attention_mask'], result['token_type_ids'], category_tensor

    def _make_dataloader(self, examples: List[TextClassificationInputExample]) -> DataLoader:
        """Build a DataLoader over tokenized *examples*.

        All three splits use config.train_batch_size, matching the original
        behavior (no separate eval batch size is configured).
        """
        features = self.convert_examples_to_features(examples)
        dataset = TensorDataset(*features)
        return DataLoader(dataset=dataset, batch_size=self.config.train_batch_size)

    def train_dataloader(self) -> DataLoader:
        return self._make_dataloader(self.train)

    def val_dataloader(self) -> DataLoader:
        return self._make_dataloader(self.val)

    def test_dataloader(self) -> DataLoader:
        return self._make_dataloader(self.test)


class BertT(pl.LightningModule):
    """BERT sequence classifier for ATIS intents, trained with Lightning.

    NOTE(review): the encoder is built from config only
    (``BertForSequenceClassification(bert_config)``), i.e. randomly
    initialized — if pretrained weights are intended, this should be
    ``BertForSequenceClassification.from_pretrained(...)``. Confirm.
    """

    def __init__(self, config: Config):
        super(BertT, self).__init__()

        self.config = config
        bert_config = BertConfig.from_pretrained(config.bert_name)
        bert_config.num_labels = config.n_labels
        self.bert = BertForSequenceClassification(bert_config)
        # Integer class ids passed to MetricsReport as label names.
        self.label_strings = list(range(config.n_labels))

    def forward(self, input_ids, token_type_ids, attention_mask):
        """Return classification logits of shape (batch, n_labels)."""
        output: SequenceClassifierOutput = self.bert(
            input_ids,
            attention_mask,
            token_type_ids
        )
        return output.logits

    def get_metric_report(self, logits: Tensor, labels: Tensor) -> MetricsReport:
        """Build a MetricsReport comparing argmax predictions to gold ids.

        :param logits: (batch, n_labels) unnormalized scores.
        :param labels: (batch,) gold class ids.
        """
        predicted = logits.argmax(dim=-1)
        assert predicted.shape == labels.shape

        predicted = predicted.detach().cpu().numpy().tolist()
        labels = labels.detach().cpu().numpy().tolist()
        return MetricsReport.by_sequence(
            labels,
            predicted,
            self.label_strings
        )

    def training_step(self, batch, batch_index: int) -> Tensor:
        """One optimization step; returns the loss for backprop."""
        # BUG FIX: ATISDataModule yields (input_ids, attention_mask,
        # token_type_ids, labels). The previous unpack order swapped
        # attention_mask and token_type_ids, so each tensor was fed into
        # the wrong positional argument of BertForSequenceClassification.
        input_ids, attention_mask, token_type_ids, labels = batch
        output: SequenceClassifierOutput = self.bert(
            input_ids,
            attention_mask,
            token_type_ids,
            labels=labels
        )
        # Only compute/log the (relatively expensive) report every 10 batches.
        if batch_index % 10 == 0:
            metric_report = self.get_metric_report(output.logits, labels)
            self.log('train-recall', metric_report.recall, on_step=True, on_epoch=True)
            self.log('train-precision', metric_report.precision, on_step=True, on_epoch=True)
            self.log('train-acc', metric_report.accuracy, on_step=True, on_epoch=True)
            self.log('train-f1', metric_report.f1_score, on_step=True, on_epoch=True)
        return output.loss

    def validation_step(self, batch, batch_index: int) -> MetricsReport:
        """Evaluate one validation batch; the report is logged in
        ``validation_step_end``."""
        # Same unpack-order fix as in training_step.
        input_ids, attention_mask, token_type_ids, labels = batch
        output: SequenceClassifierOutput = self.bert(
            input_ids,
            attention_mask,
            token_type_ids,
            labels=labels
        )
        metric_report = self.get_metric_report(output.logits, labels)
        return metric_report

    def validation_step_end(self, metric_report: MetricsReport):
        self.log('val-recall', metric_report.recall, on_step=True, on_epoch=True)
        self.log('val-precision', metric_report.precision, on_step=True, on_epoch=True)
        self.log('val-acc', metric_report.accuracy, on_step=True, on_epoch=True)
        self.log('val-f1', metric_report.f1_score, on_step=True, on_epoch=True)

    def configure_optimizers(self):
        # NOTE(review): learning rate is hard-coded; consider reading it from
        # self.config if one is available there.
        optimizer = optim.AdamW(self.parameters(), lr=5e-5)
        return optimizer


def train():
    """Entry point: parse CLI config, build data module + model, and fit."""
    cfg = Config().parse_args(known_only=True)
    datamodule = ATISDataModule(cfg)
    classifier = BertT(cfg)

    trainer = pl.Trainer(
        gpus=2,
        max_epochs=cfg.epochs,
        min_epochs=cfg.epochs - 4,
    )
    trainer.fit(classifier, datamodule)


# Allow running this module directly as a training script.
if __name__ == '__main__':
    train()
