# coding=utf-8
# Copyright (C) xxx team - All Rights Reserved
#
# @Version:   3.9.4
# @Software:  PyCharm
# @FileName:  architecture.py
# @CTime:     2021/5/3 16:37   
# @Author:    Haiyang Yu
# @Email:     xxx
# @UTime:     2021/5/3 16:37
#
# @Description:
#     xxx
#     xxx
#
import codecs
import logging
from typing import List, Dict
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import pytorch_lightning as pl
from torchinfo import summary
# self
from models import CNN, CRF
from metrics import NerSentMetric, NerMetric

logger = logging.getLogger(__name__)


class Architecture(pl.LightningModule):
    """NER tagging model: CNN encoder with an optional CRF training loss.

    The CRF layer is always instantiated because :meth:`forward` decodes
    through it; ``has_crf_layer`` only selects which training loss is used.
    """

    def __init__(self, cfg):
        """Build encoder, CRF decoder and per-stage metrics.

        Args:
            cfg: configuration namespace; must provide ``decoder_method``,
                ``token_types`` and the optimizer / LR-scheduler
                hyper-parameters read in :meth:`configure_optimizers`.
        """
        super(Architecture, self).__init__()
        # Stores cfg fields under self.hparams (Lightning convention).
        self.save_hyperparameters(cfg)

        self.encoder = CNN(cfg)

        # Single CRF construction: the original built it twice, with the
        # second (unconditional) assignment silently overwriting the
        # decoder_method-dependent one.
        self.has_crf_layer = cfg.decoder_method == 'crf'
        self.crf = CRF(num_tags=cfg.token_types, batch_first=True)

        # NOTE(review): the original set example_input_array and called
        # torchinfo.summary with undefined names (dummy_x, dummpy_cgr),
        # raising NameError at construction time. Dropped until real dummy
        # inputs with the correct shapes are available -- TODO restore.

        self.train_acc = NerSentMetric()
        self.valid_acc = NerSentMetric()
        self.test_acc = NerSentMetric()

    def get_progress_bar_dict(self):
        """Progress-bar metrics without the noisy version number entry."""
        items = super().get_progress_bar_dict()
        items.pop("v_num", None)
        return items

    def forward(self, batch):
        """Decode a batch into tag sequences.

        Args:
            batch: ``(x, x_mask)`` input ids and padding mask.

        Returns:
            tuple: CRF-decoded tag sequences, and the learned CRF transition
            matrix rounded to 2 decimals (as a numpy array).
        """
        x, x_mask = batch
        x_logit = self.encoder(x, x_mask)
        # .cpu() before .numpy(): CUDA tensors cannot be converted directly.
        transitions = self.crf.transitions.detach().cpu().numpy().round(2)
        return self.crf.decode(x_logit), transitions

    def main_loop(self, batch, stage):
        """Shared train/valid/test step: loss, predictions and metrics.

        Args:
            batch: ``(x, x_mask, x_tag)`` inputs, padding mask, gold tags.
            stage: one of ``'train'``, ``'valid'``, ``'test'`` -- selects
                the metric attribute and metric-name prefix.

        Returns:
            tuple: scalar loss tensor, dict of loggable metrics.
        """
        x, x_mask, x_tag = batch
        x_logit = self.encoder(x, x_mask)

        if self.has_crf_layer:
            # CRF forward returns the log-likelihood; negate to minimize.
            loss = -self.crf(x_logit, x_tag, x_mask.bool(), reduction='mean')
            x_pred = torch.tensor(self.crf.decode(x_logit, x_mask.bool())).type_as(x)
        else:
            # Token-level cross entropy over non-padded positions only.
            active = torch.eq(x_mask.view(-1), 1)
            active_logits = x_logit.view(-1, x_logit.size(-1))[active]
            active_tags = x_tag.view(-1)[active]
            loss = F.cross_entropy(active_logits, active_tags)
            # Zero out predictions on padding positions.
            x_pred = torch.argmax(x_logit, dim=-1) * x_mask

        getattr(self, stage + '_acc')(x_pred, x_tag)
        metrics = {
            stage + '_loss': loss,
            stage + '_acc': getattr(self, stage + '_acc'),
        }
        return loss, metrics

    def training_step(self, batch, batch_idx):
        """One optimization step; logs per-step training metrics."""
        loss, metrics = self.main_loop(batch, 'train')
        self.log_dict(metrics, on_step=True, on_epoch=False, logger=True)
        return loss

    def validation_step(self, batch, batch_idx):
        """One validation step; metrics are aggregated per epoch."""
        loss, metrics = self.main_loop(batch, 'valid')
        self.log_dict(metrics, on_step=False, on_epoch=True, logger=True)
        return loss

    def test_step(self, batch, batch_idx):
        """One test step; metrics are aggregated per epoch."""
        loss, metrics = self.main_loop(batch, 'test')
        self.log_dict(metrics, on_step=False, on_epoch=True, logger=True)
        return loss

    def configure_optimizers(self):
        """Create optimizer and LR scheduler from saved hyper-parameters.

        Reads the values through ``self.hparams`` (populated by
        ``save_hyperparameters(cfg)``); the original read bare ``self.*``
        attributes that were never set.
        """
        hp = self.hparams
        # self.parameters() -- not just the encoder's -- so the CRF
        # transition matrix is trained as well.
        optimizer = getattr(optim, hp.optimizer)(self.parameters(), lr=hp.lr)
        lr_scheduler = {
            'scheduler': getattr(optim.lr_scheduler, hp.lr_scheduler_name)(
                optimizer=optimizer,
                mode=hp.lr_scheduler_mode,
                factor=hp.lr_scheduler_factor,
                patience=hp.lr_scheduler_patience,
                verbose=True,
            ),
            'interval': hp.lr_scheduler_interval,
            'monitor': hp.lr_scheduler_monitor,
        }
        return [optimizer], [lr_scheduler]


if __name__ == '__main__':
    # Quick construction smoke test.
    class Config(object):
        # NOTE(review): this stub defines only learning_rate, but the
        # Architecture constructor also reads cfg.decoder_method and
        # cfg.token_types (and CNN(cfg) presumably needs more fields) --
        # instantiating the model with this Config will raise
        # AttributeError. TODO: fill in the required fields.
        learning_rate = 1e-1


    config = Config()
    model = Architecture(config)
