import paddle
import paddle.nn.functional as F

from ..modules import *
from .base_model import BaseModel, FlatBaseModel
from ..loss import CrossEntropy
from ..utils import mask_select
from paddlenlp.layers.crf import LinearChainCrf, LinearChainCrfLoss, ViterbiDecoder


class TokenClassification(BaseModel):
    """Transformer encoder with a per-token softmax classification head."""

    def __init__(self, args):
        super().__init__(args, 'seq')
        dim = self.transformer.config['hidden_size']
        self.linear = Linear(dim, args.n_class, args.m_drop)
        self.loss = CrossEntropy(args.eps)

    def forward(self, input_ids, **kwargs):
        hidden = self.forward_transformer(input_ids)
        scores = self.linear(hidden)
        # Raw logits while training; hard label ids at inference time.
        return scores if self.training else scores.argmax(-1)

    def compute_loss(self, forward_out, batch):
        labels, mask = batch['labels'], batch['attention_mask']
        return self.loss(forward_out, labels, mask)


class CRFTokenClassification(BaseModel):
    """Token classifier with a linear-chain CRF output layer.

    Training returns raw emission logits (consumed by the CRF loss);
    inference runs Viterbi decoding and returns the best tag path.
    """

    def __init__(self, args):
        super().__init__(args, 'seq')
        dim = self.transformer.config['hidden_size']
        self.linear = Linear(dim, args.n_class, args.m_drop)
        self.crf = LinearChainCrf(args.n_class, crf_lr=100, with_start_stop_tag=False)
        self.loss = LinearChainCrfLoss(self.crf)
        self.viterbi_decoder = ViterbiDecoder(self.crf.transitions, False)

    def forward(self, input_ids, **kwargs):
        emissions = self.linear(self.forward_transformer(input_ids))
        if self.training:
            return emissions
        # Viterbi decoding needs the true sequence lengths, not the padded width.
        lengths = kwargs['attention_mask'].sum(1)
        _, best_path = self.viterbi_decoder(emissions, lengths)
        return best_path

    def compute_loss(self, forward_out, batch):
        seq_lens = batch['attention_mask'].sum(1)
        return self.loss(forward_out, seq_lens, batch['labels']).mean()


class SpanTokenClassification(BaseModel):
    """Span-style tagger: predicts a start label and an end label per token."""

    def __init__(self, args):
        super().__init__(args, 'seq')
        dim = self.transformer.config['hidden_size']
        # Single projection emits both start and end logits (n_class each).
        self.linear = Linear(dim, args.n_class * 2, args.m_drop)
        self.loss = CrossEntropy(args.eps)

    def forward(self, input_ids, **kwargs):
        return self.linear(self.forward_transformer(input_ids))

    def compute_loss(self, forward_out, batch):
        mask = batch['attention_mask']
        s_logits, e_logits = paddle.split(forward_out, 2, -1)
        s_label, e_label = paddle.split(batch['labels'], 2, -1)
        s_loss = self.loss(s_logits, s_label.squeeze(-1), mask)
        e_loss = self.loss(e_logits, e_label.squeeze(-1), mask)
        return (s_loss + e_loss) / 2


class SpanTokenClassificationV2(BaseModel):
    """Span tagger with an auxiliary binary head and a mutual-distillation loss.

    ``linear1`` predicts per-token start/end class logits; ``linear2`` is a
    lighter binary head (entity vs. non-entity, for start and for end).
    Training combines the main span loss, the binary loss, and a symmetric
    KL term aligning the collapsed main-head distribution with the binary
    head.  Inference returns only the main head's logits.
    """

    def __init__(self, args):
        super().__init__(args, 'seq')
        hidden_size = self.transformer.config['hidden_size']
        self.linear1 = Linear(hidden_size, args.n_class * 2, args.m_drop)
        # 4 = 2 positions (start/end) * 2 binary classes (entity / not).
        self.linear2 = Linear(hidden_size, 4, args.m_drop)
        self.loss = CrossEntropy(args.eps)

    def forward(self, input_ids, attention_mask, token_type_ids=None, **kwargs):
        output = self.forward_transformer(input_ids, attention_mask)
        logits = self.linear1(output)
        if not self.training:
            return logits
        # NOTE(review): the original ran the transformer a second time for the
        # binary head, so each head sees its own dropout mask (R-Drop-like).
        # Kept as-is — confirm whether reusing `output` was intended instead.
        binary_logits = self.linear2(self.forward_transformer(input_ids, attention_mask))
        return logits, binary_logits

    def compute_loss(self, forward_out, batch):
        logits, binary_logits = forward_out
        mask = batch['attention_mask']
        start_logits, end_logits = paddle.split(logits, 2, -1)
        binary_start_logits, binary_end_logits = paddle.split(binary_logits, 2, -1)
        start_label, end_label = paddle.split(batch['labels'], 2, -1)
        start_label = start_label.squeeze(-1)
        end_label = end_label.squeeze(-1)

        # Main span-classification loss.
        base_loss = (self.loss(start_logits, start_label, mask) +
                     self.loss(end_logits, end_label, mask)) / 2

        # Collapse gold labels to binary: 0 = non-entity, 1 = any entity class.
        binary_start_label = paddle.where(
            start_label != 0,
            paddle.ones_like(start_label, dtype=start_label.dtype),
            start_label)
        binary_end_label = paddle.where(
            end_label != 0,
            paddle.ones_like(end_label, dtype=end_label.dtype),
            end_label)
        # Bug fix: the end-side binary loss must score the binary head's end
        # logits; the original passed the main head's `end_logits` (n_class
        # wide) against 0/1 labels.
        binary_loss = (self.loss(binary_start_logits, binary_start_label, mask) +
                       self.loss(binary_end_logits, binary_end_label, mask)) / 2

        # Symmetric KL between the binarized main head and the binary head,
        # restricted to non-padding positions.
        flat_mask = mask.flatten()
        start_probs = self.convert2binary_logits(start_logits).reshape([-1, 2])[flat_mask]
        end_probs = self.convert2binary_logits(end_logits).reshape([-1, 2])[flat_mask]
        binary_start_logits = binary_start_logits.reshape([-1, 2])[flat_mask]
        binary_end_logits = binary_end_logits.reshape([-1, 2])[flat_mask]
        kd_loss = self.kd_loss(start_probs, F.softmax(binary_start_logits, -1)) + \
                  self.kd_loss(end_probs, F.softmax(binary_end_logits, -1))
        return 0.8 * base_loss + 0.1 * binary_loss + 0.1 * kd_loss

    @staticmethod
    def convert2binary_logits(tensor):
        """Collapse class probabilities to [P(class 0), P(any other class)]."""
        probs = F.softmax(tensor, -1).chunk(tensor.shape[-1], -1)
        positive = paddle.concat(probs[1:], -1).sum(-1, keepdim=True)
        return paddle.concat([probs[0], positive], -1)

    # Backward-compatible alias for the original misspelled method name.
    convert2binary_logtis = convert2binary_logits

    def kd_loss(self, q, p):
        """Symmetric KL divergence between probability distributions q and p."""
        p_loss = F.kl_div(paddle.log(p), q)
        q_loss = F.kl_div(paddle.log(q), p)
        return (p_loss + q_loss) / 2


class FlatTokenClassification(FlatBaseModel):
    """Flat-lattice encoder with a per-token softmax classification head."""

    def __init__(self, args):
        super().__init__(args, 'seq')
        dim = self.transformer.config['hidden_size']
        self.linear = Linear(dim, args.n_class, args.m_drop)
        self.loss = CrossEntropy(args.eps)

    def forward(self, input_ids, word_ids, word_mask, char_word_mask, char_word_s, char_word_e, attention_mask,
                **kwargs):
        hidden = self.forward_transformer(input_ids, word_ids, word_mask, char_word_mask, char_word_s,
                                          char_word_e, attention_mask)
        scores = self.linear(hidden)
        # Raw logits while training; hard label ids at inference time.
        return scores if self.training else scores.argmax(-1)

    def compute_loss(self, forward_out, batch):
        labels, mask = batch['labels'], batch['attention_mask']
        return self.loss(forward_out, labels, mask)


class FlatSpanTokenClassification(FlatBaseModel):
    """Flat-lattice encoder with a span-style (start/end label) head."""

    def __init__(self, args):
        super().__init__(args, 'seq')
        dim = self.transformer.config['hidden_size']
        # Single projection emits both start and end logits (n_class each).
        self.linear = Linear(dim, args.n_class * 2, args.m_drop)
        self.loss = CrossEntropy(args.eps)

    def forward(self, input_ids, word_ids, word_mask, char_word_mask, char_word_s, char_word_e, attention_mask,
                **kwargs):
        hidden = self.forward_transformer(input_ids, word_ids, word_mask, char_word_mask, char_word_s,
                                          char_word_e, attention_mask)
        return self.linear(hidden)

    def compute_loss(self, forward_out, batch):
        mask = batch['attention_mask']
        s_logits, e_logits = paddle.split(forward_out, 2, -1)
        s_label, e_label = paddle.split(batch['labels'], 2, -1)
        s_loss = self.loss(s_logits, s_label.squeeze(-1), mask)
        e_loss = self.loss(e_logits, e_label.squeeze(-1), mask)
        return (s_loss + e_loss) / 2


class BiAffine(BaseModel):
    """Biaffine span classifier: scores every (start, end) token pair.

    Each token is projected into head and tail representations; the bilinear
    tensor ``u`` scores every ordered pair into ``n_class`` logits, giving a
    (batch, seq, seq, n_class) output.
    """

    def __init__(self, args):
        super().__init__(args, 'seq')
        hidden_size = self.transformer.config['hidden_size']
        # One projection produces both the head and tail representations.
        self.linear = Linear(hidden_size, hidden_size * 2, args.m_drop)
        # Bilinear scoring tensor: (head_dim, n_class, tail_dim).
        self.u = paddle.create_parameter([hidden_size, args.n_class, hidden_size], 'float32')
        self.loss = CrossEntropy(args.eps)

    def forward(self, input_ids, **kwargs):
        head, tail = self.forward_transformer(input_ids).split(2, -1)
        # logits[b, i, j, n] = head[b, i] . u[:, n, :] . tail[b, j]
        logits = paddle.einsum('bik,knd,bjd->bijn', head, self.u, tail)
        if not self.training:
            logits = logits.argmax(-1)
        return logits

    def compute_loss(self, forward_out, batch):
        # Keep only upper-triangular pairs (start <= end) on real tokens.
        triu_mask = paddle.triu(paddle.ones(forward_out.shape[:-1]))
        # Bug fix: `unsqeuzze` was a typo — it raised AttributeError at runtime.
        seq_mask = batch['attention_mask'].unsqueeze(-1)
        forward_out = mask_select(forward_out, triu_mask * seq_mask)
        label = mask_select(batch['labels'], batch['label_mask'])
        return self.loss(forward_out, label, None)
