from typing import Dict, Optional
import logging

from overrides import overrides
import torch

from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.data.fields import MetadataField
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Seq2SeqEncoder, Seq2VecEncoder, TextFieldEmbedder
from allennlp.nn import InitializerApplicator, util
from allennlp.nn.util import get_text_field_mask
from allennlp.training.metrics import CategoricalAccuracy, FBetaMeasure
from allennlp.modules import TimeDistributed
from allennlp.nn.util import get_lengths_from_binary_sequence_mask
from allennlp.modules.attention import Attention
from allennlp.nn.util import weighted_sum
import datetime
from allennlp.common import logging as common_logging

logger = logging.getLogger(__name__)

# from .gpu_mem_track import MemTracker
# tracker = MemTracker(path='./track')

def reshape_mask(original_mask):
    """Reduce a [batch, seq] binary mask to a per-row indicator.

    Each entry becomes 1 when the corresponding row contains at least one
    active position; rows with zero active positions keep their 0 length.
    """
    row_lengths = get_lengths_from_binary_sequence_mask(original_mask)
    has_tokens = row_lengths > 0
    return torch.where(has_tokens, 1, row_lengths)

def slice_list(b_id, tensor):
    """Extract one instance from a batched ``TextFieldTensors``-style dict.

    Parameters
    ----------
    b_id : int
        Index along dim 0 of the instance to extract.
    tensor : dict
        ``{'tokens': {key: Tensor[batch, ...], ...}}`` as produced by an
        AllenNLP token indexer (keys such as ``token_ids``, ``mask``,
        ``type_ids``).  Any set of inner keys is supported; previously only
        the three keys above were handled.

    Returns
    -------
    dict
        ``{'tokens': {key: Tensor, ...}}`` where each tensor is the
        ``b_id``-th slice, unsqueezed back to at least 2 dimensions so the
        result can be fed to an embedder as a batch of size 1.
    """
    inner = tensor['tokens']
    sliced = {}
    for key, value in inner.items():
        piece = value[b_id]
        # Restore a leading batch dimension when slicing dropped below 2-D.
        sliced[key] = piece if piece.dim() >= 2 else piece.unsqueeze(0)
    return {'tokens': sliced}
    

@Model.register("basic_classifier_cls_with_optseq_at")
class BasicClassifier(Model):
    """
    This `Model` implements a basic text classifier. After embedding the text into
    a text field, we will optionally encode the embeddings with a `Seq2SeqEncoder`. The
    resulting sequence is pooled using a `Seq2VecEncoder` and then passed to
    a linear classification layer, which projects into the label space. If a
    `Seq2SeqEncoder` is not provided, we will pass the embedded text directly to the
    `Seq2VecEncoder`.
    Registered as a `Model` with name "basic_classifier".
    # Parameters
    vocab : `Vocabulary`
    text_field_embedder : `TextFieldEmbedder`
        Used to embed the input text into a `TextField`
    seq2seq_encoder : `Seq2SeqEncoder`, optional (default=`None`)
        Optional Seq2Seq encoder layer for the input text.
    seq2vec_encoder : `Seq2VecEncoder`
        Required Seq2Vec encoder layer. If `seq2seq_encoder` is provided, this encoder
        will pool its output. Otherwise, this encoder will operate directly on the output
        of the `text_field_embedder`.
    feedforward : `FeedForward`, optional, (default = `None`)
        An optional feedforward layer to apply after the seq2vec_encoder.
    dropout : `float`, optional (default = `None`)
        Dropout percentage to use.
    num_labels : `int`, optional (default = `None`)
        Number of labels to project to in classification layer. By default, the classification layer will
        project to the size of the vocabulary namespace corresponding to labels.
    namespace : `str`, optional (default = `"tokens"`)
        Vocabulary namespace corresponding to the input text. By default, we use the "tokens" namespace.
    label_namespace : `str`, optional (default = `"labels"`)
        Vocabulary namespace corresponding to labels. By default, we use the "labels" namespace.
    initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
        If provided, will be used to initialize the model parameters.

    basic_classifier_cls_with_cat: design notes

    This classifier was designed specifically to improve accuracy on the
    iFLYTEK classification data. The main idea is to build the classifier
    input by concatenating two kinds of features.

    First feature: the first 510 characters of the document are run through
    the pretrained language model, and the resulting [CLS] vector is one of
    the core features.

    Second feature (four variants were considered):
        1. Split the document into sentences, take the per-sentence CLS
           sequence, pool it with bag-of-embeddings, and concatenate to the
           original CLS vector.
        2. Same CLS sequence, but pooled with a self-attention weighted
           average before concatenation.
        3. Run an LSTM over every token of every sentence, reduce the
           long/short-term state, and concatenate to the CLS vector.
        4. Extract CNN features over the per-sentence tokens and concatenate
           them to the CLS vector.

    Variants 1 and 2 share one abstraction ("type1"); variants 3 and 4 share
    another ("type2"). This file implements the type1 variants only.

    It must be used together with the `text_classification_json_cat` dataset
    reader; without that modified reader the model cannot run. In the config,
    `segment_sentence` must also be set to `True`.
    """

    def __init__(
        self,
        vocab: Vocabulary,
        text_field_embedder: TextFieldEmbedder,
        # list_field_embedder: TextFieldEmbedder,
        pre_seq2vec_encoder: Seq2VecEncoder = None,  # CLS extraction
        seq2vec_encoder: Seq2VecEncoder = None,  # final CLS BoE; absent when plain truncation is used
        # token_level_seq2vec_encoder: Seq2VecEncoder = None,  # token-level encoding
        feedforward: Optional[FeedForward] = None,  # feedforward projection
        cls_attention: Attention = None,  # self-attention weights over the CLS sequence
        # cls_encoder: Seq2SeqEncoder = None,  # LSTM encoding of the CLS sequence
        cls_seq2vec_encoder: Seq2VecEncoder = None,  # seq2vec encoding of the CLS sequence
        dropout: float = None,  # dropout applied after the feedforward layer
        num_labels: int = None,
        label_namespace: str = "labels",
        namespace: str = "tokens",
        initializer: InitializerApplicator = InitializerApplicator(),
        **kwargs,
    ) -> None:

        super().__init__(vocab, **kwargs)
        self._text_field_embedder = text_field_embedder
        # self._list_field_embedder = list_field_embedder
        # self._token_level_seq2vec_encoder = token_level_seq2vec_encoder
        self._seq2vec_encoder = seq2vec_encoder
        self._feedforward = feedforward
        # NOTE(review): _pre_seq2vec_encoder is stored but never used in this
        # file's forward() — confirm whether it is still needed.
        self._pre_seq2vec_encoder = pre_seq2vec_encoder
        self._cls_seq2vec_encoder = cls_seq2vec_encoder

        if feedforward is not None:
            self._classifier_input_dim = feedforward.get_output_dim()
        elif self._cls_seq2vec_encoder:
            # NOTE(review): this adds _cls_seq2vec_encoder's output dim, but
            # forward() currently produces the attention-pooled feature (same
            # dim as _seq2vec_encoder's output) rather than running
            # _cls_seq2vec_encoder — confirm the intended input dim when
            # cls_seq2vec_encoder is set without a feedforward layer.
            self._classifier_input_dim = self._seq2vec_encoder.get_output_dim() + self._cls_seq2vec_encoder.get_output_dim()
        else:
            self._classifier_input_dim = self._seq2vec_encoder.get_output_dim()

        if dropout:
            self._dropout = torch.nn.Dropout(dropout)
        else:
            self._dropout = None
        self._label_namespace = label_namespace
        self._namespace = namespace

        if num_labels:
            self._num_labels = num_labels
        else:
            self._num_labels = vocab.get_vocab_size(namespace=self._label_namespace)

        self._classification_layer = torch.nn.Linear(self._classifier_input_dim, self._num_labels)
        self._accuracy = CategoricalAccuracy()
        self._loss = torch.nn.CrossEntropyLoss()
        self._f1_metric = FBetaMeasure(average='weighted')
        self.count = 0
        self._cls_attention = cls_attention
        # self._cls_encoder = cls_encoder
        # self._unfold_sequence = TimeDistributed(self._token_level_seq2vec_encoder)
        initializer(self)

    def forward(  # type: ignore
        self,
        tokens: TextFieldTensors,
        seq_tokens: TextFieldTensors,  # ListField of sentence TextFields, batched
        label: torch.IntTensor = None,
        metadata: MetadataField = None,
    ) -> Dict[str, torch.Tensor]:

        """
        # Parameters
        tokens : `TextFieldTensors`
            From a `TextField` — the (truncated) whole document.
        seq_tokens : `TextFieldTensors`
            From a `ListField` of per-sentence `TextField`s produced by the
            `text_classification_json_cat` reader with `segment_sentence=True`.
        label : `torch.IntTensor`, optional (default = `None`)
            From a `LabelField`
        # Returns
        An output dictionary consisting of:
            - `logits` (`torch.FloatTensor`) :
                A tensor of shape `(batch_size, num_labels)` representing
                unnormalized log probabilities of the label.
            - `probs` (`torch.FloatTensor`) :
                A tensor of shape `(batch_size, num_labels)` representing
                probabilities of the label.
            - `loss` : (`torch.FloatTensor`, optional) :
                A scalar loss to be optimised.
        """

        # Part 1: embed the whole (truncated) document.
        embedded_text = self._text_field_embedder(tokens)  # [batch_size, tokens, emb_size]
        mask = get_text_field_mask(tokens).long()

        # Part 2: outer (document-level) feature.
        # This seq2vec encoder must be configured as a 'cls_pooler'.
        if self._seq2vec_encoder:
            embedded_text = self._seq2vec_encoder(embedded_text, mask=mask)  # [batch_size, emb_size]

        # Debug note:
        #
        # source seq_tokens: [batch_size, sentence, tokens]; if processed in
        #   parallel: 2 * 5 * 200 * 312 ~= 10~100W activations
        # target cls_seq: [batch_size, sentence, embedding_size] 2 * 4 * 312 >= 1W
        # tmp_seq: [sentence, tokens] ~= [batch_size, tokens]
        # The key point is: process one batch element at a time to bound memory.

        res_tensor = []
        b_size, sen_size, token_size = seq_tokens['tokens']['token_ids'].shape  # 8 * 10 * 300 <= 10W

        for b_id in range(b_size):  # target is [batch_size, emb_size]

            batch_instance = slice_list(b_id, seq_tokens)  # [sentence, tokens] = [batch_size, tokens]

            # Process one sentence at a time to keep peak memory low.
            _res = []
            for s_id in range(sen_size):  # target is [sentence_num, emb_size]
                sentence_instance = slice_list(s_id, batch_instance)  # [1, tokens]

                seq_embedded_text = self._text_field_embedder(sentence_instance)  # [1, tokens, emb_dim]

                seq_mask = get_text_field_mask(sentence_instance).long()
                # NOTE(review): this reuses _seq2vec_encoder (the document CLS
                # pooler), not _pre_seq2vec_encoder — confirm which encoder was
                # intended for per-sentence pooling.
                seq_embedded_text = self._seq2vec_encoder(seq_embedded_text, mask=seq_mask)  # [1, emb_dim]
                _res.append(seq_embedded_text)

            seq_embedded_text = torch.cat(_res, dim=0)  # [sentence_num, emb_size]

            # Note:
            #
            # The memory limit is driven not by batch_size but by the average
            # sentence count: no more than 10 sentences of up to 200 tokens
            # means 10 * 200 * 312 <= 100w; with an average of 5 sentences,
            # 5 * 200 * 312 = 312000 ~= 31w.

            # Note 2:
            #
            # The number of sentences differs between batches, so each batch
            # element must be pooled inside this loop. The loop produces a
            # [batch, emb_size] tensor that can be concatenated with the outer
            # feature [batch, emb_size].

            # Note 3:
            # The computation below is written for batched inputs, so we just
            # unsqueeze the embedding to a batch of one.

            seq_embedded_text = seq_embedded_text.unsqueeze(0)  # [sentence, emb_dim] -> [1, sentence, emb_dim]
            # seq_embedded_text = self._cls_seq2vec_encoder(seq_embedded_text)  # [1, emb_dim] works without mask

            # Attention of the document CLS vector over the per-sentence CLS
            # sequence, then a weighted average of that sequence.
            wt = self._cls_attention(embedded_text[b_id].unsqueeze(0), seq_embedded_text.transpose(1, 2)).squeeze(0)
            seq_embedded_text = weighted_sum(seq_embedded_text, wt)

            res_tensor.append(seq_embedded_text)

        seq_embedded_text = torch.cat(res_tensor, dim=0)  # reform into [batch_size, emb_size]

        # NOTE(review): process_flag is hard-coded on, so the concatenation
        # below always happens — consider deriving it from the configuration.
        process_flag = 1

        if process_flag:  # concatenate only when the second-aspect feature exists
            embedded_text = torch.cat((embedded_text, seq_embedded_text), 1)  # 312 + CLS / 312 + lstm hidden size * 2

        if self._feedforward is not None:  # dimensionality-reduction layer, always needed
            embedded_text = self._feedforward(embedded_text)

        if self._dropout:  # dropout after everything (i.e. after the feedforward)
            embedded_text = self._dropout(embedded_text)

        logits = self._classification_layer(embedded_text)
        probs = torch.nn.functional.softmax(logits, dim=-1)

        output_dict = {"logits": logits, "probs": probs}
        output_dict["token_ids"] = util.get_token_ids_from_text_field_tensors(tokens)
        if label is not None:
            loss = self._loss(logits, label.long().view(-1))
            output_dict["loss"] = loss
            self._accuracy(logits, label)
            self._f1_metric(logits, label)

        return output_dict

    @overrides
    def make_output_human_readable(
        self, output_dict: Dict[str, torch.Tensor]
    ) -> Dict[str, torch.Tensor]:
        """
        Does a simple argmax over the probabilities, converts index to string label, and
        add `"label"` key to the dictionary with the result.
        """
        predictions = output_dict["probs"]
        if predictions.dim() == 2:
            predictions_list = [predictions[i] for i in range(predictions.shape[0])]
        else:
            predictions_list = [predictions]
        classes = []
        for prediction in predictions_list:
            label_idx = prediction.argmax(dim=-1).item()
            # Fall back to the stringified index when the label is out of vocabulary.
            label_str = self.vocab.get_index_to_token_vocabulary(self._label_namespace).get(
                label_idx, str(label_idx)
            )
            classes.append(label_str)
        output_dict["label"] = classes
        tokens = []
        for instance_tokens in output_dict["token_ids"]:
            tokens.append(
                [
                    self.vocab.get_token_from_index(token_id.item(), namespace=self._namespace)
                    for token_id in instance_tokens
                ]
            )
        output_dict["tokens"] = tokens
        return output_dict

    def get_metrics(self, reset: bool = True) -> Dict[str, float]:
        # NOTE(review): the base Model.get_metrics defaults reset to False;
        # defaulting to True here resets accuracy/F1 on every unqualified call
        # — confirm this is intentional.
        metrics = {"accuracy": self._accuracy.get_metric(reset)}

        # Flatten the FBetaMeasure dict (precision/recall/fscore) with an 'f_' prefix.
        f1_dict = self._f1_metric.get_metric(reset)
        for key in f1_dict.keys():
            metrics['f_' + key] = f1_dict[key]

        return metrics

    default_predictor = "text_classifier"