# -*- coding: utf-8 -*-
"""
@File: multi_label_cls.py
@Copyright: 2019 Michael Zhu
@License：the Apache License, Version 2.0
@Author：Michael Zhu
@version：
@Date：
@Desc: 
"""
import json
import logging
import os

from .utils import DataProcessor, InputExample, InputFeatures
from ...file_utils import is_tf_available

if is_tf_available():
    import tensorflow as tf

logger = logging.getLogger(__name__)


class MedicalMultiLabelProcessor():
    """Processor for a medical multi-label classification dataset stored as JSONL.

    Each data file (``train.jsonl`` / ``dev.jsonl`` / ``test.jsonl``) contains one
    JSON object per line with a ``"text"`` field (str) and a ``"labels"`` field
    (list of label strings).
    """

    def _read_jsonl(self, input_file):
        """Read a JSONL file and return its rows as a list of dicts.

        Args:
            input_file: Path to a UTF-8 JSONL file.

        Returns:
            List of parsed JSON objects, one per non-empty line.
        """
        list_samples = []
        with open(input_file, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                # Skip blank lines / fragments too short to be a JSON object ("{}" is 2 chars).
                if len(line) < 2:
                    continue
                list_samples.append(json.loads(line))
        return list_samples

    def get_train_examples(self, data_dir, all_labels):
        """Build `InputExample`s from ``train.jsonl`` under ``data_dir``."""
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "train.jsonl")),
            "train",
            all_labels,
        )

    def get_dev_examples(self, data_dir, all_labels):
        """Build `InputExample`s from ``dev.jsonl`` under ``data_dir``."""
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "dev.jsonl")),
            "dev",
            all_labels,
        )

    def get_test_examples(self, data_dir, all_labels):
        """Build `InputExample`s from ``test.jsonl`` under ``data_dir``."""
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "test.jsonl")),
            "test",
            all_labels,
        )

    def get_labels_from_data(self, input_file):
        """Collect the distinct labels from the data, in first-seen order.

        Args:
            input_file: A single JSONL path (str) or an iterable of paths.

        Returns:
            List of unique label strings, ordered by first appearance.
        """
        if isinstance(input_file, str):
            list_samples = self._read_jsonl(input_file)
        else:
            list_samples = []
            for file_ in input_file:
                list_samples.extend(self._read_jsonl(file_))

        labels = []
        # Track membership in a set: O(1) per lookup instead of scanning the
        # growing `labels` list (the original was O(n^2) over all label mentions).
        seen = set()
        for samp in list_samples:
            for lab_ in samp["labels"]:
                if lab_ not in seen:
                    seen.add(lab_)
                    labels.append(lab_)
        return labels

    def get_classes(self):
        """Per-label binary classes: "0" (label absent) / "1" (label present)."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type, all_labels):
        """Create `InputExample`s from parsed JSONL rows.

        The example label is a comma-joined multi-hot string over ``all_labels``
        (e.g. ``"0,1,0"``), matching the order of ``all_labels``.

        Args:
            lines: List of dicts with ``"text"`` and ``"labels"`` keys.
            set_type: Split name used to build the guid ("train"/"dev"/"test").
            all_labels: Full ordered label inventory.
        """
        # Precompute label -> index once instead of list.index() per label
        # occurrence (O(n) each). Unknown labels raise KeyError here.
        label_to_idx = {lab: i for i, lab in enumerate(all_labels)}
        examples = []
        for i, line in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            labels_idx = [0] * len(all_labels)
            for lab_ in line["labels"]:
                labels_idx[label_to_idx[lab_]] = 1

            examples.append(
                InputExample(
                    guid=guid,
                    text_a=line["text"],
                    text_b=None,
                    label=",".join(str(w) for w in labels_idx),
                )
            )
        return examples


def mlc_convert_examples_to_features(examples, tokenizer,
                                     processor=None,
                                     input_folder=None,
                                     max_length=512,
                                     task=None,
                                     label_list=None,
                                     class_list=None,
                                     output_mode=None,
                                     pad_on_left=False,
                                     pad_token=0,
                                     pad_token_segment_id=0,
                                     mask_padding_with_zero=True):
    """
    Loads a list of multi-label ``InputExample``s into a list of ``InputFeatures``.

    Args:
        examples: List of ``InputExample``s whose ``label`` is a comma-joined
            multi-hot string (e.g. ``"0,1,0"``).
        tokenizer: Instance of a tokenizer that will tokenize the examples.
        processor: Required. Used to derive ``label_list``/``class_list`` when
            they are not supplied.
        input_folder: Folder containing ``train.jsonl``; only read when
            ``label_list`` is None.
        max_length: Maximum example length; sequences are padded up to it.
        task: Unused; kept for signature compatibility.
        label_list: Full label inventory; derived from ``train.jsonl`` if None.
        class_list: Per-label class values (normally ``["0", "1"]``); derived
            from ``processor.get_classes()`` if None.
        output_mode: Only ``"classification"`` is supported (the default).
        pad_on_left: If ``True``, pad on the left rather than on the right.
        pad_token: Padding token id.
        pad_token_segment_id: Segment id for padding tokens (usually 0; 4 for XLNet).
        mask_padding_with_zero: If ``True``, attention mask is ``1`` for real
            tokens and ``0`` for padding; if ``False``, inverted.

    Returns:
        A list of ``InputFeatures`` whose ``label`` is a list of per-label
        class indices.
    """

    assert processor is not None
    if label_list is None:
        # Derive the label inventory from the training data.
        input_file = os.path.join(input_folder, "train.jsonl")
        label_list = processor.get_labels_from_data(input_file)

    # BUG FIX: the original tested ``class_list is not None`` — it clobbered a
    # caller-supplied class list and left ``class_list=None`` unpopulated,
    # crashing below when building ``class_map``.
    if class_list is None:
        class_list = processor.get_classes()

    if output_mode is None:
        output_mode = "classification"

    # NOTE(review): label_map is currently unused; retained (with the
    # label_list derivation above) for parity with the original behavior.
    label_map = {label: i for i, label in enumerate(label_list)}
    class_map = {class_: i for i, class_ in enumerate(class_list)}

    features = []
    for ex_index, example in enumerate(examples):
        if ex_index % 10000 == 0:
            # Lazy %-args: the string is only formatted if INFO is enabled.
            logger.info("Writing example %d", ex_index)

        inputs = tokenizer.encode_plus(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
        )
        input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding_length = max_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
            token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
        else:
            input_ids = input_ids + ([pad_token] * padding_length)
            attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)

        assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
        assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(len(attention_mask), max_length)
        assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(len(token_type_ids), max_length)

        label = None
        if output_mode == "classification":
            # "0,1,0" -> [class_map["0"], class_map["1"], class_map["0"]]
            label = [class_map[w] for w in example.label.split(",")]
        else:
            raise KeyError(output_mode)

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s", example.guid)
            logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
            logger.info("attention_mask: %s", " ".join([str(x) for x in attention_mask]))
            logger.info("token_type_ids: %s", " ".join([str(x) for x in token_type_ids]))
            logger.info("label: %s (id = %s)", example.label, json.dumps(label, ensure_ascii=False))

        features.append(
                InputFeatures(input_ids=input_ids,
                              attention_mask=attention_mask,
                              token_type_ids=token_type_ids,
                              label=label))

    return features



if __name__ == "__main__":
    pass
    
