""" Named entity recognition fine-tuning: utilities to work with CLUENER task. """
import torch
import logging
import os
import copy
import json
from torch.utils.data import IterableDataset

from .utils_ner import DataProcessor
logger = logging.getLogger(__name__)

class InputExample(object):
    """A single training/test example for token classification."""

    def __init__(self, guid, text_a, labels, text_b, pointing):
        """Construct an InputExample.

        Args:
            guid: Unique id for the example.
            text_a: list. The words (tokens) of the source sequence.
            labels: (Optional) list. The labels for each word of the sequence.
                This should be specified for train and dev examples, but not
                for test examples.
            text_b: list. A second token sequence, aligned with ``text_a``
                (used downstream as the MLM target tokens).
            pointing: list. Per-token pointing values for the example.
        """
        self.guid = guid
        self.text_a = text_a
        self.labels = labels
        self.text_b = text_b
        self.pointing = pointing

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids, input_mask, input_len, segment_ids, label_ids, mlm_ids):
        """Store one example's padded feature arrays.

        Args:
            input_ids: token ids of the input sequence.
            input_mask: attention mask (1 for real tokens, 0 for padding).
            input_len: unpadded length of the sequence.
            segment_ids: segment (token type) ids.
            label_ids: per-token label ids.
            mlm_ids: token ids of the MLM target sequence.
        """
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.input_len = input_len
        self.segment_ids = segment_ids
        self.label_ids = label_ids
        self.mlm_ids = mlm_ids

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

def collate_fn(batch):
    """Collate a batch of 7-tuples of equal-length (padded) tensors.

    Each batch item is (input_ids, attention_mask, token_type_ids, length,
    label_ids, mlm_ids, pointing). All per-token tensors are truncated to
    the longest real sequence length in the batch so no batch carries more
    padding than necessary.

    Returns:
        (input_ids, attention_mask, token_type_ids, label_ids, mlm_ids,
        pointing, lengths) as stacked tensors.
    """
    columns = [torch.stack(col) for col in zip(*batch)]
    input_ids, attention_mask, token_type_ids, lens, label_ids, mlm_ids, pointing = columns
    longest = max(lens).item()
    input_ids = input_ids[:, :longest]
    attention_mask = attention_mask[:, :longest]
    token_type_ids = token_type_ids[:, :longest]
    label_ids = label_ids[:, :longest]
    mlm_ids = mlm_ids[:, :longest]
    pointing = pointing[:, :longest]
    return input_ids, attention_mask, token_type_ids, label_ids, mlm_ids, pointing, lens


def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer,
                                 cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1,
                                 sep_token="[SEP]", pad_on_left=False, pad_token=0, pad_token_segment_id=0,
                                 sequence_a_segment_id=0, mask_padding_with_zero=True,):
    """Convert a list of InputExamples into padded feature dicts.

    Args:
        examples: list of InputExample (text_a/text_b are token lists,
            labels are label strings, pointing is a per-token list).
        label_list: mapping {label string: label id} (despite the name it
            is a dict — see CSCProcessor.get_labels).
        max_seq_length: fixed length every feature array is padded to;
            longer examples are skipped.
        tokenizer: object providing convert_tokens_to_ids(tokens).
        cls_token_at_end, cls_token, cls_token_segment_id, sep_token,
        pad_on_left, pad_token_segment_id, mask_padding_with_zero:
            kept for interface compatibility; not used by this
            implementation (no CLS/SEP is inserted, padding is always on
            the right with 0-mask).
        pad_token: id used to pad input_ids.
        sequence_a_segment_id: segment id assigned to every real token.

    Returns:
        list of dicts with keys input_ids, input_mask, input_len,
        segment_ids, label_ids, mlm_ids, pointing — all lists of length
        max_seq_length except input_len (an int, the unpadded length).
        Examples with unknown labels or exceeding max_seq_length are
        dropped.
    """
    # label_list is already a {label: id} mapping; the original identity
    # comprehension only copied it — make the copy explicit.
    label_map = dict(label_list)

    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d", ex_index, len(examples))

        tokens_a = example.text_a   # default example.text_a is chinese str list
        tokens_b = example.text_b   # MLM target tokens, aligned with tokens_a
        pointing = example.pointing
        try:
            label_ids = [label_map[x] for x in example.labels]
        except KeyError as e:
            # Skip (don't crash on) examples whose label is missing from the
            # label map; log through the module logger rather than print.
            logger.warning("unknown label %s in example: %s\t%s",
                           e, "".join(tokens_a), "".join(tokens_b))
            continue

        if len(tokens_a) > max_seq_length:
            logger.info("example %d exceed %d and skip", ex_index, max_seq_length)
            continue

        segment_ids = [sequence_a_segment_id] * len(tokens_a)

        input_ids = tokenizer.convert_tokens_to_ids(tokens_a)
        mlm_ids = tokenizer.convert_tokens_to_ids(tokens_b)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)
        input_len = len(label_ids)

        assert len(label_ids) == len(input_mask) == len(segment_ids), \
            f"{len(label_ids)} == {len(input_mask)} == {len(segment_ids)}"
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s", example.guid)
            logger.info("tokens: %s", " ".join([str(x) for x in tokens_a]))
            logger.info("tokens mlm: %s", " ".join([str(x) for x in tokens_b]))
            logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
            logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
            logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
            # Fixed duplicate log label: this line logs the raw label strings.
            logger.info("labels: %s", " ".join([str(x) for x in example.labels]))
            logger.info("pointing: %s", " ".join([str(x) for x in pointing]))

        # Right-pad every per-token array to the fixed max_seq_length.
        input_ids = seq_pad(input_ids, max_seq_length, pad_token)
        input_mask = seq_pad(input_mask, max_seq_length, 0)
        segment_ids = seq_pad(segment_ids, max_seq_length, 0)
        label_ids = seq_pad(label_ids, max_seq_length, 0)
        mlm_ids = seq_pad(mlm_ids, max_seq_length, 0)
        pointing = seq_pad(pointing, max_seq_length, 0)

        features.append({
            'input_ids': input_ids, 'input_mask': input_mask, 'input_len': input_len,
            'segment_ids': segment_ids, 'label_ids': label_ids, 'mlm_ids': mlm_ids, 'pointing': pointing
            })
    return features

def seq_pad(seq, max_seq_length, pad_token):
    """Return a copy of ``seq`` right-padded with ``pad_token`` to ``max_seq_length``.

    Sequences already at or beyond ``max_seq_length`` are returned as an
    unpadded copy — they are never truncated. The input is never mutated.
    """
    # list(seq) + multiplied pad replaces the original element-by-element
    # while/append loop; a negative multiplier yields an empty list, so the
    # over-length case behaves identically (plain copy).
    return list(seq) + [pad_token] * (max_seq_length - len(seq))

    
class CSCProcessor(DataProcessor):
    """Processor for the Chinese CSC/NER data set.

    Reads JSON data files via the inherited ``_read_json`` helper and turns
    each record into an InputExample.
    """

    def get_train_examples(self, data_dir, train_file):
        """See base class."""
        return self._create_examples(self._read_json(os.path.join(data_dir, train_file), "train"), "train")

    def get_dev_examples(self, data_dir, dev_file):
        """See base class."""
        return self._create_examples(self._read_json(os.path.join(data_dir, dev_file), "dev"), "dev")

    def get_test_examples(self, data_dir, test_file):
        """See base class."""
        return self._create_examples(self._read_json(os.path.join(data_dir, test_file), "test"), "test")

    def get_labels(self, labelpath):
        """Load the {label: id} mapping from a JSON file.

        Args:
            labelpath: path to a ``.json`` label-map file.

        Raises:
            ValueError: if ``labelpath`` does not end in ``.json``.
        """
        # os.path.join with a single argument was a no-op — use the path
        # directly, and reject non-JSON paths before opening the file
        # (the dead `label_ids = {}` pre-assignment is also gone).
        if not labelpath.endswith(".json"):
            raise ValueError(f"invalid label map format: {labelpath}")
        with open(labelpath, 'r', encoding='utf-8') as f:
            return json.load(f)

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets.

        Each ``line`` is a 4-item record: (text_a, labels, text_b, pointing).
        """
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            text_a, labels, text_b, pointing = line[0], line[1], line[2], line[3]
            examples.append(InputExample(guid=guid, text_a=text_a, labels=labels, text_b=text_b, pointing=pointing))
        return examples

# Registry of available processors keyed by task name — presumably looked up
# from a --task_name-style argument by the training script; confirm against
# the caller.
ner_processors = {
    'csc':CSCProcessor
}
