from __future__ import annotations
from cProfile import label
from collections import defaultdict
import os
import re
import json
from genericpath import isfile
import numpy as np
from typing import List, Tuple
import unicodedata
from sklearn.semi_supervised import LabelSpreading


class Preprocessor:

    @staticmethod
    def read_peoples_daily_corpus(path:str, known_labels:list = None, sentence_max_len:int = 512 - 2) -> Tuple[List[str], List[List[str]], List[List[int]], List[str]]:
        """
        读取人民日报数据集的内容

        参数:
            path (str): 数据集所在的目录, 该目录下所有的文件都会被读取
            sentence_max_len (int, optional): 句子的最大长度, 默认值 512-2.

        返回:
            Tuple(List[str], List[List[str]], List[List[int]], List[str]): 四元组, 分别为：所有句子的字符串, 所有句子的字符、所有句子的标签、所有已知标签
        """
        # 读取所有文件
        file_names = [fn for fn in os.listdir(path) if isfile(os.path.join(path, fn))]
        corpus_strings = []
        corpus_tokens = []
        corpus_labels = []
        skip_count = 0
        for file_name in file_names:
            partial_sentence_string = ''
            partial_sentence_tokens = []
            partial_sentence_labels = []
            for line in open(os.path.join(path, file_name), encoding='utf-8-sig'):
                if line != '\n': # 不是新句子开始
                    [token, label] = line.split(' ')
                    partial_sentence_string += token.strip()
                    partial_sentence_tokens.append(token.strip())
                    partial_sentence_labels.append(label.strip())
                else: # 新句子开始
                    if len(partial_sentence_tokens) < sentence_max_len:
                        # 已形成的句子直接入库
                        corpus_strings.append(partial_sentence_string)
                        corpus_tokens.append(partial_sentence_tokens)
                        corpus_labels.append(partial_sentence_labels)
                    else:
                        # 将过长句子分割入库
                        #print(f'句子长度{len(sentence_tokens)}大于{sentence_max_len}, 进行分割： {"".join(sentence_tokens[0:20])}...  ')                        
                        split_strings, split_tokens, split_labels = Preprocessor._split_sentence_tokens_at_full_stops(partial_sentence_tokens, partial_sentence_labels)                        
                        if all(len(st) <= sentence_max_len for st in split_tokens):
                            corpus_strings.extend(split_strings)
                            corpus_tokens.extend(split_tokens)
                            corpus_labels.extend(split_labels)
                        else:
                            # 若分割后仍然过长, 则跳过该条语料
                            skip_count += 1
                            partial_sentence_string = ''
                            partial_sentence_tokens = []
                            partial_sentence_labels = []
                            continue
                    partial_sentence_string = ''
                    partial_sentence_tokens = []
                    partial_sentence_labels = []
        print(f'跳过过长数据{skip_count}条')

        print('标签数量统计：')
        label_count = defaultdict(int)
        for sentence_labels in corpus_labels:
            for label in sentence_labels:
                label_count[label] += 1
        for key, value in label_count.items():
            print(f'{key:<20}{value:>10}')
        if known_labels is None:
            # 统计所有标签
            known_label_set = set()
            [[known_label_set.add(label) for label in sentence_labels] for sentence_labels in corpus_labels]
            known_labels = list(known_label_set)
            known_labels.append('控制标签') # [CLS][SEP][PAD]的标签

        corpus_label_indices = [[known_labels.index(label) for label in sentence_labels] for sentence_labels in corpus_labels]

        return corpus_strings, corpus_tokens, corpus_label_indices, known_labels

    @staticmethod
    def read_ccks_corpus(path:str, known_labels:list = None, sentence_max_len:int = 512 - 2) -> Tuple[List[str], List[List[str]], List[List[int]], List[str]]:
        """
        读取 ccks 数据集的内容

        参数:
            path (str): 数据集所在的目录, 该目录下所有的文件都会被读取
            sentence_max_len (int, optional): 句子的最大长度, 默认值 512-2.

        返回:
            Tuple[List[str], List[List[str]], List[List[int]], List[str]]: 四元组, 分别为：所有句子的字符串, 所有句子的字符、所有句子的标签、所有已知标签
        """        
        # 读取所有文件
        file_names = [fn for fn in os.listdir(path) if isfile(os.path.join(path, fn))]
        corpus = []
        skip_count = 0
        for file_name in file_names:
            for line in open(os.path.join(path, file_name), encoding='utf-8-sig'):
                if line.strip() != '':
                    j = json.loads(line)
                    sentence = j['originalText']
                    sentence = sentence.replace(' ', ',') # 替换原有空格为英文逗号
                    sentence_entity_info = [(e['label_type'], e['start_pos'], e['end_pos']) for e in j['entities']]
                    corpus.append((sentence, sentence_entity_info))
        # 统计所有实体类型
        print('标签数量统计：')
        label_count = defaultdict(int)
        for text, entities in corpus:
            for entity in entities:
                label_count[entity[0]] += 1
        for key, value in label_count.items():
            print(f'{key:<20}{value:>10}')
        entity_type_set = set()
        [[entity_type_set.add(entity[0]) for entity in entities] for text, entities in corpus]
        if known_labels is None:
            # 根据实体类型制造标签
            known_labels = ['O']
            [known_labels.extend(['B-' + entity_type, 'I-' + entity_type]) for entity_type in entity_type_set]
            known_labels.append('控制标签') # [CLS][SEP][PAD]的标签
            known_labels = known_labels
        #known_label_ids = [i for i in range(0, len(known_labels))]
        #index_label_dict = {index: label for index, label in enumerate(known_labels)}
        #label_index_dict = {label: index for index, label in enumerate(known_labels)}
        # 生成 token 数组, 以及标签索引数组
        corpus_strings = []
        corpus_tokens = []
        corpus_label_indices = []
        for sentence, sentence_entity_info in corpus: 
            # 生成与 token 同长度的标签索引数组
            sentence_tokens = [c for c in sentence] # 每个字符作为一个 token
            sentence_label_indices = np.ones(len(sentence_tokens), dtype=np.int32) * known_labels.index('O')
            for entity_info in sentence_entity_info:
                entity_type = entity_info[0]
                start_pos = entity_info[1]
                end_pos = entity_info[2]
                sentence_label_indices[start_pos] = known_labels.index('B-' + entity_type)
                sentence_label_indices[start_pos + 1 : end_pos] = known_labels.index('I-' + entity_type)
            # 剔除空格字符及非法字符
            sentence_string, sentence_tokens, sentence_label_indices = Preprocessor.filter_blank_chars(sentence_tokens, sentence_label_indices)
            sentence_string, sentence_tokens, sentence_label_indices = Preprocessor.filter_illegal_chars(sentence_tokens, sentence_label_indices)
            # 处理过长的句子
            if len(sentence_tokens) <= sentence_max_len:
                corpus_strings.append(sentence_string)
                corpus_tokens.append(sentence_tokens)
                corpus_label_indices.append(sentence_label_indices)
            else:
                # 将过长句子进行分割
                split_result = Preprocessor.split_sentence_into_smaller_pieces_if_needed(sentence_tokens, sentence_label_indices, sentence_max_len)
                if split_result is not None:
                    split_tokens, split_label_indices, split_strs = split_result
                    corpus_tokens.extend(split_tokens)
                    corpus_label_indices.extend(split_label_indices)
                    corpus_strings.extend(split_strs)
                else:
                    # 若难以恰当分割, 则跳过该条语料
                    skip_count += 1
                    continue
        print(f'跳过过长数据{skip_count}条')
        return corpus_strings, corpus_tokens, corpus_label_indices, known_labels

    @staticmethod
    def read_aiaiyi_corpus(path:str, known_labels:list = None, sentence_max_len:int = 512 - 2) -> Tuple[List[str], List[List[str]], List[List[int]], List[str]]:
        # 读取所有文件
        file_names = [fn for fn in os.listdir(path) if isfile(os.path.join(path, fn))]
        corpus = []
        skip_count = 0
        content:List[str] = []
        for file_name in file_names:
            for line in open(os.path.join(path, file_name), encoding='utf8'):
                content.append(line.strip())
        # 偶数为句子，奇数为标签
        sentences = content[0::2]
        labels = content[1::2]        
        # 统计所有实体类型
        label_count = defaultdict(int)
        for sentence_label_str in labels:
            for label in sentence_label_str.split(' '):
                if label != '':
                    label_count[label] += 1
        print('标签数量统计：')
        for key, value in label_count.items():
            print(f'{key:<20}{value:>10}')
        if known_labels is None:
            label_set = set()
            for sentence_label_str in labels:
                [label_set.add(label) for label in sentence_label_str.split(' ') if label != '']                
            known_labels = list(label_set)
            # 重新排列标签顺序
            known_labels.sort()
            known_labels.sort(key = lambda x: x[-1]) # 根据标签最后一个字符排序
            known_labels.append('控制标签') # [CLS][SEP][PAD]的标签
        # 生成 token 数组, 以及标签索引数组
        all_tokens = []
        for sentence in sentences:
            all_tokens.append([c for c in sentence])
        all_label_indices = []
        for sentence_labels in labels:
            sentence_labels = sentence_labels.split(' ')

            if '' in sentence_labels:
                kkk = 0

            sentence_label_indices = [known_labels.index(label) for label in sentence_labels]
            all_label_indices.append(sentence_label_indices)

        corpus_strings = []
        corpus_tokens = []
        corpus_label_indices = []
        for sentence_tokens, sentence_label_indices in zip(all_tokens, all_label_indices): 
            # 剔除空格字符及非法字符
            sentence_string, sentence_tokens, sentence_label_indices = Preprocessor.filter_blank_chars(sentence_tokens, sentence_label_indices)
            sentence_string, sentence_tokens, sentence_label_indices = Preprocessor.filter_illegal_chars(sentence_tokens, sentence_label_indices)
            # 处理过长的句子
            if len(sentence_tokens) <= sentence_max_len:
                corpus_strings.append(sentence_string)
                corpus_tokens.append(sentence_tokens)
                corpus_label_indices.append(sentence_label_indices)
            else:
                # 将过长句子进行分割
                split_result = Preprocessor.split_sentence_into_smaller_pieces_if_needed(sentence_tokens, sentence_label_indices, sentence_max_len)
                if split_result is not None:
                    split_tokens, split_label_indices, split_strs = split_result
                    corpus_tokens.extend(split_tokens)
                    corpus_label_indices.extend(split_label_indices)
                    corpus_strings.extend(split_strs)
                else:
                    # 若难以恰当分割, 则跳过该条语料
                    skip_count += 1
                    continue
        print(f'跳过过长数据{skip_count}条')
        return corpus_strings, corpus_tokens, corpus_label_indices, known_labels

    @staticmethod
    def read_full_text_corpus(path:str):
        file_names = [fn for fn in os.listdir(path) if isfile(os.path.join(path, fn))]
        sentence_texts = []
        for file_name in file_names:
            for line in open(os.path.join(path, file_name), encoding='utf-8-sig'):
                line = line.strip()
                sentence_texts.append([c for c in line])        
        return sentence_texts

    class MassiveFullTextIterator:
        """
        Re-iterable, memory-friendly view of a full-text corpus directory:
        each iteration streams every regular file under ``path`` and yields
        one character list per (stripped) line, without loading the whole
        corpus into memory.
        """
        def __init__(self, path:str) -> None:
            # Directory whose regular files are streamed on each iteration.
            self.path = path
        def __iter__(self):
            file_names = [fn for fn in os.listdir(self.path) if os.path.isfile(os.path.join(self.path, fn))]
            for file_name in file_names:
                # 'with' closes each file deterministically (previously leaked handles).
                with open(os.path.join(self.path, file_name), encoding='utf-8-sig') as f:
                    for line in f:
                        yield list(line.strip())

    @staticmethod
    def _split_sentence_tokens_at_full_stops(sentence_tokens, sentence_parallel_list) -> Tuple[List[str], List[List[str]], List[List[int]]]:
        split_tokens = []
        split_label_indices = []
        prev_i = 0
        for i, token in enumerate(sentence_tokens):
            if token == '。' and i > 0 and i < len(sentence_tokens) - 1:
                split_tokens.append(sentence_tokens[prev_i:i + 1])
                split_label_indices.append(sentence_parallel_list[prev_i:i + 1])
                prev_i = i + 1
            elif i == len(sentence_tokens) - 1:
                split_tokens.append(sentence_tokens[prev_i:])
                split_label_indices.append(sentence_parallel_list[prev_i:])
        split_strings = [''.join(t) for t in split_tokens]
        return split_strings, split_tokens, split_label_indices

    @staticmethod
    def split_sentence_into_smaller_pieces_if_needed(sentence_tokens:List[str], sentence_labels:List[int], desire_length:int):
        if len(sentence_tokens) > desire_length:
            # 句子过长，分为两半
            split_result = Preprocessor.split_sentence_into_two(sentence_tokens, sentence_labels)
            if split_result is None:
                # 二分失败
                return None
            # 二分后继续测试
            first_half_tokens, second_half_tokens, first_half_labels, second_half_labels = split_result
            first_result = Preprocessor.split_sentence_into_smaller_pieces_if_needed(first_half_tokens, first_half_labels, desire_length)
            second_result = Preprocessor.split_sentence_into_smaller_pieces_if_needed(second_half_tokens, second_half_labels, desire_length)
            if first_result is None or second_result is None:
                # 任何一半超长且二分失败
                return None
            else:
                first_tokens, first_labels, first_str = first_result
                second_tokens, second_labels, second_str = second_result
                tokens:List[List[str]] = []
                tokens.extend(first_tokens)
                tokens.extend(second_tokens)
                labels:List[List[int]] = []
                labels.extend(first_labels)
                labels.extend(second_labels)
                strs:List[str] = []
                strs.extend(first_str)
                strs.extend(second_str)
                return tokens, labels, strs
        else:
            # 句子不过长，返回只有一个元素的
            return [sentence_tokens], [sentence_labels], [''.join(sentence_tokens)]

    @staticmethod
    def split_sentence_into_two(sentence_tokens:List[str], sentence_labels:List[int]):
        """
        在标点处将句子分割为两半，尽可能在靠近中间的标点处。
        如果没有合适的分割位置，则返回 None
        """
        punctuation_positions = []
        for i, token in enumerate(sentence_tokens):
            # 该分割位置的字符将跟随前一个句子
            if token == '。' or token == '，' or token == '、' or token == '：' or token == ',' or token == ':':
                punctuation_positions.append(i)
        if len(punctuation_positions) == 0:
            # 没有找到任何分割位置则返回 None
            return None
        if len(punctuation_positions) == 1 and punctuation_positions[0] == len(sentence_tokens) - 1:
            # 分割位置位于尾部，即无法起到缩短长度的作用，则返回 None
            return None
        positions = np.array(punctuation_positions)
        dist_to_middle = np.abs(positions - len(sentence_tokens) / 2.0)
        chosen_index = dist_to_middle.argmin()
        split_position = punctuation_positions[chosen_index]
        first_half_tokens = sentence_tokens[:split_position + 1]
        second_half_tokens = sentence_tokens[split_position + 1:]
        first_half_labels = sentence_labels[:split_position + 1]
        second_half_labels = sentence_labels[split_position + 1:]
        return first_half_tokens, second_half_tokens, first_half_labels, second_half_labels

    @staticmethod
    def filter_blank_chars(tokens:List[str], label_indices:List[int]):
        sentence_tokens_no_blank = []
        sentence_label_indices_no_blank = []
        blank_char = re.compile(r'\s+')
        for t, l in zip(tokens, label_indices):
            if blank_char.fullmatch(t) == None:
                sentence_tokens_no_blank.append(t)
                sentence_label_indices_no_blank.append(l)
        return ''.join(sentence_tokens_no_blank), sentence_tokens_no_blank, sentence_label_indices_no_blank

    

    @staticmethod
    def filter_illegal_chars(tokens:List[str], label_indices:List[int]):
        #illegal_chars = ['\ue236', '\ue011', '\ue00b', '\ue705']
        filtered_tokens:List[str] = []
        filtered_label_indices:List[int] = []
        for t, l in zip(tokens, label_indices):
            # 跳过 Unicode 私有区字符
            if unicodedata.category(t) == 'Co':                
                continue
            filtered_tokens.append(t)
            filtered_label_indices.append(l)
        return ''.join(filtered_tokens), filtered_tokens, filtered_label_indices
