# encoding: utf-8
"""
数据清洗模块
"""
import re
from copy import deepcopy

import numpy as np

from ..tools.text_splitter import split_text_with_semantics, split_text_with_semantics_for_error


def split_long_text(text, detype='origin'):
    """
    Split text into semantic chunks and compute each chunk's position
    in the original text.

    :param text: source string to split
    :param detype: processing mode, 'error_correct' or 'origin' (default)
    :return: (chunks, intervals) where intervals[i] == [start, end) of
             chunks[i] inside ``text`` (chunks are assumed contiguous
             from position 0)
    """
    if detype == 'error_correct':
        chunks = split_text_with_semantics_for_error(text)
    else:
        chunks = split_text_with_semantics(text)
    intervals = []
    cursor = 0
    for chunk in chunks:
        intervals.append([cursor, cursor + len(chunk)])
        cursor += len(chunk)
    return chunks, intervals


def split_text_with_re(text):
    """
    Split text into runs of CJK characters, Latin letters, digits and
    common (ASCII/fullwidth) punctuation; every other character acts as
    a separator.

    :param text: source string to split
    :return: (segments, intervals) where intervals[i] == [start, end) of
             segments[i] inside ``text``
    """
    pattern = re.compile(r'[\u4e00-\u9fa5a-zA-Z0-9 ,.?!;:()\'"。，？！；：]+')
    segments, intervals = [], []
    for match in pattern.finditer(text):
        segments.append(match.group())
        # match.end() == match.start() + len(match.group()) for this pattern
        intervals.append([match.start(), match.end()])
    return segments, intervals


def split_long_text_and_mark(info):
    """
    Split a long training sample into shorter chunks, remapping its span
    annotations onto the chunk-local coordinate system.

    :param info: dict with keys:
        - 'text': the full source text
        - 'info_list': list of dicts, each with 'offset' ([start, end] into
          'text') and 'span' (the literal substring at that offset)
        - 'schema': passed through unchanged to every output chunk
    :return: list of dicts shaped like ``info``, one per chunk that contains
        at least one annotation; each 'info_list' entry carries offsets
        relative to that chunk's 'text'
    :raises Exception: when an annotation cannot be mapped onto the chunks
        or the remapped offset no longer matches its span text
    """
    new_infos = {}
    text = info['text']
    info_list = info['info_list']
    schema = info['schema']
    # split_text_intervals[i] is the [start, end) of split_texts[i]
    # inside the original text.
    split_texts, split_text_intervals = split_long_text(text)

    # For every annotation, find the smallest chunk-index range
    # [first, last] whose combined text fully covers its character span.
    new_info_split_indexes = {}
    for i, offset_info in enumerate(info_list):
        start, end = offset_info['offset']
        start_intervals = [j for j, split_interval in enumerate(split_text_intervals) if split_interval[0] <= start]
        end_intervals = [j for j, split_interval in enumerate(split_text_intervals) if split_interval[-1] >= end]
        if len(start_intervals) == 0 or len(end_intervals) == 0:
            raise Exception('训练数据拆分错误')
        actual_start_interval = max(start_intervals)
        actual_end_intervals = min(end_intervals)
        if actual_start_interval > actual_end_intervals:
            raise Exception('训练数据拆分错误')
        new_info_split_indexes[i] = {
            'split_index': [actual_start_interval, actual_end_intervals],
            'offset_info': deepcopy(offset_info),
        }

    # One output chunk per distinct (first, last) index pair; its text is
    # the concatenation of the covered split pieces.
    unique_split_indexes = np.unique([_['split_index'] for _ in new_info_split_indexes.values()], axis=0)
    for text_index, unique_split_index in enumerate(unique_split_indexes):
        new_infos[text_index] = {
            'split_index': unique_split_index,
            'info_list': [],
            'schema': schema,
            'text': ''.join(split_texts[unique_split_index[0]: unique_split_index[1] + 1])
        }

    # Attach each annotation to every chunk whose index range contains its
    # own range, shifting the offsets to be chunk-relative.
    # NOTE: O(annotations * chunks); acceptable for typical sample sizes.
    for split_info in new_info_split_indexes.values():
        split_index = split_info['split_index']
        offset_info = split_info['offset_info']
        offset = offset_info['offset']
        span = offset_info['span']
        for new_info in new_infos.values():
            info_split_index = new_info['split_index']
            if info_split_index[0] <= split_index[0] and info_split_index[1] >= split_index[1]:
                new_offset_info = deepcopy(offset_info)
                new_offset = [_ - split_text_intervals[info_split_index[0]][0] for _ in offset]
                new_offset_info['offset'] = new_offset
                # Validate that the remapped offset still points at the
                # original span text; raise (not assert) so the check
                # survives ``python -O`` and matches the errors above.
                if new_info['text'][new_offset[0]: new_offset[1]] != span:
                    raise Exception('训练数据拆分错误')
                new_info['info_list'].append(new_offset_info)

    result = []
    for new_info in new_infos.values():
        del new_info['split_index']
        result.append(new_info)
    return result


