# encoding: utf-8
import re

import difflib
import threading

from modelscope.pipelines import pipeline,  Pipeline
from modelscope.utils.constant import Tasks
from pycorrector import MacBertCorrector, Corrector
# from textgen import BartSeq2SeqModel
from pycorrector import ConvSeq2SeqCorrector

from ..center.clean import split_long_text, split_text_with_re
from ..config.base import ERROR_CORRECTION_MODEL_PATH, ERROR_CORRECTION_MODEL_DEVICE
from ..config.db import get_ignore_error_phrases
from ..tools.tool import get_offset
from ..model.errorCorrent.CQU.create import CQUErrorCorrectModel


def create_error_correction_model(model_path=ERROR_CORRECTION_MODEL_PATH):
    """Build a text error-correction model selected by keywords in *model_path*.

    'bart' / 'DecBert' -> modelscope pipeline; 'CQU' -> in-house model;
    anything else -> pycorrector's default Corrector.
    """
    if 'bart' in model_path or 'DecBert' in model_path:
        return pipeline(task=Tasks.text_error_correction,
                        model=model_path,
                        device=ERROR_CORRECTION_MODEL_DEVICE)
    if 'CQU' in model_path:
        return CQUErrorCorrectModel()
    # Fallback: pycorrector's default corrector.
    return Corrector()


def judge_text_diff(text_infos):
    """Verify that each entry's declared ``textLength`` matches ``len(text)``.

    :param text_infos: iterable of dicts with 'index', 'text', 'textLength'
    :raises Exception: ('字符串接受差异', details) listing every mismatch, where
        details is a list of {'index', 'origin_length', 'new_length'} dicts
    """
    mismatches = [
        {
            'index': info['index'],
            'origin_length': info['textLength'],
            'new_length': len(info['text']),
        }
        for info in text_infos
        if len(info['text']) != info['textLength']
    ]
    if mismatches:
        raise Exception('字符串接受差异', mismatches)


def correct_text_error(text_infos: list, correction_model, replace_character=False):
    """
    Run the correction model over a list of text entries.

    :param text_infos: list of dicts {'index': ..., 'text': ..., 'textLength': ...}
    :param correction_model: a modelscope Pipeline, or a pycorrector-style model
        exposing ``correct_batch()``
    :param replace_character: if True, replace non-breaking spaces ('\xa0')
        with plain spaces before correction
    :return: list of {'index', 'correct_out', 'is_correct'} dicts, one per input
    :raises Exception: when declared and actual text lengths differ
    """
    # Validate declared vs actual text lengths before doing any model work.
    judge_text_diff(text_infos)

    # Flatten the entries into parallel index/text sequences.
    indexes, texts = [], []
    for info in text_infos:
        indexes.append(info['index'])
        text: str = info['text']
        if replace_character:
            text = text.replace('\xa0', ' ')
        texts.append(text)

    if isinstance(correction_model, Pipeline):
        outs = correction_model(texts)
    else:
        # pycorrector-style models: process in chunks of 24 to bound batch size
        # (split_list handles the <=24 case with a single chunk).
        outs = []
        for chunk in split_list(texts, 24):
            outs.extend(correction_model.correct_batch(chunk))

    result = []
    for index, text, out in zip(indexes, texts, outs):
        # Output key differs between backends: 'target' or 'output'.
        target = out.get('target', None)
        if target is None:
            target = out['output']
        correct_out = correct_error_out(text, target)
        result.append(
            {
                'index': index,
                'correct_out': correct_out,
                'is_correct': bool(correct_out),
            }
        )
    return result


def new_correct_text_error(text_infos: list, correction_model, dtype='re'):
    """
    Revised correction routine: long texts are split into short segments,
    segments are corrected in fixed-size batches, and every detected
    replacement is mapped back to offsets in the original (unsplit) text.

    :param text_infos: entries {'index': ..., 'text': ..., 'textLength': ...}
    :param correction_model: a modelscope Pipeline, or a pycorrector-style
        model exposing ``correct_batch()``
    :param dtype: segmentation mode - 're' uses split_text_with_re,
        anything else uses split_long_text
    :return: list of {'index', 'correct_out', 'is_correct'} dicts
    :raises Exception: from judge_text_diff on length mismatches, or when a
        shifted correction span does not reproduce the flagged string
    """
    batch_size = 12  # number of segments sent to the model per call
    # Validate declared vs actual text lengths up front.
    judge_text_diff(text_infos)

    '''
    将长的文本切分成短文本
    '''
    # Parallel lists: paragraph index, segment text, segment start offset
    # within the original text, and (filled later) the model outputs.
    indexes, texts, correction_indexes, outs = [], [], [], []
    origin_texts = {}  # original, unsplit text keyed by paragraph index
    for text_info in text_infos:
        index = text_info['index']
        text = text_info['text']
        origin_texts[index] = text
        if dtype == 're':
            split_texts, split_text_intervals = split_text_with_re(text)
        else:
            split_texts, split_text_intervals = split_long_text(text, detype='error_correct')
        for split_text, split_text_interval in zip(split_texts, split_text_intervals):
            indexes.append(index)
            texts.append(split_text)
            # Only the start offset of the segment is needed to shift spans back.
            correction_indexes.append(split_text_interval[0])

    batch_texts = split_list(texts, batch_size)
    for batch_text in batch_texts:
        if isinstance(correction_model, Pipeline):
            outs.extend(correction_model(batch_text))

        else:
            outs.extend(correction_model.correct_batch(batch_text))
    result_dict = {}
    for index, text, correction_index, out in zip(indexes, texts, correction_indexes, outs):
        # Output key differs between backends: 'target' or 'output'.
        target = out.get('target', None)
        if target is None:
            target = out['output']
        correct_out = correct_error_out(text, target)
        # Shift segment-local offsets back to original-text offsets and
        # sanity-check each span against the original text.
        new_correct_out = []
        for _ in correct_out:
            new_correction_index = [_ + correction_index for _ in _['correction_index']]
            actual_origin_str = origin_texts[index][new_correction_index[0]: new_correction_index[1]]
            origin_str = _['origin_str']
            if actual_origin_str != origin_str:
                # Shifted span must reproduce the flagged string exactly;
                # otherwise the offset bookkeeping is wrong.
                error_info = '错字审查定位错误, 原始信息: %s, 修正信息：%s' % (origin_str, actual_origin_str)
                raise Exception(error_info)
            # Keep only replacements where both sides contain CJK characters —
            # presumably to drop punctuation/whitespace noise; confirm with callers.
            if contains_chinese_characters(actual_origin_str) and contains_chinese_characters(_['correction_str']):
                new_correct_out.append(
                    {
                        'tag': _['tag'],
                        'correction_index': new_correction_index,
                        'origin_str': actual_origin_str,
                        'correction_str': _['correction_str'],
                    }
                )
        result_dict.setdefault(index, [])
        result_dict[index].extend(new_correct_out)
    # Re-aggregate per paragraph index (a paragraph may span several segments).
    result = []
    for index, correct_out in result_dict.items():
        result.append(
            {
                'index': index,
                'correct_out': correct_out,
                'is_correct': True if len(correct_out) else False
            }
        )
    return result


# 修正输出
def correct_error_out(text, out):
    """
    :param text:原文
    :param out: 修正过后的文章
    :return:
    """
    # 创建SequenceMatcher对象
    matcher = difflib.SequenceMatcher(a=text, b=out)
    # 获取差异报告
    diff_report = matcher.get_opcodes()
    # 检查差异报告中是否存在关键词错误
    correct_out = []
    for tag, i1, i2, j1, j2 in diff_report:
        if tag == 'replace':
            correct_out.append(
                {
                    'tag': tag,
                    'correction_index': [i1, i2],
                    'origin_str': text[i1: i2],
                    'correction_str': out[j1: j2],
                }
            )
    return correct_out


def split_list(input_list, chunk_size=24):
    """Split *input_list* into consecutive chunks of at most *chunk_size* items."""
    chunks = []
    for start in range(0, len(input_list), chunk_size):
        chunks.append(input_list[start:start + chunk_size])
    return chunks


def contains_chinese_characters(s):
    """Return True if *s* contains at least one CJK character (U+4E00..U+9FFF)."""
    # re caches compiled patterns, so searching directly is equivalent.
    return re.search(r'[\u4e00-\u9fff]', s) is not None


def intersect_correct_result(result_main, result_assist, verification_type='equal'):
    """
    Keep only errors that several models agree on: an error reported by the
    main model survives when the assist model reports an error whose index
    range matches it.

    :param result_main: results from the primary model
    :param result_assist: results from the secondary model
    :param verification_type: 'equal' for exact index match, 'intersect' for overlap
    :return: confirmed error entries only (entries without errors are dropped)
    """
    assist_by_index = {item['index']: item for item in result_assist}
    confirmed = []
    for corr in result_main:
        # Entries without errors never make it into the summary.
        if not corr['is_correct']:
            continue
        assist = assist_by_index.get(corr['index'])
        kept = []
        if assist is not None:
            for main_out in corr['correct_out']:
                for assist_out in assist['correct_out']:
                    # First matching assist span confirms this main error.
                    if index_check(main_out['correction_index'], assist_out['correction_index'], verification_type):
                        kept.append(main_out)
                        break
        if kept:
            confirmed.append(
                {
                    'index': corr['index'],
                    'correct_out': kept,
                    'is_correct': True,
                }
            )
    return confirmed


def del_ignore_errors(text_infos, correct_result):
    """
    Drop corrections that match the configured ignore phrase list.

    :param text_infos: original entries {'index', 'text', ...}
    :param correct_result: output of the correction functions
        ({'index', 'correct_out', 'is_correct'} dicts)
    :return: filtered result containing only entries that still have errors
    """
    # Phrases whose "errors" should be ignored, keyed by the flagged string.
    total_ignore_phrases, total_correct_ignore_phrases = get_ignore_error_phrases()
    text_infos_with_index = dict((_['index'], _) for _ in text_infos)
    new_correct_result = []
    for entry in correct_result:
        if not entry['is_correct']:
            continue
        # The original text is the same for every correction in this entry,
        # so look it up once instead of per correction.
        origin_text = text_infos_with_index[entry['index']]['text']
        new_correct_out = []
        for correct_info in entry['correct_out']:
            ignore_phrases = total_ignore_phrases.get(correct_info['origin_str'], None)
            # Keep the correction unless it sits inside a known ignore phrase.
            if ignore_phrases is None or not ignore_error_correction(origin_text, correct_info, ignore_phrases):
                new_correct_out.append(correct_info)
        if new_correct_out:
            new_correct_result.append(
                {
                    'index': entry['index'],
                    'is_correct': True,
                    'correct_out': new_correct_out,
                }
            )
    return new_correct_result


def ignore_error_correction(origin_text, correct_info, ignore_phrases):
    """
    Return True when the flagged string, together with its neighbouring
    characters in *origin_text*, forms one of the *ignore_phrases* (i.e. the
    reported "error" is a known false positive); otherwise False.

    :param origin_text: the full original text
    :param correct_info: model finding with 'correction_index' (span of the
        flagged string in origin_text), 'origin_str' and 'correction_str'
    :param ignore_phrases: phrases containing origin_str that should be ignored

    Example:
    origin_text = '本合童约定，违约方应按照国家和某市相关法律法规的柜顶有关条款的规顶承担违约责任。'
    correct_info = {'tag': 'replace', 'correction_index': [2, 3], 'origin_str': '童', 'correction_str': '同'}
    ignore_phrases = ['合童', '童装', '儿童装', '合童装']
    """
    flagged = correct_info['origin_str']
    start, end = correct_info['correction_index']
    text_len = len(origin_text)
    for phrase in ignore_phrases:
        # Every position of the flagged string inside this ignore phrase.
        for offset in get_offset(phrase, flagged):
            # Characters required to the left / right of the flagged span
            # for the surrounding text to spell out the whole phrase.
            left = offset[0]
            right = len(phrase) - offset[1]
            lo = max(start - left, 0)
            hi = min(end + right, text_len)
            if origin_text[lo:hi] == phrase:
                return True
    return False


def index_check(index_1, index_2, verification_type):
    """Compare two [start, end] index pairs.

    'equal' requires exact equality; any other mode accepts ranges that
    overlap or touch.
    """
    if verification_type == 'equal':
        return index_1 == index_2
    # Ranges intersect unless one ends strictly before the other begins.
    return index_1[1] >= index_2[0] and index_2[1] >= index_1[0]


def thread_correct_result(func, result_dict, label, *args, **kwargs):
    """Invoke *func* and store its return value under result_dict[label].

    Thread-target helper: threads cannot return values directly, so results
    are written into a shared dict keyed by label instead.
    """
    outcome = func(*args, **kwargs)
    result_dict[label] = outcome


# 交叉验证模型结果
def intersect_correct_text_error(text_infos, correct_error_model, assist_correct_error_model, intersect_data=True, dtype='re'):
    result_main = new_correct_text_error(text_infos, correct_error_model, dtype)
    if intersect_data:
        target_index = [_['index'] for _ in result_main if _['is_correct']]
        assist_text_infos = [_ for _ in text_infos if _['index'] in target_index]
    else:
        assist_text_infos = text_infos
    print('len_assist_text_infos', len(assist_text_infos))
    # print('assist_text_infos', assist_text_infos)
    if len(assist_text_infos):
        result_assist = new_correct_text_error(assist_text_infos, assist_correct_error_model, dtype)
    else:
        result_assist = []
    # threads = []
    #
    # result_dict = {}
    # t = threading.Thread(target=thread_correct_result, args=(
    #     new_correct_text_error, result_dict, 'result_main', text_infos, correct_error_model, dtype))
    # t.start()
    # threads.append(t)
    #
    # t = threading.Thread(target=thread_correct_result, args=(
    #     new_correct_text_error, result_dict, 'result_assist', text_infos,
    #     assist_correct_error_model, dtype))
    # t.start()
    # threads.append(t)
    #
    # for _ in threads:
    #     _.join()
    #
    # result_main = result_dict['result_main']
    # result_assist = result_dict['result_assist']
    result = intersect_correct_result(result_main, result_assist, verification_type='intersect')
    return result


