
"""
利用大模型再检查一下是否需要进行错别字纠正。
"""
import json
import re
from typing import Dict, List, Optional

from core.llm_answer import answer_with_llm
from core.text_splitter import TextSplitter


async def redo_correct_error(data: Dict, correct_result: List) -> List:
    """Re-verify previously detected typo corrections with an LLM second pass.

    For every proposed correction, the surrounding ~100-char text chunk is
    located and sent to the LLM, which re-judges whether a typo is actually
    present.  Only LLM-confirmed corrections are returned, with positions
    re-anchored against the original paragraph text.

    Args:
        data: request payload; ``data['textInfos']`` is a list of dicts each
            carrying ``index`` (paragraph id) and ``text`` (paragraph body).
        correct_result: list of dicts with ``index`` and ``correct_out``
            (the correction items produced by the first pass).

    Returns:
        A list shaped like ``correct_result`` containing only the confirmed
        corrections; empty when the LLM reply is unusable.
    """
    # 1. Find, for each correction, the text chunk that contains it.
    doc_idx = 0
    filtered_docs = dict()
    splitter = TextSplitter(chunk_size=100, chunk_overlap=0)
    text_infos = {item['index']: item for item in data['textInfos']}
    for res_item in correct_result:
        index = res_item['index']
        if index not in text_infos:
            continue
        corrects = res_item['correct_out']
        source = text_infos[index]
        docs = splitter.create_documents(source['text'])
        for doc in docs:
            start_index = doc.metadata['start_index']
            end_index = start_index + len(doc.content)
            for correct in corrects:
                start_pos, end_pos = correct['correction_index']
                # NOTE(review): strict `<` treats end_pos as inclusive —
                # confirm against the producer of correction_index.
                if start_pos >= start_index and end_pos < end_index:
                    filtered_docs[doc_idx] = {
                        '起始位置': start_index,
                        '原语句': doc.content,
                        '错别字位置': start_pos - start_index,
                        '错别字纠正': f"{correct['origin_str']} -> {correct['correction_str']}",
                        '段落号': index,
                        '纠正项': correct,
                    }
                    doc_idx += 1
                    # Each chunk is submitted at most once.
                    break

    # 2. Ask the LLM to re-check the candidate sentences.
    correct_items = {idx: doc['原语句'] for idx, doc in filtered_docs.items()}
    correct_prompt = """
你是一名中文语言学家，现有一些可能含错别字的语句，你需要检查这些语句中是否含有错别字，如果有，需要指出错别字的起始位置并给出相应的纠正结果。

## **可能需要纠正错别字的语句**
%s

## **结构化输出模板**
请严格按照以下 JSON 格式输出结果，禁止添加任何自然语言描述、注释或多余内容，确保格式完全符合 JSON 规范（如使用双引号、正确的逗号分隔、字段名准确等）。
```json 
[
	{
        "idx": "<序号>",
        "text": "<原语句>",
        "start_index": "<错别字起始位置>",
		"source_word": "<原错别字>",
		"correct_word": "<纠正后的字>"
    }
]
```
""" % json.dumps(correct_items, ensure_ascii=False, indent=4)
    answer = await answer_with_llm(correct_prompt, parse_type='list')

    # 3. Sanitize and re-anchor the LLM answer.
    if not isinstance(answer, list):
        return []

    # Bug fix: the range end was mistyped as \u9F5A (transposed digits of
    # \u9FA5), which silently dropped corrections for common CJK characters
    # in U+9F5B..U+9FA5 (e.g. 齿 U+9F7F, 龙 U+9F99, 龟 U+9F9F).
    # Compiled once, outside the loop.
    cjk_char = re.compile(r'[\u4E00-\u9FA5]')

    correct_dict = dict()
    for item in answer:
        if not isinstance(item, dict):
            continue
        idx = str(item.get('idx', ''))
        if idx.isdigit():
            idx = int(idx)
        if idx not in filtered_docs:
            continue
        start_index = str(item.get('start_index', ''))
        start_index = int(start_index) if start_index.isdigit() else 0
        source_word = item.get('source_word', '')

        # Skip items that correct no Chinese character.  The isinstance guard
        # also protects against non-string JSON values from the LLM, which
        # would make re.search raise TypeError.
        if not isinstance(source_word, str) or cjk_char.search(source_word) is None:
            continue

        doc = filtered_docs[idx]
        # The LLM's reported offset may be imprecise; snap it to the nearest
        # real occurrence of the word inside the chunk.
        start_index = find_position(doc['原语句'], source_word, start_index)
        if start_index is None:
            continue
        end_index = start_index + len(source_word)

        correct_word = item.get('correct_word', '')
        # Falsy check also rejects a JSON null (None), not just ''.
        if not correct_word:
            continue
        index = doc['段落号']

        # Re-anchor chunk-relative offsets back onto the paragraph.
        start_index += doc['起始位置']
        end_index += doc['起始位置']
        correct_dict.setdefault(index, []).append({
            'tag': 'replace',
            'correction_index': [start_index, end_index],
            'origin_str': source_word,
            'correction_str': correct_word
        })

    # 4. Convert back to the original output format.
    return [
        {'index': index, 'is_correct': True, 'correct_out': corrects}
        for index, corrects in correct_dict.items()
    ]


def find_position(text: str, source_word: str, start_index: int) -> Optional[int]:
    """Locate *source_word* in *text* at the position nearest *start_index*.

    The LLM's reported offset may be slightly off, so candidate positions are
    examined in order of increasing distance from the hint; on a distance tie
    the earlier (smaller) index wins, matching the stable-sort behaviour of
    the original implementation.

    Args:
        text: the sentence to search in.
        source_word: the word to locate.
        start_index: the (possibly imprecise) hinted start offset.

    Returns:
        The start offset of the nearest occurrence, or None when the word
        does not occur in the text (or is empty).
    """
    # Robustness: an empty word would trivially "match" at the hint position;
    # treat it as absent instead.
    if not source_word:
        return None
    word_len = len(source_word)
    # Stable sort keeps the earlier index first when two positions are
    # equidistant from the hint.
    for pos in sorted(range(len(text)), key=lambda p: abs(p - start_index)):
        if text[pos: pos + word_len] == source_word:
            return pos
    # Explicit (previously implicit) not-found result.
    return None