import json
import re
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Dict, Any, List

from langchain_text_splitters import RecursiveCharacterTextSplitter

from llm import LLM

# 导入自定义模块
from prompt import *
from utils import file_chunker, batch_chunks, TokenCounter

# Shared LLM client; "llm_model" appears to be a config section name — TODO confirm.
llm = LLM("llm_model")
config = llm.get_config()

MODEL = config['modelname']
MODEL_CONTEXT_WINDOW = config['context_window']
# Reserve half of the context window for input, leaving the rest for generation.
MODEL_INPUT_WINDOW = MODEL_CONTEXT_WINDOW // 2

# Initialize service components.
tokencounter = TokenCounter(config['modelpath'])
MAX_WORKERS = config['max_workers']

# Character splitter sized to the input budget; prefers to break on
# paragraph / line / Chinese sentence ("。") boundaries, with no overlap.
r_splitter = RecursiveCharacterTextSplitter(
    chunk_size=MODEL_INPUT_WINDOW,
    chunk_overlap=0,
    separators=["\n\n", "\n", "。", " ", ""]
)


# 工具函数（保持原代码逻辑不变）
def files_token_stat(session):
    """
    Count tokens for every file in the session, adding a ``token_num``
    field to each file dict in place.

    Expected session shape::

        {"data": {
            "templateFiles": [{"fileId": 239, "content": ""}, ...],
            "materialFiles": [{"fileId": 240, "content": ""}, ...]
        }}

    :param session: session dict holding template and material files
    :return: (template_tokens, total_tokens) — tokens across all template
             files, and tokens across template + material files combined
    """
    total_tokens = 0
    template_tokens = 0
    data = session.get("data", {})

    # Template files count toward both the template total and the grand total.
    # Bug fix: previously `template_tokens = ...` kept only the LAST template
    # file's count; it must accumulate across all template files.
    for file in data.get("templateFiles", []):
        if "content" in file:
            file["token_num"] = tokencounter.token_count(file["content"])
            template_tokens += file["token_num"]
            total_tokens += file["token_num"]
        else:
            file["token_num"] = 0

    # Material files only contribute to the grand total.
    for file in data.get("materialFiles", []):
        if "content" in file:
            file["token_num"] = tokencounter.token_count(file["content"])
            total_tokens += file["token_num"]
        else:
            file["token_num"] = 0

    return template_tokens, total_tokens


def chunk_token_stat(chunks, key):
    """Annotate each chunk in place with ``claim_token_num`` (the token
    count of ``chunk[key]``, or 0 when the key is missing) and return the
    sum over all chunks."""
    total_tokens = 0
    for chunk in chunks:
        count = tokencounter.token_count(chunk[key]) if key in chunk else 0
        chunk["claim_token_num"] = count
        total_tokens += count
    return total_tokens


def parse_think(string):
    """Strip a chain-of-thought ``<think>...</think>`` prefix from an LLM reply.

    Returns the text after the first ``</think>`` tag. If that text is empty
    (the model put everything inside the think block), falls back to the
    think content itself with the opening ``<think>`` tag removed. Input
    without a ``</think>`` tag is returned unchanged.
    """
    # partition (unlike split) tolerates multiple "</think>" occurrences;
    # split used to raise ValueError on unpacking in that case.
    think, sep, res = string.partition("</think>")
    if not sep:
        return string
    if len(res) == 0:
        res = think.replace("<think>", "")
    return res


def extract_json(res):
    """Parse JSON out of an LLM reply.

    First tries to parse the whole string; failing that, extracts the body
    of a ```json fenced block and parses it.

    :param res: raw LLM reply text
    :return: the parsed JSON object, or None when nothing parses
    """
    try:
        return json.loads(res)
    # Narrowed from a bare `except:` — only parse/type failures should
    # trigger the fenced-block fallback.
    except (json.JSONDecodeError, TypeError):
        print("尝试解析```json")

    json_lines = []
    in_json_block = False
    for line in res.split("\n"):
        if "```json" in line:
            in_json_block = True
            continue
        elif "```" in line and in_json_block:
            in_json_block = False
        if in_json_block:
            json_lines.append(line)

    # Join with newlines (the old "+=" concatenation could fuse adjacent
    # tokens together), then trim surrounding whitespace.
    json_string = "\n".join(json_lines).strip()

    try:
        return json.loads(json_string)
    except json.JSONDecodeError as e:
        print(f"JSON解析失败: {e}\n原始内容: {res}")
        return None


def extract_write_res(res):
    """Split an LLM reply into (commentary, imitation-writing result).

    Everything after a heading line containing both "###" and "仿写结果"
    is the writing result; everything before it is commentary. The marker
    line itself is dropped.

    :param res: raw LLM reply text
    :return: (not_paper_string, paper_string)
    """
    paper_lines = []
    other_lines = []
    in_paper_block = False
    for line in res.split("\n"):
        if "###" in line and "仿写结果" in line:
            in_paper_block = True
            continue
        if in_paper_block:
            paper_lines.append(line)
        else:
            other_lines.append(line)

    # Join with newlines: the previous "+=" concatenation silently fused
    # every line together, destroying the result's line structure.
    paper_string = "\n".join(paper_lines).strip()
    not_paper_string = "\n".join(other_lines)
    return not_paper_string, paper_string


def extract_claim(template, fileid, chunkid, content, prev_result, standardText="", max_retry_times=3):
    """Run the claim-extraction prompt over a single document chunk.

    Selects the with/without-standard prompt variant, fills its
    placeholders, then queries the LLM, retrying up to ``max_retry_times``
    times until the reply parses as JSON.

    :param template: review-requirement text
    :param fileid: id of the source file (kept for interface compatibility)
    :param chunkid: id of the chunk (kept for interface compatibility)
    :param content: chunk text under review
    :param prev_result: preceding chunk's claim, or None/empty
    :param standardText: optional review-standard reference document
    :param max_retry_times: LLM retry budget
    :return: (raw LLM reply, parsed JSON or None)
    """
    variant = "with_standard" if standardText else "no_standard"
    extract_prompt = prompts['extract']['v1'][variant]

    llm_input = (
        extract_prompt
        .replace("<!-具体审核要求-!>", template)
        .replace("<!-待审文档-!>", content)
        .replace("<!-待审文档前置内容-!>", prev_result if prev_result else "")
    )
    if standardText:
        llm_input = llm_input.replace("<!-审核标准说明文档-!>", standardText)

    llm_output = ""
    llm_output_json = None
    attempt = 0
    while max_retry_times:
        llm_output = llm.generate_response(llm_input)
        llm_output_json = extract_json(llm_output)
        if llm_output_json is not None:
            break
        max_retry_times -= 1
        attempt += 1
        print(f"第{attempt}次重试")

    return llm_output, llm_output_json


def judge_score(template, references_text, standardText="", max_retry_times=3):
    """Ask the LLM to score reference chunks against the review template.

    :param template: review-requirement text
    :param references_text: numbered reference sections to score
    :param standardText: optional review-standard reference document
    :param max_retry_times: LLM retry budget
    :return: (raw reply, parsed JSON or None, modified_template) —
             modified_template is currently always None and is kept only
             for the caller's unpacking contract
    """
    variant = "with_standard" if standardText else "no_standard"
    judge_score_prompt = prompts['judge']['v1'][variant]

    llm_input = (
        judge_score_prompt
        .replace("<!-具体审核要求-!>", template)
        .replace("<!-待审文档-!>", references_text)
    )
    if standardText:
        llm_input = llm_input.replace("<!-审核标准说明文档-!>", standardText)

    llm_output = ""
    llm_output_json = None
    modified_template = None
    attempt = 0
    while max_retry_times:
        llm_output = llm.generate_response(llm_input)
        llm_output_json = extract_json(llm_output)
        if llm_output_json is not None:
            break
        max_retry_times -= 1
        attempt += 1
        print(f"第{attempt}次重试")

    return llm_output, llm_output_json, modified_template


def imitative_write(template, content, standardText="", max_retry_times=3):
    """Generate the imitation-writing result for the given references.

    Fills the write prompt (with/without-standard variant) and queries the
    LLM, retrying until the reply contains parseable JSON.

    :param template: review-requirement text
    :param content: reference content to imitate
    :param standardText: optional review-standard reference document
    :param max_retry_times: LLM retry budget
    :return: (raw reply, parsed JSON or None)
    """
    if standardText:
        template_write_prompt = prompts['write']['v1']["with_standard"]
    else:
        template_write_prompt = prompts['write']['v1']["no_standard"]

    llm_input = template_write_prompt.replace("<!-具体审核要求-!>", template) \
        .replace("<!-待审文档-!>", content)
    if standardText:
        llm_input = llm_input.replace("<!-审核标准说明文档-!>", standardText)

    llm_output = ""
    # Consistency fix: siblings (extract_claim / judge_score) use None as
    # the "no JSON yet" sentinel and test `is not None`. The old "" sentinel
    # plus a truthiness test wrongly kept retrying on valid-but-falsy JSON
    # such as {}.
    llm_output_json = None
    i = 0
    while max_retry_times:
        llm_output = llm.generate_response(llm_input)
        llm_output_json = extract_json(llm_output)
        if llm_output_json is not None:
            break
        max_retry_times -= 1
        i += 1
        print(f"第{i}次重试")

    return llm_output, llm_output_json


def build_reference_content(references, key=None, start=1):
    """Render references as a numbered list of "待审文档" sections.

    :param references: list of strings, or list of dicts when ``key`` is given
    :param key: dict key holding each reference's text (None = items are strings)
    :param start: number assigned to the first section
    :return: newline-joined string of headed sections
    """
    sections = []
    for idx, ref in enumerate(references, start=start):
        text = ref if key is None else ref[key]
        sections.append(f"##### 待审文档序号{idx}：\n{text}")
    return "\n".join(sections)


def process_chunks_concurrently(template, chunks: List[Dict[str, Any]], max_workers: int = 2, standardText=""):
    """
    Process chunks concurrently while forcing chunks of the same fileid to
    complete in chunkid order; adds a 'claim' field to each chunk in place.

    A chunk with chunkid > 0 waits on its file's Condition until chunk
    chunkid-1 has published its claim, then passes (at most) the first 200
    characters of that claim to extract_claim as preceding context.

    :param template: review-requirement text forwarded to extract_claim
    :param chunks: list of dicts, each with fileid, chunkid, content
    :param max_workers: thread pool size
    :param standardText: optional review-standard document forwarded to extract_claim
    """
    # Per-fileid state: {'results': {chunkid: claim}, 'condition': Condition}.
    file_data = {}
    file_data_lock = threading.Lock()

    def process_chunk(chunk: Dict[str, Any]):
        nonlocal file_data
        fileid = chunk['fileid']
        chunkid = chunk['chunkid']
        content = chunk['content']

        # Create this file's result map + condition exactly once.
        with file_data_lock:
            if fileid not in file_data:
                file_data[fileid] = {
                    'results': {},
                    'condition': threading.Condition()
                }
            file_info = file_data[fileid]

        # Block until the previous chunk of the same file has published,
        # then take a <=200-char prefix of its claim as context.
        prev_result = None
        if chunkid > 0:
            with file_info['condition']:
                # Wait for the predecessor chunk to finish.
                while (chunkid - 1) not in file_info['results']:
                    file_info['condition'].wait()

                # Fetch the predecessor's claim.
                prev_result = file_info['results'][chunkid - 1]
                prev_result = prev_result[:200] if len(prev_result) > 200 else prev_result

        # Process the current chunk.
        claim, claim_json = extract_claim(template, fileid, chunkid, content, prev_result, standardText)

        # Attach the claim in place; fall back to the raw (think-stripped)
        # reply when no JSON could be parsed.
        if claim_json is not None:
            chunk['claim'] = "\n".join(list(claim_json.values()))
            chunk["claim_json"] = json.dumps(claim_json, ensure_ascii=False)
        else:
            chunk['claim'] = parse_think(claim)
            chunk["claim_json"] = None

        # Publish the result and wake any threads waiting on this file.
        with file_info['condition']:
            file_info['results'][chunkid] = chunk['claim']
            file_info['condition'].notify_all()

        print(f"Completed processing {fileid} chunk {chunkid}")

    # Thread pool for the fan-out.
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Submit in (fileid, chunkid) order so each chunk's predecessor is
        # queued before any chunk that waits on it.
        chunks_sorted = sorted(chunks, key=lambda x: (x['fileid'], x['chunkid']))

        # Submit all tasks.
        futures = [executor.submit(process_chunk, chunk) for chunk in chunks_sorted]

        # Wait for completion and propagate the first worker exception.
        for future in futures:
            future.result()


def process_batch(batch, template_file_content, standardText=""):
    """Score one batch of chunks and return the scored results.

    Renders the batch's claims as numbered sections, asks judge_score to
    rate them, and pairs each chunk with the numeric score parsed from the
    judge's "...N分" verdict text.

    :param batch: list of chunk dicts carrying a 'claim' field
    :param template_file_content: review-requirement text
    :param standardText: optional review-standard reference document
    :return: (modified_template from judge_score — currently always None,
              list of {'chunk': chunk, 'score': int})
    """
    chunk_text = build_reference_content(batch, 'claim')
    _, chunk_scores, modified_template = judge_score(template_file_content, chunk_text, standardText)
    batch_results = []

    if chunk_scores:
        # Associate each judge verdict with its chunk by position.
        for idx, chunk in enumerate(batch):
            chunk_name = f"待审文档{idx + 1}"
            if chunk_name in chunk_scores:
                # Pull the first "N分" score out of the verdict text.
                matches = re.findall(r'(\d+)分', chunk_scores[chunk_name])
                if matches:
                    batch_results.append({
                        'chunk': chunk,
                        # int() fix: string scores sorted lexicographically
                        # downstream ("9" > "10"), breaking the ranking.
                        'score': int(matches[0]),
                    })

    return modified_template, batch_results


def select_median_string(modified_templates):
    """Rank candidate strings by whitespace-separated word count.

    :param modified_templates: non-empty list of candidate strings
    :return: (longest, median-length, shortest) candidate by word count
    """
    # Sort from fewest to most words; sorted() is stable, so ties keep
    # their original relative order.
    ranked = sorted(modified_templates, key=lambda s: len(s.split()))
    median_index = len(ranked) // 2
    return ranked[-1], ranked[median_index], ranked[0]


# 核心处理逻辑
def level_write(desc, verifyText, standardText=None):
    """Three-stage review / imitation-writing pipeline.

    Stage 1: if template + material fit the model input window, write directly.
    Stage 2: otherwise chunk the material, extract a claim per chunk, and
             write from the claims if those fit.
    Stage 3: otherwise score claim batches, greedily keep the highest-scoring
             chunks that still fit the window, and write from those.

    :param desc: review requirements (used as the template text)
    :param verifyText: document under review
    :param standardText: optional review-standard reference document
    :return: parsed write result, or an error string when no chunk fits
    """
    # Wrap the inputs in the session shape files_token_stat expects.
    session = {
        "data": {
            "templateFiles": [{"fileId": 0, "content": desc}],  # desc = review requirements
            "materialFiles": [{"fileId": 1, "content": verifyText}]  # verifyText = document under review
        }
    }

    template_tokens, total_tokens = files_token_stat(session)
    template_file_content = session["data"]["templateFiles"][0]["content"]

    # Stage 1: everything fits — single direct write.
    if total_tokens <= MODEL_INPUT_WINDOW:
        reference_content = build_reference_content(session["data"]["materialFiles"], "content")
        llm_output, write_res = imitative_write(template_file_content, reference_content, standardText)
        return write_res

    # Stage 2: chunk the material and extract claims concurrently.
    template_chunks, reference_chunks = file_chunker(r_splitter, session)
    process_chunks_concurrently(template_file_content, reference_chunks, MAX_WORKERS, standardText)

    total_claim_tokens = chunk_token_stat(reference_chunks, "claim")
    if total_claim_tokens + template_tokens <= MODEL_INPUT_WINDOW:
        reference_content = build_reference_content(reference_chunks, "claim")
        llm_output, write_res = imitative_write(template_file_content, reference_content, standardText)
        return write_res

    # Stage 3: score claim batches, then keep the best-fitting chunks.
    sorted_reference_chunks = sorted(reference_chunks, key=lambda x: (x['fileid'], x['chunkid']))
    chunk_batchs = batch_chunks(sorted_reference_chunks, MODEL_INPUT_WINDOW)

    # The `with` block waits for all futures before exiting, so collecting
    # results afterwards via as_completed is safe.
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        future_to_batch = {
            executor.submit(process_batch, batch, template_file_content, standardText): batch
            for batch in chunk_batchs
        }
    modified_templates = []
    all_chunk_scores = []
    for future in as_completed(future_to_batch):
        modified_template, batch_results = future.result()
        if modified_template:
            modified_templates.append(modified_template)
        all_chunk_scores.extend(batch_results)

    template_file_content = modified_templates[-1] if modified_templates else template_file_content

    # int() fix: scores may arrive as digit strings, and lexicographic
    # ordering would rank "9" above "10".
    sorted_chunks = sorted(all_chunk_scores, key=lambda x: int(x['score']), reverse=True)

    # Greedily fill the remaining input budget with the top-scored chunks.
    selected_chunks = []
    current_token_count = template_tokens
    for item in sorted_chunks:
        chunk = item['chunk']
        chunk_tokens = chunk['claim_token_num']
        if current_token_count + chunk_tokens <= MODEL_INPUT_WINDOW:
            selected_chunks.append(chunk)
            current_token_count += chunk_tokens
        else:
            break

    if selected_chunks:
        final_reference_content = build_reference_content(selected_chunks, 'claim')
        _, write_res = imitative_write(template_file_content, final_reference_content, standardText)
        return write_res
    else:
        return "无法完成审查：没有足够的审查对象可供参考"


# Smoke-test entry point: just confirms the module imports and runs.
if __name__ == '__main__':
    print(123)