"""
核心处理入口

流程：
1、先利用分类器，判断文本所属的文件类型
2、根据文件类型，调用不同的文件处理模块
   - 中标文件：winning
   - 其他文件：开发中（暂返回未支持）
"""

from typing import Dict, List, Optional, Iterator
import json
import re
from bs4 import BeautifulSoup, Comment
from spider.models import SubDetailItem
from process.ai.unified import AI_PROVIDER
from process.core.handler.classifier import classify_item, classify_text
from process.core.handler.winning_result_announcement import process_and_ingest_winning
from process.core.handler.tender_announcement import process_and_ingest_tender
from process.core.handler.transaction_result import process_and_ingest_transaction
from process.core.handler.bid_candidate import process_and_ingest_bid_candidate
from process.core.handler.bid_opening_record import process_and_ingest_opening_record
from process.core.handler.bid_evaluation_result import process_and_ingest_evaluation_result
from concurrent.futures import ThreadPoolExecutor, as_completed
from django.db import close_old_connections

# Per-provider cap on the serialized payload size, in characters, sent to the
# model. Lookups elsewhere in this module fall back to 20000 for providers
# not listed here.
CONTEXT_LIMIT_CHARS = {
    "lmstudio": 40000,
    "ollama": 40000,
    "qwen": 40000,
}

def _sanitize_html_basic(html: str) -> str:
    """Basic HTML sanitation for model input.

    Removes script/style/navigation chrome and HTML comments, keeps only a
    whitelist of text-bearing tags (stripping all attributes except a:href),
    drops visually empty span/p tags, and collapses whitespace plus repeated
    <br>/empty <p> runs.

    Args:
        html: Raw HTML string; None/empty is tolerated.

    Returns:
        A compact HTML fragment containing only whitelisted tags.
    """
    soup = BeautifulSoup(html or '', 'lxml')
    # Remove non-content containers wholesale (their text is noise, not body).
    for tag_name in ('script', 'style', 'noscript', 'header', 'footer', 'nav', 'aside'):
        for tag in soup.find_all(tag_name):
            tag.decompose()
    # Strip HTML comments.
    for comment in soup.find_all(string=lambda x: isinstance(x, Comment)):
        comment.extract()
    # Whitelisted tags mapped to the attributes each may keep.
    allowed = {
        'p': [], 'br': [], 'h1': [], 'h2': [], 'h3': [], 'h4': [], 'h5': [], 'h6': [],
        'ul': [], 'ol': [], 'li': [], 'table': [], 'thead': [], 'tbody': [], 'tr': [], 'th': [], 'td': [],
        'a': ['href'], 'span': [], 'em': [], 'strong': [], 'b': [], 'i': []
    }
    for tag in soup.find_all(True):
        if tag.name not in allowed:
            # Non-whitelisted tags (div/section/...) are unwrapped: the tag
            # goes away but its children survive in place.
            tag.unwrap()
            continue
        keep = allowed[tag.name]
        for attr in list(tag.attrs.keys()):
            if attr not in keep:
                del tag.attrs[attr]
    # NOTE: a second pass unwrapping div/section used to live here; it was
    # dead code because those tags are not whitelisted and were already
    # unwrapped by the loop above.
    # Drop visually-empty inline/paragraph tags.
    for tag in soup.find_all(True):
        if not tag.get_text(strip=True) and tag.name in ('span', 'p'):
            tag.decompose()
    body = soup.body
    content = body.decode_contents() if body else soup.decode()
    # Collapse whitespace runs, then squash repeated <br> and empty <p> blocks.
    content = re.sub(r'\s+', ' ', content)
    content = re.sub(r'(\s*<br\s*/?>\s*){2,}', '<br/>', content, flags=re.IGNORECASE)
    content = re.sub(r'(\s*<p>\s*</p>\s*)+', '', content, flags=re.IGNORECASE)
    return content.strip()


def run(
    task_id: Optional[int] = None,
    limit: Optional[int] = None,
    model: Optional[str] = None,
    random_pick: bool = False,
    **kwargs,
) -> Dict[str, object]:
    """Select unprocessed SubDetailItem rows, classify each, and dispatch it
    to the matching ingest handler.

    Args:
        task_id: Restrict the batch to a single crawl task when given.
        limit: Maximum number of items to process this round.
        model: Optional model name forwarded to the AI calls.
        random_pick: Process items in random order instead of priority order.
        **kwargs: Extra keyword arguments forwarded to both classification
            and the ingest handlers (e.g. prompts, generation parameters).

    Returns:
        Summary dict with "total", "success", "failed" and a per-item
        "results" list.
    """
    # Only unprocessed rows are candidates.
    q = SubDetailItem.objects.filter(is_processed=False)
    if task_id is not None:
        # Restrict to the requested task.
        q = q.filter(task_id=task_id)
    if random_pick:
        q = q.order_by('?')
    else:
        # Priority: fewer returns first -> non-suspect first -> stable time/id order.
        q = q.order_by('return_count', 'is_suspect', 'created_at', 'id')
    # Read only the columns we need to keep rows narrow and reduce disk I/O.
    q = q.only('id', 'url', 'html_content', 'task_id', 'created_at')
    if limit is not None and limit > 0:
        # Cap the batch size for this round.
        q = q[:limit]

    # Dispatch table: recognized document type -> ingest handler.
    handlers = {
        "winning_result_announcement": process_and_ingest_winning,
        "tender_announcement": process_and_ingest_tender,
        "transaction_result": process_and_ingest_transaction,
        "bid_candidate": process_and_ingest_bid_candidate,
        "bid_opening_record": process_and_ingest_opening_record,
        "bid_evaluation_result": process_and_ingest_evaluation_result,
    }

    results: List[Dict[str, object]] = []
    success = 0
    failed = 0

    # Stream in chunks to cap memory and disk pressure.
    for item in q.iterator(chunk_size=100):
        # Lightweight sanitation so raw HTML never reaches the model.
        sanitized_html = _sanitize_html_basic(getattr(item, "html_content", "") or "")
        data = {"url": item.url, "html_content": sanitized_html}
        # Compact JSON payload containing only the necessary fields.
        payload_str = json.dumps(data, ensure_ascii=False)
        limit_chars = CONTEXT_LIMIT_CHARS.get(AI_PROVIDER, 20000)
        # Context-length guard: oversized payloads wait for a large-context model.
        if len(payload_str) > limit_chars:
            item.requires_large_context = True
            item.is_processed = False
            item.is_suspect = False
            item.return_count = (item.return_count or 0) + 1
            item.save(update_fields=["requires_large_context", "is_processed", "is_suspect", "return_count"])
            results.append({
                "item_id": item.id,
                "task_id": item.task_id,
                "type": None,
                "ok": False,
                "action": None,
                "errors": [f"上下文过长：{len(payload_str)} 字符，已标记等待大上下文模型处理"],
                "record_id": None,
                "url": item.url,
                "requires_large_context": True,
            })
            failed += 1
            continue
        # 1) Classification: recognize document type and confidence.
        #    payload_str is reused instead of serializing `data` a second time.
        classify = classify_text(text=payload_str, extra_context={"url": item.url}, model=model, **kwargs)
        ftype = classify.get("type") or "other"
        item.recognized_confidence = classify.get("confidence")
        item.recognized_reason = classify.get("reason") or ""
        cu = classify.get("usage") or {}
        item.classify_input_tokens = (item.classify_input_tokens or 0) + int(cu.get("prompt_tokens", 0) or 0)
        item.classify_output_tokens = (item.classify_output_tokens or 0) + int(cu.get("completion_tokens", 0) or 0)

        # 2) Dispatch to the type-specific ingest handler.
        try:
            handler = handlers.get(ftype)
            if handler is not None:
                result = handler(item=item, model=model, json_payload=data, **kwargs)
            else:
                result = {"ok": False, "errors": [f"未支持的文件类型: {ftype}"]}
        except Exception as e:
            # Handler failures are recorded per item; the batch keeps going.
            result = {"ok": False, "errors": [f"处理异常: {e!r}"]}
        item.recognized_type = ftype

        # 3) Accounting: usage counters and processing status.
        item.return_count = (item.return_count or 0) + 1
        pu = result.get("usage") or {}
        item.process_input_tokens = (item.process_input_tokens or 0) + int(pu.get("prompt_tokens", 0) or 0)
        item.process_output_tokens = (item.process_output_tokens or 0) + int(pu.get("completion_tokens", 0) or 0)
        item.ai_input_tokens = int((item.classify_input_tokens or 0) + (item.process_input_tokens or 0))
        item.ai_output_tokens = int((item.classify_output_tokens or 0) + (item.process_output_tokens or 0))
        if result.get("ok"):
            item.is_processed = True
            success += 1
        else:
            item.is_processed = False
            item.is_suspect = True
            failed += 1
        # update_fields keeps the UPDATE statement narrow.
        item.save(update_fields=[
            "recognized_type",
            "recognized_confidence",
            "recognized_reason",
            "return_count",
            "is_processed",
            "is_suspect",
            "classify_input_tokens",
            "classify_output_tokens",
            "process_input_tokens",
            "process_output_tokens",
            "ai_input_tokens",
            "ai_output_tokens",
        ])

        results.append({
            # Per-item summary so callers can report progress.
            "item_id": item.id,
            "task_id": item.task_id,
            "type": ftype,
            "ok": result.get("ok"),
            "action": result.get("action"),
            "errors": result.get("errors"),
            "record_id": result.get("id"),
            "url": item.url,
            "requires_large_context": getattr(item, "requires_large_context", False),
            "classification": classify,
        })

    return {
        "total": len(results),
        "success": success,
        "failed": failed,
        "results": results,
    }


def run_iter(
    task_id: Optional[int] = None,
    limit: Optional[int] = None,
    model: Optional[str] = None,
    random_pick: bool = False,
    concurrency: Optional[int] = None,
    workers: Optional[int] = None,
    **kwargs,
) -> Iterator[Dict[str, object]]:
    """Streaming counterpart of run(): yield one result dict per item.

    Args:
        task_id: Restrict the batch to a single crawl task when given.
        limit: Maximum number of items to process this round.
        model: Optional model name forwarded to the AI calls.
        random_pick: Process items in random order instead of priority order.
        concurrency: Thread-pool size; used when `workers` is not set.
        workers: Preferred thread-pool size (takes priority over `concurrency`).
        **kwargs: Extra keyword arguments forwarded to classification and
            the ingest handlers.

    Yields:
        One summary dict per processed item (same shape as run() results).
    """
    # Only unprocessed rows are candidates.
    q = SubDetailItem.objects.filter(is_processed=False)
    if task_id is not None:
        # Restrict to the requested task.
        q = q.filter(task_id=task_id)
    if random_pick:
        q = q.order_by('?')
    else:
        # Priority: fewer returns first -> non-suspect first -> stable time/id order.
        q = q.order_by('return_count', 'is_suspect', 'created_at', 'id')
    # Read only the columns we need to keep rows narrow and reduce disk I/O.
    q = q.only('id', 'url', 'html_content', 'task_id', 'created_at')
    if limit is not None and limit > 0:
        # Cap the batch size for this round.
        q = q[:limit]

    # Dispatch table: recognized document type -> ingest handler.
    handlers = {
        "winning_result_announcement": process_and_ingest_winning,
        "tender_announcement": process_and_ingest_tender,
        "transaction_result": process_and_ingest_transaction,
        "bid_candidate": process_and_ingest_bid_candidate,
        "bid_opening_record": process_and_ingest_opening_record,
        "bid_evaluation_result": process_and_ingest_evaluation_result,
    }

    def _handle(item) -> Dict[str, object]:
        # Per-item pipeline shared by the sequential and concurrent paths:
        # sanitize -> length guard -> classify -> dispatch -> persist -> summarize.
        sanitized_html = _sanitize_html_basic(getattr(item, "html_content", "") or "")
        data = {"url": item.url, "html_content": sanitized_html}
        # Compact JSON payload containing only the necessary fields.
        payload_str = json.dumps(data, ensure_ascii=False)
        limit_chars = CONTEXT_LIMIT_CHARS.get(AI_PROVIDER, 20000)
        # Context-length guard: oversized payloads wait for a large-context model.
        if len(payload_str) > limit_chars:
            item.requires_large_context = True
            item.is_processed = False
            item.is_suspect = False
            item.return_count = (item.return_count or 0) + 1
            item.save(update_fields=["requires_large_context", "is_processed", "is_suspect", "return_count"])
            return {
                "item_id": item.id,
                "task_id": item.task_id,
                "type": None,
                "ok": False,
                "action": None,
                "errors": [f"上下文过长：{len(payload_str)} 字符，已标记等待大上下文模型处理"],
                "record_id": None,
                "url": item.url,
                "requires_large_context": True,
            }
        # Classification: recognize document type and confidence.
        # payload_str is reused instead of serializing `data` a second time.
        classify = classify_text(text=payload_str, extra_context={"url": item.url}, model=model, **kwargs)
        ftype = classify.get("type") or "other"
        item.recognized_confidence = classify.get("confidence")
        item.recognized_reason = classify.get("reason") or ""
        cu = classify.get("usage") or {}
        item.classify_input_tokens = (item.classify_input_tokens or 0) + int(cu.get("prompt_tokens", 0) or 0)
        item.classify_output_tokens = (item.classify_output_tokens or 0) + int(cu.get("completion_tokens", 0) or 0)
        # Dispatch to the type-specific ingest handler.
        try:
            handler = handlers.get(ftype)
            if handler is not None:
                result = handler(item=item, model=model, json_payload=data, **kwargs)
            else:
                result = {"ok": False, "errors": [f"未支持的文件类型: {ftype}"]}
        except Exception as e:
            # Handler failures are recorded per item; processing keeps going.
            result = {"ok": False, "errors": [f"处理异常: {e!r}"]}
        # Accounting: usage counters and processing status.
        item.recognized_type = ftype
        item.return_count = (item.return_count or 0) + 1
        pu = result.get("usage") or {}
        item.process_input_tokens = (item.process_input_tokens or 0) + int(pu.get("prompt_tokens", 0) or 0)
        item.process_output_tokens = (item.process_output_tokens or 0) + int(pu.get("completion_tokens", 0) or 0)
        item.ai_input_tokens = int((item.classify_input_tokens or 0) + (item.process_input_tokens or 0))
        item.ai_output_tokens = int((item.classify_output_tokens or 0) + (item.process_output_tokens or 0))
        if result.get("ok"):
            item.is_processed = True
        else:
            item.is_processed = False
            item.is_suspect = True
        # update_fields keeps the UPDATE statement narrow.
        item.save(update_fields=[
            "recognized_type",
            "recognized_confidence",
            "recognized_reason",
            "return_count",
            "is_processed",
            "is_suspect",
            "classify_input_tokens",
            "classify_output_tokens",
            "process_input_tokens",
            "process_output_tokens",
            "ai_input_tokens",
            "ai_output_tokens",
        ])
        return {
            "item_id": item.id,
            "task_id": item.task_id,
            "type": ftype,
            "ok": result.get("ok"),
            "action": result.get("action"),
            "errors": result.get("errors"),
            "record_id": result.get("id"),
            "url": item.url,
            "requires_large_context": getattr(item, "requires_large_context", False),
            "classification": classify,
        }

    eff = workers if (workers and workers > 0) else concurrency
    if eff and eff > 1:
        # Concurrent path: materialize only the primary keys; each worker
        # re-fetches its own row so full objects are never held in bulk.
        ids: List[int] = list(q.values_list('id', flat=True))

        def _process_one(item_id: int) -> Dict[str, object]:
            # Worker threads may inherit stale DB connections; refresh first.
            close_old_connections()
            # Narrow per-PK fetch to limit single-query I/O.
            item = SubDetailItem.objects.only('id', 'url', 'html_content', 'task_id', 'created_at').filter(id=item_id).first()
            if not item:
                return {
                    "item_id": item_id,
                    "task_id": None,
                    "type": None,
                    "ok": False,
                    "action": None,
                    "errors": ["未找到条目"],
                    "record_id": None,
                    "url": None,
                    "requires_large_context": False,
                }
            return _handle(item)

        with ThreadPoolExecutor(max_workers=eff) as ex:
            # Yield results in completion order.
            futures = [ex.submit(_process_one, _id) for _id in ids]
            for fut in as_completed(futures):
                yield fut.result()
        return

    # Sequential path: stream in chunks to cap memory and disk pressure.
    for item in q.iterator(chunk_size=100):
        yield _handle(item)

__all__ = ["run", "run_iter"]


# 使用示例（调用方式）：
# 1) 按任务批量处理（仅处理未处理项）
# from process.core.process import run
# summary = run(task_id=123, limit=50)
# print(summary)
#
# 2) 自定义模型与提示参数（透传给底层调用）
# summary = run(
#     task_id=123,
#     model="qwen-plus",
#     system="你是一个结构化抽取助手",
#     instruction="提取中标记录字段",
#     output_format="输出 JSON，字段对齐 WinningResultAnnouncement",
#     extra_context={"source": "某平台", "note": "批量任务"},
# )
#
# 3) 传递生成参数（如 temperature、top_p）
# summary = run(task_id=123, temperature=0.2, top_p=0.9)
#
# 返回结构说明（summary）：
# {
#   "total": 本次处理条数,
#   "success": 成功条数,
#   "failed": 失败条数,
#   "results": [
#       {
#           "item_id": SubDetailItem.id,
#           "type": 识别出的文件类型（如 "winning"）, 
#           "ok": True/False,
#           "action": "created"/"updated"（仅 winning 类型入库时返回）, 
#           "errors": [错误信息列表],
#           "record_id": WinningResultAnnouncement.id（成功入库时返回）
#       },
#       ...
#   ]
# }
#
# 前置配置：
# - 安装 openai 库并配置阿里云百炼兼容模式
# - 环境变量 DASHSCOPE_API_KEY 必须可用（或在 process/ai/qwen.py 中本地硬编码）
# - 地域可通过 DASHSCOPE_REGION=sg|intl 切换；或设置 DASHSCOPE_BASE_URL 完整 URL
