#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
工作流编排模块
从 page/backend.py 中提炼的工作流解析与执行逻辑
"""

import logging
from typing import Dict, Any, List, Tuple

from pathlib import Path

try:
    # 优先使用包内绝对导入
    from src.character_cleaner import CharacterCleaner
    from src.sentence_cleaner import SentenceCleaner
    from src.config import CharacterCleanerConfig, SentenceCleanerConfig
except ImportError:
    # 兼容相对导入
    from .character_cleaner import CharacterCleaner
    from .sentence_cleaner import SentenceCleaner
    from .config import CharacterCleanerConfig, SentenceCleanerConfig


# Module-level logger, named after this module per stdlib convention.
logger = logging.getLogger(__name__)


# All required fields per format (used for the missing-field check;
# `kto_tag` is required for kto but is a boolean, not a string).
_REQUIRED_FIELDS = {
    "pretrain": ("text",),
    "sft": ("instruction", "output"),
    "dpo": ("instruction", "chosen", "rejected"),
    "kto": ("instruction", "output", "kto_tag"),
}

# Required fields that must be strings.
_REQUIRED_STR_FIELDS = {
    "pretrain": ("text",),
    "sft": ("instruction", "output"),
    "dpo": ("instruction", "chosen", "rejected"),
    "kto": ("instruction", "output"),
}

# Optional fields that, when present and non-null, must be strings.
_OPTIONAL_STR_FIELDS = {
    "pretrain": (),
    "sft": ("input", "system"),
    "dpo": ("input",),
    "kto": ("input",),
}


def _validate_history(history, idx: int) -> Tuple[bool, str]:
    """Validate the optional sft `history` field.

    `history` must be a list of 2-element [instruction, answer] pairs,
    each element a string. `idx` is the 1-based record index, used only
    for error messages.
    """
    if not isinstance(history, list):
        return False, f"第 {idx} 条数据字段 history 类型应为列表"
    for h_i, h in enumerate(history, start=1):
        if not isinstance(h, (list, tuple)) or len(h) != 2:
            return False, f"第 {idx} 条数据 history 第 {h_i} 项需为长度为2的列表 [指令, 回答]"
        if not all(isinstance(x, str) for x in h):
            return False, f"第 {idx} 条数据 history 第 {h_i} 项的元素类型应为字符串"
    return True, ""


def validate_data_format(data: list, format_type: str) -> Tuple[bool, str]:
    """Validate that the input data matches the selected format.

    Supported formats: pretrain, sft, kto, dpo.
    - pretrain: each record needs a `text` field (string)
    - sft: requires `instruction` and `output` (strings); optional `input`,
      `system` (strings) and `history` (list of [instruction, answer] pairs)
    - dpo: requires `instruction`, `chosen`, `rejected` (strings); optional
      `input` (string)
    - kto: requires `instruction`, `output` (strings) and `kto_tag` (bool);
      optional `input` (string)

    Returns:
        (True, "") when valid; (False, <Chinese error message>) on the first
        problem found. An empty list is considered valid.
    """
    if not isinstance(data, list):
        return False, "JSON文件必须包含一个数组"
    if len(data) == 0:
        return True, ""
    if format_type not in _REQUIRED_FIELDS:
        return False, f"不支持的数据格式: {format_type}"

    for idx, item in enumerate(data, start=1):
        if not isinstance(item, dict):
            return False, f"第 {idx} 条数据不是对象"

        # 1) All required fields must be present.
        missing = [k for k in _REQUIRED_FIELDS[format_type] if k not in item]
        if missing:
            return False, f"第 {idx} 条数据缺少必需字段: {', '.join(missing)}"

        # 2) Required string fields must actually be strings.
        for field in _REQUIRED_STR_FIELDS[format_type]:
            if not isinstance(item.get(field), str):
                return False, f"第 {idx} 条数据字段 {field} 类型应为字符串"

        # 3) kto's tag is a strict boolean (true/false in JSON).
        if format_type == "kto" and not isinstance(item.get("kto_tag"), bool):
            return False, f"第 {idx} 条数据字段 kto_tag 类型应为布尔（true/false）"

        # 4) Optional fields may be absent or null, but not a non-string.
        for field in _OPTIONAL_STR_FIELDS[format_type]:
            if field in item and item[field] is not None and not isinstance(item[field], str):
                return False, f"第 {idx} 条数据字段 {field} 类型应为字符串"

        # 5) sft may carry a multi-turn history.
        if format_type == "sft" and item.get("history") is not None:
            ok, msg = _validate_history(item["history"], idx)
            if not ok:
                return False, msg

    return True, ""


class WorkflowEngine:
    """Workflow execution engine.

    Runs a linear sequence of processing nodes (character cleaning and
    sentence filtering) over a list of training-data records.
    """

    # Text fields cleaned by the character cleaner, per data format.
    # Unknown formats map to no fields, i.e. records pass through unchanged
    # (same behavior as the original per-format if/elif chain).
    _CLEAN_FIELDS = {
        'pretrain': ('text',),
        'sft': ('instruction', 'input', 'output', 'system'),
        'kto': ('instruction', 'input', 'output'),
        'dpo': ('instruction', 'input', 'chosen', 'rejected'),
    }

    def __init__(self, character_cleaner: CharacterCleaner, sentence_cleaner: SentenceCleaner):
        self.character_cleaner = character_cleaner
        self.sentence_cleaner = sentence_cleaner

    def _clean_json_fields(self, data: list, format_type: str, config: dict) -> Dict[str, Any]:
        """Clean the format-specific text fields of every record.

        Reuses the character-cleaning logic from api_server by calling the
        cleaner directly.

        Returns a dict with keys:
            cleaned_data     -- new list of records (shallow copies)
            processed_fields -- count of fields visited (present in a record)
            stats            -- the character cleaner's accumulated stats
        """
        cfg = CharacterCleanerConfig(**config) if config else CharacterCleanerConfig()
        fields = self._CLEAN_FIELDS.get(format_type, ())

        def clean_text_field(text):
            # Only clean non-blank strings; other values pass through as-is.
            if isinstance(text, str) and text.strip():
                return self.character_cleaner.process_text(text, cfg)
            return text

        cleaned_data = []
        processed_count = 0
        for item in data:
            # Non-dict entries are appended unchanged.
            if not isinstance(item, dict):
                cleaned_data.append(item)
                continue

            cleaned_item = item.copy()
            for field in fields:
                if field in cleaned_item:
                    cleaned_item[field] = clean_text_field(cleaned_item[field])
                    processed_count += 1
            cleaned_data.append(cleaned_item)

        return {
            "cleaned_data": cleaned_data,
            "processed_fields": processed_count,
            "stats": self.character_cleaner.get_stats(),
        }

    def _filter_sentences(self, data: list, config: dict) -> Dict[str, Any]:
        """Run the sentence filter over the `text` field of each record.

        Records without a string `text` field are dropped (only the extracted
        texts are forwarded). Returns the filtered records rebuilt as
        {"text": ...} dicts, the filter's stats, and before/after counts.
        """
        cfg = SentenceCleanerConfig(**config) if config else SentenceCleanerConfig()
        texts = [
            item['text']
            for item in data
            if isinstance(item, dict) and isinstance(item.get('text'), str)
        ]
        filtered_texts = self.sentence_cleaner.filter_sentences(texts, cfg)
        return {
            "filtered_data": [{"text": t} for t in filtered_texts],
            "stats": self.sentence_cleaner.get_stats(),
            "original_count": len(texts),
            "final_count": len(filtered_texts),
        }

    def execute(self, workflow_data: Dict[str, Any], input_data: list, format_type: str) -> Dict[str, Any]:
        """Execute the workflow; return the final data and per-node results.

        Nodes run in their order of appearance (simplified: `connections`
        is read but not yet used for ordering).
        """
        nodes: List[Dict[str, Any]] = workflow_data.get('nodes', [])
        _connections = workflow_data.get('connections', [])  # reserved for future routing

        processing_nodes = [
            n for n in nodes if n.get('type') in ('character-cleaner', 'sentence-cleaner')
        ]

        current_data = input_data.copy()
        processing_results: List[Dict[str, Any]] = []

        for node in processing_nodes:
            ntype = node.get('type')
            ncfg = node.get('config', {})
            # BUG FIX: the previous code computed
            #   len(processing_results[-1].get('output_count', ...))
            # which called len() on an int and raised TypeError for every
            # node after the first. A node's input count is simply the size
            # of the data flowing into it.
            input_count = len(current_data)

            if ntype == 'character-cleaner':
                result = self._clean_json_fields(current_data, format_type, ncfg)
                current_data = result['cleaned_data']
            else:  # sentence-cleaner (only two types survive the filter above)
                result = self._filter_sentences(current_data, ncfg)
                current_data = result['filtered_data']

            processing_results.append({
                "node_id": node.get('id'),
                "node_type": ntype,
                "status": "success",
                "input_count": input_count,
                "output_count": len(current_data),
                "stats": result.get('stats', {}),
            })

        return {
            "final_data": current_data,
            "processing_results": processing_results,
        }