"""
数据分析插件
"""
import csv
import random
from typing import Any, Dict, List, Union, Optional
import numpy as np
import scipy.stats as stats
from collections import defaultdict
from plugins.base_plugin import BasePlugin
from core.logger import get_logger

# 获取日志记录器
logger = get_logger(__name__)


class PearsonCorrelation(BasePlugin):
    """
    Pearson correlation analysis plugin.

    Computes the Pearson correlation coefficient between every pair of
    numeric columns in the input records.
    """

    def execute(self, input_data: Any) -> dict:
        """
        Run the Pearson correlation analysis.

        Args:
            input_data: Either a list of record dicts, or a dict carrying
                        the records under a "raw_data" key.

        Returns:
            A dict with "correlation_matrix" (nested dict keyed by column
            name) and metadata on success, or an "error" entry on failure.
        """
        # Unwrap the upstream node payload if present.
        if isinstance(input_data, dict) and "raw_data" in input_data:
            raw_data = input_data["raw_data"]
        else:
            raw_data = input_data

        # Validate the input: a non-empty list of records is required.
        if not raw_data or not isinstance(raw_data, list):
            logger.error("Input data must be a non-empty list")
            return {
                "correlation_matrix": None,
                "error": "Invalid input data format",
                "pearson_correlation_by": self.plugin_id
            }

        # Read configuration parameters.
        config = self.config.get("config", {})
        column_names = config.get("column_names", [])

        # No columns configured: try to infer the numeric columns.
        if not column_names:
            column_names = self._infer_numeric_columns(raw_data)
            if not column_names:
                logger.warning("No numeric columns found in data")
                return {
                    "correlation_matrix": None,
                    "error": "No numeric columns found",
                    "pearson_correlation_by": self.plugin_id
                }

        # Build the numeric data matrix (records with missing values dropped).
        data_matrix = self._extract_data_matrix(raw_data, column_names)

        # Correlation needs at least 2 observations and 2 variables.
        if data_matrix.shape[0] < 2 or data_matrix.shape[1] < 2:
            logger.warning(f"Insufficient data for correlation analysis: {data_matrix.shape}")
            return {
                "correlation_matrix": None,
                "error": "Insufficient data points for correlation analysis",
                "pearson_correlation_by": self.plugin_id
            }

        # Compute and format the correlation matrix.
        correlation_matrix = self._calculate_correlation(data_matrix)
        formatted_result = self._format_correlation_result(correlation_matrix, column_names)

        logger.info(f"Pearson correlation analysis completed for columns: {column_names}")
        return {
            "correlation_matrix": formatted_result,
            "pearson_correlation_by": self.plugin_id,
            "columns_analyzed": column_names
        }

    def _infer_numeric_columns(self, data: List[Dict[str, Any]]) -> List[str]:
        """Infer the numeric columns from the data.

        A column qualifies when it holds at least one non-None value and
        every non-None value is an int/float that is not NaN.

        Note: the previous implementation used a vacuous ``all()`` over a
        filtered iterable, so an all-None column was wrongly accepted.
        """
        if not data:
            return []

        # Candidate keys come from the first record.
        sample_record = data[0]
        if not isinstance(sample_record, dict):
            return []

        numeric_columns = []
        for key in sample_record.keys():
            # Collect the non-None values this column holds across records.
            values = [
                record[key]
                for record in data
                if isinstance(record, dict) and record.get(key) is not None
            ]
            # Require at least one value so an all-None column is rejected.
            if values and all(
                isinstance(v, (int, float)) and not np.isnan(v) for v in values
            ):
                numeric_columns.append(key)

        return numeric_columns

    def _extract_data_matrix(self, data: List[Dict[str, Any]], column_names: List[str]) -> np.ndarray:
        """Extract a 2-D matrix of the requested columns.

        Records with a missing, non-numeric or NaN value in any requested
        column are skipped entirely (complete-case analysis).

        Returns:
            A float ndarray of shape (n_valid_rows, len(column_names)).
        """
        matrix_data = []

        for record in data:
            row = []
            for column in column_names:
                value = record.get(column) if isinstance(record, dict) else None
                # Any missing/non-numeric/NaN value drops the whole record.
                if value is None or not isinstance(value, (int, float)) or np.isnan(value):
                    row = None
                    break
                row.append(value)

            if row is not None:
                matrix_data.append(row)

        if not matrix_data:
            # Keep a well-formed 2-D shape so callers can safely read shape[1]
            # (np.array([]) would be 1-D and shape[1] would raise IndexError).
            return np.empty((0, len(column_names)), dtype=float)
        return np.array(matrix_data, dtype=float)

    def _calculate_correlation(self, data_matrix: np.ndarray) -> np.ndarray:
        """Compute the Pearson correlation matrix, treating columns as variables."""
        correlation_matrix = np.corrcoef(data_matrix, rowvar=False)
        # Ensure a plain float result.
        return correlation_matrix.astype(float)

    def _format_correlation_result(self, correlation_matrix: np.ndarray, column_names: List[str]) -> Dict[str, Dict[str, float]]:
        """Format the correlation matrix as a nested dict keyed by column name.

        Coefficients are rounded to 6 decimal places.
        """
        return {
            column_names[i]: {
                column_names[j]: float(round(correlation_matrix[i, j], 6))
                for j in range(len(column_names))
            }
            for i in range(len(column_names))
        }

    def validate_config(self) -> bool:
        """Validate the plugin configuration.

        Returns:
            True when ``column_names`` (if given) is a list, else False.
        """
        config = self.config.get("config", {})

        # column_names, when provided, must be a list.
        if "column_names" in config and not isinstance(config["column_names"], list):
            logger.error("column_names must be a list")
            return False

        return True


class CSVHeaderManager(BasePlugin):
    """
    CSV header manager.

    Adds a header row to CSV data whose first row is not a header, and
    writes the result to a new CSV file.
    """

    def execute(self, input_data: Any) -> dict:
        """
        Add column names to CSV row data and write the result to a file.

        Args:
            input_data: A dict carrying the CSV rows (list of row lists)
                        under a "raw_data" key.

        Returns:
            A dict with "success", the output file path and the column
            names used, or an "error" entry on failure.
        """
        # Read configuration parameters.
        config = self.config.get("config", {})
        column_names = config.get("column_names", [])  # explicit header names
        auto_generate = config.get("auto_generate", False)  # generate names when missing
        prefix = config.get("prefix", "col_")  # prefix for generated names
        has_header = config.get("has_header", False)  # input already has a header row

        # BUG FIX: previously any dict was accepted and a missing
        # "raw_data" key raised KeyError; check for it explicitly.
        if isinstance(input_data, dict) and "raw_data" in input_data:
            data = input_data["raw_data"]
        else:
            logger.error("Input must be a dict with a 'raw_data' key")
            return {
                "success": False,
                "error": "Invalid input format",
                "processed_by": self.plugin_id
            }

        try:
            rows = list(data)

            if not rows:
                logger.error("CSV file is empty")
                return {
                    "success": False,
                    "error": "CSV file is empty",
                    "processed_by": self.plugin_id
                }

            # Column count is taken from the first row.
            num_columns = len(rows[0])

            # Resolve the header names.
            if column_names:
                # BUG FIX: copy before adjusting — the original appended to
                # the configured list in place, mutating self.config.
                column_names = list(column_names)
                if len(column_names) != num_columns:
                    logger.warning(f"Number of specified column names ({len(column_names)}) "
                                 f"does not match number of columns in data ({num_columns})")
                    if len(column_names) < num_columns:
                        # Too few names: auto-generate the remainder.
                        for i in range(len(column_names), num_columns):
                            column_names.append(f"{prefix}{i}")
                    else:
                        # Too many names: keep only the first num_columns.
                        column_names = column_names[:num_columns]
            elif auto_generate:
                # No names given: generate all of them.
                column_names = [f"{prefix}{i}" for i in range(num_columns)]
            else:
                logger.error("No column names provided and auto_generate is False")
                return {
                    "success": False,
                    "error": "No column names provided and auto_generate is False",
                    "processed_by": self.plugin_id
                }

            # An explicit output file path is required.
            output_file = config.get("output_file")
            if not output_file:
                raise ValueError("No output_file specified in config")

            # Write the new CSV file: header first, then the data rows.
            with open(output_file, 'w', newline='', encoding='utf-8') as f:
                writer = csv.writer(f)
                writer.writerow(column_names)

                # Skip the original header row if the input already had one.
                start_row = 1 if has_header else 0
                for row in rows[start_row:]:
                    writer.writerow(row)

            logger.info(f"CSV file with headers saved to: {output_file}")
            return {
                "success": True,
                "output_file": output_file,
                "column_names": column_names,
                "original_columns": num_columns,
                "data_rows": len(rows) - start_row,
                "processed_by": self.plugin_id
            }

        except FileNotFoundError as e:
            # BUG FIX: the original handler referenced an undefined name
            # (csv_file) and raised NameError instead of reporting the error.
            logger.error(f"Output path not found: {e}")
            return {
                "success": False,
                "error": f"Output path not found: {e}",
                "processed_by": self.plugin_id
            }
        except Exception as e:
            logger.error(f"Failed to process CSV file: {e}")
            return {
                "success": False,
                "error": f"Failed to process CSV file: {e}",
                "processed_by": self.plugin_id
            }

    def validate_config(self) -> bool:
        """Validate the plugin configuration.

        Returns:
            True when the column-name / auto-generate / prefix / has_header
            settings are consistent, else False.
        """
        config = self.config.get("config", {})

        column_names = config.get("column_names", [])
        auto_generate = config.get("auto_generate", False)

        # One of the two naming strategies must be enabled.
        if not column_names and not auto_generate:
            logger.error("Either column_names must be provided or auto_generate must be True")
            return False

        # column_names, when provided, must be a list.
        if column_names and not isinstance(column_names, list):
            logger.error("column_names must be a list")
            return False

        # auto_generate must be a boolean.
        if auto_generate and not isinstance(auto_generate, bool):
            logger.error("auto_generate must be a boolean")
            return False

        # prefix must be a non-empty string.
        prefix = config.get("prefix", "col_")
        if not isinstance(prefix, str) or not prefix.strip():
            logger.error("prefix must be a non-empty string")
            return False

        # has_header must be a boolean.
        has_header = config.get("has_header", False)
        if not isinstance(has_header, bool):
            logger.error("has_header must be a boolean")
            return False

        return True


class DataSaver(BasePlugin):
    """
    Data saving plugin.

    Saves merged or split data to a file, or returns the rendered content
    when no output path is given. Supports JSON, CSV and TXT output.
    """

    def execute(self, input_data: Any) -> dict:
        """
        Save the input data in the configured format.

        Args:
            input_data: Raw data, or a dict carrying it under "raw_data",
                        "split_data" or "merged_data".

        Returns:
            A dict describing the save result ("saved", "file_path",
            "record_count", ...) or an "error" entry on failure.
        """
        # Read configuration parameters.
        config = self.config.get("config", {})
        save_format = config.get("format", "csv")  # default format is csv
        output_path = config.get("output_path", None)  # target file path
        include_metadata = config.get("include_metadata", True)  # embed metadata
        pretty_print = config.get("pretty_print", True)  # human-readable output
        split_output_folder = config.get("split_output_folder", False)  # folder mode for split_data

        # Normalize the various upstream payload shapes.
        if isinstance(input_data, dict) and "raw_data" in input_data:
            raw_data = input_data["raw_data"]
            metadata = input_data.get("metadata", {})
        elif isinstance(input_data, dict) and "split_data" in input_data:
            # Output of the data-splitting plugin.
            split_data = input_data["split_data"]

            # List of groups + folder mode + a path: save one file per group.
            if isinstance(split_data, list) and split_output_folder and output_path:
                folder_data = {
                    "split_data": split_data,
                    "group_names": input_data.get("group_names", []),
                    "group_keys": input_data.get("group_keys", [])
                }
                return self._save_split_data_to_folder(
                    folder_data, input_data, output_path, save_format, config
                )

            # Otherwise treat the split data as ordinary data.
            raw_data = split_data
            metadata = {
                "original_count": input_data.get("original_count", 0),
                "split_info": input_data.get("split_info", {})
            }
        elif isinstance(input_data, dict) and "merged_data" in input_data:
            # Output of the data-merging plugin.
            raw_data = input_data["merged_data"]
            metadata = {
                "merge_info": input_data.get("merge_info", {}),
                "merged_by": input_data.get("merged_by", "")
            }
        else:
            raw_data = input_data
            metadata = {}

        # Nothing to save.
        if not raw_data:
            logger.error("No data to save")
            return {
                "saved": False,
                "error": "No data to save",
                "saved_by": self.plugin_id,
                "format": save_format
            }

        # Dispatch on the requested format.
        if save_format.lower() == "json":
            result = self._save_as_json(raw_data, metadata, output_path, include_metadata, pretty_print)
        elif save_format.lower() == "csv":
            result = self._save_as_csv(raw_data, output_path, config)
        elif save_format.lower() == "txt":
            result = self._save_as_txt(raw_data, output_path, pretty_print)
        else:
            logger.error(f"Unsupported save format: {save_format}")
            return {
                "saved": False,
                "error": f"Unsupported save format: {save_format}",
                "saved_by": self.plugin_id,
                "format": save_format
            }

        if result["success"]:
            logger.info(f"Data saved successfully in {save_format} format")
            return {
                "saved": True,
                "file_path": result.get("file_path"),
                "record_count": len(raw_data) if isinstance(raw_data, list) else 1,
                "format": save_format,
                "saved_by": self.plugin_id,
                "metadata": metadata if include_metadata else {}
            }
        else:
            return {
                "saved": False,
                "error": result.get("error", "Unknown error"),
                "saved_by": self.plugin_id,
                "format": save_format
            }

    def _save_as_json(self, data: Any, metadata: dict, output_path: str = None,
                     include_metadata: bool = True, pretty_print: bool = True) -> dict:
        """Save data as JSON, or return the serialized payload when no path is given."""
        import json

        try:
            # Optionally wrap the data with its metadata and save info.
            if include_metadata:
                output_data = {
                    "data": data,
                    "metadata": metadata,
                    "saved_info": {
                        "timestamp": self._get_timestamp(),
                        "record_count": len(data) if isinstance(data, list) else 1,
                        "format": "json"
                    }
                }
            else:
                output_data = data

            # No path: hand the payload back to the caller.
            if not output_path:
                return {
                    "success": True,
                    "data": output_data,
                    "format": "json"
                }

            # Ensure the parent directory exists.
            # BUG FIX: guard against a bare filename — os.makedirs("") raises.
            import os
            parent_dir = os.path.dirname(output_path)
            if parent_dir:
                os.makedirs(parent_dir, exist_ok=True)

            # Write the file (default=str keeps non-JSON types serializable).
            with open(output_path, 'w', encoding='utf-8') as f:
                if pretty_print:
                    json.dump(output_data, f, ensure_ascii=False, indent=2, default=str)
                else:
                    json.dump(output_data, f, ensure_ascii=False, default=str)

            return {
                "success": True,
                "file_path": output_path,
                "format": "json"
            }

        except Exception as e:
            logger.error(f"Failed to save JSON file: {e}")
            return {
                "success": False,
                "error": f"Failed to save JSON file: {e}"
            }

    def _save_as_csv(self, data: Any, output_path: str = None, config: dict = None) -> dict:
        """Save a list of records as CSV, or return the CSV text when no path is given."""
        import csv
        import os

        try:
            if isinstance(data, list):
                if not data:
                    return {
                        "success": False,
                        "error": "Empty data list"
                    }

                # Resolve the CSV field names.
                if config and "column_names" in config:
                    fieldnames = config["column_names"]
                elif isinstance(data[0], dict):
                    # Use the keys of the first record.
                    fieldnames = list(data[0].keys())
                else:
                    # Non-dict items: wrap them in a single "value" column.
                    fieldnames = ["value"]
                    dict_data = []
                    for item in data:
                        if isinstance(item, dict):
                            dict_data.append(item)
                        else:
                            dict_data.append({"value": item})
                    data = dict_data

                # No path: render the CSV into memory and return it.
                if not output_path:
                    import io
                    output = io.StringIO()
                    writer = csv.DictWriter(output, fieldnames=fieldnames)
                    writer.writeheader()
                    for row in data:
                        if isinstance(row, dict):
                            writer.writerow(row)
                    return {
                        "success": True,
                        "data": output.getvalue(),
                        "format": "csv"
                    }

                # Ensure the parent directory exists.
                # BUG FIX: guard against a bare filename — os.makedirs("") raises.
                parent_dir = os.path.dirname(output_path)
                if parent_dir:
                    os.makedirs(parent_dir, exist_ok=True)

                # Write the CSV file (non-dict rows are skipped).
                with open(output_path, 'w', newline='', encoding='utf-8') as f:
                    writer = csv.DictWriter(f, fieldnames=fieldnames)
                    writer.writeheader()
                    for row in data:
                        if isinstance(row, dict):
                            writer.writerow(row)

                return {
                    "success": True,
                    "file_path": output_path,
                    "format": "csv"
                }
            else:
                return {
                    "success": False,
                    "error": "CSV format requires list of dictionaries"
                }

        except Exception as e:
            logger.error(f"Failed to save CSV file: {e}")
            return {
                "success": False,
                "error": f"Failed to save CSV file: {e}"
            }

    def _save_as_txt(self, data: Any, output_path: str = None, pretty_print: bool = True) -> dict:
        """Save data as plain text, or return the text when no path is given."""
        import os

        try:
            # Render the data as a string.
            if pretty_print:
                content = self._format_data_pretty(data)
            else:
                content = str(data)

            # No path: hand the content back to the caller.
            if not output_path:
                return {
                    "success": True,
                    "data": content,
                    "format": "txt"
                }

            # Ensure the parent directory exists.
            # BUG FIX: guard against a bare filename — os.makedirs("") raises.
            parent_dir = os.path.dirname(output_path)
            if parent_dir:
                os.makedirs(parent_dir, exist_ok=True)

            with open(output_path, 'w', encoding='utf-8') as f:
                f.write(content)

            return {
                "success": True,
                "file_path": output_path,
                "format": "txt"
            }

        except Exception as e:
            logger.error(f"Failed to save TXT file: {e}")
            return {
                "success": False,
                "error": f"Failed to save TXT file: {e}"
            }

    def _format_data_pretty(self, data: Any) -> str:
        """Render data as human-readable text.

        Dict lists become an ASCII table, plain lists a bullet list, dicts
        indented JSON, and anything else ``str(data)``.
        """
        import json

        if isinstance(data, list):
            if not data:
                return "Empty data list"

            # List of dicts: build an ASCII table.
            if isinstance(data[0], dict):
                keys = list(data[0].keys())

                # Column width = widest value (or header) plus padding.
                col_widths = {}
                for key in keys:
                    max_width = len(str(key))
                    for row in data:
                        max_width = max(max_width, len(str(row.get(key, ""))))
                    col_widths[key] = max_width + 2

                result = []

                # Header with separators above and below.
                header = "|".join(f"{str(key):^{col_widths[key]}}" for key in keys)
                separator = "+".join("-" * col_widths[key] for key in keys)
                result.append(separator)
                result.append(header)
                result.append(separator)

                # One centered row per record.
                for row in data:
                    row_str = "|".join(f"{str(row.get(key, '')):^{col_widths[key]}}" for key in keys)
                    result.append(row_str)

                result.append(separator)
                return "\n".join(result)
            else:
                # Plain list: bullet list.
                return "\n".join(f"- {item}" for item in data)
        elif isinstance(data, dict):
            return json.dumps(data, ensure_ascii=False, indent=2, default=str)
        else:
            return str(data)

    def _save_split_data_to_folder(self, split_data: dict, input_data: dict,
                                  output_folder: str, save_format: str, config: dict) -> dict:
        """
        Save each group of split data to its own file inside a folder.

        Args:
            split_data: Dict with a "split_data" list plus optional
                        "group_names" and "group_keys".
            input_data: Original input, providing split_info etc.
            output_folder: Destination folder path.
            save_format: Output format ("json", "csv" or "txt").
            config: Plugin configuration.

        Returns:
            A dict describing the save result, including the per-group
            file list.
        """
        import os

        try:
            # Make sure the destination folder exists.
            os.makedirs(output_folder, exist_ok=True)

            saved_files = []
            total_records = 0

            # Read configuration parameters.
            include_metadata = config.get("include_metadata", True)
            pretty_print = config.get("pretty_print", True)
            file_prefix = config.get("file_prefix", "split")  # filename prefix

            # Split bookkeeping from the upstream plugin.
            split_info = input_data.get("split_info", {})
            original_count = input_data.get("original_count", 0)

            # The grouped data itself.
            split_data_list = split_data.get("split_data", [])
            group_names = split_data.get("group_names", [])
            group_keys = split_data.get("group_keys", [])

            # Fall back to generated names when none were given.
            if not group_names:
                group_names = [f"group_{i}" for i in range(len(split_data_list))]

            # Save each non-empty group to its own file.
            for i, group_data in enumerate(split_data_list):
                if not group_data:  # skip empty groups
                    continue

                group_name = group_names[i] if i < len(group_names) else f"group_{i}"

                # Sanitize the group name for use in a filename.
                clean_group_name = "".join(c for c in str(group_name) if c.isalnum() or c in ('_', '-'))
                if not clean_group_name:
                    clean_group_name = f"group_{i}"

                # Build a timestamped filename inside the output folder.
                timestamp = self._get_timestamp().replace(" ", "_").replace(":", "-")
                filename = f"{file_prefix}_{clean_group_name}_{timestamp}.{save_format}"
                file_path = os.path.join(output_folder, filename)

                # Per-group metadata.
                metadata = {
                    "group_name": group_name,
                    "group_index": i,
                    "group_size": len(group_data),
                    "original_count": original_count,
                    "split_info": split_info.get(group_name, {}),
                    "split_timestamp": self._get_timestamp()
                }

                # Dispatch on the requested format.
                if save_format.lower() == "json":
                    result = self._save_as_json(group_data, metadata, file_path, include_metadata, pretty_print)
                elif save_format.lower() == "csv":
                    result = self._save_as_csv(group_data, file_path, config)
                elif save_format.lower() == "txt":
                    result = self._save_as_txt(group_data, file_path, pretty_print)
                else:
                    logger.warning(f"Unsupported format for group {group_name}: {save_format}")
                    continue

                if result["success"]:
                    saved_files.append({
                        "group": group_name,
                        "group_index": i,
                        "file_path": file_path,
                        "record_count": len(group_data)
                    })
                    total_records += len(group_data)
                    logger.info(f"Group '{group_name}' saved to {file_path}")
                else:
                    logger.error(f"Failed to save group '{group_name}': {result.get('error')}")

            if not saved_files:
                return {
                    "saved": False,
                    "error": "No groups were successfully saved",
                    "saved_by": self.plugin_id,
                    "format": save_format
                }

            return {
                "saved": True,
                "file_path": output_folder,  # kept for compatibility with single-file results
                "output_folder": output_folder,
                "saved_files": saved_files,
                "total_groups": len(saved_files),
                "total_records": total_records,
                "record_count": total_records,  # kept for compatibility
                "format": save_format,
                "saved_by": self.plugin_id,
                "metadata": {
                    "split_output_folder": True,
                    "original_count": original_count,
                    "split_info": split_info,
                    "saved_files": saved_files
                }
            }

        except Exception as e:
            logger.error(f"Failed to save split data to folder: {e}")
            return {
                "saved": False,
                "error": f"Failed to save split data to folder: {e}",
                "saved_by": self.plugin_id,
                "format": save_format
            }

    def _get_timestamp(self) -> str:
        """Return the current local time as "YYYY-MM-DD HH:MM:SS"."""
        from datetime import datetime
        return datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    def validate_config(self) -> bool:
        """Validate the plugin configuration.

        Returns:
            True when format, output path and option types are valid.
        """
        config = self.config.get("config", {})

        # Validate the save format.
        # BUG FIX: default is "csv" to match execute() (was "json" here).
        save_format = config.get("format", "csv")
        valid_formats = ["json", "csv", "txt"]
        if save_format.lower() not in valid_formats:
            logger.error(f"Invalid save format: {save_format}")
            return False

        # Validate the output path, if one was provided.
        output_path = config.get("output_path", "")
        if output_path:
            import os
            try:
                # Try to create the parent directory (bare filenames need none).
                parent_dir = os.path.dirname(output_path)
                if parent_dir:
                    os.makedirs(parent_dir, exist_ok=True)
            except Exception as e:
                logger.error(f"Invalid output path: {e}")
                return False

        # Boolean options must really be booleans.
        for param in ["include_metadata", "pretty_print", "split_output_folder"]:
            if param in config and not isinstance(config[param], bool):
                logger.error(f"{param} must be a boolean value")
                return False

        # file_prefix, when provided, must be a string.
        file_prefix = config.get("file_prefix", "")
        if file_prefix and not isinstance(file_prefix, str):
            logger.error("file_prefix must be a string")
            return False

        # CSV-specific checks.
        if save_format.lower() == "csv":
            column_names = config.get("column_names", [])
            if column_names and not isinstance(column_names, list):
                logger.error("column_names must be a list for CSV format")
                return False

        return True


class DataMerger(BasePlugin):
    """
    Data merging plugin.

    Joins the records of two data loaders on a configured primary key.
    The key may be a single column name or a list of column names
    (composite key); inner/left/right/outer join semantics are supported.
    """

    def execute(self, input_data: Any) -> dict:
        """
        Execute the data merge.

        Args:
            input_data: Input data in one of three formats:
                       1. Node format:   {"data_loader_1": {"raw_data": [...]},
                                          "data_loader_2": {"raw_data": [...]}}
                       2. Direct format: {"data1": [...], "data2": [...]}
                       3. List format:   [[...], [...]]

        Returns:
            Dictionary with the merged records and merge metadata.
        """
        # Reject anything that is not a dict or a list outright.
        if not isinstance(input_data, (dict, list)):
            logger.error("Input data must be a dictionary or list")
            return {
                "merged_data": None,
                "error": "Invalid input data format",
                "merged_by": self.plugin_id
            }

        config = self.config.get("config", {})
        data_sources = config.get("data_sources", [])

        # Resolve the two source record lists from the supported formats.
        data1, data2, error = self._extract_sources(input_data, data_sources)
        if error:
            logger.error(error)
            return {
                "merged_data": None,
                "error": error,
                "merged_by": self.plugin_id
            }

        if not isinstance(data1, list) or not isinstance(data2, list):
            logger.error("Data sources must be lists")
            return {
                "merged_data": None,
                "error": "Data sources must be lists",
                "merged_by": self.plugin_id
            }

        primary_key = config.get("primary_key", None)
        merge_type = config.get("merge_type", "inner")  # inner, left, right, outer

        # A primary key is mandatory for the join.
        if not primary_key:
            logger.error("Primary key must be specified in config")
            return {
                "merged_data": None,
                "error": "Primary key not specified",
                "merged_by": self.plugin_id
            }

        merged_data = self._merge_data(data1, data2, primary_key, merge_type)

        logger.info(f"Data merged successfully with {merge_type} join on key(s) '{primary_key}'")
        return {
            "merged_data": merged_data,
            "merged_by": self.plugin_id,
            "primary_key": primary_key,
            "merge_type": merge_type
        }

    def _extract_sources(self, input_data: Any, data_sources: List[str]) -> tuple:
        """Resolve the two source record lists from the supported input formats.

        Args:
            input_data: The dict or list handed to :meth:`execute`.
            data_sources: Optional configured source names selecting which
                          node results supply data1 and data2 (in order).

        Returns:
            A ``(data1, data2, error_message)`` tuple; ``error_message`` is
            ``None`` on success, and either data element may be ``None`` when
            a usable second source could not be found (the caller reports
            that as "Data sources must be lists").
        """
        # List format: the first two elements are the two sources.
        if isinstance(input_data, list):
            if len(input_data) >= 2:
                return input_data[0], input_data[1], None
            return None, None, "Insufficient data sources"

        # Dict input: collect node-style results that carry "raw_data".
        raw_by_key = {
            key: value["raw_data"]
            for key, value in input_data.items()
            if isinstance(value, dict) and "raw_data" in value
        }

        if len(raw_by_key) >= 2:
            # Honour the configured source order when both names are present;
            # otherwise fall back to discovery order.
            if (len(data_sources) >= 2
                    and data_sources[0] in raw_by_key
                    and data_sources[1] in raw_by_key):
                return raw_by_key[data_sources[0]], raw_by_key[data_sources[1]], None
            ordered = list(raw_by_key.values())
            return ordered[0], ordered[1], None

        if len(raw_by_key) == 1:
            # Exactly one node-style source: pair it with any other usable
            # value, skipping the key that already supplied raw_data so we
            # never merge a source with itself.
            source_key, first = next(iter(raw_by_key.items()))
            for key, value in input_data.items():
                if key == source_key:
                    continue
                if isinstance(value, list):
                    return first, value, None
                if isinstance(value, dict) and "raw_data" in value:
                    return first, value["raw_data"], None
            return first, None, None

        # No node-style sources: treat the first two dict values as the data.
        values = list(input_data.values())
        if len(values) >= 2:
            return values[0], values[1], None
        return None, None, "Insufficient data sources"

    def _merge_data(self, data1: List[Dict[str, Any]], data2: List[Dict[str, Any]],
                    primary_key: Union[str, List[str]], merge_type: str) -> List[Dict[str, Any]]:
        """
        Merge two record lists on the primary key.

        Args:
            data1: First data source.
            data2: Second data source.
            primary_key: Key column name, or a list of names for a composite key.
            merge_type: Join type ("inner", "left", "right" or "outer").

        Returns:
            List of merged records; data1 fields win on non-key collisions.
        """
        # Normalize to a list of key columns.
        if isinstance(primary_key, str):
            primary_keys = [primary_key]
        else:
            primary_keys = list(primary_key)

        # Index both sources by composite key tuple (last duplicate wins,
        # matching the previous behavior).
        index1 = self._index_records(data1, primary_keys)
        index2 = self._index_records(data2, primary_keys)

        # Pick the key set according to the join semantics.
        if merge_type == "inner":
            keys_to_process = index1.keys() & index2.keys()
        elif merge_type == "left":
            keys_to_process = set(index1.keys())
        elif merge_type == "right":
            keys_to_process = set(index2.keys())
        elif merge_type == "outer":
            keys_to_process = index1.keys() | index2.keys()
        else:
            logger.warning(f"Unknown merge type '{merge_type}', using 'inner' instead")
            keys_to_process = index1.keys() & index2.keys()

        merged_result = []
        for composite_key in keys_to_process:
            merged_record = {}

            if composite_key in index1:
                merged_record.update(index1[composite_key])
            else:
                # Right/outer join rows absent from data1: seed the key fields
                # so the merged record still identifies itself.
                merged_record.update(zip(primary_keys, composite_key))

            if composite_key in index2:
                # Add data2 fields without clobbering the key columns.
                for field, value in index2[composite_key].items():
                    if field not in primary_keys:
                        merged_record[field] = value

            merged_result.append(merged_record)

        return merged_result

    @staticmethod
    def _index_records(records: List[Dict[str, Any]], primary_keys: List[str]) -> Dict[tuple, Dict[str, Any]]:
        """Index records by composite key tuple.

        Records missing any key column, or holding None for one, are dropped
        (they can never participate in the join).
        """
        index = {}
        for record in records:
            try:
                key = tuple(record[k] for k in primary_keys)
            except KeyError:
                continue
            if any(part is None for part in key):
                continue
            index[key] = record
        return index

    def validate_config(self) -> bool:
        """Validate the plugin configuration.

        Requires a non-empty primary_key (string or list of strings) and,
        when present, a supported merge_type.
        """
        config = self.config.get("config", {})

        if "primary_key" not in config:
            logger.error("primary_key must be specified in config")
            return False

        primary_key = config["primary_key"]
        if not isinstance(primary_key, (str, list)):
            logger.error("primary_key must be a string or a list of strings")
            return False

        if isinstance(primary_key, list):
            if not primary_key:  # empty list
                logger.error("primary_key list cannot be empty")
                return False
            for key in primary_key:
                if not isinstance(key, str):
                    logger.error("All elements in primary_key list must be strings")
                    return False
        elif not primary_key:  # empty string
            logger.error("primary_key cannot be an empty string")
            return False

        # merge_type, when provided, must be a supported join type.
        if "merge_type" in config and config["merge_type"] not in ["inner", "left", "right", "outer"]:
            logger.error("merge_type must be one of: inner, left, right, outer")
            return False

        return True


class SpearmanCorrelation(BasePlugin):
    """
    Spearman correlation analysis plugin.

    Computes the Spearman rank correlation coefficients between the numeric
    columns of the input dataset.
    """

    def execute(self, input_data: Any) -> dict:
        """
        Run the Spearman correlation analysis.

        Args:
            input_data: Either a raw list of records or a dict containing a
                        "raw_data" key.

        Returns:
            Dictionary with the correlation matrix and analysis metadata.
        """
        # Unwrap node-style input ({"raw_data": [...]}) if necessary.
        if isinstance(input_data, dict) and "raw_data" in input_data:
            raw_data = input_data["raw_data"]
        else:
            raw_data = input_data

        if not raw_data or not isinstance(raw_data, list):
            logger.error("Input data must be a non-empty list")
            return {
                "correlation_matrix": None,
                "error": "Invalid input data format",
                "spearman_correlation_by": self.plugin_id
            }

        config = self.config.get("config", {})
        column_names = config.get("column_names", [])

        # Without an explicit column list, fall back to the numeric columns
        # inferred from the data itself.
        if not column_names:
            column_names = self._infer_numeric_columns(raw_data)
            if not column_names:
                logger.warning("No numeric columns found in data")
                return {
                    "correlation_matrix": None,
                    "error": "No numeric columns found",
                    "spearman_correlation_by": self.plugin_id
                }

        data_matrix = self._extract_data_matrix(raw_data, column_names)

        # Correlation needs at least 2 rows and 2 columns (the row check
        # short-circuits, so an empty 1-D array never reaches shape[1]).
        if data_matrix.shape[0] < 2 or data_matrix.shape[1] < 2:
            logger.warning(f"Insufficient data for correlation analysis: {data_matrix.shape}")
            return {
                "correlation_matrix": None,
                "error": "Insufficient data points for correlation analysis",
                "spearman_correlation_by": self.plugin_id
            }

        correlation_matrix = self._calculate_correlation(data_matrix)

        # Convert the matrix into the nested-dict result format.
        formatted_result = self._format_correlation_result(correlation_matrix, column_names)

        logger.info(f"Spearman correlation analysis completed for columns: {column_names}")
        return {
            "correlation_matrix": formatted_result,
            "spearman_correlation_by": self.plugin_id,
            "columns_analyzed": column_names
        }

    def _infer_numeric_columns(self, data: List[Dict[str, Any]]) -> List[str]:
        """Infer numeric column names from the records.

        A column qualifies when every record carrying a non-None value for it
        holds a non-NaN number. Candidate keys are taken from the first
        record only, so columns that appear later are not considered.
        """
        if not data:
            return []

        sample_record = data[0]
        numeric_columns = []

        for key in sample_record.keys():
            # NOTE(review): bool is a subclass of int, so boolean columns
            # also qualify here — confirm that is intended.
            if all(key in record and isinstance(record[key], (int, float)) and not np.isnan(record[key])
                   for record in data if record.get(key) is not None):
                numeric_columns.append(key)

        return numeric_columns

    def _extract_data_matrix(self, data: List[Dict[str, Any]], column_names: List[str]) -> np.ndarray:
        """Build a (rows, columns) matrix for the requested columns.

        Records missing any requested column, or holding a non-numeric or
        NaN value for one, are skipped entirely rather than imputed.
        """
        matrix_data = []

        for record in data:
            row = []
            for column in column_names:
                value = record.get(column)
                if value is None or not isinstance(value, (int, float)) or np.isnan(value):
                    # Drop the whole record on the first invalid cell.
                    row = None
                    break
                row.append(value)

            if row is not None:
                matrix_data.append(row)

        return np.array(matrix_data)

    def _calculate_correlation(self, data_matrix: np.ndarray) -> np.ndarray:
        """Compute the Spearman rank correlation matrix.

        scipy.stats.spearmanr returns a *scalar* statistic (not a matrix)
        when exactly two columns are supplied; that case is expanded into
        the full symmetric 2x2 matrix so downstream formatting can always
        index ``[i, j]``.
        """
        correlation, _ = stats.spearmanr(data_matrix, axis=0)
        correlation = np.asarray(correlation, dtype=float)
        if correlation.ndim == 0:
            r = float(correlation)
            correlation = np.array([[1.0, r], [r, 1.0]])
        return correlation

    def _format_correlation_result(self, correlation_matrix: np.ndarray, column_names: List[str]) -> Dict[str, Dict[str, float]]:
        """Convert the correlation matrix into a nested {col: {col: r}} dict."""
        result = {}
        num_columns = len(column_names)

        for i in range(num_columns):
            column_result = {}
            for j in range(num_columns):
                # Round to 6 decimal places for stable, readable output.
                column_result[column_names[j]] = float(round(correlation_matrix[i, j], 6))
            result[column_names[i]] = column_result

        return result

    def validate_config(self) -> bool:
        """Validate the plugin configuration."""
        config = self.config.get("config", {})

        # column_names, when provided, must be a list.
        if "column_names" in config and not isinstance(config["column_names"], list):
            logger.error("column_names must be a list")
            return False

        return True


class DataSplitter(BasePlugin):
    """
    Data splitting plugin.

    Supported split strategies:
    1. ratio       - split by proportions (e.g. train/test sets)
    2. condition   - split by a field condition (matching vs non-matching)
    3. field_value - split by the unique values of a field
    4. count       - split by explicit record counts per subset
    """

    def execute(self, input_data: Any) -> dict:
        """
        Execute the data split.

        Args:
            input_data: Input data in one of three formats:
                       1. Node format:   {"data_loader": {"raw_data": [...]}}
                       2. Direct format: {"data": [...]}
                       3. List format:   [...]

        Returns:
            Dictionary with the split groups and split metadata.
        """
        # Pull the record list out of whichever input format we were given.
        raw_data = self._extract_raw_data(input_data)

        if not raw_data or not isinstance(raw_data, list):
            logger.error("Input data must be a non-empty list")
            return {
                "split_data": None,
                "error": "Invalid input data format",
                "split_by": self.plugin_id
            }

        config = self.config.get("config", {})
        split_strategy = config.get("split_strategy", "ratio")  # ratio, condition, field_value, count

        # Dispatch table keeps the strategy selection flat and extensible.
        handlers = {
            "ratio": self._split_by_ratio,
            "condition": self._split_by_condition,
            "field_value": self._split_by_field_value,
            "count": self._split_by_count,
        }
        handler = handlers.get(split_strategy)
        if handler is None:
            logger.error(f"Unknown split strategy: {split_strategy}")
            return {
                "split_data": None,
                "error": f"Unknown split strategy: {split_strategy}",
                "split_by": self.plugin_id
            }

        result = handler(raw_data, config)
        if result is None:
            return {
                "split_data": None,
                "error": "Split operation failed",
                "split_by": self.plugin_id
            }

        logger.info(f"Data split successfully using {split_strategy} strategy")

        # Strategy helpers wrap the groups in a dict together with optional
        # group names / keys; unwrap with safe defaults.
        actual_split_data = result.get("split_data", [])
        group_names = result.get("group_names", [])
        group_keys = result.get("group_keys", [])

        return {
            "split_data": actual_split_data,
            "split_by": self.plugin_id,
            "split_strategy": split_strategy,
            "original_count": len(raw_data),
            "split_info": self._generate_split_info(actual_split_data),
            "group_names": group_names,
            "group_keys": group_keys
        }

    def _extract_raw_data(self, input_data: Any) -> Optional[List[Dict[str, Any]]]:
        """Extract the record list from any of the supported input formats."""
        if isinstance(input_data, dict):
            # Node format: the first value carrying a "raw_data" key wins.
            for value in input_data.values():
                if isinstance(value, dict) and "raw_data" in value:
                    return value["raw_data"]

            # Direct format: an explicit "data" list.
            if "data" in input_data and isinstance(input_data["data"], list):
                return input_data["data"]

            # Fallback: the first value that is a list is assumed to be the data.
            for value in input_data.values():
                if isinstance(value, list):
                    return value

        elif isinstance(input_data, list):
            # Bare list format.
            return input_data

        return None

    def _split_by_ratio(self, data: List[Dict[str, Any]], config: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Split the data into groups by relative proportions."""
        ratios = config.get("ratios", [0.8, 0.2])  # default: 80% train / 20% test
        random_seed = config.get("random_seed", None)  # set for reproducible shuffles
        shuffle = config.get("shuffle", True)  # whether to shuffle before splitting

        if not ratios or not isinstance(ratios, list) or len(ratios) < 2:
            logger.error("Ratios must be a list with at least 2 elements")
            return None

        total_ratio = sum(ratios)
        if total_ratio <= 0:
            logger.error("Sum of ratios must be greater than 0")
            return None

        # Convert normalized ratios into absolute group sizes; the last group
        # absorbs any rounding remainder so every record is kept.
        data_size = len(data)
        split_sizes = [int(data_size * ratio / total_ratio) for ratio in ratios]
        split_sizes[-1] = data_size - sum(split_sizes[:-1])

        data_copy = data.copy()
        if shuffle:
            # A dedicated Random instance avoids mutating the process-wide
            # RNG state; for a given seed it yields the same permutation as
            # the previous random.seed()/random.shuffle() combination.
            random.Random(random_seed).shuffle(data_copy)

        groups = []
        start_idx = 0
        for size in split_sizes:
            end_idx = start_idx + size
            groups.append(data_copy[start_idx:end_idx])
            start_idx = end_idx

        return {
            "split_data": groups,
            "group_names": [f"group_{i}" for i in range(len(groups))]
        }

    def _split_by_condition(self, data: List[Dict[str, Any]], config: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Split the data into [matching, non_matching] by a field condition."""
        condition_field = config.get("condition_field")
        condition_value = config.get("condition_value")
        condition_operator = config.get("condition_operator", "==")  # ==, !=, >, <, >=, <=, in, not_in

        if not condition_field:
            logger.error("condition_field must be specified for condition split strategy")
            return None

        valid_operators = ["==", "!=", ">", "<", ">=", "<=", "in", "not_in"]
        if condition_operator not in valid_operators:
            logger.error(f"Invalid condition_operator: {condition_operator}")
            return None

        matching_data = []
        non_matching_data = []
        for record in data:
            field_value = record.get(condition_field)
            if self._evaluate_condition(field_value, condition_value, condition_operator):
                matching_data.append(record)
            else:
                non_matching_data.append(record)

        return {
            "split_data": [matching_data, non_matching_data],
            "group_names": ["matching", "non_matching"]
        }

    def _split_by_field_value(self, data: List[Dict[str, Any]], config: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Split the data into one group per unique value of a field."""
        split_field = config.get("split_field")
        target_values = config.get("target_values", [])  # empty: split on every unique value
        max_groups = config.get("max_groups", 10)  # cap to avoid group explosion

        if not split_field:
            logger.error("split_field must be specified for field_value split strategy")
            return None

        # Group records by field value; missing values map to "null".
        groups = defaultdict(list)
        for record in data:
            groups[record.get(split_field, "null")].append(record)

        # Keep only the requested values when target_values is given.
        if target_values:
            groups = {value: groups[value] for value in target_values if value in groups}

        # Keep only the largest groups when there are too many.
        if len(groups) > max_groups:
            sorted_groups = sorted(groups.items(), key=lambda item: len(item[1]), reverse=True)
            groups = dict(sorted_groups[:max_groups])

        # Keys travel alongside the groups so DataSaver can name the outputs.
        return {
            "split_data": list(groups.values()),
            "group_keys": list(groups.keys())
        }

    def _split_by_count(self, data: List[Dict[str, Any]], config: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Split the data into groups of explicitly requested sizes."""
        counts = config.get("counts", [])  # records per subset
        remainder_strategy = config.get("remainder_strategy", "discard")  # discard, distribute, last_group

        if not counts or not isinstance(counts, list):
            logger.error("counts must be a list for count split strategy")
            return None

        data_size = len(data)
        total_requested = sum(counts)

        # More records requested than available: scale the counts down
        # proportionally (each group keeps at least one record).
        if total_requested > data_size:
            logger.warning(f"Requested {total_requested} records but only {data_size} available")
            adjustment_ratio = data_size / total_requested
            counts = [max(1, int(count * adjustment_ratio)) for count in counts]
            total_requested = sum(counts)

        remainder = data_size - total_requested

        # Distribute leftover records according to the configured strategy.
        if remainder > 0:
            if remainder_strategy == "distribute":
                # One extra record each for the first `remainder` groups.
                for i in range(min(remainder, len(counts))):
                    counts[i] += 1
            elif remainder_strategy == "last_group":
                # Append all leftovers to the final group.
                counts[-1] += remainder
            # "discard": leftover records are simply dropped.

        groups = []
        start_idx = 0
        for count in counts:
            end_idx = min(start_idx + count, data_size)
            groups.append(data[start_idx:end_idx])
            start_idx = end_idx
            if start_idx >= data_size:
                break

        return {
            "split_data": groups,
            "group_names": [f"group_{i}" for i in range(len(groups))]
        }

    def _evaluate_condition(self, field_value: Any, condition_value: Any, operator: str) -> bool:
        """Evaluate a single comparison; comparison errors count as no match."""
        try:
            if operator == "==":
                return field_value == condition_value
            if operator == "!=":
                return field_value != condition_value
            if operator in (">", "<", ">=", "<="):
                # Ordering comparisons require both sides to be present.
                if field_value is None or condition_value is None:
                    return False
                if operator == ">":
                    return field_value > condition_value
                if operator == "<":
                    return field_value < condition_value
                if operator == ">=":
                    return field_value >= condition_value
                return field_value <= condition_value
            if operator == "in":
                return field_value in condition_value if isinstance(condition_value, (list, tuple, set)) else False
            if operator == "not_in":
                return field_value not in condition_value if isinstance(condition_value, (list, tuple, set)) else True
            return False
        except (TypeError, ValueError):
            # Incomparable types (e.g. str vs int) are treated as no match.
            return False

    def _generate_split_info(self, split_result: List[List[Dict[str, Any]]]) -> Dict[str, Any]:
        """Summarize group count, sizes, total records and relative ratios."""
        group_sizes = [len(group) for group in split_result]
        total = sum(group_sizes)
        info = {
            "total_groups": len(split_result),
            "group_sizes": group_sizes,
            "total_records": total
        }

        # Relative sizes are only meaningful when at least one group exists.
        if split_result:
            info["group_ratios"] = [size / total if total > 0 else 0 for size in group_sizes]

        return info

    def validate_config(self) -> bool:
        """Validate the strategy-specific plugin configuration."""
        config = self.config.get("config", {})
        split_strategy = config.get("split_strategy", "ratio")

        valid_strategies = ["ratio", "condition", "field_value", "count"]
        if split_strategy not in valid_strategies:
            logger.error(f"Invalid split_strategy: {split_strategy}")
            return False

        # Strategy-specific checks.
        if split_strategy == "ratio":
            ratios = config.get("ratios", [])
            if not isinstance(ratios, list) or len(ratios) < 2:
                logger.error("For ratio strategy, ratios must be a list with at least 2 elements")
                return False
            for ratio in ratios:
                if not isinstance(ratio, (int, float)) or ratio <= 0:
                    logger.error("All ratios must be positive numbers")
                    return False

        elif split_strategy == "condition":
            if "condition_field" not in config:
                logger.error("For condition strategy, condition_field must be specified")
                return False
            condition_operator = config.get("condition_operator", "==")
            valid_operators = ["==", "!=", ">", "<", ">=", "<=", "in", "not_in"]
            if condition_operator not in valid_operators:
                logger.error(f"Invalid condition_operator: {condition_operator}")
                return False

        elif split_strategy == "field_value":
            if "split_field" not in config:
                logger.error("For field_value strategy, split_field must be specified")
                return False
            max_groups = config.get("max_groups", 10)
            if not isinstance(max_groups, int) or max_groups <= 0:
                logger.error("max_groups must be a positive integer")
                return False

        elif split_strategy == "count":
            counts = config.get("counts", [])
            if not isinstance(counts, list) or len(counts) < 1:
                logger.error("For count strategy, counts must be a list with at least 1 element")
                return False
            for count in counts:
                if not isinstance(count, int) or count <= 0:
                    logger.error("All counts must be positive integers")
                    return False
            remainder_strategy = config.get("remainder_strategy", "discard")
            if remainder_strategy not in ["discard", "distribute", "last_group"]:
                logger.error(f"Invalid remainder_strategy: {remainder_strategy}")
                return False

        return True