#!/usr/bin/env python3
"""
数据保存插件完整Pipeline演示
展示DataSaver如何与DataMerger和DataSplitter配合使用
"""

import os
import sys
import json
import tempfile

# 添加项目根目录到Python路径
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))

from plugins.data_analysis.data_analysis import DataMerger, DataSplitter, DataSaver

def create_sample_data():
    """Build three parallel sample datasets joined by ``emp_id``.

    Returns:
        tuple: ``(employee_data, salary_data, performance_data)`` — three
        lists of five dicts each, sharing emp_id values 1 through 5.
    """
    # Parallel per-employee attribute sequences; index i maps to emp_id i+1.
    names = ("张三", "李四", "王五", "赵六", "孙七")
    departments = ("IT", "HR", "Finance", "IT", "HR")
    ages = (28, 32, 35, 29, 31)
    salaries = (8000, 7500, 9000, 8500, 7200)
    bonuses = (1000, 800, 1200, 1100, 700)
    scores = (85, 78, 92, 88, 76)
    ratings = ("A", "B", "A", "A", "B")

    # Basic employee profile records.
    employee_data = [
        {"emp_id": i, "name": n, "department": d, "age": a}
        for i, (n, d, a) in enumerate(zip(names, departments, ages), start=1)
    ]

    # Compensation records.
    salary_data = [
        {"emp_id": i, "salary": s, "bonus": b}
        for i, (s, b) in enumerate(zip(salaries, bonuses), start=1)
    ]

    # Performance review records.
    performance_data = [
        {"emp_id": i, "score": s, "rating": r}
        for i, (s, r) in enumerate(zip(scores, ratings), start=1)
    ]

    return employee_data, salary_data, performance_data

def demo_merger_saver_pipeline():
    """Demo 1: merge employee datasets step by step and persist each result.

    Pipeline: inner-join base info with salary data, save as JSON, join in
    the performance data, save as CSV, then preview the files written.
    All files go into a temporary directory removed when the demo exits.
    """
    print("=== 演示1: 数据合并+保存Pipeline ===")

    employees, salaries, performances = create_sample_data()

    with tempfile.TemporaryDirectory() as workdir:
        # Step 1: inner-join base info with salary info on emp_id.
        print("\n第一步：合并员工基本信息和薪资信息")
        first_merger = DataMerger("employee_salary_merger", {
            "config": {"merge_type": "inner", "primary_key": "emp_id"}
        })
        first_merge = first_merger.execute({"raw_data": [employees, salaries]})
        print(f"合并结果：{first_merge}")

        # Step 2: persist the intermediate merge as pretty-printed JSON.
        print("\n第二步：保存合并结果")
        json_saver = DataSaver("merged_data_saver", {
            "config": {
                "format": "json",
                "output_path": os.path.join(workdir, "merged_employee_data.json"),
                "include_metadata": True,
                "pretty_print": True,
            }
        })
        print(f"保存结果：{json_saver.execute(first_merge)}")

        # Step 3: join the performance records onto the merged data.
        print("\n第三步：合并业绩数据")
        second_merger = DataMerger("final_merger", {
            "config": {"merge_type": "inner", "primary_key": "emp_id"}
        })
        second_merge = second_merger.execute({
            "raw_data": [first_merge["merged_data"], performances]
        })
        print(f"最终合并结果：{second_merge}")

        # Step 4: persist the fully merged dataset as CSV.
        print("\n第四步：保存最终合并结果到CSV")
        csv_saver = DataSaver("final_data_saver", {
            "config": {
                "format": "csv",
                "output_path": os.path.join(workdir, "final_employee_data.csv"),
            }
        })
        print(f"最终保存结果：{csv_saver.execute(second_merge)}")

        # Preview: report size and the first 200 characters of each file.
        print(f"\n保存的文件：")
        for filename in ("merged_employee_data.json", "final_employee_data.csv"):
            full_path = os.path.join(workdir, filename)
            if not os.path.exists(full_path):
                continue
            print(f"- {filename} (大小: {os.path.getsize(full_path)} bytes)")
            with open(full_path, 'r', encoding='utf-8') as handle:
                text = handle.read()
                print(f"  内容预览: {text[:200]}...")

def demo_splitter_saver_pipeline():
    """Demo 2: split the employee roster two ways and save each partition.

    First splits by department (``field_value`` strategy) and writes one
    JSON file per department, then splits by an age threshold of 30
    (``condition`` strategy) and writes one CSV per age group.
    """
    print("\n\n=== 演示2: 数据拆分+保存Pipeline ===")

    # Full roster: eight employees across three departments.
    roster = [
        {"emp_id": 1, "name": "张三", "department": "IT", "age": 28, "salary": 8000, "score": 85},
        {"emp_id": 2, "name": "李四", "department": "HR", "age": 32, "salary": 7500, "score": 78},
        {"emp_id": 3, "name": "王五", "department": "Finance", "age": 35, "salary": 9000, "score": 92},
        {"emp_id": 4, "name": "赵六", "department": "IT", "age": 29, "salary": 8500, "score": 88},
        {"emp_id": 5, "name": "孙七", "department": "HR", "age": 31, "salary": 7200, "score": 76},
        {"emp_id": 6, "name": "周八", "department": "IT", "age": 27, "salary": 7800, "score": 82},
        {"emp_id": 7, "name": "吴九", "department": "Finance", "age": 33, "salary": 8800, "score": 89},
        {"emp_id": 8, "name": "郑十", "department": "HR", "age": 30, "salary": 7000, "score": 75}
    ]

    with tempfile.TemporaryDirectory() as workdir:
        # Step 1: one partition per department value.
        print("\n第一步：按部门拆分数据")
        dept_splitter = DataSplitter("department_splitter", {
            "config": {
                "split_strategy": "field_value",
                "split_field": "department",
                "target_values": ["IT", "HR", "Finance"],
            }
        })
        payload = {"raw_data": roster}
        dept_result = dept_splitter.execute(payload)
        print(f"拆分结果：{dept_result}")

        # Step 2: wrap each department's rows with a count and save as JSON.
        print("\n第二步：保存每个部门的数据")
        for dept in ("IT", "HR", "Finance"):
            if dept not in dept_result["split_data"]:
                continue
            rows = dept_result["split_data"][dept]
            dept_saver = DataSaver(f"{dept}_saver", {
                "config": {
                    "format": "json",
                    "output_path": os.path.join(workdir, f"{dept}_department.json"),
                    "include_metadata": True,
                    "pretty_print": True,
                }
            })
            outcome = dept_saver.execute({
                "department": dept,
                "employees": rows,
                "count": len(rows),
            })
            print(f"{dept}部门数据保存结果：{outcome}")

        # Step 3: split the same payload by an age threshold of 30.
        print("\n第三步：按年龄拆分（训练集和测试集）")
        age_splitter = DataSplitter("age_splitter", {
            "config": {
                "split_strategy": "condition",
                "condition_field": "age",
                "conditions": [
                    {"field": "age", "operator": "<", "value": 30, "group_name": "young_employees"},
                    {"field": "age", "operator": ">=", "value": 30, "group_name": "senior_employees"},
                ],
            }
        })
        age_result = age_splitter.execute(payload)
        print(f"年龄拆分结果：{age_result}")

        # Step 4: save each non-empty age group as its own CSV file.
        print("\n第四步：保存年龄分组数据")
        if not age_result.get("split_data"):
            print("年龄分组数据为空，跳过保存")
            return
        for group in ("young_employees", "senior_employees"):
            if group not in age_result["split_data"]:
                continue
            group_saver = DataSaver(f"{group}_saver", {
                "config": {
                    "format": "csv",
                    "output_path": os.path.join(workdir, f"{group}.csv"),
                }
            })
            result = group_saver.execute(age_result["split_data"][group])
            print(f"{group}数据保存结果：{result}")

def demo_mixed_pipeline():
    """Demo 3: combine splitting and saving in one pipeline.

    Splits five employee records by their score values, then saves the full
    dataset as JSON, the re-flattened split groups as CSV, and the full
    dataset again as a TXT table. All files go into a temporary directory.
    """
    print("\n\n=== 演示3: 混合Pipeline（合并+拆分+保存） ===")

    # Full employee records used throughout this demo.
    complete_employee_data = [
        {"emp_id": 1, "name": "张三", "department": "IT", "age": 28, "salary": 8000, "score": 85},
        {"emp_id": 2, "name": "李四", "department": "HR", "age": 32, "salary": 7500, "score": 78},
        {"emp_id": 3, "name": "王五", "department": "Finance", "age": 35, "salary": 9000, "score": 92},
        {"emp_id": 4, "name": "赵六", "department": "IT", "age": 29, "salary": 8500, "score": 88},
        {"emp_id": 5, "name": "孙七", "department": "HR", "age": 31, "salary": 7200, "score": 76}
    ]

    with tempfile.TemporaryDirectory() as temp_dir:
        # Step 1: split by score; each target value becomes its own group.
        print("\n第一步：按绩效评分拆分")
        performance_splitter_config = {
            "split_strategy": "field_value",
            "split_field": "score",
            "target_values": [85, 78, 92, 88, 76]
        }

        performance_splitter = DataSplitter("performance_splitter", {
            "config": performance_splitter_config
        })

        split_input = {
            "raw_data": complete_employee_data
        }

        performance_split_result = performance_splitter.execute(split_input)
        print(f"绩效拆分结果：{performance_split_result}")

        # Step 2: persist the data in several formats.
        print("\n第二步：保存不同绩效等级的数据")

        # Save the complete dataset as pretty-printed JSON with metadata.
        json_config = {
            "format": "json",
            "output_path": os.path.join(temp_dir, "complete_employee_data.json"),
            "include_metadata": True,
            "pretty_print": True
        }

        json_saver = DataSaver("complete_json_saver", {
            "config": json_config
        })

        json_save_result = json_saver.execute(complete_employee_data)
        print(f"完整员工数据JSON保存结果：{json_save_result}")

        # Save the split result as CSV, re-flattened into a single list.
        if performance_split_result.get("split_data"):
            csv_config = {
                "format": "csv",
                "output_path": os.path.join(temp_dir, "split_employees.csv")
            }

            csv_saver = DataSaver("split_csv_saver", {
                "config": csv_config
            })

            # split_data maps group name -> list of rows (it is indexed by
            # key in the other demos), so iterate .values(); iterating the
            # dict itself yields the string keys, the isinstance check
            # rejects every one, and an empty CSV would be written.
            all_employees = []
            for group_data in performance_split_result["split_data"].values():
                if isinstance(group_data, list):
                    all_employees.extend(group_data)

            csv_save_result = csv_saver.execute(all_employees)
            print(f"拆分数据CSV保存结果：{csv_save_result}")

        # Save the complete dataset once more as a TXT table.
        txt_config = {
            "format": "txt",
            "output_path": os.path.join(temp_dir, "employee_table.txt"),
            "pretty_print": True
        }

        txt_saver = DataSaver("employee_txt_saver", {
            "config": txt_config
        })

        txt_save_result = txt_saver.execute(complete_employee_data)
        print(f"员工数据TXT表格保存结果：{txt_save_result}")

def demo_error_handling():
    """Demo 4: exercise DataSaver's failure paths.

    Feeds empty and None payloads to a minimal JSON saver, then sends
    valid data to a saver configured with an unrecognized format string.
    """
    print("\n\n=== 演示4: 错误处理 ===")

    # Empty / None payloads against a saver with only a format configured.
    print("\n测试空数据保存")
    json_saver = DataSaver("error_test_saver", {
        "config": {"format": "json"}
    })

    print(f"空数据保存结果：{json_saver.execute([])}")
    print(f"None数据保存结果：{json_saver.execute(None)}")

    # Valid payload against a saver built with an unknown format.
    print("\n测试无效格式")
    bad_saver = DataSaver("invalid_saver", {
        "config": {"format": "invalid_format"}
    })

    sample = [{"id": 1, "name": "测试"}]
    print(f"无效格式保存结果：{bad_saver.execute(sample)}")

if __name__ == "__main__":
    print("开始数据保存插件完整Pipeline演示...\n")

    try:
        # Run every demo in order; any uncaught error aborts the rest.
        for demo in (demo_merger_saver_pipeline,
                     demo_splitter_saver_pipeline,
                     demo_mixed_pipeline,
                     demo_error_handling):
            demo()

        print("\n=== 所有演示完成 ===")
        print("✓ DataSaver插件成功集成到数据处理Pipeline中")
        print("✓ 支持JSON、CSV、TXT多种格式保存")
        print("✓ 完美兼容DataMerger和DataSplitter插件")
        print("✓ 提供完善的错误处理和配置验证")

    except Exception as exc:
        # Report the failure with a full traceback instead of crashing.
        print(f"\n✗ 演示过程中出现错误: {exc}")
        import traceback
        traceback.print_exc()