#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
缺失描述场景测试 - 验证数据引擎在缺少描述信息时的处理能力
"""

import asyncio
import pandas as pd
import json
import os
from datetime import datetime
from data_engine.core.engine import DataEngine
from data_engine.utils.llm_adapter import LLMClient
from data_engine.utils.logger import get_logger, setup_logging

# Enable DEBUG-level logging so the engine's internal steps are visible during test runs.
setup_logging(level="DEBUG")

# Dedicated logger for this test module.
test_logger = get_logger("missing_desc_test")


async def test_no_descriptions():
    """Verify the engine copes with a dataset loaded with no descriptions at all.

    Loads an employee DataFrame with terse column names, provides no
    dataset/table/field descriptions, and runs a single query.

    Returns:
        dict: Run summary — success flag, execution time, chart type/config,
        row count, and which description fields were supplied (all None here).
    """
    test_logger.info("=== 测试完全没有描述信息 ===")

    # Sample employee data; column names are deliberately cryptic so the
    # engine has no naming hints to lean on.
    data = pd.DataFrame({
        'emp_id': [1001, 1002, 1003, 1004, 1005],
        'name': ['张三', '李四', '王五', '赵六', '钱七'],
        'dept': ['tech', 'sales', 'hr', 'finance', 'tech'],
        'sal': [8000, 6000, 5500, 7000, 9000],
        'perf': [85, 78, 92, 88, 95]
    })

    # Prefer the API key from the environment; the literal fallback is a
    # non-functional placeholder kept only for backward compatibility.
    llm_client = LLMClient(
        model="openai/qwen-turbo",
        api_base="https://dashscope.aliyuncs.com/compatible-mode/v1",
        api_key=os.getenv("DASHSCOPE_API_KEY", "sk-kkkkkkkkkkkkkkkkkkk")
    )

    # Debug mode surfaces the engine's intermediate reasoning in the logs.
    engine = DataEngine(llm_client, debug_mode=True)

    # Load the dataset without any descriptions — the scenario under test.
    engine.load_dataset(df=data)

    question = "显示员工薪资情况"
    test_logger.info(f"问题: {question}")

    start_time = datetime.now()
    result = await engine.process_query(question)
    end_time = datetime.now()

    execution_time = (end_time - start_time).total_seconds()

    test_logger.info(f"✅ 处理时间: {execution_time:.3f}秒")
    test_logger.info(f"✅ 成功: {result.success}")
    test_logger.info(f"✅ 图表类型: {result.chart_type}")
    test_logger.info(f"✅ 消息: {result.message}")

    return {
        "test_type": "no_descriptions",
        "success": result.success,
        "execution_time": execution_time,
        "chart_type": result.chart_type,
        "data_rows": len(data),
        "chart_config": result.chart_config,
        "provided_desc": {
            "dataset_desc": None,
            "table_desc": None,
            "field_desc": None
        }
    }


async def test_partial_descriptions():
    """Verify the engine copes when only the dataset-level description is given.

    Provides `dataset_desc` but deliberately omits `table_desc` and
    `field_desc`, then runs a single analysis query.

    Returns:
        dict: Run summary — success flag, execution time, chart type/config,
        row count, and which description fields were supplied.
    """
    test_logger.info("\n=== 测试部分描述信息 ===")

    # Product sales sample data.
    data = pd.DataFrame({
        'product_code': ['P001', 'P002', 'P003', 'P004'],
        'sales_qty': [120, 85, 200, 150],
        'unit_price': [299.99, 199.99, 399.99, 249.99],
        'category_id': [1, 2, 1, 3]
    })

    # Prefer the API key from the environment; the literal fallback is a
    # non-functional placeholder kept only for backward compatibility.
    llm_client = LLMClient(
        model="openai/qwen-turbo",
        api_base="https://dashscope.aliyuncs.com/compatible-mode/v1",
        api_key=os.getenv("DASHSCOPE_API_KEY", "sk-kkkkkkkkkkkkkkkkkkk")
    )

    # Debug mode surfaces the engine's intermediate reasoning in the logs.
    engine = DataEngine(llm_client, debug_mode=True)

    # Only the dataset description is provided; table and field descriptions
    # are intentionally omitted — the scenario under test.
    engine.load_dataset(
        df=data,
        dataset_desc="产品销售数据"
    )

    question = "分析产品销售情况"
    test_logger.info(f"问题: {question}")

    start_time = datetime.now()
    result = await engine.process_query(question)
    end_time = datetime.now()

    execution_time = (end_time - start_time).total_seconds()

    test_logger.info(f"✅ 处理时间: {execution_time:.3f}秒")
    test_logger.info(f"✅ 成功: {result.success}")
    test_logger.info(f"✅ 图表类型: {result.chart_type}")
    test_logger.info(f"✅ 消息: {result.message}")

    return {
        "test_type": "partial_descriptions",
        "success": result.success,
        "execution_time": execution_time,
        "chart_type": result.chart_type,
        "data_rows": len(data),
        "chart_config": result.chart_config,
        "provided_desc": {
            "dataset_desc": "产品销售数据",
            "table_desc": None,
            "field_desc": None
        }
    }


async def test_only_field_descriptions():
    """Verify the engine copes when only per-field descriptions are given.

    Provides `field_desc` for every column but deliberately omits
    `dataset_desc` and `table_desc`, then runs a grouped-aggregation query.

    Returns:
        dict: Run summary — success flag, execution time, chart type/config,
        row count, and which description fields were supplied.
    """
    test_logger.info("\n=== 测试只有字段描述 ===")

    # Order sample data.
    data = pd.DataFrame({
        'order_date': ['2024-01-15', '2024-01-16', '2024-01-17'],
        'customer_type': ['VIP', '普通', 'VIP'],
        'order_amount': [1500.00, 800.00, 2200.00],
        'payment_method': ['支付宝', '微信', '银行卡']
    })

    # Prefer the API key from the environment; the literal fallback is a
    # non-functional placeholder kept only for backward compatibility.
    llm_client = LLMClient(
        model="openai/qwen-turbo",
        api_base="https://dashscope.aliyuncs.com/compatible-mode/v1",
        api_key=os.getenv("DASHSCOPE_API_KEY", "sk-kkkkkkkkkkkkkkkkkkk")
    )

    # Debug mode surfaces the engine's intermediate reasoning in the logs.
    engine = DataEngine(llm_client, debug_mode=True)

    # Only field descriptions are provided; dataset and table descriptions
    # are intentionally omitted — the scenario under test.
    engine.load_dataset(
        df=data,
        field_desc={
            "order_date": "订单日期",
            "customer_type": "客户类型",
            "order_amount": "订单金额（元）",
            "payment_method": "支付方式"
        }
    )

    question = "按客户类型统计订单金额"
    test_logger.info(f"问题: {question}")

    start_time = datetime.now()
    result = await engine.process_query(question)
    end_time = datetime.now()

    execution_time = (end_time - start_time).total_seconds()

    test_logger.info(f"✅ 处理时间: {execution_time:.3f}秒")
    test_logger.info(f"✅ 成功: {result.success}")
    test_logger.info(f"✅ 图表类型: {result.chart_type}")
    test_logger.info(f"✅ 消息: {result.message}")

    return {
        "test_type": "only_field_descriptions",
        "success": result.success,
        "execution_time": execution_time,
        "chart_type": result.chart_type,
        "data_rows": len(data),
        "chart_config": result.chart_config,
        "provided_desc": {
            "dataset_desc": None,
            "table_desc": None,
            "field_desc": "provided"
        }
    }


async def test_empty_strings():
    """Verify the engine copes with empty (but present) description values.

    Passes empty strings for `dataset_desc`/`table_desc` and an empty dict
    for `field_desc` — distinct from omitting them entirely — then runs a
    comparison query.

    Returns:
        dict: Run summary — success flag, execution time, chart type/config,
        row count, and which description fields were supplied.
    """
    test_logger.info("\n=== 测试空字符串描述 ===")

    # Regional revenue/cost/profit sample data.
    data = pd.DataFrame({
        'region': ['北京', '上海', '广州', '深圳'],
        'revenue': [1200000, 1500000, 980000, 1100000],
        'cost': [800000, 950000, 650000, 720000],
        'profit': [400000, 550000, 330000, 380000]
    })

    # Prefer the API key from the environment; the literal fallback is a
    # non-functional placeholder kept only for backward compatibility.
    llm_client = LLMClient(
        model="openai/qwen-turbo",
        api_base="https://dashscope.aliyuncs.com/compatible-mode/v1",
        api_key=os.getenv("DASHSCOPE_API_KEY", "sk-kkkkkkkkkkkkkkkkkkk")
    )

    # Debug mode surfaces the engine's intermediate reasoning in the logs.
    engine = DataEngine(llm_client, debug_mode=True)

    # All description arguments are present but empty — the scenario under test.
    engine.load_dataset(
        df=data,
        dataset_desc="",  # empty string, not None
        table_desc="",    # empty string, not None
        field_desc={}     # empty dict, not None
    )

    question = "比较各地区的利润情况"
    test_logger.info(f"问题: {question}")

    start_time = datetime.now()
    result = await engine.process_query(question)
    end_time = datetime.now()

    execution_time = (end_time - start_time).total_seconds()

    test_logger.info(f"✅ 处理时间: {execution_time:.3f}秒")
    test_logger.info(f"✅ 成功: {result.success}")
    test_logger.info(f"✅ 图表类型: {result.chart_type}")
    test_logger.info(f"✅ 消息: {result.message}")

    return {
        "test_type": "empty_strings",
        "success": result.success,
        "execution_time": execution_time,
        "chart_type": result.chart_type,
        "data_rows": len(data),
        "chart_config": result.chart_config,
        "provided_desc": {
            "dataset_desc": "empty_string",
            "table_desc": "empty_string",
            "field_desc": "empty_dict"
        }
    }


async def test_mixed_scenarios():
    """Verify the engine copes with a mixed bag of description coverage.

    Supplies a dataset description, an empty table description, and field
    descriptions for only some columns (purchase_amount and device_type
    are left undescribed), then runs a relationship-analysis query.

    Returns:
        dict: Run summary — success flag, execution time, chart type/config,
        row count, and which description fields were supplied.
    """
    test_logger.info("\n=== 测试混合场景 ===")

    # User-behavior sample data.
    data = pd.DataFrame({
        'user_id': [1, 2, 3, 4, 5],
        'login_count': [25, 12, 45, 8, 33],
        'purchase_amount': [2500.0, 0.0, 4200.0, 150.0, 1800.0],
        'last_active': ['2024-01-20', '2024-01-18', '2024-01-21', '2024-01-15', '2024-01-19'],
        'device_type': ['mobile', 'desktop', 'mobile', 'tablet', 'mobile']
    })

    # Prefer the API key from the environment; the literal fallback is a
    # non-functional placeholder kept only for backward compatibility.
    llm_client = LLMClient(
        model="openai/qwen-turbo",
        api_base="https://dashscope.aliyuncs.com/compatible-mode/v1",
        api_key=os.getenv("DASHSCOPE_API_KEY", "sk-kkkkkkkkkkkkkkkkkkk")
    )

    # Debug mode surfaces the engine's intermediate reasoning in the logs.
    engine = DataEngine(llm_client, debug_mode=True)

    # Mixed coverage: dataset described, table description empty, and only
    # a subset of fields described — the scenario under test.
    engine.load_dataset(
        df=data,
        dataset_desc="用户行为数据",
        table_desc="",  # empty string
        field_desc={
            "user_id": "用户ID",
            "login_count": "登录次数",
            # purchase_amount intentionally undescribed
            "last_active": "最后活跃日期"
            # device_type intentionally undescribed
        }
    )

    question = "分析用户活跃度和购买行为的关系"
    test_logger.info(f"问题: {question}")

    start_time = datetime.now()
    result = await engine.process_query(question)
    end_time = datetime.now()

    execution_time = (end_time - start_time).total_seconds()

    test_logger.info(f"✅ 处理时间: {execution_time:.3f}秒")
    test_logger.info(f"✅ 成功: {result.success}")
    test_logger.info(f"✅ 图表类型: {result.chart_type}")
    test_logger.info(f"✅ 消息: {result.message}")

    return {
        "test_type": "mixed_scenarios",
        "success": result.success,
        "execution_time": execution_time,
        "chart_type": result.chart_type,
        "data_rows": len(data),
        "chart_config": result.chart_config,
        "provided_desc": {
            "dataset_desc": "用户行为数据",
            "table_desc": "empty_string",
            "field_desc": "partial"
        }
    }


async def main():
    """Run every missing-description scenario and persist a JSON summary.

    On success, writes test_output/missing_desc_test_<timestamp>.json; on any
    exception, logs the traceback and writes an error report instead. Never
    re-raises, so the process exits cleanly either way.
    """
    test_logger.info("🚀 开始缺失描述场景测试...\n")

    results = []

    try:
        # Scenarios ordered roughly from "nothing provided" to "partially provided".
        test_functions = [
            test_no_descriptions,
            test_partial_descriptions,
            test_only_field_descriptions,
            test_empty_strings,
            test_mixed_scenarios
        ]

        for test_func in test_functions:
            results.append(await test_func())

        test_summary = {
            "timestamp": datetime.now().isoformat(),
            "total_tests": len(results),
            "successful_tests": sum(1 for r in results if r["success"]),
            "failed_tests": sum(1 for r in results if not r["success"]),
            "results": results,
            "average_execution_time": sum(r["execution_time"] for r in results) / len(results),
            # Keyed by each result's own test_type rather than positional
            # indexing (results[0]..results[4]), so adding, removing or
            # reordering scenarios cannot misattribute outcomes.
            "description_scenarios": {
                r["test_type"]: r["success"] for r in results
            }
        }

        # Ensure the output directory exists before writing.
        os.makedirs("test_output", exist_ok=True)

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"test_output/missing_desc_test_{timestamp}.json"

        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(test_summary, f, ensure_ascii=False, indent=2)

        test_logger.info(f"\n📊 测试总结:")
        test_logger.info(f"✅ 成功测试: {test_summary['successful_tests']}/{test_summary['total_tests']}")
        test_logger.info(f"❌ 失败测试: {test_summary['failed_tests']}/{test_summary['total_tests']}")
        test_logger.info(f"⚡ 平均处理时间: {test_summary['average_execution_time']:.3f}秒")
        test_logger.info(f"\n📋 各场景测试结果:")
        for scenario, success in test_summary['description_scenarios'].items():
            status = "✅" if success else "❌"
            test_logger.info(f"   {status} {scenario}: {'成功' if success else '失败'}")
        # Fixed: previously logged a literal placeholder instead of the path.
        test_logger.info(f"\n📁 详细结果已保存到: {filename}")

        # Dump the generated ECharts configs for manual inspection.
        test_logger.info("\n📈 ECharts图表配置:")
        for i, result in enumerate(results):
            if result.get('chart_config'):
                test_logger.info(f"\n--- 测试 {i+1}: {result['test_type']} ---")
                test_logger.info(json.dumps(result['chart_config'], ensure_ascii=False, indent=2))

    except Exception as e:
        test_logger.error(f"❌ 测试失败: {str(e)}")
        import traceback
        test_logger.error(traceback.format_exc())

        # Persist the failure details alongside how far the run got.
        error_data = {
            "timestamp": datetime.now().isoformat(),
            "error": str(e),
            "traceback": traceback.format_exc(),
            "completed_tests": len(results)
        }

        os.makedirs("test_output", exist_ok=True)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"test_output/missing_desc_test_error_{timestamp}.json"

        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(error_data, f, ensure_ascii=False, indent=2)

        # Fixed: previously logged a literal placeholder instead of the path.
        test_logger.error(f"📁 错误信息已保存到: {filename}")


# Script entry point: run the async test suite on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())