#!/usr/bin/env python3
"""
供应链业务数据集调用接口
基于SCOR模型组织的Kaggle数据集，支持直接业务调用
"""

import pandas as pd
import numpy as np
import os
import json
from datetime import datetime
from typing import Dict, List, Optional, Union

class BusinessDatasetInterface:
    """Access layer for SCOR-organised Kaggle supply-chain datasets.

    Datasets are grouped by SCOR process (plan / source / deliver /
    comprehensive) and read from ``data/external/kaggle/scor_datasets``.
    The catalogue itself is static; files are only touched by
    :meth:`load_dataset`.
    """

    def __init__(self):
        # Root folder holding one sub-directory per SCOR process.
        self.base_path = "data/external/kaggle/scor_datasets"
        self.datasets_info = self._load_available_datasets()

    def _load_available_datasets(self) -> Dict:
        """Return the static dataset catalogue, keyed by SCOR process then name."""
        datasets = {
            "plan": {
                "需求预测数据": {
                    "file": "Historical Product Demand.csv",
                    "description": "历史产品需求数据，用于需求预测和规划",
                    "use_cases": ["需求预测", "销售分析", "库存规划"],
                    "key_columns": ["Product_Code", "Warehouse", "Product_Category", "Date", "Order_Demand"]
                },
                "沃尔玛销售数据": {
                    "file": "Walmart.csv",
                    "description": "沃尔玛销售数据，用于零售需求分析",
                    "use_cases": ["零售分析", "季节性预测", "促销效果分析"],
                    "key_columns": ["Store", "Date", "Weekly_Sales", "Holiday_Flag", "Temperature"]
                }
            },
            "source": {
                "供应链数据": {
                    "file": "supply_chain_data.csv",
                    "description": "供应商和采购相关数据",
                    "use_cases": ["供应商分析", "采购优化", "供应风险评估"],
                    "key_columns": ["Product type", "SKU", "Price", "Availability", "Number of products sold"]
                }
            },
            "deliver": {
                "交付数据": {
                    "file": "Train.csv",
                    "description": "交付和物流相关数据",
                    "use_cases": ["交付分析", "物流优化", "客户服务分析"],
                    "key_columns": ["ID", "Warehouse_block", "Mode_of_Shipment", "Customer_care_calls", "Reached.on.Time_Y.N"]
                }
            },
            "comprehensive": {
                "DataCo智能供应链": {
                    "file": "DataCoSupplyChainDataset.csv",
                    "description": "DataCo智能供应链综合数据集",
                    "use_cases": ["端到端分析", "AI训练", "综合优化"],
                    "key_columns": ["Type", "Days for shipping (real)", "Days for shipment (scheduled)", "Benefit per order", "Sales per customer"]
                },
                "供应链管理数据集": {
                    "file": "SCM Dataset.xlsx",
                    "description": "供应链管理综合数据集",
                    "use_cases": ["供应链建模", "流程优化", "决策支持"],
                    "key_columns": ["根据Excel文件结构确定"]
                },
                "绿色物流数据集": {
                    "file": "SCM_Dataset_Updated_with_Green_Logistics.xlsx",
                    "description": "包含绿色物流的供应链数据集",
                    "use_cases": ["可持续供应链", "碳足迹分析", "绿色物流优化"],
                    "key_columns": ["根据Excel文件结构确定"]
                }
            }
        }
        return datasets

    def list_available_datasets(self) -> Dict:
        """Print every catalogued dataset grouped by SCOR process; return the catalogue."""
        print("=== 可用供应链业务数据集 ===")

        scor_mapping = {
            "plan": "计划 (Plan) - 需求预测与规划",
            "source": "采购 (Source) - 供应商与采购管理", 
            "deliver": "交付 (Deliver) - 物流与配送",
            "comprehensive": "综合数据集 - 端到端供应链分析"
        }

        for scor_process, datasets in self.datasets_info.items():
            print(f"\n📊 {scor_mapping.get(scor_process, scor_process)}")
            for name, info in datasets.items():
                print(f"  • {name}")
                print(f"    文件: {info['file']}")
                print(f"    描述: {info['description']}")
                print(f"    应用场景: {', '.join(info['use_cases'])}")

        return self.datasets_info

    def load_dataset(self, scor_process: str, dataset_name: str, **kwargs) -> pd.DataFrame:
        """Load a catalogued dataset from disk into a DataFrame.

        Args:
            scor_process: SCOR process key (plan/source/deliver/comprehensive).
            dataset_name: Catalogue name of the dataset.
            **kwargs: Extra keyword arguments forwarded to the pandas reader.

        Returns:
            pandas.DataFrame: The loaded dataset.

        Raises:
            ValueError: Unknown process/dataset, or unsupported file extension.
            FileNotFoundError: The catalogued file does not exist on disk.
        """
        # Reuse the catalogue lookup (and its error messages) instead of
        # duplicating the validation here.
        dataset_info = self.get_dataset_info(scor_process, dataset_name)
        # Comprehensive datasets live in a Chinese-named sub-folder on disk.
        file_path = os.path.join(self.base_path, 
                                scor_process if scor_process != 'comprehensive' else '综合性数据集', 
                                dataset_info['file'])

        if not os.path.exists(file_path):
            raise FileNotFoundError(f"数据文件不存在: {file_path}")

        print(f"正在加载: {dataset_name}")
        print(f"文件路径: {file_path}")

        # Pick the pandas reader from the file extension.
        if file_path.endswith('.csv'):
            df = pd.read_csv(file_path, **kwargs)
        elif file_path.endswith('.xlsx') or file_path.endswith('.xls'):
            df = pd.read_excel(file_path, **kwargs)
        else:
            raise ValueError(f"不支持的文件格式: {file_path}")

        print(f"✓ 成功加载数据集: {df.shape[0]}行 x {df.shape[1]}列")
        return df

    def get_dataset_info(self, scor_process: str, dataset_name: str) -> Dict:
        """Return the catalogue entry for one dataset.

        Raises:
            ValueError: Unknown SCOR process or dataset name.
        """
        if scor_process not in self.datasets_info:
            raise ValueError(f"不支持的SCOR流程: {scor_process}")

        if dataset_name not in self.datasets_info[scor_process]:
            raise ValueError(f"数据集 '{dataset_name}' 在 '{scor_process}' 流程中不存在")

        return self.datasets_info[scor_process][dataset_name]

    def analyze_dataset(self, df: pd.DataFrame, dataset_name: str = "") -> Dict:
        """Profile a DataFrame: size, dtypes, nulls, duplicates, summaries.

        Args:
            df: Dataset to profile.
            dataset_name: Optional label stored in the report.

        Returns:
            Dict with basic info, per-column info, data-quality metrics,
            plus describe() output for numeric columns and value counts for
            the first five categorical (object-dtype) columns.
        """
        total_rows = len(df)
        # Plain int so json.dump does not need default= for this field.
        dup_rows = int(df.duplicated().sum())
        analysis = {
            "dataset_name": dataset_name,
            "analysis_date": datetime.now().isoformat(),
            "basic_info": {
                "rows": total_rows,
                "columns": len(df.columns),
                "memory_usage_mb": round(df.memory_usage(deep=True).sum() / 1024 / 1024, 2)
            },
            "columns_info": {
                "column_names": df.columns.tolist(),
                "data_types": df.dtypes.astype(str).to_dict(),
                "null_counts": df.isnull().sum().to_dict(),
                "null_percentages": (df.isnull().sum() / len(df) * 100).round(2).to_dict()
            },
            "data_quality": {
                "duplicate_rows": dup_rows,
                # Guard against an empty frame (int / 0 would raise).
                "duplicate_percentage": round(dup_rows / total_rows * 100, 2) if total_rows else 0.0,
                "completeness": round((1 - df.isnull().sum().sum() / (len(df) * len(df.columns))) * 100, 2)
            }
        }

        # Summary statistics for numeric columns.
        numeric_columns = df.select_dtypes(include=[np.number]).columns.tolist()
        if numeric_columns:
            analysis["numeric_summary"] = df[numeric_columns].describe().to_dict()

        # Cardinality / top values for categorical columns.
        categorical_columns = df.select_dtypes(include=['object']).columns.tolist()
        if categorical_columns:
            analysis["categorical_summary"] = {}
            for col in categorical_columns[:5]:  # only the first 5 categorical columns
                analysis["categorical_summary"][col] = {
                    "unique_values": df[col].nunique(),
                    "top_values": df[col].value_counts().head().to_dict()
                }

        return analysis

    def create_business_analysis_template(self, scor_process: str, dataset_name: str) -> str:
        """Generate a standalone analysis script for the given dataset.

        Returns valid Python source. Two fixes vs. the naive version:
        ``\\n`` escapes inside generated string literals are doubled so the
        emitted file contains ``\\n`` (not a raw newline, which was a
        SyntaxError), and the dataset name is baked into the generated
        ``analysis_file`` literal (``dataset_name`` does not exist in the
        generated script's scope).
        """
        template = f'''
# {dataset_name} 业务分析模板
# SCOR流程: {scor_process.upper()}

from business_dataset_interface import BusinessDatasetInterface
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# 初始化数据接口
data_interface = BusinessDatasetInterface()

# 加载数据集
df = data_interface.load_dataset("{scor_process}", "{dataset_name}")

# 数据集基本分析
analysis = data_interface.analyze_dataset(df, "{dataset_name}")
print("数据集基本信息:")
print(f"行数: {{analysis['basic_info']['rows']}}")
print(f"列数: {{analysis['basic_info']['columns']}}")
print(f"内存使用: {{analysis['basic_info']['memory_usage_mb']}}MB")
print(f"数据完整性: {{analysis['data_quality']['completeness']}}%")

# 显示前几行数据
print("\\n数据预览:")
print(df.head())

# 显示数据类型
print("\\n数据类型:")
print(df.dtypes)

# 缺失值分析
print("\\n缺失值统计:")
missing_data = df.isnull().sum()
if missing_data.sum() > 0:
    print(missing_data[missing_data > 0])
else:
    print("无缺失值")

# 根据SCOR流程进行特定分析
'''

        # Append process-specific analysis code.
        if scor_process == "plan":
            template += '''
# 需求规划分析
if 'Date' in df.columns or 'date' in df.columns:
    # 时间序列分析
    date_col = 'Date' if 'Date' in df.columns else 'date'
    df[date_col] = pd.to_datetime(df[date_col])
    
    # 需求趋势分析
    demand_cols = [col for col in df.columns if 'demand' in col.lower() or 'sales' in col.lower()]
    if demand_cols:
        plt.figure(figsize=(12, 6))
        for col in demand_cols[:3]:  # 最多显示3个需求列
            df.groupby(date_col)[col].sum().plot(label=col)
        plt.title('需求趋势分析')
        plt.legend()
        plt.show()

# 产品分析
product_cols = [col for col in df.columns if 'product' in col.lower()]
if product_cols:
    print(f"\\n产品相关列: {product_cols}")
'''
        elif scor_process == "source":
            template += '''
# 供应商分析
supplier_cols = [col for col in df.columns if 'supplier' in col.lower() or 'vendor' in col.lower()]
if supplier_cols:
    print(f"\\n供应商相关列: {supplier_cols}")

# 价格分析
price_cols = [col for col in df.columns if 'price' in col.lower() or 'cost' in col.lower()]
if price_cols:
    print(f"\\n价格相关列: {price_cols}")
    for col in price_cols:
        if df[col].dtype in ['int64', 'float64']:
            print(f"{col} 统计:")
            print(df[col].describe())
'''
        elif scor_process == "deliver":
            template += '''
# 交付分析
delivery_cols = [col for col in df.columns if any(keyword in col.lower() for keyword in ['delivery', 'ship', 'transport', 'time'])]
if delivery_cols:
    print(f"\\n交付相关列: {delivery_cols}")

# 准时交付分析
ontime_cols = [col for col in df.columns if 'time' in col.lower() and 'on' in col.lower()]
if ontime_cols:
    for col in ontime_cols:
        if df[col].dtype == 'object':
            print(f"\\n{col} 分布:")
            print(df[col].value_counts())
'''

        # Bake the output file name in now: the generated script has no
        # `dataset_name` variable, so it must be a literal there.
        safe_name = dataset_name.replace(' ', '_')
        template += f'''

# 保存分析结果
analysis_file = "{safe_name}_analysis.json"
with open(analysis_file, 'w', encoding='utf-8') as f:
    import json
    json.dump(analysis, f, ensure_ascii=False, indent=2, default=str)

print(f"\\n分析结果已保存至: {{analysis_file}}")
'''

        return template

    def quick_start_guide(self):
        """Print a short usage guide for this interface."""
        print("\n=== 供应链业务数据集快速使用指南 ===")
        print("\n1. 查看可用数据集:")
        print("   data_interface = BusinessDatasetInterface()")
        print("   data_interface.list_available_datasets()")

        print("\n2. 加载数据集:")
        print("   # 加载需求预测数据")
        print("   df = data_interface.load_dataset('plan', '需求预测数据')")

        print("\n3. 分析数据集:")
        print("   analysis = data_interface.analyze_dataset(df, '需求预测数据')")

        print("\n4. 生成分析模板:")
        print("   template = data_interface.create_business_analysis_template('plan', '需求预测数据')")
        print("   with open('analysis_template.py', 'w', encoding='utf-8') as f:")
        print("       f.write(template)")

        print("\n5. 常用业务场景:")
        scenarios = {
            "需求预测": "data_interface.load_dataset('plan', '需求预测数据')",
            "供应商分析": "data_interface.load_dataset('source', '供应链数据')",
            "交付优化": "data_interface.load_dataset('deliver', '交付数据')",
            "端到端分析": "data_interface.load_dataset('comprehensive', 'DataCo智能供应链')"
        }

        for scenario, code in scenarios.items():
            print(f"   {scenario}: {code}")

def main():
    """Entry point: interactive console for browsing the SCOR datasets."""
    interface = BusinessDatasetInterface()

    print("供应链业务数据集调用接口")
    print("基于SCOR模型的Kaggle数据集管理")

    menu_lines = (
        "\n请选择操作:",
        "1. 查看可用数据集",
        "2. 加载数据集",
        "3. 分析数据集",
        "4. 生成分析模板",
        "5. 快速使用指南",
        "0. 退出",
    )

    while True:
        for line in menu_lines:
            print(line)

        choice = input("\n请输入选择 (0-5): ").strip()

        if choice == "0":
            break

        if choice == "1":
            interface.list_available_datasets()

        elif choice == "2":
            # Ask for a process, then a dataset, and preview the data.
            print("\n可用SCOR流程: plan, source, deliver, comprehensive")
            scor_process = input("请输入SCOR流程: ").strip()

            if scor_process not in interface.datasets_info:
                print("无效的SCOR流程")
                continue

            print(f"\n{scor_process}流程可用数据集:")
            for name in interface.datasets_info[scor_process]:
                print(f"  - {name}")

            dataset_name = input("请输入数据集名称: ").strip()

            try:
                df = interface.load_dataset(scor_process, dataset_name)
                print("\n数据预览:")
                print(df.head())
                print(f"\n数据形状: {df.shape}")
            except Exception as e:
                print(f"加载失败: {e}")

        elif choice == "3":
            print("请先加载数据集...")
            # Placeholder: could grow into an interactive analysis flow.

        elif choice == "4":
            # Same process/dataset prompts, but write a template file instead.
            print("\n可用SCOR流程: plan, source, deliver, comprehensive")
            scor_process = input("请输入SCOR流程: ").strip()

            if scor_process not in interface.datasets_info:
                print("无效的SCOR流程")
                continue

            print(f"\n{scor_process}流程可用数据集:")
            for name in interface.datasets_info[scor_process]:
                print(f"  - {name}")

            dataset_name = input("请输入数据集名称: ").strip()

            if dataset_name not in interface.datasets_info[scor_process]:
                print("无效的数据集名称")
                continue

            template = interface.create_business_analysis_template(scor_process, dataset_name)
            template_file = f"{dataset_name.replace(' ', '_')}_analysis_template.py"

            with open(template_file, 'w', encoding='utf-8') as f:
                f.write(template)

            print(f"\n分析模板已生成: {template_file}")

        elif choice == "5":
            interface.quick_start_guide()

        else:
            print("无效选择，请重新输入")

if __name__ == "__main__":
    main()