#!/usr/bin/env python3
"""
Kaggle供应链数据集分析与推荐
基于当前项目的AI智能决策需求，匹配合适的数据集
"""

import pandas as pd
import numpy as np
import json
from datetime import datetime
from pathlib import Path
import logging

# Configure logging: INFO level on the root handler, plus a module-level logger.
# NOTE(review): `logger` is not referenced anywhere in this module — remove or use it.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class KaggleDatasetAnalyzer:
    """Analyzer that matches Kaggle supply-chain datasets to this project's AI needs.

    On construction it scans the project's ``data/processed`` and ``data/raw``
    directories for CSV files. The remaining methods return static catalogs
    (recommended datasets, AI requirements, integration plan), derive
    dataset-to-requirement matches, and assemble a full JSON-serializable report.
    """

    def __init__(self):
        # The project root is assumed to be the directory containing this file.
        self.project_root = Path(__file__).parent
        self.data_dir = self.project_root / "data"
        self.current_datasets = self._analyze_current_data()

    def _analyze_current_data(self):
        """Inventory the CSV files currently present in the project.

        Returns:
            dict: ``{"processed": [...], "raw": [...]}`` — lists of CSV
            filenames (non-recursive glob); empty when a directory is missing.
        """
        current_data = {
            "processed": [],
            "raw": []
        }

        # Processed data files.
        processed_dir = self.data_dir / "processed"
        if processed_dir.exists():
            current_data["processed"] = [f.name for f in processed_dir.glob("*.csv")]

        # Raw data files.
        raw_dir = self.data_dir / "raw"
        if raw_dir.exists():
            current_data["raw"] = [f.name for f in raw_dir.glob("*.csv")]

        return current_data

    def get_recommended_datasets(self):
        """Return the curated list of recommended Kaggle datasets.

        Returns:
            list[dict]: static records with name, URL, description (Chinese),
            AI application tags, a match score, and a relevance note.
        """
        # Recommendations curated from search results and project requirements.
        recommended_datasets = [
            {
                "name": "Smart Logistics Supply Chain Dataset",
                "kaggle_url": "https://www.kaggle.com/datasets/ziya07/smart-logistics-supply-chain-dataset",
                "description": "智慧物流供应链数据集，包含物流优化、配送路径、仓储管理等数据",
                "ai_applications": [
                    "路径优化",
                    "配送时间预测",
                    "仓储容量规划",
                    "物流成本分析"
                ],
                "match_score": 95,
                "project_relevance": "高度匹配项目的物流管理和路径优化需求"
            },
            {
                "name": "DataCo SMART SUPPLY CHAIN FOR BIG DATA ANALYSIS",
                "kaggle_url": "https://www.kaggle.com/datasets/shashwatwork/dataco-smart-supply-chain-for-big-data-analysis",
                "description": "大数据供应链分析数据集，包含订单、客户、产品、供应商等完整供应链数据",
                "ai_applications": [
                    "需求预测",
                    "客户行为分析",
                    "供应商绩效评估",
                    "订单模式识别"
                ],
                "match_score": 92,
                "project_relevance": "完美匹配项目的供应链管理和AI决策需求"
            },
            {
                "name": "Supply Chain Management Dataset",
                "kaggle_url": "https://www.kaggle.com/datasets/lastman0800/supply-chain-management",
                "description": "供应链管理数据集，包含库存、采购、销售等核心业务数据",
                "ai_applications": [
                    "库存优化",
                    "采购决策支持",
                    "销售预测",
                    "成本控制分析"
                ],
                "match_score": 88,
                "project_relevance": "匹配项目的库存管理和采购优化功能"
            },
            {
                "name": "Demand Forecast for Optimized Inventory Planning",
                "kaggle_url": "https://www.kaggle.com/datasets/oscarm524/demand-forecast-for-optimized-inventory-planning",
                "description": "需求预测和库存优化数据集，专门用于库存规划和需求预测模型训练",
                "ai_applications": [
                    "需求预测建模",
                    "库存水平优化",
                    "安全库存计算",
                    "补货策略制定"
                ],
                "match_score": 90,
                "project_relevance": "直接支持项目的需求预测和库存优化AI功能"
            },
            {
                "name": "Supply Chain Data",
                "kaggle_url": "https://www.kaggle.com/datasets/laurinbrechter/supply-chain-data",
                "description": "综合供应链数据，包含多维度的供应链运营数据",
                "ai_applications": [
                    "供应链可视化",
                    "异常检测",
                    "绩效分析",
                    "风险评估"
                ],
                "match_score": 85,
                "project_relevance": "支持项目的异常检测和绩效分析功能"
            }
        ]

        return recommended_datasets

    def analyze_ai_decision_requirements(self):
        """Return the project's AI decision-support requirement catalog.

        Returns:
            dict: keyed by requirement id; each value describes the
            requirement, its current implementation, data needs, and
            candidate AI models.
        """
        ai_requirements = {
            "demand_forecasting": {
                "description": "需求预测",
                "current_implementation": "基于历史数据的时间序列预测",
                "data_needs": [
                    "历史销售数据",
                    "季节性因素",
                    "促销活动数据",
                    "外部经济指标"
                ],
                "ai_models": ["ARIMA", "LSTM", "Prophet", "XGBoost"]
            },
            "inventory_optimization": {
                "description": "库存优化",
                "current_implementation": "基于安全库存和再订货点的优化",
                "data_needs": [
                    "库存周转数据",
                    "供应商交付时间",
                    "需求变动性",
                    "存储成本数据"
                ],
                "ai_models": ["强化学习", "遗传算法", "线性规划"]
            },
            "supplier_analysis": {
                "description": "供应商分析",
                "current_implementation": "基于绩效指标的供应商评估",
                "data_needs": [
                    "供应商绩效数据",
                    "质量指标",
                    "交付准时率",
                    "成本数据"
                ],
                "ai_models": ["聚类分析", "决策树", "随机森林"]
            },
            "anomaly_detection": {
                "description": "异常检测",
                "current_implementation": "基于统计阈值的异常识别",
                "data_needs": [
                    "实时运营数据",
                    "历史异常案例",
                    "多维度指标数据"
                ],
                "ai_models": ["Isolation Forest", "One-Class SVM", "LSTM-Autoencoder"]
            },
            "logistics_optimization": {
                "description": "物流优化",
                "current_implementation": "基于距离和成本的路径规划",
                "data_needs": [
                    "配送路径数据",
                    "交通状况",
                    "车辆容量",
                    "配送时间窗"
                ],
                "ai_models": ["遗传算法", "蚁群算法", "强化学习"]
            }
        }

        return ai_requirements

    def match_datasets_to_requirements(self):
        """Match the recommended datasets against the AI requirements.

        An application tag matches a requirement when it contains any
        whitespace-split keyword of the requirement's description (Chinese
        descriptions contain no spaces, so they act as one substring keyword).

        Returns:
            list[dict]: one entry per (dataset, requirement) pair with at
            least one matching application; ``match_strength`` is the number
            of matching application tags.
        """
        datasets = self.get_recommended_datasets()
        requirements = self.analyze_ai_decision_requirements()

        matches = []

        for dataset in datasets:
            for req_info in requirements.values():
                keywords = req_info["description"].lower().split()
                # BUGFIX: the previous version collected the single requirement
                # key into a set, so match_strength was always 1 for every
                # match. Count the matching application tags instead so the
                # strength metric is meaningful.
                matched_apps = [
                    app for app in dataset["ai_applications"]
                    if any(keyword in app.lower() for keyword in keywords)
                ]

                if matched_apps:
                    matches.append({
                        "dataset": dataset["name"],
                        "requirement": req_info["description"],
                        "match_strength": len(matched_apps),
                        "applications": dataset["ai_applications"],
                        "kaggle_url": dataset["kaggle_url"]
                    })

        return matches

    def generate_integration_plan(self):
        """Return the phased dataset-integration plan.

        Returns:
            dict: three phases, each with a title, dataset list, timeline,
            priority, and the AI capabilities it unlocks.
        """
        integration_plan = {
            "phase_1": {
                "title": "核心数据集集成",
                "datasets": [
                    "DataCo SMART SUPPLY CHAIN FOR BIG DATA ANALYSIS",
                    "Demand Forecast for Optimized Inventory Planning"
                ],
                "timeline": "1-2周",
                "priority": "高",
                "ai_capabilities": [
                    "需求预测增强",
                    "库存优化改进",
                    "客户行为分析"
                ]
            },
            "phase_2": {
                "title": "物流优化数据集成",
                "datasets": [
                    "Smart Logistics Supply Chain Dataset"
                ],
                "timeline": "2-3周",
                "priority": "中",
                "ai_capabilities": [
                    "路径优化",
                    "配送时间预测",
                    "物流成本分析"
                ]
            },
            "phase_3": {
                "title": "综合分析数据集成",
                "datasets": [
                    "Supply Chain Management Dataset",
                    "Supply Chain Data"
                ],
                "timeline": "3-4周",
                "priority": "中",
                "ai_capabilities": [
                    "异常检测增强",
                    "供应商分析改进",
                    "风险评估"
                ]
            }
        }

        return integration_plan

    def create_dataset_download_script(self):
        """Return the source text of a standalone Kaggle download script.

        The returned string is written to disk verbatim by the caller; it is
        runtime data, so its content (including its Chinese messages) must
        not be altered.
        """
        script_content = '''#!/usr/bin/env python3
"""
Kaggle数据集下载脚本
使用Kaggle API下载推荐的供应链数据集
"""

import os
import subprocess
from pathlib import Path

def setup_kaggle_api():
    """设置Kaggle API"""
    print("请确保已安装kaggle包: pip install kaggle")
    print("请确保已配置Kaggle API凭证: ~/.kaggle/kaggle.json")
    print("API凭证可从 https://www.kaggle.com/account 获取")

def download_datasets():
    """下载推荐的数据集"""
    
    datasets = [
        "ziya07/smart-logistics-supply-chain-dataset",
        "shashwatwork/dataco-smart-supply-chain-for-big-data-analysis",
        "lastman0800/supply-chain-management",
        "oscarm524/demand-forecast-for-optimized-inventory-planning",
        "laurinbrechter/supply-chain-data"
    ]
    
    download_dir = Path("data/external/kaggle")
    download_dir.mkdir(parents=True, exist_ok=True)
    
    for dataset in datasets:
        print(f"下载数据集: {dataset}")
        try:
            subprocess.run([
                "kaggle", "datasets", "download", 
                "-d", dataset,
                "-p", str(download_dir),
                "--unzip"
            ], check=True)
            print(f"✓ 成功下载: {dataset}")
        except subprocess.CalledProcessError as e:
            print(f"✗ 下载失败: {dataset} - {e}")
        except FileNotFoundError:
            print("✗ 未找到kaggle命令，请先安装: pip install kaggle")
            break

if __name__ == "__main__":
    setup_kaggle_api()
    download_datasets()
'''

        return script_content

    def generate_report(self):
        """Assemble the full analysis report.

        Returns:
            dict: JSON-serializable report combining the current data
            inventory, recommendations, requirements, matches, integration
            plan, and next steps; timestamped with the local ISO datetime.
        """
        report = {
            "analysis_date": datetime.now().isoformat(),
            "current_data_structure": self.current_datasets,
            "recommended_datasets": self.get_recommended_datasets(),
            "ai_requirements": self.analyze_ai_decision_requirements(),
            "dataset_matches": self.match_datasets_to_requirements(),
            "integration_plan": self.generate_integration_plan(),
            "next_steps": [
                "配置Kaggle API凭证",
                "下载优先级高的数据集",
                "创建数据预处理管道",
                "更新AI模型训练数据",
                "验证AI决策效果改进"
            ]
        }

        return report

def main():
    """Script driver: build the analysis report, write it and the download
    script to disk, then print a console summary."""
    print("=== Kaggle供应链数据集分析与推荐 ===")

    analyzer = KaggleDatasetAnalyzer()
    report = analyzer.generate_report()

    # Persist the full report as UTF-8 JSON (Chinese kept unescaped).
    report_path = Path("Kaggle_Datasets_Analysis_Report.json")
    report_path.write_text(
        json.dumps(report, ensure_ascii=False, indent=2), encoding="utf-8"
    )
    print(f"✓ 分析报告已保存: {report_path}")

    # Emit the standalone Kaggle download script.
    script_path = Path("download_kaggle_datasets.py")
    script_path.write_text(
        analyzer.create_dataset_download_script(), encoding="utf-8"
    )
    print(f"✓ 下载脚本已创建: {script_path}")

    # Summary: top three recommended datasets.
    print("\n=== 推荐数据集摘要 ===")
    for ds in report["recommended_datasets"][:3]:
        print(f"\n📊 {ds['name']}")
        print(f"   匹配度: {ds['match_score']}%")
        print(f"   应用: {', '.join(ds['ai_applications'][:2])}...")
        print(f"   链接: {ds['kaggle_url']}")

    # Summary: phased integration plan.
    print("\n=== 集成计划 ===")
    for phase_info in report["integration_plan"].values():
        print(f"\n🚀 {phase_info['title']} ({phase_info['timeline']})")
        print(f"   优先级: {phase_info['priority']}")
        print(f"   数据集: {len(phase_info['datasets'])}个")
        print(f"   AI能力: {', '.join(phase_info['ai_capabilities'][:2])}...")

    # Summary: numbered next steps.
    print("\n=== 后续步骤 ===")
    for step_no, step in enumerate(report["next_steps"], 1):
        print(f"{step_no}. {step}")

# Script entry point: only run the driver when executed directly, not on import.
if __name__ == "__main__":
    main()