# main.py
from pyspark.sql import SparkSession
from data_generation.traffic_data import TrafficDataGenerator
from data_processing.traffic_processor import TrafficDataProcessor
from analysis.trend_analysis import TrendAnalyzer
from analysis.source_analysis import SourceAnalyzer
from analysis.device_analysis import DeviceAnalyzer
from analysis.product_analysis import ProductAnalyzer
from visualization.dashboard_builder import DashboardBuilder
from storage.data_storage import DataStorage
from confg import Config
import os
import datetime


def setup_directories():
    """Create the directory layout the pipeline writes into.

    Ensures the raw/processed/exports sub-directories under DATA_DIR and
    the parent directory of the dashboard output file all exist.
    """
    config = Config()
    # One sub-directory per pipeline stage under the data root.
    for stage in ("raw", "processed", "exports"):
        os.makedirs(os.path.join(config.DATA_DIR, stage), exist_ok=True)
    # Directory that will hold the dashboard HTML file.
    os.makedirs(os.path.dirname(config.OUTPUT_PATH), exist_ok=True)


def main():
    """Run the end-to-end snack-traffic dashboard pipeline.

    Steps: create output directories, start a SparkSession, load or
    generate the raw traffic data, preprocess it, run the trend/source/
    device/product analyses, build the Plotly dashboard, and write it to
    ``Config.OUTPUT_PATH``. Intermediate artifacts are saved/exported
    according to the Config flags.
    """
    # Make sure every directory exists before anything tries to write.
    setup_directories()

    # Single SparkSession for the whole run; Arrow speeds up the
    # pandas -> Spark conversion in createDataFrame below.
    spark = SparkSession.builder \
        .appName("HXL-Snack-Traffic-Dashboard") \
        .config("spark.sql.execution.arrow.pyspark.enabled", "true") \
        .getOrCreate()

    # try/finally guarantees the Spark session is released even when a
    # pipeline step raises (the original only stopped it on success).
    try:
        config = Config()
        storage = DataStorage(spark)

        # Data preparation: regenerate when forced, when reuse of existing
        # data is disabled, or when no saved raw data is available.
        if config.REGENERATE_DATA or \
                (not config.USE_EXISTING_DATA or not storage.data_exists(storage.raw_data_path)):
            # Generate simulated traffic data.
            print("生成模拟数据...")
            data_gen = TrafficDataGenerator()
            traffic_pd = data_gen.generate_traffic_data()
            traffic_df = spark.createDataFrame(traffic_pd)

            # Persist the raw data so later runs can reuse it.
            if config.SAVE_RAW_DATA:
                storage.save_raw_data(traffic_df)

            # Export a CSV copy for manual inspection.
            if config.EXPORT_CSV:
                storage.export_to_csv(traffic_df, "raw_traffic_data.csv")
        else:
            # Reuse previously saved raw data.
            print("加载现有原始数据...")
            traffic_df = storage.load_raw_data()

        # Preprocess the raw traffic data.
        print("数据预处理...")
        processor = TrafficDataProcessor()
        processed_df = processor.preprocess_data(traffic_df)

        # Persist the processed data.
        if config.SAVE_PROCESSED_DATA:
            storage.save_processed_data(processed_df)

        # Export the processed data as CSV.
        if config.EXPORT_CSV:
            storage.export_to_csv(processed_df, "processed_traffic_data.csv")

        # Run all analyses; each entry maps a result name to a DataFrame.
        print("执行数据分析...")
        analysis_results = {
            'daily_trend': TrendAnalyzer.analyze_daily_trend(processed_df),
            'weekly_trend': TrendAnalyzer.analyze_weekly_trend(processed_df),
            'source_performance': SourceAnalyzer.analyze_source_performance(processed_df),
            'device_performance': DeviceAnalyzer.analyze_device_performance(processed_df),
            'device_by_source': DeviceAnalyzer.analyze_device_by_source(processed_df),
            'weekday_pattern': ProductAnalyzer.analyze_weekday_pattern(processed_df),
            'top_products': processor.extract_top_products(processed_df)
        }

        # Export one CSV per analysis result.
        if config.EXPORT_CSV:
            for name, df in analysis_results.items():
                storage.export_to_csv(df, f"analysis_{name}.csv")

        # Build and save the visualization dashboard.
        print("构建可视化仪表板...")
        builder = DashboardBuilder(config)
        dashboard = builder.create_dashboard(analysis_results)

        dashboard.write_html(config.OUTPUT_PATH)
        print(f"仪表板已保存至: {config.OUTPUT_PATH}")
    finally:
        # Always release the Spark session.
        spark.stop()

    print("任务完成!")


# Entry point: run the pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()