package com.ecommerce

import scala.util.control.NonFatal

import org.apache.log4j.Logger
import org.apache.spark.sql.{DataFrame, SparkSession}

import com.ecommerce.analysis._
import com.ecommerce.data.DataCleaner
import com.ecommerce.utils.ConfigLoader

/**
 * Entry point for the e-commerce analytics batch job.
 *
 * Pipeline: load config -> build SparkSession -> clean raw data via
 * [[DataCleaner]] -> read the cleaned CSV back -> run each registered
 * analyzer and persist its result under the paired table name.
 */
object EcommerceApp {
  private val logger = Logger.getLogger(EcommerceApp.getClass)

  /**
   * Analyzers to run, paired with the table name their result is saved under.
   * Order is preserved; analyzers run sequentially over the same cleaned data.
   */
  private val analyzers: List[(String, Analyzer)] = List(
    ("weekly_sales_trend", new WeeklySalesTrendAnalyzer()),
    ("hot_keywords", new HotKeywordAnalyzer()),
    ("country_order_return", new CountryOrderReturnAnalyzer()),
    ("top10_products", new Top10ProductsAnalyzer()),
    ("country_sales_distribution", new CountrySalesDistributionAnalyzer()),
    ("price_sales_relation", new PriceSalesRelationAnalyzer())
  )

  def main(args: Array[String]): Unit = {
    try {
      val config = ConfigLoader.loadConfig
      logger.info(s"Spark Master: ${config.getString("spark.master")}")

      val spark = SparkSession.builder()
        .appName(config.getString("spark.app.name"))
        .master(config.getString("spark.master"))
        .config("spark.executor.memory", config.getString("spark.executor.memory"))
        .config("spark.driver.memory", config.getString("spark.driver.memory"))
        .config("spark.serializer", config.getString("spark.serializer"))
        // LEGACY parser keeps pre-Spark-3 date/time parsing behavior for the CSV reads.
        .config("spark.sql.legacy.timeParserPolicy", "LEGACY")
        .getOrCreate()

      // Ensure the session is always released, even if cleaning or an
      // analyzer fails part-way through (the original leaked it on error).
      try {
        logger.info("开始数据清洗...")
        DataCleaner.clean(spark)

        // Re-read the cleaned output as plain strings (no schema inference);
        // each analyzer casts what it needs.
        val cleanedData = spark.read
          .option("header", "true")
          .option("sep", ",")
          .option("inferSchema", "false")
          .csv(config.getString("hadoop.clean.data"))

        analyzers.foreach { case (tableName, analyzer) =>
          val result: DataFrame = analyzer.analyze(spark, cleanedData)
          analyzer.performSave(result, tableName, spark)
        }
      } finally {
        spark.stop()
      }

      logger.info("所有数据分析任务执行完成！")
    } catch {
      // NonFatal: let OutOfMemoryError / InterruptedException etc. propagate
      // untouched; log and rethrow recoverable failures so the job exits non-zero.
      case NonFatal(e) =>
        logger.error("程序执行失败", e)
        throw e
    }
  }
}