#!/usr/bin/env python3
# -*- coding: utf-8 -*-

from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, FloatType, DoubleType

# --- Configuration ---
# HDFS NameNode host and port.
HDFS_URI = "hdfs://hadoop102:8020"
# Base directory on HDFS holding the input CSV files.
HDFS_DATA_PATH = f"{HDFS_URI}/car_data"
# Input file names (sales ranking and per-model detail data).
RANK_FILE = "car_rank_data.csv"
INFO_FILE = "car_info_data.csv"
# Target Hive database for the analysis result tables.
HIVE_DB = "car_analysis_db"


def _load_rank_data(spark):
    """Read the sales-ranking CSV from HDFS and derive an average price.

    Returns a DataFrame with columns (series_name, sales, score, avg_price),
    where avg_price is in 万元 (10k CNY) and is null for rows whose
    price_range is not parsable (e.g. "暂无报价").
    """
    print(f"[*] 正在读取销量数据: {HDFS_DATA_PATH}/{RANK_FILE}")

    # Explicit schema for the ranking file; the first field gets a temporary
    # name and is renamed to 'series_name' below.
    rank_schema = StructType([
        StructField("raw_series_name", StringType(), True),
        StructField("series_id", StringType(), True),
        StructField("price_range", StringType(), True),
        StructField("sales", IntegerType(), True),
        StructField("score", FloatType(), True)
    ])

    rank_df = spark.read.csv(
        f"{HDFS_DATA_PATH}/{RANK_FILE}",
        schema=rank_schema,
        header=True,
        encoding='UTF-8'
    )

    # NOTE(review): when an explicit schema is supplied, Spark takes column
    # names from the schema and only skips the header row, so a UTF-8 BOM in
    # the file cannot leak into columns[0]. The check is kept defensively, but
    # the rename itself is unconditional (same effect as the original code).
    first_col = rank_df.columns[0]
    if first_col != "raw_series_name":
        print(f"[INFO] 检测到BOM字符，将列 '{first_col}' 重命名为 'series_name'")
    rank_df = rank_df.withColumnRenamed(first_col, "series_name")

    # Parse price_range: "21.59-29.99万" -> midpoint of the two bounds,
    # "109.80万" -> the single value, anything else -> null.
    price_parts = F.split(F.regexp_replace("price_range", "万", ""), "-")
    rank_df_processed = rank_df.withColumn(
        "avg_price",
        F.when(
            F.col("price_range").contains("-"),
            (price_parts.getItem(0).cast(FloatType()) + price_parts.getItem(1).cast(FloatType())) / 2
        ).when(
            F.col("price_range").rlike(r"^\d+\.?\d*"),
            F.regexp_replace("price_range", "万", "").cast(FloatType())
        ).otherwise(None)
    ).select("series_name", "sales", "score", "avg_price")

    print("[SUCCESS] 销量数据读取和处理完成，数据预览:")
    rank_df_processed.show(5, truncate=False)
    return rank_df_processed


def _load_info_data(spark):
    """Read the per-model detail CSV and reduce it to distinct series rows.

    One series (e.g. '海鸥') maps to many concrete models ('海鸥 2025款…'),
    so the series name is extracted from model_name and the result is
    deduplicated to avoid fan-out in the later join.
    Returns a DataFrame (series_name, manufacturer, level, energy_type).
    """
    print(f"[*] 正在读取车辆详细信息数据: {HDFS_DATA_PATH}/{INFO_FILE}")

    # Schema matching every column in the file; only a subset is selected.
    # All fields are read as strings since the extra columns are unused here.
    info_full_schema = StructType([
        StructField("model_name", StringType(), True), StructField("manufacturer_price", StringType(), True),
        StructField("manufacturer", StringType(), True), StructField("level", StringType(), True),
        StructField("energy_type", StringType(), True), StructField("market_time", StringType(), True),
        StructField("cltc_range_km", StringType(), True), StructField("wltc_range_km", StringType(), True),
        StructField("wltc_fuel_L100km", StringType(), True), StructField("nedc_fuel_L100km", StringType(), True),
        StructField("max_power_kW", StringType(), True), StructField("length_mm", StringType(), True),
        StructField("width_mm", StringType(), True), StructField("height_mm", StringType(), True),
        StructField("official_accel_s", StringType(), True), StructField("official_brake_m", StringType(), True),
        StructField("max_speed_kmh", StringType(), True), StructField("max_load_kg", StringType(), True)
    ])

    info_df = spark.read.csv(
        f"{HDFS_DATA_PATH}/{INFO_FILE}",
        schema=info_full_schema,
        header=True,
        encoding='UTF-8'
    )

    # Series name is the first space-separated token of the model name.
    info_df_processed = info_df.withColumn(
        "series_name", F.split(F.col("model_name"), " ", 2).getItem(0)
    ).select("series_name", "manufacturer", "level", "energy_type").distinct()

    print("[SUCCESS] 车辆信息读取和处理完成，数据预览:")
    info_df_processed.show(5, truncate=False)
    return info_df_processed


def _verify_tables(spark, table_names):
    """Read each result table back from Hive and show a sample, as a sanity check."""
    print("\n--- 从Hive中读取数据进行验证 ---")
    for tbl in table_names:
        print(f"--- 表: {tbl} ---")
        spark.sql(f"SELECT * FROM {tbl} LIMIT 10").show()


def _run_analyses(spark, combined_df):
    """Run the three aggregate analyses, persist each to Hive, then verify.

    Tables written (all overwritten in HIVE_DB): level_sales_relation,
    level_price_relation, manufacturer_rank.
    """
    print("[*] 开始执行分析并将结果写入Hive...")

    spark.sql(f"CREATE DATABASE IF NOT EXISTS {HIVE_DB}")
    spark.sql(f"USE {HIVE_DB}")
    print(f"[INFO] 已确保并切换到Hive数据库: '{HIVE_DB}'")

    # Analysis 1: total sales per vehicle level.
    level_sales_analysis = combined_df.groupBy("level") \
        .agg(F.sum("sales").alias("total_sales")) \
        .orderBy(F.desc("total_sales"))

    print("\n--- 分析结果1: 各级别总销量 ---")
    level_sales_analysis.show()
    hive_table_sales = "level_sales_relation"
    level_sales_analysis.write.mode("overwrite").saveAsTable(hive_table_sales)
    print(f"[SUCCESS] 级别与销量关系已存入Hive表: {HIVE_DB}.{hive_table_sales}")

    # Analysis 2: average price per level; rows without a parsable price
    # (avg_price is null) are excluded before averaging.
    level_price_analysis = combined_df.filter(F.col("avg_price").isNotNull()) \
        .groupBy("level") \
        .agg(F.avg("avg_price").alias("average_price_wan")) \
        .withColumn("average_price_wan", F.round("average_price_wan", 2)) \
        .orderBy(F.desc("average_price_wan"))

    print("\n--- 分析结果2: 各级别平均价格(万元) ---")
    level_price_analysis.show()
    hive_table_price = "level_price_relation"
    level_price_analysis.write.mode("overwrite").saveAsTable(hive_table_price)
    print(f"[SUCCESS] 级别与均价关系已存入Hive表: {HIVE_DB}.{hive_table_price}")

    # Analysis 3: per-manufacturer total sales and mean user score.
    manufacturer_analysis = combined_df.groupBy("manufacturer") \
        .agg(
            F.sum("sales").alias("total_sales"),
            F.avg("score").alias("avg_score")
        ) \
        .withColumn("avg_score", F.round("avg_score", 2)) \
        .orderBy(F.desc("total_sales"))

    print("\n--- 分析结果3: 热门厂商总销量及平均评分 ---")
    manufacturer_analysis.show(10, truncate=False)
    hive_table_manufacturer = "manufacturer_rank"
    manufacturer_analysis.write.mode("overwrite").saveAsTable(hive_table_manufacturer)
    print(f"[SUCCESS] 厂商排名数据已存入Hive表: {HIVE_DB}.{hive_table_manufacturer}")

    _verify_tables(spark, [hive_table_sales, hive_table_price, hive_table_manufacturer])


def main():
    """Entry point: load both CSV datasets from HDFS, join them on
    series_name, run the analyses, and persist the results to Hive."""
    spark = SparkSession.builder \
        .appName("CarAnalysisSpark") \
        .enableHiveSupport() \
        .getOrCreate()

    print("--- SparkSession 创建成功 ---")

    rank_df_processed = _load_rank_data(spark)
    info_df_processed = _load_info_data(spark)

    # Inner join: only series present in BOTH datasets survive.
    print("[*] 正在通过 'series_name' 关联两个数据集...")
    combined_df = rank_df_processed.join(
        info_df_processed,
        "series_name",
        "inner"
    )
    print("[SUCCESS] 数据集关联完成，数据预览:")
    combined_df.show(5, truncate=False)
    combined_df.printSchema()

    _run_analyses(spark, combined_df)

    spark.stop()
    print("\n--- 脚本执行完毕 ---")


if __name__ == '__main__':
    # Script entry point: run the full Spark load/join/analyze/store pipeline.
    main()