package com.lhy.sparkdemo.service;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

@Service
public class SparkService {
    private static final Logger logger = LoggerFactory.getLogger(SparkService.class);

    @Autowired
    private SparkSession sparkSession;

    /**
     * Reads a dataset from HDFS after validating that the path is well-formed
     * and actually exists.
     *
     * @param inputPath  fully-qualified HDFS URI (must start with {@code hdfs://})
     * @param fileFormat Spark data-source format, e.g. {@code csv}, {@code parquet}
     * @return the loaded dataset (header row honored, schema inferred)
     * @throws IllegalArgumentException if the path is blank or not an HDFS URI
     * @throws RuntimeException         if the path does not exist or the read fails
     */
    public Dataset<Row> readHdfsData(String inputPath, String fileFormat) {
        // Path validity check
        if (StringUtils.isBlank(inputPath) || !inputPath.startsWith("hdfs://")) {
            throw new IllegalArgumentException("非法的HDFS路径: " + inputPath);
        }

        try {
            // NOTE: FileSystem.get(...) returns a JVM-wide cached instance shared
            // with Spark itself — deliberately NOT closed here, as closing it
            // would break other users of the same cached handle.
            FileSystem fs = FileSystem.get(sparkSession.sparkContext().hadoopConfiguration());
            if (!fs.exists(new Path(inputPath))) {
                throw new RuntimeException("HDFS路径不存在: " + inputPath);
            }

            logger.info("从HDFS读取数据: {}", inputPath);
            return sparkSession.read()
                    .format(fileFormat)
                    .option("header", "true")
                    .option("inferSchema", "true")  // auto-detect column types
                    .load(inputPath);
        } catch (RuntimeException e) {
            // Our own deliberately thrown unchecked exceptions (e.g. missing
            // path) are rethrown as-is instead of being double-wrapped.
            logger.error("HDFS数据读取失败: {}", e.getMessage(), e);
            throw e;
        } catch (Exception e) {
            // Pass the throwable as the last SLF4J argument so the full stack
            // trace is logged, not just the message.
            logger.error("HDFS数据读取失败: {}", e.getMessage(), e);
            throw new RuntimeException("数据读取异常: " + e.getMessage(), e);
        }
    }

    /**
     * Writes a dataset to HDFS with snappy compression, appending a timestamp
     * suffix to the output path if it already exists so existing data is never
     * clobbered, then verifies the write via the {@code _SUCCESS} marker.
     *
     * @param data       the dataset to persist
     * @param outputPath fully-qualified HDFS URI (must start with {@code hdfs://})
     * @param fileFormat Spark data-source format, e.g. {@code parquet}
     * @throws IllegalArgumentException if the path is blank or not an HDFS URI
     * @throws RuntimeException         if the write fails or verification fails
     */
    public void writeHdfsData(Dataset<Row> data, String outputPath, String fileFormat) {
        if (StringUtils.isBlank(outputPath) || !outputPath.startsWith("hdfs://")) {
            throw new IllegalArgumentException("非法的HDFS输出路径: " + outputPath);
        }

        try {
            // Cached shared FileSystem — intentionally not closed (see readHdfsData).
            FileSystem fs = FileSystem.get(sparkSession.sparkContext().hadoopConfiguration());
            Path outputDir = new Path(outputPath);

            // Avoid overwriting existing data: divert to a timestamped sibling path.
            if (fs.exists(outputDir)) {
                outputPath += "_" + System.currentTimeMillis();
                logger.warn("输出路径已存在，自动添加时间戳后缀");
            }

            logger.info("写入数据到HDFS: {}", outputPath);
            data.write()
                    .format(fileFormat)
                    .mode("overwrite")
                    .option("compression", "snappy")  // enable snappy compression
                    .save(outputPath);

            // Verify the write; assumes the output committer emits a _SUCCESS
            // marker (true for the default FileOutputCommitter).
            if (!fs.exists(new Path(outputPath, "_SUCCESS"))) {
                throw new RuntimeException("HDFS写入验证失败: " + outputPath);
            }
        } catch (RuntimeException e) {
            // Rethrow our own verification failure unchanged — no double-wrap.
            logger.error("HDFS数据写入失败: {}", e.getMessage(), e);
            throw e;
        } catch (Exception e) {
            // Include the throwable so SLF4J logs the full stack trace.
            logger.error("HDFS数据写入失败: {}", e.getMessage(), e);
            throw new RuntimeException("数据写入异常: " + e.getMessage(), e);
        }
    }

    /**
     * End-to-end example pipeline: read sales CSV from HDFS, aggregate total
     * sales per region via Spark SQL, and write the summary as parquet.
     *
     * @throws RuntimeException propagated from the read/transform/write steps
     */
    public void processSalesData() {
        try {
            // 1. Read CSV input
            Dataset<Row> salesData = readHdfsData(
                    "hdfs://mycluster/data/sales.csv",
                    "csv"
            );

            // 2. Transform: total sales per region, highest first
            salesData.createOrReplaceTempView("sales");
            Dataset<Row> result = sparkSession.sql(
                    "SELECT region, SUM(amount) AS total_sales " +
                            "FROM sales " +
                            "GROUP BY region " +
                            "ORDER BY total_sales DESC"
            );

            // 3. Write the summary
            writeHdfsData(
                    result,
                    "hdfs://mycluster/output/sales_summary",
                    "parquet"
            );
        } catch (Exception e) {
            // Log with stack trace at the pipeline boundary, then propagate.
            logger.error("数据处理流程失败: {}", e.getMessage(), e);
            throw e;
        }
    }
}