package trace;

import org.apache.log4j.Logger;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;

import java.util.Properties;

import static org.apache.spark.sql.functions.*;
import static trace.utils.constantUtils.today;
import static trace.utils.constantUtils.yesterday;

/**
 * Local Spark ETL job: reads a gzipped station-list CSV, keeps 5G stations that
 * are live ("开通入网") and whose opening date matches a target date, renames the
 * columns to the target table's schema, and appends the result to the MySQL
 * table {@code trace.openstation_sitelist}.
 *
 * <p>NOTE(review): class name "tarce1local" looks like a typo of "trace1local",
 * kept as-is to avoid breaking external callers.
 */
public class tarce1local {

    private static final Logger logger = Logger.getLogger(tarce1local.class);

    public static void main(String[] args) {
        trace1local();
    }

    /**
     * Runs the full pipeline: read CSV → filter → rename/derive columns → write to MySQL.
     *
     * @throws RuntimeException wrapping any Spark or JDBC failure (original cause preserved)
     */
    public static void trace1local() {
        try {
            String date = "2025-03-16";           // opening-date prefix to match (yyyy-MM-dd)
            String today = today("yyyyMMdd");     // load-day tag written into the "day" column
            String path = "C:\\work\\a.csv.gz";   // input file; Spark decompresses gzip by extension
            System.out.println(date);

            // Create the SparkSession in local mode (development run).
            SparkSession spark = SparkSession.builder()
                    .appName("tarce1")
                    .master("local")
                    .getOrCreate();
            try {
                // Read the CSV. DQHJ must be included in the projection because the
                // filter below references it — selecting it away first would make the
                // filter fail with "cannot resolve column DQHJ".
                Dataset<Row> df = spark.read()
                        .option("header", "true")       // first row holds column names
                        .option("inferSchema", "true")  // let Spark infer column types
                        .csv(path)
                        .select("EAPXYNO", "ZZNAME", "STATIONTYPE", "EAPLJBH", "JSXZ",
                                "KEYFACTOR", "BUREAU", "MODEL", "LONGITUDE", "LATITUDE",
                                "OPENINGDATE", "DQHJ");

                // Keep only live 5G stations opened on the target date, then drop the
                // filter-only DQHJ column so it is not written to the sink table.
                Dataset<Row> filteredData = df.filter(
                        col("DQHJ").equalTo("开通入网")
                                .and(col("MODEL").equalTo("5G"))
                                .and(col("OPENINGDATE").startsWith(date))
                ).drop("DQHJ");

                // Rename source columns to the target table's schema and tag the load day.
                filteredData = filteredData.withColumnRenamed("EAPXYNO", "siteid")
                        .withColumnRenamed("ZZNAME", "sitename")
                        .withColumnRenamed("STATIONTYPE", "sitetype")
                        .withColumnRenamed("EAPLJBH", "eaplogicid")
                        .withColumnRenamed("JSXZ", "constructtype")
                        .withColumnRenamed("KEYFACTOR", "factor")
                        .withColumnRenamed("BUREAU", "bureau")
                        .withColumnRenamed("MODEL", "model")
                        .withColumnRenamed("LONGITUDE", "longitude")
                        .withColumnRenamed("LATITUDE", "latitude")
                        .withColumnRenamed("OPENINGDATE", "openingtime")
                        .withColumn("day", lit(today));

                // Derive a compact yyyyMMdd opening date from the timestamp's date prefix.
                filteredData = filteredData
                        .withColumn("openingdate", substring(col("openingtime"), 1, 10))
                        .withColumn("openingdate", regexp_replace(col("openingdate"), "-", ""));

                filteredData.show(50); // debug preview of the first 50 rows

                // JDBC sink configuration.
                // NOTE(review): credentials are hard-coded; move them to external
                // configuration or a secrets store before production use.
                Properties connectionProperties = new Properties();
                connectionProperties.put("user", "root");
                connectionProperties.put("password", "123456");
                connectionProperties.put("driver", "com.mysql.cj.jdbc.Driver");

                String jdbcUrl = "jdbc:mysql://localhost:3306/trace"; // target database URL
                String tableName = "openstation_sitelist";            // target table

                // Append the filtered rows to the MySQL table.
                filteredData.write()
                        .mode(org.apache.spark.sql.SaveMode.Append)
                        .jdbc(jdbcUrl, tableName, connectionProperties);
            } finally {
                // Always release the local Spark context, even when the pipeline fails.
                spark.stop();
            }
        } catch (Exception e) {
            // Log at ERROR with the full stack trace (INFO with no trace hid failures).
            logger.error("trace1local pipeline failed", e);
            throw new RuntimeException(e);
        }
    }
}
