package trace;

import org.apache.log4j.Logger;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.dom4j.DocumentException;
import trace.config.SparkSaConfig;

import java.util.Properties;

import static org.apache.spark.sql.functions.*;
import static trace.utils.constantUtils.today;
import static trace.utils.constantUtils.yesterday;

public class tarce1 {

    // Log under this class (was tarce1local.class — wrong class used as the log category).
    private static final Logger logger = Logger.getLogger(tarce1.class);

    public static void main(String[] args) {
        trace1();
    }

    /**
     * Daily load job: reads yesterday's gzipped site-list CSV from HDFS, keeps the
     * 5G stations whose status is "开通入网" and whose OPENINGDATE starts with
     * yesterday's date, renames the columns to the MySQL schema, and appends the
     * rows to the {@code openstation_sitelist} table.
     *
     * @throws RuntimeException wrapping any failure (config load, Spark, JDBC)
     */
    public static void trace1() {
        try {
            System.out.println("trace1 start...");
            String date = yesterday("yyyy-MM-dd");       // filter key, e.g. 2023-08-13
            String today = today("yyyyMMdd");            // load-day stamp
            String dirdate = yesterday("yyyyMMdd");      // date token in the HDFS file name
            System.out.println(date);
            String clusterName = "beh001";
            String path = "/user/uway/YWJS_HZ_WY/YWJS_HZ_WY_" + dirdate + ".csv.gz";

            String cfgfile = "call-config.xml";
            SparkSaConfig config = new SparkSaConfig(cfgfile);

            System.setProperty("user.name", "hdfs");
            System.setProperty("HADOOP_USER_NAME", config.hdfsUser);
            SparkConf conf = new SparkConf()
                    .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
                    // Fixed key: was "spark.rdd.comperess", which Spark silently ignores.
                    .set("spark.rdd.compress", "true")
                    .setMaster("yarn") // run on the cluster
                    .setAppName(config.name)
                    .registerKryoClasses(new Class[]{String.class});
            SparkSession spark = SparkSession.builder()
                    .config(conf)
                    .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
                    .config("spark.rdd.compress", "true") // fixed typo (was "comperess")
                    .appName(config.name)
                    .config("mapreduce.fileoutputcommitter.marksuccessfuljobs", "false")
                    // HDFS HA client settings for nameservice "beh001"
                    .config("fs.defaultFS", "hdfs://" + clusterName)
                    .config("dfs.nameservices", clusterName)
                    .config("dfs.ha.namenodes." + clusterName, "nn1,nn2")
                    .config("dfs.namenode.rpc-address." + clusterName + ".nn1", "132.90.131.17:9000")
                    .config("dfs.namenode.rpc-address." + clusterName + ".nn2", "132.90.131.19:9000")
                    .config("dfs.client.failover.proxy.provider." + clusterName,
                            "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")
                    .getOrCreate();

            // Read the gzipped CSV; first row holds the column names.
            Dataset<Row> raw = spark.read()
                    .option("header", "true")
                    .option("inferSchema", "true")
                    .csv(path);

            // Filter BEFORE projecting: the predicate reads DQHJ, which the original
            // select() had already dropped — Spark raised an AnalysisException
            // ("cannot resolve column DQHJ") at runtime.
            Dataset<Row> filteredData = raw
                    .filter(col("DQHJ").equalTo("开通入网")
                            .and(col("MODEL").equalTo("5G"))
                            .and(col("OPENINGDATE").startsWith(date)))
                    .select("EAPXYNO", "ZZNAME", "STATIONTYPE", "EAPLJBH", "JSXZ",
                            "KEYFACTOR", "BUREAU", "MODEL", "LONGITUDE", "LATITUDE", "OPENINGDATE");

            // Rename source columns to the target MySQL schema and stamp the load day.
            filteredData = filteredData.withColumnRenamed("EAPXYNO", "siteid")
                    .withColumnRenamed("ZZNAME", "sitename")
                    .withColumnRenamed("STATIONTYPE", "sitetype")
                    .withColumnRenamed("EAPLJBH", "eaplogicid")
                    .withColumnRenamed("JSXZ", "constructtype")
                    .withColumnRenamed("KEYFACTOR", "factor")
                    .withColumnRenamed("BUREAU", "bureau")
                    .withColumnRenamed("MODEL", "model")
                    .withColumnRenamed("LONGITUDE", "longitude")
                    .withColumnRenamed("LATITUDE", "latitude")
                    .withColumnRenamed("OPENINGDATE", "openingtime")
                    .withColumn("day", lit(today));
            // openingdate = first 10 chars of openingtime (yyyy-MM-dd) with dashes stripped.
            filteredData = filteredData.withColumn("openingdate", substring(col("openingtime"), 1, 10))
                    .withColumn("openingdate", regexp_replace(col("openingdate"), "-", ""));

            System.out.println("print gz data start...");
            filteredData.show(50); // show up to 50 rows (Spark's default is 20)
            System.out.println("print gz data end...");

            // SECURITY NOTE(review): credentials and the JDBC URL are hard-coded;
            // they should be moved into the external config (call-config.xml) or a
            // secrets store rather than committed in source.
            Properties connectionProperties = new Properties();
            connectionProperties.put("user", "5gzhyy");
            connectionProperties.put("password", "B6.5gzhyy312");
            connectionProperties.put("driver", "com.mysql.cj.jdbc.Driver");

            String jdbcUrl = "jdbc:mysql://132.91.175.98:8067/g41_wyzx_5gzhyydb";
            String tableName = "openstation_sitelist";

            // Append: this is a daily incremental load into the target table.
            filteredData.write()
                    .mode(org.apache.spark.sql.SaveMode.Append)
                    .jdbc(jdbcUrl, tableName, connectionProperties);

            spark.stop();
            System.out.println("trace1 end...");
        } catch (Exception e) {
            // Log at ERROR with the full stack trace; the old logger.info(e) logged
            // only e.toString() at the wrong level.
            logger.error("trace1 failed", e);
            throw new RuntimeException(e);
        }
    }
}