package com.example.service;

import com.example.entity.SparkTag;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.*;
import org.apache.spark.storage.StorageLevel;

/**
 * Author wangJinLong
 * Date 2025/6/21 08:19
 **/
public class SparkDemo3 {

    /**
     * Sample input data (pipe-delimited: sender|tagsJson|generatedDate|totalCnt|dateId):
     * 001164506820063|{"tag":"贷款推销", "prob":1.0}|2025-06-03|1|2025-06-03
     * 001164506938202|{"tag":"信用卡推销", "prob":0.3333333333333333},{"tag":"贷款推销", "prob":0.6666666666666666}|2025-06-03|3|2025-06-03
     */

    // Input text file and output parquet location used by all experiments below.
    private static final String FILE_PATH = "C:\\Users\\HP\\Desktop\\助理代接模型\\20250603.txt";
    private static final String OUT_FILE_PATH = "data/sparktag.parquet";

    public static void main(String[] args) throws InterruptedException {
        // Uncomment the experiment to run; each one creates (and stops) its own SparkSession.
//        saveAsParquet();
//        readParquet1();
//        readParquet2();
//        readParquet3();
        readFile();
    }

    /**
     * Parses one pipe-delimited line into a {@link SparkTag}.
     *
     * <p>Extracted so {@link #saveAsParquet()} and {@link #readFile()} share a single
     * parsing implementation. A static method reference to this is serializable, which
     * Spark requires when shipping the map function to executors.
     *
     * @param line raw input line, see the sample data comment above for the layout
     * @return populated {@link SparkTag}
     * @throws IllegalArgumentException if the line does not contain at least 5 fields
     * @throws NumberFormatException    if the totalCnt field is not an integer
     */
    private static SparkTag parseLine(String line) {
        String[] fields = line.split("\\|");
        if (fields.length < 5) {
            // Fail with a descriptive message instead of an opaque ArrayIndexOutOfBoundsException.
            throw new IllegalArgumentException(
                    "Expected at least 5 pipe-delimited fields but got " + fields.length + ": " + line);
        }
        SparkTag tag = new SparkTag();
        tag.setSender(fields[0]);
        tag.setTagsJson(fields[1]);
        tag.setGeneratedDate(fields[2]);
        // parseInt avoids the boxed Integer that Integer.valueOf would create.
        tag.setTotalCnt(Integer.parseInt(fields[3]));
        tag.setDateId(fields[4]);
        return tag;
    }

    /**
     * Reads the parquet file and caches it with the default storage level.
     *
     * <p>Observed memory footprint: 300M / 500M / 1250M.
     * The sleeps are deliberate pauses so heap usage can be inspected with an
     * external monitor (e.g. jconsole) between stages.
     *
     * @throws InterruptedException if the observation sleeps are interrupted
     */
    private static void readParquet1() throws InterruptedException {
        SparkConf sparkConf = new SparkConf();

        SparkSession sparkSession = SparkSession.builder()
                .config(sparkConf)
                .master("local")
                .getOrCreate();
        try {
            Thread.sleep(20000);

            Dataset<Row> dataset = sparkSession.read().parquet(OUT_FILE_PATH);
            dataset.show();
            // Default cache() == MEMORY_AND_DISK (deserialized in-memory objects).
            dataset.cache();
            Thread.sleep(20000);
            System.out.println(dataset.count());
            dataset.show();
            Thread.sleep(20000);
        } finally {
            // Always release the session; the original leaked it.
            sparkSession.stop();
        }
    }

    /**
     * Same read as {@link #readParquet1()} but with Kryo serialization and a
     * serialized in-memory storage level.
     *
     * <p>Observed memory footprint after serialization: 300M / 500M / 1550M.
     *
     * @throws InterruptedException if the observation sleeps are interrupted
     */
    private static void readParquet2() throws InterruptedException {
        SparkConf sparkConf = new SparkConf();
        sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
        // Registering the class lets Kryo write a compact id instead of the full class name.
        sparkConf.registerKryoClasses(new Class<?>[]{SparkTag.class});

        SparkSession sparkSession = SparkSession.builder()
                .config(sparkConf)
                .master("local")
                .getOrCreate();
        try {
            Thread.sleep(20000);

            Dataset<Row> dataset = sparkSession.read().parquet(OUT_FILE_PATH);
            dataset.show();
            dataset.persist(StorageLevel.MEMORY_ONLY_SER());
            Thread.sleep(20000);
            System.out.println(dataset.count());
            dataset.show();
            Thread.sleep(20000);
        } finally {
            sparkSession.stop();
        }
    }

    /**
     * Same read but persisted to disk only, with a custom spill/cache directory.
     *
     * <p>Observed memory footprint: 300M / 400M / 1500M.
     *
     * @throws InterruptedException if the observation sleeps are interrupted
     */
    private static void readParquet3() throws InterruptedException {
        SparkConf sparkConf = new SparkConf();
        // Redirect Spark's scratch space (shuffle/cache blocks) off the default temp dir.
        sparkConf.set("spark.local.dir", "D:/tmp/spark-cache");

        SparkSession sparkSession = SparkSession.builder()
                .config(sparkConf)
                .master("local")
                .getOrCreate();
        try {
            Thread.sleep(10000);

            Dataset<Row> dataset = sparkSession.read().parquet(OUT_FILE_PATH);
            dataset.show();
            dataset.persist(StorageLevel.DISK_ONLY());
            Thread.sleep(5000);
            System.out.println(dataset.count());
            dataset.show();
            Thread.sleep(5000);
        } finally {
            sparkSession.stop();
        }
    }


    /**
     * Reads the raw text file, parses each line into a {@link SparkTag}, and
     * writes the result as a parquet file (overwriting any previous output).
     */
    public static void saveAsParquet(){
        SparkConf sparkConf = new SparkConf();

        SparkSession sparkSession = SparkSession.builder()
                .config(sparkConf)
                .master("local")
                .getOrCreate();
        try {
            Dataset<String> dataset = sparkSession.read().textFile(FILE_PATH);
            dataset.show();

            // Shared parser; the static method reference is serializable for Spark.
            JavaRDD<SparkTag> javaRDD =
                    dataset.javaRDD().map((Function<String, SparkTag>) SparkDemo3::parseLine);
            Dataset<Row> dataFrame = sparkSession.createDataFrame(javaRDD, SparkTag.class);
            Dataset<SparkTag> sparkTagDataset = dataFrame.as(Encoders.bean(SparkTag.class));
            sparkTagDataset.show();

            // Save as a parquet file.
            sparkTagDataset.write()
                    .mode(SaveMode.Overwrite)
                    .parquet(OUT_FILE_PATH); // replace with the actual path
        } finally {
            sparkSession.stop();
        }
    }

    /**
     * Reads the raw text file, parses it into {@link SparkTag} records, registers
     * a temp view, and runs a few aggregate SQL queries over it.
     *
     * <p>Kryo + snappy compression settings are enabled to compare memory/IO behavior
     * with the other experiments; the sleeps are pauses for external heap inspection.
     *
     * @throws InterruptedException if the observation sleeps are interrupted
     */
    private static void readFile() throws InterruptedException {
        SparkConf sparkConf = new SparkConf();
        sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
        sparkConf.set("spark.sql.parquet.compression.codec", "snappy");
        sparkConf.set("spark.io.compression.codec", "snappy");
        sparkConf.set("spark.shuffle.compress", "true");

        SparkSession spark = SparkSession
                .builder()
                .appName("sparkdemo3")
                .master("local")
                .config(sparkConf)
                .getOrCreate();
        try {
            Thread.sleep(10000);

            JavaRDD<String> stringJavaRDD = spark.read()
                    .textFile(FILE_PATH)
                    .javaRDD();

            long count = stringJavaRDD.count();
            System.out.println(count);
            Thread.sleep(5000);

            // Shared parser; the static method reference is serializable for Spark.
            JavaRDD<SparkTag> tagJavaRDD =
                    stringJavaRDD.map((Function<String, SparkTag>) SparkDemo3::parseLine);

            long count1 = tagJavaRDD.count();
            System.out.println(count1);
            Thread.sleep(5000);

            Dataset<Row> dataFrame = spark.createDataFrame(tagJavaRDD, SparkTag.class);
            dataFrame.show(10);
            dataFrame.createOrReplaceTempView("tag");

            Dataset<Row> dataset = spark.sql("select count(sender) from tag");
            dataset.show();
            Thread.sleep(5000);

            Dataset<Row> dataset1 = spark.sql("select max(totalCnt) from tag");
            dataset1.show();
            Thread.sleep(5000);

            Dataset<Row> dataset2 = spark.sql("select sum(totalCnt) from tag");
            dataset2.show();
            Thread.sleep(5000);
        } finally {
            spark.stop();
        }
    }
}
