package dataconsume;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.common.Term;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import scala.Tuple2;

import java.io.Serializable;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

public class TextAnalysisJava {

    // Database connection settings.
    // NOTE(review): credentials are hard-coded in source. Move them to
    // environment variables or an external configuration file before this
    // code is shared or deployed.
    private static final String JDBC_URL = "jdbc:mysql://localhost:3306/db_teaching?useUnicode=true&characterEncoding=utf8&useSSL=false&serverTimezone=Hongkong&allowPublicKeyRetrieval=true&allowMultiQueries=true";
    private static final String DB_USER = "root";
    private static final String DB_PASSWORD = "20040429lk";

    /**
     * Entry point. Reads JSON-like lines from the classpath resource
     * {@code /articles.txt}, extracts each line's "contents" strings,
     * segments them with HanLP, counts word frequencies, prints the top
     * 100 words, and saves the full counts to the MySQL table
     * {@code ads_word_num}.
     */
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
                .appName("TextAnalysisJava")
                .master("local[*]")
                .getOrCreate();

        try {
            JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());

            // Locate the input file on the classpath. Fail fast with a clear
            // message instead of the opaque NullPointerException that
            // getResource(...).getPath() throws when the resource is missing.
            URL resource = TextAnalysisJava.class.getResource("/articles.txt");
            if (resource == null) {
                throw new IllegalStateException("Resource /articles.txt not found on classpath");
            }
            String filePath = resource.getPath();
            System.out.println("Reading file from: " + filePath);

            JavaRDD<String> textRDD = jsc.textFile(filePath);

            // Parse each line and flatten its "contents" entries.
            // Lines that cannot be parsed are logged and skipped.
            JavaRDD<String> contentsRDD = textRDD.flatMap(line -> {
                try {
                    return new JsonWrapper(line).getContents().iterator();
                } catch (Exception e) {
                    System.out.println("Error parsing: " + line);
                    return new ArrayList<String>().iterator();
                }
            });

            // Segment each content string with HanLP, keeping words longer
            // than one character (drops single-character tokens/punctuation).
            JavaRDD<String> wordsRDD = contentsRDD.flatMap(content ->
                    HanLP.segment(content).stream()
                            .map(term -> term.word)
                            .filter(word -> word != null && word.length() > 1)
                            .collect(Collectors.toList())
                            .iterator());

            // Word frequency: map to (word, 1) and sum per key.
            JavaPairRDD<String, Integer> wordCountsPairRDD = wordsRDD
                    .mapToPair(word -> new Tuple2<>(word, 1))
                    .reduceByKey(Integer::sum);

            // Convert the pair RDD directly to Rows. (The original routed
            // through an identity map() and an unused Encoder first.)
            JavaRDD<Row> rowRDD = wordCountsPairRDD
                    .map(tuple -> RowFactory.create(tuple._1, tuple._2));

            // Schema for the result table: word (string), num (count).
            StructType schema = new StructType(new StructField[]{
                    new StructField("word", DataTypes.StringType, false, Metadata.empty()),
                    new StructField("num", DataTypes.IntegerType, false, Metadata.empty())
            });

            Dataset<Row> wordCountsDF = spark.createDataFrame(rowRDD, schema);

            // Sort by frequency, highest first, and show once.
            wordCountsDF = wordCountsDF.sort(functions.desc("num"));

            System.out.println("Top 100 frequent words:");
            wordCountsDF.show(100, false);

            // Persist the full result set to MySQL.
            saveAnalysisResult(wordCountsDF, "ads_word_num");

        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            spark.stop();
        }
    }

    /**
     * Writes {@code data} to the given MySQL table via JDBC, overwriting any
     * existing table. Column types are pinned so Overwrite recreates the
     * table with sane VARCHAR/INT types.
     *
     * @param data      DataFrame with columns (word: string, num: int)
     * @param tableName destination table name
     * @throws RuntimeException rethrown from the underlying JDBC write
     */
    private static void saveAnalysisResult(Dataset<Row> data, String tableName) {
        try {
            data.write()
                    .format("jdbc")
                    .option("url", JDBC_URL)
                    .option("driver", "com.mysql.cj.jdbc.Driver")
                    .option("dbtable", tableName)
                    .option("user", DB_USER)
                    .option("password", DB_PASSWORD)
                    .option("createTableColumnTypes", "word VARCHAR(255), num INT")
                    .mode(SaveMode.Overwrite)
                    .save();

            System.out.println("Successfully saved to: " + tableName);
        } catch (Exception e) {
            System.err.println("Error saving to database: " + e.getMessage());
            throw e;
        }
    }

    /**
     * Minimal extractor for the "contents" JSON array of a single-line JSON
     * document. This is a regex shortcut, not a real JSON parser: it assumes
     * the array holds plain strings with no embedded ']' or escaped quotes.
     */
    static class JsonWrapper implements Serializable {
        // Compiled once instead of per-line; captures the body of
        // "contents": [ ... ].
        private static final Pattern CONTENTS_PATTERN =
                Pattern.compile("\"contents\":\\s*\\[([^\\]]+)\\]");

        private final String contents;

        public JsonWrapper(String jsonStr) {
            Matcher matcher = CONTENTS_PATTERN.matcher(jsonStr);
            // Bug fix: the original replaceAll returned the entire input line
            // when the pattern did not match, leaking raw JSON into the word
            // stream. Store an empty payload in that case instead.
            this.contents = matcher.find()
                    ? matcher.group(1).replace("\"", "").trim()
                    : "";
        }

        /** Returns the extracted content strings, or an empty list if none. */
        public List<String> getContents() {
            if (contents.isEmpty()) {
                return new ArrayList<>();
            }
            return Arrays.asList(contents.split("\\s*,\\s*"));
        }
    }
}