package com.dtsw.jiangxi;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.*;
import org.apache.spark.sql.api.java.UDF1;
import org.apache.spark.sql.expressions.Window;
import org.apache.spark.sql.expressions.WindowSpec;
import org.apache.spark.sql.types.DataTypes;

import java.util.ArrayList;
import java.util.Arrays;

import static org.apache.spark.sql.functions.*;

public class MultiRoadSegmentDetector {

    /** RSRP threshold (dBm): samples strictly below this count as poor quality ("target"). */
    private static final int RSRP_THRESHOLD = -110;
    /** Number of consecutive samples in the detection window. */
    private static final int WINDOW_SIZE = 30;
    /** Minimum fraction of target samples within a window for it to qualify as a T0. */
    private static final double MIN_TARGET_RATIO = 0.5;

    public static void main(String[] args) {
        // 1. Initialize SparkSession for local testing.
        SparkSession spark = SparkSession.builder()
                .master("local[4]")
                .appName("MultiRoadSegmentDetector")
                .getOrCreate();
        try {
            // 2. Load and preprocess data (adds row_num / is_target / is_processed flags).
            Dataset<Row> processedData = loadAndPreprocessData(spark);
            processedData.createOrReplaceTempView("base_data");
            processedData.show();

            // 3. Result accumulator for all detected poor-quality segments.
            //    Starts empty; the detection loop below is meant to union into it.
            Dataset<Row> allSegments = spark.createDataFrame(
                    new ArrayList<>(),
                    DataTypes.createStructType(Arrays.asList(
                            DataTypes.createStructField("id", DataTypes.StringType, true),
                            DataTypes.createStructField("segment_id", DataTypes.IntegerType, true),
                            DataTypes.createStructField("t0_row_num", DataTypes.IntegerType, true),
                            DataTypes.createStructField("t0_time", DataTypes.TimestampType, true),
                            DataTypes.createStructField("t1_row_num", DataTypes.IntegerType, true),
                            DataTypes.createStructField("t1_time", DataTypes.TimestampType, true)
                    ))
            );

            // 4. TODO: iterative multi-segment detection is not implemented yet.
            //    Intended shape: loop { t0 = findT0(spark, ++segmentCount); if empty break;
            //    find matching T1, union into allSegments, mark rows is_processed = true }.
        } finally {
            // Always release the Spark context, even if preprocessing throws.
            spark.stop();
        }
    }

    /**
     * Loads the raw drive-test CSV, keeps only the columns needed for detection,
     * de-duplicates samples, orders them per task, and flags poor-quality points.
     *
     * <p>The original chain of {@code withColumn("x", col("x"))} calls was removed:
     * replacing a column with itself is a no-op — only the two numeric casts below
     * ever had an effect.
     *
     * @param spark active session (also used to register the "isTarget" UDF)
     * @return rows with columns id, taskid, file_id, test_time, lon, lat, nr_rsrp,
     *         nr_sinr, plus row_num (per-task time order), is_target (0/1) and
     *         is_processed (initially false)
     */
    private static Dataset<Row> loadAndPreprocessData(SparkSession spark) {
        // NOTE(review): hard-coded local path — should be passed in via args/config.
        Dataset<Row> processData = spark.read()
                .option("header", "true")
                .csv("C:\\Users\\sen\\Desktop\\output.csv")
                // CSV columns arrive as strings; cast the two metrics used downstream.
                .withColumn("nr_rsrp", col("nr_rsrp").cast("int"))
                .withColumn("nr_sinr", col("nr_sinr").cast("int"))
                .select("id", "taskid", "file_id", "test_time", "lon", "lat", "nr_rsrp", "nr_sinr")
                .dropDuplicates("id", "taskid", "test_time")
                .limit(10); // NOTE(review): debug-sized sample; remove for production runs.

        // Per-task ordering by test time; row_num drives the sliding-window SQL later.
        WindowSpec w = Window.partitionBy("taskid").orderBy("test_time");
        Dataset<Row> sortedData = processData.withColumn("row_num", row_number().over(w));

        // Flag poor-coverage points. Simplified: RSRP only (SINR to be combined later).
        UDF1<Integer, Integer> isTargetUDF = nrRsrp -> {
            if (nrRsrp == null) {
                return 0; // missing measurement -> not counted as poor quality
            }
            return nrRsrp < RSRP_THRESHOLD ? 1 : 0;
        };
        spark.udf().register("isTarget", isTargetUDF, DataTypes.IntegerType);

        return sortedData
                .withColumn("is_target", call_udf("isTarget", col("nr_rsrp")))
                .withColumn("is_processed", lit(false)); // nothing consumed by the loop yet
    }

    /**
     * Finds, per id, the first T0: the start row of a complete {@value #WINDOW_SIZE}-point
     * window (this row plus the following samples) in which poor-quality points make up
     * at least {@value #MIN_TARGET_RATIO} of the samples. Only rows still marked
     * {@code is_processed = false} in the "base_data" temp view are considered.
     *
     * <p>Fixes over the original query: a missing {@code AND} between the two WHERE
     * predicates, a reference to a nonexistent column {@code time} (the data carries
     * {@code test_time}), filtering on a SELECT alias ({@code target_ratio}) inside the
     * same SELECT's WHERE clause, an invalid frame spec ({@code ROWS BETWEEN 0 AND 29}),
     * and a Java {@code Column} object string-concatenated into the SQL text — the
     * previously unused {@code segmentId} parameter is now the segment label.
     *
     * @param spark     active session (reads the "base_data" temp view)
     * @param segmentId label attached to the rows found in this iteration
     * @return one row per id: id, segment_id, t0_row_num, t0_time; cached because the
     *         caller is expected to reuse it (emptiness check + join)
     */
    private static Dataset<Row> findT0(SparkSession spark, int segmentId) {
        String sql =
                "WITH unprocessed_data AS ( " +
                "  SELECT * FROM base_data WHERE is_processed = false " +
                "), " +
                "window_stats AS ( " +
                "  SELECT id, row_num, test_time, " +
                // Forward-looking frame: the current row plus the next WINDOW_SIZE-1 samples.
                "         sum(is_target) OVER (PARTITION BY id ORDER BY row_num " +
                "           ROWS BETWEEN CURRENT ROW AND " + (WINDOW_SIZE - 1) + " FOLLOWING) AS target_count, " +
                // Per-id maximum row number, used below to reject incomplete trailing windows
                // (replaces the original correlated subquery).
                "         max(row_num) OVER (PARTITION BY id) AS max_row_num " +
                "  FROM unprocessed_data " +
                "), " +
                "valid_windows AS ( " +
                "  SELECT *, target_count / " + (double) WINDOW_SIZE + " AS target_ratio " +
                "  FROM window_stats " +
                // A SELECT alias cannot be referenced in its own WHERE, so the ratio
                // expression is repeated here.
                "  WHERE target_count / " + (double) WINDOW_SIZE + " >= " + MIN_TARGET_RATIO + " " +
                "    AND row_num + " + (WINDOW_SIZE - 1) + " <= max_row_num " + // window fully inside the data
                "), " +
                "first_t0_per_id AS ( " +
                "  SELECT *, row_number() OVER (PARTITION BY id ORDER BY row_num) AS rn " +
                "  FROM valid_windows " +
                ") " +
                "SELECT id, " +
                "       " + segmentId + " AS segment_id, " +
                "       row_num AS t0_row_num, " +
                "       test_time AS t0_time " +
                "FROM first_t0_per_id " +
                "WHERE rn = 1"; // earliest qualifying window per id
        return spark.sql(sql).cache();
    }
}
