package org.example.petitionplatformsystem.service.Impl;

import org.apache.spark.ml.Pipeline;
import org.apache.spark.ml.PipelineModel;
import org.apache.spark.ml.PipelineStage;
import org.apache.spark.ml.feature.VectorAssembler;
import org.apache.spark.ml.regression.RandomForestRegressor;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.example.petitionplatformsystem.dao.PetitionEventsRepository;
import org.example.petitionplatformsystem.dao.model.PetitionEvents;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.time.LocalDate;
import java.util.ArrayList;
import java.util.List;

@Service
@Service
public class PetitionPredictionServiceImpl implements PetitionPredictionService {

    /** Number of synthetic future days to generate placeholder rows for. */
    private static final int FUTURE_DAYS = 30;

    @Autowired
    private PetitionEventsRepository petitionEventsRepository;
    @Autowired
    private SparkSession spark;

    /**
     * Trains a random-forest regression model on all persisted petition events and
     * prints predictions for a synthetic {@value #FUTURE_DAYS}-day future dataset.
     *
     * <p>Side effects: reads every row via {@code petitionEventsRepository.findAll()},
     * triggers a Spark job to fit the pipeline, and writes the prediction table to
     * stdout via {@code show()}. Nothing is returned or persisted.
     */
    @Override
    public void predictFuturePetitions() {
        // Load all historical petition events and lift them into a Spark DataFrame.
        List<PetitionEvents> events = petitionEventsRepository.findAll();
        Dataset<Row> data = spark.createDataFrame(events, PetitionEvents.class);

        // Feature engineering: assemble the numeric columns into one feature vector.
        VectorAssembler assembler = new VectorAssembler()
                .setInputCols(new String[]{"eventType", "userID", "status", "sentiment", "degree"})
                .setOutputCol("features");

        // Random-forest regression model.
        // NOTE(review): "eventID" looks like a surrogate key rather than a measurable
        // quantity — confirm this is really the value the model should be predicting.
        RandomForestRegressor rf = new RandomForestRegressor()
                .setLabelCol("eventID")
                .setFeaturesCol("features");

        // Chain the feature assembler and the regressor into a single pipeline.
        Pipeline pipeline = new Pipeline()
                .setStages(new PipelineStage[]{assembler, rf});

        // Fit the full pipeline on the historical data.
        PipelineModel model = pipeline.fit(data);

        // Build the synthetic dataset covering the next FUTURE_DAYS days.
        Dataset<Row> futureData = generateFutureData(spark);

        // Score the synthetic rows with the trained model.
        Dataset<Row> predictions = model.transform(futureData);

        // Print the prediction per synthetic row.
        predictions.select("eventID", "prediction").show();

        // The SparkSession is a Spring-managed bean shared by the whole application,
        // so it is deliberately NOT stopped here.
    }

    /**
     * Builds a DataFrame of {@value #FUTURE_DAYS} placeholder rows that match the
     * feature schema expected by the trained pipeline.
     *
     * <p>NOTE(review): no date-derived column is included, so the rows differ only by
     * {@code eventID} — the model will produce (near-)identical predictions for every
     * "day". If per-day forecasts are intended, a time feature must be added to both
     * the training data and this schema.
     *
     * @param spark the session used to create the DataFrame
     * @return a DataFrame with columns eventID, eventType, userID, status, sentiment, degree
     */
    private Dataset<Row> generateFutureData(SparkSession spark) {
        List<Row> futureRows = new ArrayList<>(FUTURE_DAYS);
        for (int i = 0; i < FUTURE_DAYS; i++) {
            futureRows.add(RowFactory.create(
                    (long) (i + 1), // eventID: 1..FUTURE_DAYS
                    1L,             // eventType  (placeholder default)
                    1L,             // userID     (placeholder default)
                    1L,             // status     (placeholder default)
                    0.0,            // sentiment  (placeholder default)
                    0.0             // degree     (placeholder default)
            ));
        }

        // Schema must mirror the column names/types used at training time.
        StructType schema = new StructType(new StructField[]{
                new StructField("eventID", DataTypes.LongType, false, Metadata.empty()),
                new StructField("eventType", DataTypes.LongType, false, Metadata.empty()),
                new StructField("userID", DataTypes.LongType, false, Metadata.empty()),
                new StructField("status", DataTypes.LongType, false, Metadata.empty()),
                new StructField("sentiment", DataTypes.DoubleType, false, Metadata.empty()),
                new StructField("degree", DataTypes.DoubleType, false, Metadata.empty())
        });

        return spark.createDataFrame(futureRows, schema);
    }

}
