package org.example.petitionplatformsystem.service.Impl;

import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.SegToken;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.sql.*;
import org.example.petitionplatformsystem.dao.PetitionEventsRepository;
import org.example.petitionplatformsystem.dao.model.PetitionEvents;
import org.example.petitionplatformsystem.service.PetitionEventsService;
import org.example.petitionplatformsystem.utils.IntToStringUtil;
import org.jetbrains.annotations.NotNull;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;

import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Service;

import javax.management.Query;
import java.util.List;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.*;
import java.util.stream.Collectors;

@Service
public class PetitionEventsServiceImpl implements PetitionEventsService {

    /** Display names for the numeric petition status codes (1-5). */
    private static final Map<Integer, String> STATUS_NAMES;
    /** Display names for the numeric petition event-type codes (1-5). */
    private static final Map<Integer, String> TYPE_NAMES;

    static {
        Map<Integer, String> status = new HashMap<>();
        status.put(1, "Submitted");
        status.put(2, "PendingReview");
        status.put(3, "Reviewed");
        status.put(4, "PendingProcessing");
        status.put(5, "Processed");
        STATUS_NAMES = Collections.unmodifiableMap(status);

        Map<Integer, String> type = new HashMap<>();
        type.put(1, "OfflinePetition");
        type.put(2, "OnlinePetition");
        type.put(3, "PhonePetition");
        type.put(4, "SMSSPetition");
        type.put(5, "VideoPetition");
        TYPE_NAMES = Collections.unmodifiableMap(type);
    }

    @Autowired
    PetitionEventsRepository petitionEventsRepository;
    @Autowired
    JdbcTemplate jdbcTemplate;
    @Autowired
    @Qualifier("sparkSession1")
    private SparkSession spark;

    @Override
    public PetitionEvents addPetitionEvent(PetitionEvents petitionEvent) {
        return petitionEventsRepository.save(petitionEvent);
    }

    @Override
    public PetitionEvents updatePetitionEvent(PetitionEvents petitionEvent) {
        // JPA save() performs an upsert, so create and update share the same call.
        return petitionEventsRepository.save(petitionEvent);
    }

    @Override
    public void deletePetitionEvent(PetitionEvents petitionEvent) {
        petitionEventsRepository.delete(petitionEvent);
    }

    @Override
    public PetitionEvents getPetitionEventById(long id) {
        // NOTE(review): getReferenceById returns a lazy proxy; accessing it outside a
        // transaction for a missing id throws EntityNotFoundException — confirm callers expect this.
        return petitionEventsRepository.getReferenceById(id);
    }

    @Override
    public List<PetitionEvents> getAllPetitionEvents() {
        return petitionEventsRepository.findAll();
    }

    @Override
    public List<PetitionEvents> getAllPetitionEventsByUserId(long userId) {
        return petitionEventsRepository.findAllByUserID(userId);
    }

    @Override
    public PetitionEvents getPetitionEventByPetitionIdAndUserid(long petitionId, long userId) {
        return petitionEventsRepository.findByEventIDAndUserID(petitionId, userId);
    }

    /**
     * Computes keyword frequencies for a word cloud using Spark.
     * Splits the whitespace-separated {@code keywords} field, counts each word,
     * and attaches the percentage of total keywords (rounded to 2 decimals).
     *
     * @return list of maps with keys {@code value}, {@code count}, {@code percentage},
     *         ordered by count descending
     */
    @Override
    public List<Map<String, Object>> getKeywordsCounts() {
        Dataset<Row> df = loadEventsDataFrame();

        // Split each non-null keywords string on whitespace into individual words.
        Dataset<String> keywords = df.select("keywords")
                .na().drop()
                .flatMap((FlatMapFunction<Row, String>) row -> {
                    String keywordsStr = row.getAs("keywords");
                    return Arrays.asList(keywordsStr.split("\\s+")).iterator();
                }, Encoders.STRING());

        // Total keyword occurrences, used as the denominator for percentages.
        long totalKeywords = keywords.count();

        Dataset<Row> wordCounts = keywords.groupBy("value")
                .count()
                .orderBy(functions.desc("count"));

        // Add the share of each word among all keywords, rounded to 2 decimal places.
        wordCounts = wordCounts.withColumn("percentage", functions.round(
                wordCounts.col("count").divide(totalKeywords).multiply(100), 2));

        return getMaps(wordCounts);
    }

    /**
     * Tokenizes every petition title with jieba (Chinese word segmentation)
     * and returns the per-word frequency, ordered by count descending.
     *
     * @return list of maps with keys {@code value} and {@code count}
     */
    @Override
    public List<Map<String, Object>> getTitleKeywordsCounts() {
        Dataset<Row> df = loadEventsDataFrame();

        Dataset<String> words = df.select("title")
                .na().drop() // drop rows with a null title
                .flatMap((FlatMapFunction<Row, String>) row -> {
                    // JiebaSegmenter is not serializable; create it inside the task.
                    JiebaSegmenter segmenter = new JiebaSegmenter();
                    String title = row.getAs("title");
                    List<SegToken> tokens = segmenter.process(title, JiebaSegmenter.SegMode.INDEX);
                    return tokens.stream()
                            .map(token -> token.word)
                            .filter(word -> !word.trim().isEmpty())
                            .iterator();
                }, Encoders.STRING());

        Dataset<Row> wordCounts = words.groupBy("value").count().orderBy(functions.desc("count"));
        return getMaps(wordCounts);
    }

    /**
     * Builds an overview of all petition events:
     * total count, counts per status name, word frequencies of the handling
     * departments ({@code petitionDep}), and counts per event-type name.
     *
     * @return map with keys {@code TotalEvents}, {@code StatusCounts},
     *         {@code PetitionDepWordCounts}, {@code TypeCounts}
     */
    @Override
    public Map<String, Object> getPetitionInfodata() {
        Map<String, Object> result = new HashMap<>();
        Dataset<Row> df = loadEventsDataFrame();

        result.put("TotalEvents", df.count());
        result.put("StatusCounts", countByCode(df, "status", STATUS_NAMES));

        // Word-frequency statistics over the whitespace-separated department field.
        Dataset<String> petitionDeps = df.select("petitionDep")
                .na().drop()
                .flatMap((FlatMapFunction<Row, String>) row -> {
                    String petitionDep = row.getAs("petitionDep");
                    return Arrays.asList(petitionDep.split("\\s+")).iterator();
                }, Encoders.STRING());
        Dataset<Row> petitionDepWordCounts =
                petitionDeps.groupBy("value").count().orderBy(functions.desc("count"));
        result.put("PetitionDepWordCounts", getMaps(petitionDepWordCounts));

        result.put("TypeCounts", countByCode(df, "eventType", TYPE_NAMES));
        return result;
    }

    /**
     * Counts petition events per calendar day (derived from {@code createdAt}).
     *
     * @return list of maps with keys {@code Date} ({@code yyyy-MM-dd}) and
     *         {@code Count}, ordered by count descending
     */
    @Override
    public List<Map<String, Object>> getPetitionCountsByDay() {
        Dataset<Row> df = loadEventsDataFrame()
                .withColumn("Created_At", functions.to_date(functions.col("createdAt")));

        Dataset<Row> dailyCounts = df
                .groupBy(functions.date_format(df.col("Created_At"), "yyyy-MM-dd").alias("Date"))
                .count()
                .orderBy(functions.desc("count"));

        return getDailMaps(dailyCounts);
    }

    /**
     * Counts petition events per ISO week of year.
     *
     * @return list of maps with keys {@code week} and {@code count},
     *         ordered by count descending
     */
    @Override
    public List<Map<String, Object>> getPetitionCountsByWeek() {
        return countByPeriod(functions.weekofyear(functions.col("createdAt")), "Week", "week");
    }

    /**
     * Counts petition events per calendar month (1-12).
     *
     * @return list of maps with keys {@code month} and {@code count},
     *         ordered by count descending
     */
    @Override
    public List<Map<String, Object>> getPetitionCountsByMonth() {
        return countByPeriod(functions.month(functions.col("createdAt")), "Month", "month");
    }

    /**
     * Counts petition events per status name for a single day.
     *
     * @param dateStr the day to filter on, formatted {@code yyyy-MM-dd}
     * @return map with keys {@code Date} (the input string) and
     *         {@code StatusCounts} (status name to count, all known statuses present)
     */
    @Override
    public Map<String, Object> getPetitionCountsByDayAndStatus(String dateStr) {
        Dataset<Row> df = loadEventsDataFrame()
                .withColumn("Created_At", functions.to_date(functions.col("createdAt")));

        // Keep only the events created on the requested day.
        Dataset<Row> filteredDf = df.filter(
                functions.date_format(df.col("Created_At"), "yyyy-MM-dd").equalTo(dateStr));

        Map<String, Object> result = new HashMap<>();
        result.put("Date", dateStr);
        result.put("StatusCounts", countByCode(filteredDf, "status", STATUS_NAMES));
        return result;
    }

    /** Loads all petition events via JPA and converts them into a Spark DataFrame. */
    private Dataset<Row> loadEventsDataFrame() {
        List<PetitionEvents> events = petitionEventsRepository.findAll();
        return spark.createDataFrame(events, PetitionEvents.class);
    }

    /**
     * Groups the given frame by a numeric code column, counts rows per code,
     * and translates the codes to display names. Every known name is initialized
     * to 0 so the result always contains the full set of statuses/types;
     * unknown codes are reported under "UnKnownStatus".
     *
     * @param df         frame containing the code column
     * @param codeColumn name of the numeric (Long) code column
     * @param names      code-to-display-name mapping
     * @return display name to count
     */
    private Map<String, Long> countByCode(Dataset<Row> df, String codeColumn,
                                          Map<Integer, String> names) {
        Map<String, Long> results = new HashMap<>();
        names.values().forEach(name -> results.put(name, 0L));

        for (Row row : df.groupBy(codeColumn).count().collectAsList()) {
            int code = row.<Long>getAs(codeColumn).intValue();
            long count = row.<Long>getAs("count");
            results.put(names.getOrDefault(code, "UnKnownStatus"), count);
        }
        return results;
    }

    /**
     * Shared implementation for the weekly/monthly aggregations: derives a
     * period column from {@code createdAt}, counts events per period, and
     * returns the rows as {key: period, "count": n} ordered by count descending.
     *
     * @param periodExpr Spark expression producing the period value
     * @param alias      column alias used during grouping
     * @param key        key under which the period value is exposed in each map
     */
    private List<Map<String, Object>> countByPeriod(Column periodExpr, String alias, String key) {
        Dataset<Row> counts = loadEventsDataFrame()
                .withColumn(alias, periodExpr)
                .groupBy(alias)
                .count()
                .orderBy(functions.desc("count"));

        return counts.collectAsList().stream().map(row -> {
            Map<String, Object> map = new HashMap<>();
            map.put(key, row.getAs(alias));
            map.put("count", row.getAs("count"));
            return map;
        }).collect(Collectors.toList());
    }

    /**
     * Converts a (Date, count) frame into a list of {@code {Date, Count}} maps.
     */
    private List<Map<String, Object>> getDailMaps(Dataset<Row> df) {
        List<Map<String, Object>> result = new ArrayList<>();
        for (Row row : df.collectAsList()) {
            Map<String, Object> map = new HashMap<>();
            map.put("Date", row.getAs("Date").toString());
            map.put("Count", row.getAs("count"));
            result.add(map);
        }
        return result;
    }

    /**
     * Converts a word-count frame into a list of {@code {value, count[, percentage]}}
     * maps. The {@code percentage} key is included only when the column exists:
     * previously this method read it unconditionally and threw
     * IllegalArgumentException for frames without that column
     * (getTitleKeywordsCounts, petitionDep counts).
     */
    @NotNull
    private List<Map<String, Object>> getMaps(Dataset<Row> wordCounts) {
        boolean hasPercentage = Arrays.asList(wordCounts.columns()).contains("percentage");
        List<Map<String, Object>> result = new ArrayList<>();

        for (Row row : wordCounts.collectAsList()) {
            Map<String, Object> map = new HashMap<>();
            map.put("value", row.getAs("value"));
            map.put("count", row.getAs("count"));
            if (hasPercentage) {
                map.put("percentage", row.getAs("percentage"));
            }
            result.add(map);
        }
        return result;
    }

    @Override
    public List<PetitionEvents> getNotApproved() {
        // Status 2 == "PendingReview": events awaiting approval.
        return petitionEventsRepository.findPetitionEventsByStatus(2L);
    }

    @Override
    public List<PetitionEvents> getEventByWorkId(Long workId) {
        return petitionEventsRepository.findPetitionEventsByWorkId(workId);
    }

    @Override
    public List<PetitionEvents> getEventByStatus(Long status) {
        return petitionEventsRepository.findPetitionEventsByStatus(status);
    }
}
