package com.example.demo.controller;

import com.example.demo.componet.MapUtil;
import com.example.demo.componet.ReadCSV;
import com.example.demo.model.BrandJson;

import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.servlet.mvc.method.annotation.SseEmitter;

import scala.Tuple2;

import javax.annotation.Resource;
import java.io.IOException;
import java.io.Serializable;
import java.util.*;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;

@RestController
@RequestMapping("/analyze")
public class BrandRankController implements Serializable {

    private static final Logger LOG = Logger.getLogger(BrandRankController.class.getName());

    /** Number of top-ranked brands returned/pushed to the client. */
    private static final int TOP_N = 5;

    // Spark on Windows needs hadoop.home.dir set before the context starts.
    static {
        System.setProperty("hadoop.home.dir", "D:\\SoftWares\\Apache\\spark-3.3.1-bin-hadoop3");
    }

    @Resource
    private JavaSparkContext sc;

    // CSV file path (injected bean) — 文件地址.
    @Autowired
    private String getFilePath;

    // Single-threaded scheduler driving the SSE push loop. One thread means
    // `flag` is only ever read/written by that thread after startup.
    private final ScheduledExecutorService executorService =
            Executors.newSingleThreadScheduledExecutor();

    // Brand column, loaded lazily on first SSE request. This must NOT be a field
    // initializer: initializers run before @Autowired injection, so getFilePath
    // would still be null (the original worked around that with a hard-coded path).
    private List<String> columnList;

    // Exclusive upper bound of the data window pushed so far; grows one row per tick.
    private int flag = 6;

    /**
     * One-shot brand ranking: loads column 6 of the CSV, word-counts it with
     * Spark, and returns the top {@link #TOP_N} brands by count, descending.
     *
     * @return at most TOP_N {@link BrandJson} entries, highest count first
     */
    @GetMapping("/brand")
    public List<BrandJson> getBrandList() {
        List<String> column = new ReadCSV().getColumnList(getFilePath, 6);
        List<BrandJson> sendList = topN(rankBrands(column));
        LOG.info("------brand data------:" + sendList);
        return sendList;
    }

    /**
     * SSE endpoint: every 5 seconds re-ranks a growing window of the CSV column
     * and pushes the current top {@link #TOP_N} brands to the client.
     *
     * @return an emitter the client subscribes to; its push task is cancelled
     *         when the connection completes or times out
     */
    @GetMapping("/brand1")
    public SseEmitter sendBrandData() {
        SseEmitter sseEmitter = new SseEmitter();
        List<String> column = loadColumnList();

        // 10s initial delay, then 5s between runs.
        ScheduledFuture<?> task = executorService.scheduleWithFixedDelay(() -> {
            // Window skips the header row (index 0) and is clamped to the list
            // size — the original ran past the end, and the resulting
            // IndexOutOfBoundsException silently killed the scheduled task.
            int end = Math.min(flag, column.size());
            if (end <= 1) {
                return;  // nothing beyond the header yet
            }
            List<String> window = column.subList(1, end);
            if (flag < column.size()) {
                flag = flag + 1;
            }
            try {
                sseEmitter.send(topN(rankBrands(window)));
            } catch (IOException exception) {
                // Client most likely disconnected; finish this emitter.
                sseEmitter.completeWithError(exception);
            }
        }, 10, 5, TimeUnit.SECONDS);

        // Without cancellation every request leaked a forever-running task.
        sseEmitter.onCompletion(() -> task.cancel(false));
        sseEmitter.onTimeout(() -> task.cancel(false));
        return sseEmitter;
    }

    /**
     * Word-counts {@code values} with Spark and returns one {@link BrandJson}
     * per distinct value, sorted by count descending.
     */
    private List<BrandJson> rankBrands(List<String> values) {
        // Load the data into the cluster, map to (word, 1), then sum per key.
        JavaRDD<String> rdd = sc.parallelize(values);
        JavaPairRDD<String, Integer> pairs =
                rdd.mapToPair((PairFunction<String, String, Integer>) word -> new Tuple2<>(word, 1));
        JavaPairRDD<String, Integer> counts =
                pairs.reduceByKey((Function2<Integer, Integer, Integer>) Integer::sum);

        // Collect to the driver and sort by count, descending.
        Map<String, Integer> sortMap = MapUtil.sortDescend(counts.collectAsMap());

        List<BrandJson> list = new ArrayList<>(sortMap.size());
        for (Map.Entry<String, Integer> entry : sortMap.entrySet()) {
            BrandJson brandJson = new BrandJson();
            brandJson.setName(entry.getKey());
            brandJson.setValue(entry.getValue());
            list.add(brandJson);
        }
        return list;
    }

    /**
     * First {@link #TOP_N} entries of {@code list}, clamped to its size — the
     * original {@code subList(0, 5)} threw when fewer than 5 brands existed.
     */
    private static List<BrandJson> topN(List<BrandJson> list) {
        return list.subList(0, Math.min(TOP_N, list.size()));
    }

    /** Loads column 6 of the CSV at the injected path, once. */
    private synchronized List<String> loadColumnList() {
        if (columnList == null) {
            columnList = new ReadCSV().getColumnList(getFilePath, 6);
        }
        return columnList;
    }
}
