package com.cl.spark.node;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.cl.spark.base.BaseSparkNode;
import com.cl.spark.dto.SparkParam;
import com.cl.spark.dto.SparkResult;
import com.cl.spark.enums.SparkNodeEnum;
import com.jandar.pile.rule.process.RuleMatcher;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.*;
import org.apache.spark.sql.catalyst.encoders.RowEncoder;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.springframework.stereotype.Component;
import scala.Function1;
import scala.collection.JavaConverters;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;


@Component
public class RuleNode extends BaseSparkNode {

    // NOTE(review): injected but currently unused — execMatchRule is invoked statically
    // below. Kept so the Spring constructor contract is unchanged; confirm whether the
    // instance is needed or the dependency can be dropped.
    private final RuleMatcher ruleMatcher;
    private final JavaSparkContext sparkContext;

    public RuleNode(RuleMatcher ruleMatcher, JavaSparkContext sparkContext) {
        this.ruleMatcher = ruleMatcher;
        this.sparkContext = sparkContext;
    }

    /**
     * Evaluates the node's rule expression against every row of the first input
     * dataset and returns only the rows that produced a rule message.
     *
     * <p>Two string columns are appended to each row:
     * <ul>
     *   <li>{@code msg} — the comma-joined error messages from rule evaluation, or,
     *       when there are no errors, the comma-joined tip messages (errors take
     *       precedence over tips);</li>
     *   <li>{@code match} — {@code "true"}/{@code "false"}, the boolean result of
     *       {@link RuleMatcher#execMatchRule}.</li>
     * </ul>
     * Rows whose {@code msg} ends up null or empty are filtered out of the result.
     *
     * @param sparkParam carries the input datasets (the first one is used) and the
     *                   node expression JSON, whose {@code ruleExpression} field holds
     *                   the rule definition as a JSON string
     * @return a successful {@link SparkResult} wrapping the filtered dataset
     */
    @Override
    public SparkResult process(SparkParam sparkParam) {

        Dataset<Row> dataset = sparkParam.getDatasetList().get(0);
        JSONObject expressionObj = sparkParam.getNodeExpression();
        String ruleExpression = expressionObj.getString("ruleExpression");

        // Append the two output columns up front so the dataset's schema — and thus
        // the encoder reused by the map below — already contains "msg" and "match".
        Dataset<Row> newDataset = dataset
                .withColumn("msg", functions.lit(""))
                .withColumn("match", functions.lit(""));

        Dataset<Row> mapDataset = newDataset.map((MapFunction<Row, Row>) row -> {
            List<String> errorList = new ArrayList<>();
            List<String> tipsList = new ArrayList<>();
            JSONObject cpJson = new JSONObject();
            // The raw expression String is captured by the closure (trivially
            // serializable to executors) and re-parsed per row; if profiling shows
            // this is hot, consider broadcasting a pre-parsed form instead.
            boolean match = RuleMatcher.execMatchRule(
                    JSONObject.parseObject(row.json()),
                    JSONObject.parseObject(ruleExpression),
                    cpJson, errorList, tipsList);

            // Errors take precedence over tips when building the message column.
            String msg = "";
            if (!errorList.isEmpty()) {
                msg = String.join(",", errorList);
            } else if (!tipsList.isEmpty()) {
                msg = String.join(",", tipsList);
            }

            // Copy every original column value. The last two columns are the empty
            // placeholders appended above, so they are skipped here and replaced by
            // the computed msg/match values.
            List<Object> values = new ArrayList<>(row.length());
            for (int i = 0; i < row.length() - 2; i++) {
                values.add(row.get(i));
            }
            values.add(msg);
            values.add(String.valueOf(match));

            // Rebuild the row with the original fields plus the two computed fields.
            return RowFactory.create(values.toArray(new Object[0]));
        }, newDataset.encoder());

        // Keep only the rows whose rule evaluation produced a message
        // (msg non-null and non-empty).
        Dataset<Row> filteredDF = mapDataset.filter(
                functions.col("msg").isNotNull()
                        .and(functions.col("msg").notEqual(functions.lit(""))));

        return SparkResult.success(filteredDF);
    }

    /** @return the node type handled by this processor: {@link SparkNodeEnum#RULE_HANDLE}. */
    @Override
    public SparkNodeEnum getType() {
        return SparkNodeEnum.RULE_HANDLE;
    }
}