package com.cl.spark.util;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import org.apache.parquet.Strings;
import org.apache.spark.sql.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

@Component
public class SparkUtil {

    @Autowired
    private SparkSession sparkSession;

    /** Base HDFS URL prepended to all paths passed to {@link #readStringDataset}. */
    @Value("${hdfs.url:hdfs://localhost:9000}")
    private String hdfsUrl;

    /**
     * Returns a dataset in which every column is cast to STRING, except the columns
     * named in {@code excludeFields}, which are dropped from the result entirely.
     * Column-name comparison against the exclusion list is case-insensitive.
     *
     * @param dataset       source dataset
     * @param excludeFields column names to omit from the result (case-insensitive match)
     * @return a dataset containing the remaining columns, each cast to STRING
     */
    public static Dataset<Row> toStringDataset(Dataset<Row> dataset, String... excludeFields) {
        return dataset.selectExpr(Arrays.stream(dataset.columns())
                .filter(col -> Arrays.stream(excludeFields).noneMatch(col::equalsIgnoreCase))
                // Backticks guard against column names containing dots or spaces.
                .map(colName -> "CAST(`" + colName + "` AS STRING)")
                .toArray(String[]::new));
    }

    /**
     * Removes from {@code dataset} every row whose values in {@code uniqueColumnList}
     * match an existing row in {@code existingRecords} (a left-anti join on those
     * columns). The existing-records columns are renamed with an {@code existing_}
     * prefix so the join condition is unambiguous.
     *
     * @param uniqueColumnList columns forming the uniqueness key; must be non-empty
     * @return rows of {@code dataset} not already present in {@code existingRecords}
     * @throws IllegalArgumentException if {@code uniqueColumnList} is null or empty
     */
    public static Dataset<Row> filterByUniqueColumns(Dataset<Row> dataset, Dataset<Row> existingRecords, List<String> uniqueColumnList) {
        if (uniqueColumnList == null || uniqueColumnList.isEmpty()) {
            throw new IllegalArgumentException("uniqueColumnList must contain at least one column");
        }
        // Rename ALL key columns first so every Column reference below resolves
        // against the final plan of existingRecords. The previous implementation
        // built the condition against intermediate renamed datasets while
        // reassigning the variable, leaving earlier Column objects bound to
        // superseded plans.
        for (String uniqueColumn : uniqueColumnList) {
            existingRecords = existingRecords.withColumnRenamed(uniqueColumn, "existing_" + uniqueColumn);
        }
        Column filterColumn = null;
        for (String uniqueColumn : uniqueColumnList) {
            Column eq = dataset.col(uniqueColumn).equalTo(existingRecords.col("existing_" + uniqueColumn));
            filterColumn = (filterColumn == null) ? eq : filterColumn.and(eq);
        }
        // Drop the rows that already exist; left_anti keeps only the left side's columns.
        return dataset.join(existingRecords, filterColumn, "left_anti");
    }

    /**
     * Reads a text file from HDFS as a dataset of lines.
     *
     * @param path path relative to the configured {@code hdfs.url}
     * @return one String row per line of the file
     */
    public Dataset<String> readStringDataset(String path) {
        return sparkSession.read().textFile(hdfsUrl + path);
    }

    /**
     * Converts a fastjson {@link JSONArray} (each element a JSON object/value)
     * into a Spark dataset by serializing each element to its string form and
     * letting Spark infer the schema from the JSON text.
     *
     * @param jsonArray array whose elements are parsed as JSON rows
     * @return a dataset with schema inferred from the JSON content
     */
    public Dataset<Row> jsonArrayToDataset(JSONArray jsonArray) {
        List<String> list = new ArrayList<>(jsonArray.size()); // presize: size is known
        for (int i = 0; i < jsonArray.size(); i++) {
            list.add(jsonArray.getString(i));
        }
        Dataset<String> dataset = sparkSession.createDataset(list, Encoders.STRING());
        return sparkSession.read().json(dataset);
    }
}
