package com.edata.bigdata.spark;

import com.edata.bigdata.basic.Commons;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.Properties;


public class HdfsReader {

    // NOTE(review): fields are public and mutable; callers appear to inject
    // hdfsConnector/session from outside, so visibility is preserved for
    // backward compatibility. Consider constructor injection + private fields.
    public Logger logger = LoggerFactory.getLogger(this.getClass());
    public HdfsConnector hdfsConnector;
    public String PREFIX = "hdfs://";
    // NOTE(review): both entries are identical ("localhost:8082") — this looks
    // like a copy/paste slip; confirm the intended second (standby) address.
    public String ENTRYPOINT = "localhost:8082,localhost:8082";
    public String activeNameNode;
    public SparkSession session;

    /**
     * Reads a text file from HDFS under the connector's active namenode and
     * returns its lines as an RDD, using a single minimum partition.
     *
     * @param path path relative to the active namenode URI
     * @return the file's lines as a {@code JavaRDD<String>}, or {@code null}
     *         on any failure (failure is logged, not rethrown)
     */
    public JavaRDD<String> findData(String path) {
        return findData(path, 1);
    }

    /**
     * Reads a text file from HDFS under the connector's active namenode and
     * returns its lines as an RDD.
     *
     * @param path          path relative to the active namenode URI
     * @param minPartitions minimum number of partitions for the resulting RDD
     * @return the file's lines as a {@code JavaRDD<String>}, or {@code null}
     *         on any failure (failure is logged, not rethrown)
     */
    public JavaRDD<String> findData(String path, int minPartitions) {
        try {
            // Recreate the HDFS client when it is missing or looks unhealthy.
            // NOTE(review): getCapacity() < 0 is presumably a liveness probe —
            // confirm against HdfsConnector's contract.
            if (hdfsConnector.client == null || hdfsConnector.client.getStatus().getCapacity() < 0) {
                hdfsConnector.createHdfsClient();
            }
            String hdfsFilePath = hdfsConnector.activeNameNode + "/" + path;
            return session.sparkContext().textFile(hdfsFilePath, minPartitions).toJavaRDD();
        } catch (Exception e) {
            // Pass the exception as the final argument so SLF4J records the
            // full stack trace (previously only e.getMessage() was logged).
            logger.error("无法查询数据，{}", e.getMessage(), e);
            return null;
        }
    }

    /**
     * Maps an RDD of comma-separated lines into a typed Dataset using a
     * schema derived from the given bean class.
     *
     * @param rdd   lines of comma-separated values; may be {@code null}
     * @param clazz bean class describing the row layout; may be {@code null}
     * @param <T>   bean type of the resulting Dataset
     * @return a typed {@code Dataset<T>}, or {@code null} when either
     *         argument is {@code null} (logged, not thrown)
     */
    public <T> Dataset<T> toBeanMapper(JavaRDD<String> rdd, Class<T> clazz) {
        if (rdd == null) {
            logger.error("rdd为空");
            return null;
        }
        if (clazz == null) {
            logger.error("clazz为空");
            return null;
        }
        logger.info("正在从数据类型 RDD<String> 映射为 Dataset<{}>", clazz.getSimpleName());
        // In the Java API the strings must first be converted to Rows before
        // a schema-based DataFrame can be created.
        JavaRDD<Row> rowRDD = rdd.map(line -> {
            // limit = -1 keeps trailing empty columns so the value count stays
            // aligned with the schema even when the last fields are blank;
            // the default split(",") silently drops trailing empty strings.
            String[] parts = line.split(",", -1);
            Object[] values = new Object[parts.length];
            for (int i = 0; i < parts.length; i++) {
                values[i] = parts[i].trim();
            }
            return RowFactory.create(values);
        });
        StructType schema = Commons.createDataFrameSchema(clazz);
        Dataset<Row> rows = session.createDataFrame(rowRDD, schema);
        return rows.as(Encoders.bean(clazz));
    }

    /**
     * Creates a reader bound to the given Spark session.
     *
     * @param session active SparkSession used for all reads
     */
    public HdfsReader(SparkSession session) {
        this.session = session;
    }
}
