package com.edata.bigdata.spark;

import com.edata.bigdata.basic.Commons;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.StructType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;

/**
 * Writes in-memory {@link Row} lists to HDFS as CSV files via a shared
 * {@link SparkSession}.
 *
 * <p>NOTE(review): {@code logger}, {@code hdfsConnector} and {@code session}
 * are public mutable fields — callers appear to inject {@code hdfsConnector}
 * directly, so they are kept public for backward compatibility. Consider
 * making them private with constructor injection in a follow-up.
 */
public class HdfsWriter {

    public Logger logger = LoggerFactory.getLogger(this.getClass());
    public HdfsConnector hdfsConnector;
    public SparkSession session;

    /**
     * Converts {@code rows} into a DataFrame using a schema derived from
     * {@code clazz} and writes it as CSV under the active NameNode.
     *
     * <p>Failures are logged (with full stack trace) and swallowed — the
     * method never throws, matching the original best-effort contract.
     *
     * @param rows  data rows to persist
     * @param path  HDFS path, appended to {@code hdfsConnector.activeNameNode}
     * @param mode  Spark save mode string (e.g. "append", "overwrite")
     * @param clazz bean class used by {@code Commons.createDataFrameSchema}
     *              to build the DataFrame schema
     * @param <T>   schema bean type
     */
    public <T> void write(List<Row> rows, String path, String mode, Class<T> clazz) {
        try {
            // Lazily (re)create the HDFS client when absent or when the
            // reported capacity is negative — presumably a liveness probe
            // for a stale connection; TODO confirm against HdfsConnector.
            if (hdfsConnector.client == null || hdfsConnector.client.getStatus().getCapacity() < 0) {
                hdfsConnector.createHdfsClient();
            }
            StructType schema = Commons.createDataFrameSchema(clazz);
            Dataset<Row> data = session.createDataFrame(rows, schema);
            String filePath = hdfsConnector.activeNameNode + path;
            data.write().mode(mode).csv(filePath);
        } catch (Exception e) {
            // Pass the exception itself so SLF4J records the stack trace;
            // the previous code logged only e.getMessage(), losing the cause.
            logger.error("无法写入数据，{}", e.getMessage(), e);
        }
    }

    /**
     * @param session shared Spark session used to build DataFrames
     */
    public HdfsWriter(SparkSession session) {
        this.session = session;
    }
}
