package com.xdja.kafka.hdfs.sink;

import com.alibaba.fastjson.JSON;
import com.xdja.kafka.hdfs.client.HadoopClient;
import com.xdja.kafka.hdfs.client.HadoopProperties;
import org.apache.hadoop.fs.FileSystem;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.URI;
import java.util.Collection;
import java.util.List;
import java.util.Map;

/**
 * Sink task that writes data to HDFS.
 * Redundant/demo code — for reference only.
 */
public class HdfsSinkGoodTask extends SinkTask {

    // Fixed: previously initialized with HdfsSinkTask.class, so entries from
    // this class were attributed to the wrong logger name.
    private static final Logger log = LoggerFactory.getLogger(HdfsSinkGoodTask.class);

    // HDFS client wrapper; created in start(), closed in stop().
    // Stays null when start() fails, so stop() must null-guard it.
    private HadoopClient hadoopClient;

    /**
     * Returns the task version; delegates to the connector so both report
     * the same version string.
     */
    @Override
    public String version() {
        return new HdfsSinkConnector().version();
    }

    /**
     * Initializes the HDFS {@link FileSystem} and the {@code HadoopClient},
     * then lists a sample path as a connectivity check. Initialization
     * failures are logged rather than rethrown, preserving the original
     * best-effort behavior (the task starts in a degraded state).
     *
     * @param props task configuration supplied by the connector
     */
    @Override
    public void start(Map<String, String> props) {
        log.info("开始执行start()方法");
        // NOTE(review): the HDFS_URL / HDFS_PATH props were read into locals but
        // never used — the connection target comes from HadoopProperties instead.
        // The dead locals were removed; confirm the props are truly not needed here.
        try {
            HadoopProperties hadoopProperties = new HadoopProperties();
            URI uri = new URI(hadoopProperties.getDirectoryPath().trim());
            FileSystem fileSystem = FileSystem.get(uri, getConfiguration(hadoopProperties));
            hadoopClient = new HadoopClient(fileSystem, hadoopProperties);
            log.info("开始获取文件信息");
            // TODO(review): listing path is hard-coded; presumably it should come
            // from the connector's HDFS_PATH property — confirm against the config.
            List<Map<String, Object>> pathInfoList = hadoopClient.getPathInfo("/wjy");
            log.info("获取到的文件信息:{}", JSON.toJSONString(pathInfoList));
        } catch (Exception e) {
            log.error("【FileSystem配置初始化失败】", e);
        }
    }

    /**
     * Builds the Hadoop configuration used to open the file system.
     *
     * @param hadoopProperties source of the NameNode address
     * @return a Configuration pointing at the configured NameNode
     */
    private org.apache.hadoop.conf.Configuration getConfiguration(HadoopProperties hadoopProperties) {
        org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
        conf.set("dfs.replication", "1");
        conf.set("fs.defaultFS", hadoopProperties.getNameNode());
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        conf.set("mapred.job.tracker", hadoopProperties.getNameNode());
        return conf;
    }

    /**
     * Receives records from Kafka. Currently a no-op placeholder that only
     * logs the invocation.
     */
    @Override
    public void put(Collection<SinkRecord> sinkRecords) {
        log.info("开始执行put()方法");
    }

    /**
     * Flush hook for committed offsets. Currently a no-op placeholder that
     * only logs the invocation.
     */
    @Override
    public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) {
        log.info("开始执行flush()方法");
    }

    /**
     * Releases the HDFS client. Null-guarded: if start() failed, the client
     * was never created and the previous code threw a NullPointerException here.
     */
    @Override
    public void stop() {
        log.info("开始执行stop()方法");
        if (hadoopClient != null) {
            hadoopClient.closeFileSystem();
        }
    }
}
