package com.xdja.kafka.hdfs.sink;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.errors.ConnectException;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Collection;
import java.util.Map;
import java.util.Properties;

/**
 * Sink task that appends Kafka record values to a single HDFS file.
 *
 * <p>Lifecycle: {@link #start(Map)} opens (creating if absent) an append stream to the
 * configured HDFS path; {@link #put(Collection)} writes each record's value as UTF-8
 * bytes and hsyncs; {@link #stop()} closes the stream.</p>
 *
 * <p>Not thread-safe; Kafka Connect invokes each task from a single thread.</p>
 */
public class HdfsSinkTask extends SinkTask {
    private static final Logger log = LoggerFactory.getLogger(HdfsSinkTask.class);

    /**
     * Append stream to the configured HDFS file. Non-null only after a successful
     * {@link #start(Map)}; all users must therefore guard against null.
     */
    private FSDataOutputStream fsDataOutputStream;

    @Override
    public String version() {
        // Delegate to the connector so task and connector always report the same version.
        return new HdfsSinkConnector().version();
    }

    /**
     * Opens the HDFS output stream from the connector-supplied configuration.
     *
     * @param props task configuration; must contain {@code HdfsSinkConnector.HDFS_URL}
     *              and {@code HdfsSinkConnector.HDFS_PATH}
     * @throws ConnectException if the HDFS stream cannot be opened — failing fast here
     *                          prevents a later NullPointerException in {@link #put}
     */
    @Override
    public void start(Map<String, String> props) {
        log.info("开始执行start()方法");
        final String hdfsUrl = props.get(HdfsSinkConnector.HDFS_URL);
        final String hdfsPath = props.get(HdfsSinkConnector.HDFS_PATH);
        log.info("hdfsUrl:{}", hdfsUrl);
        log.info("hdfsPath:{}", hdfsPath);

        Configuration configuration = new Configuration();
        configuration.set("fs.defaultFS", hdfsUrl);
        // These two settings are required for HDFS append support: on small clusters,
        // replacing a failed datanode mid-append can make the write pipeline
        // unrecoverable, so the replacement policy is disabled.
        configuration.setBoolean("dfs.support.append", true);
        configuration.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        configuration.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");

        // Tell Hadoop to access the cluster as user "root".
        // NOTE(review): a hard-coded superuser is questionable — consider making this configurable.
        System.setProperty("HADOOP_USER_NAME", "root");
        try {
            // FileSystem.get() returns a JVM-cached, shared instance; it is deliberately
            // not closed in stop() because other tasks in the same worker may share it.
            FileSystem fileSystem = FileSystem.get(configuration);
            Path path = new Path(hdfsPath);
            // append() throws if the file does not exist yet, so create it on first run.
            fsDataOutputStream = fileSystem.exists(path)
                    ? fileSystem.append(path)
                    : fileSystem.create(path);
        } catch (IOException e) {
            log.error("创建hdfs的输出流失败：{}", e.getMessage(), e);
            // Fail the task instead of swallowing the error; otherwise put() would
            // later crash with a NullPointerException on the missing stream.
            throw new ConnectException("Failed to open HDFS output stream for path " + hdfsPath, e);
        }
    }

    /**
     * Writes each record's value to HDFS as UTF-8 bytes, hsyncing after each write
     * so data survives a datanode crash. Null-valued (tombstone) records are skipped.
     *
     * @param sinkRecords records polled from Kafka; never null
     */
    @Override
    public void put(Collection<SinkRecord> sinkRecords) {
        log.info("开始执行put()方法");
        for (SinkRecord sinkRecord : sinkRecords) {
            Object value = sinkRecord.value();
            if (value == null) {
                // Tombstone record — nothing to write.
                continue;
            }
            try {
                log.info("write info------------------------{}-----------------", value);
                fsDataOutputStream.write(value.toString().getBytes(StandardCharsets.UTF_8));
                // hsync() forces data to disk on the datanodes, not just into their buffers.
                fsDataOutputStream.hsync();
            } catch (IOException e) {
                // Best-effort per record: log and continue with the remaining records,
                // matching the original behavior of not failing the whole batch.
                log.error("put(), 写入sinkRecord失败。", e);
            }
        }
    }

    /**
     * Syncs any buffered data before Connect commits the given offsets, so committed
     * offsets never run ahead of durable data.
     *
     * @param offsets offsets about to be committed, per topic-partition
     */
    @Override
    public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) {
        log.info("开始执行flush()方法");
        if (fsDataOutputStream == null) {
            // start() failed or stop() already ran — nothing to sync.
            return;
        }
        try {
            fsDataOutputStream.hsync();
        } catch (IOException e) {
            log.error("flush(), 写入sinkRecord失败。", e);
        }
    }

    /**
     * Closes the HDFS output stream. Safe to call even if {@link #start(Map)} failed.
     */
    @Override
    public void stop() {
        log.info("开始执行stop()方法");
        if (fsDataOutputStream == null) {
            return;
        }
        try {
            fsDataOutputStream.close();
        } catch (IOException e) {
            log.error("stop(), 关闭fsDataOutputStream失败。", e);
        } finally {
            // Drop the reference so a repeated stop()/flush() is a harmless no-op.
            fsDataOutputStream = null;
        }
    }
}
