package com.xdja.kafka.hdfs.sink.writer.manage;

import com.xdja.kafka.hdfs.sink.writer.definition.ParquetWriterDefinition;

import com.xdja.kafka.hdfs.sink.writer.definition.WriterDefinition;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.hadoop.ParquetWriter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.IOException;


/**
 * ParquetWriter管理
 */
/**
 * Singleton manager for {@link ParquetWriter} instances writing {@link Group}
 * records to HDFS.
 */
public class ParquetWriterManage extends AbstractWriterManage<ParquetWriter<Group>> {
    private static final Logger log = LoggerFactory.getLogger(ParquetWriterManage.class);

    /** File-name suffix for parquet output files. */
    public static final String parquetSuffix = ".par";

    /**
     * Lazily-initialized singleton instance. Declared {@code volatile} so the
     * double-checked locking in {@link #getInstance()} is safe under the JMM.
     */
    private static volatile ParquetWriterManage instance;

    /**
     * Private constructor to enforce the singleton pattern.
     */
    private ParquetWriterManage() {
    }

    /**
     * Returns the shared instance, creating it lazily on first use.
     * <p>
     * Uses double-checked locking: the original unsynchronized lazy check could
     * construct multiple instances under concurrent first access.
     *
     * @return the singleton {@code ParquetWriterManage}
     */
    public static ParquetWriterManage getInstance() {
        if (instance == null) {
            synchronized (ParquetWriterManage.class) {
                if (instance == null) {
                    instance = new ParquetWriterManage();
                }
            }
        }
        return instance;
    }

    /**
     * Closes the given writer, logging (not propagating) any {@link IOException}.
     *
     * @param closeParquetWriter writer to close; {@code null} is tolerated
     */
    @Override
    protected void clolseWriter(ParquetWriter<Group> closeParquetWriter) {
        if (closeParquetWriter != null) {
            try {
                closeParquetWriter.close();
            } catch (IOException e) {
                log.error("关闭closeParquetWriter失败，原始:{}", e.getMessage(), e);
            }
        }
    }

    /**
     * Builds a parquet-format writer storing to
     * {@code {hdfsPath}/{YYYY-MM}/{YYYY-MM-DD}/{YYYY-MM-DD}*.par}.
     * If the target file already exists, a millisecond timestamp is appended to
     * the file-name stem so a fresh file is created instead.
     *
     * @param date date string used as the file-name stem (and, via
     *             {@code getHdfsDateDir}, the directory)
     * @return a new {@link ParquetWriter} for the computed HDFS path
     * @throws IOException if the existence check or writer creation fails
     */
    @Override
    protected ParquetWriter<Group> buildWriter(String date) throws IOException {
        String hdfsDateDir = getHdfsDateDir(date);
        // HDFS paths always use '/' regardless of the local OS. The original
        // used File.separator, which is '\' on Windows and would corrupt the path.
        String fileHdfsPath = hdfsDateDir + "/" + date;
        Path dateFileHdfsPath = new Path(fileHdfsPath + parquetSuffix);
        // If the file already exists, point to a new timestamp-suffixed one.
        // NOTE(review): exists-then-create is racy if two writers target the
        // same date concurrently — confirm callers serialize writer creation.
        if (WriterDefinition.fileSystem.exists(dateFileHdfsPath)) {
            fileHdfsPath += "-" + System.currentTimeMillis();
            dateFileHdfsPath = new Path(fileHdfsPath + parquetSuffix);
        }
        // Diamond instead of the original raw-type constructor call.
        ParquetWriter<Group> writer = new ParquetWriter<>(dateFileHdfsPath,
                ParquetWriterDefinition.configuration, ParquetWriterDefinition.groupWriteSupport);
        log.info("成功创建ParquetWriter, date:{}", date);
        return writer;
    }
}
