package demo;

import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.Reader;
import org.apache.hadoop.io.SequenceFile.Writer;
import org.apache.hadoop.io.Text;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

/**
 * Packs many small local files into a single Hadoop {@link SequenceFile}
 * (file path as key, file bytes as value) and restores them again.
 */
public class SmallFilesMergeBySequenceFile {

    /** Default local directory where {@link #readMergedFile(Configuration, Path)} extracts entries. */
    private static final String DEFAULT_OUTPUT_DIR = "E:\\temp\\test2\\";

    /** Local paths of the small files queued for merging. */
    private final List<String> smallFilePaths = new ArrayList<>();

    /**
     * Queues a local file — or every file directly under a local directory — for merging.
     *
     * @param path a local file or directory path
     */
    public void addInputPath(String path) throws Exception {
        File file = new File(path);
        if (file.isDirectory()) {
            for (File sFile : FileUtil.listFiles(file)) {
                smallFilePaths.add(sFile.getPath());
                System.out.println("添加小文件路径：" + sFile.getPath());
            }
        } else {
            smallFilePaths.add(file.getPath());
            System.out.println("添加小文件路径：" + file.getPath());
        }
    }

    /**
     * Merges all queued small files into one SequenceFile at {@code path}.
     * Each entry uses the source file's path as the key and its full content as the value.
     *
     * @param configuration Hadoop configuration (determines the target filesystem)
     * @param path          destination SequenceFile path
     */
    public void mergeFile(Configuration configuration, Path path) throws Exception {
        Writer.Option bigFile = Writer.file(path);
        Writer.Option keyClass = Writer.keyClass(Text.class);
        Writer.Option valueClass = Writer.valueClass(BytesWritable.class);
        // try-with-resources guarantees the writer is closed even if an append fails
        try (Writer writer = SequenceFile.createWriter(configuration, bigFile, keyClass, valueClass)) {
            Text key = new Text();
            for (String sfps : smallFilePaths) {
                // Files.readAllBytes reads the whole file and closes the stream;
                // the previous single read() could return fewer bytes and leaked the stream.
                byte[] fileContent = Files.readAllBytes(Paths.get(sfps));
                String md5Str = DigestUtils.md5Hex(fileContent);
                System.out.println("merge小文件：" + sfps + ",md5:" + md5Str);
                key.set(sfps);
                // file path as the key, file content as the value
                writer.append(key, new BytesWritable(fileContent));
            }
            writer.hflush();
        }
    }

    /**
     * Reads the merged SequenceFile and restores each entry as an individual
     * file under {@value #DEFAULT_OUTPUT_DIR}.
     */
    public void readMergedFile(Configuration configuration, Path path) throws Exception {
        readMergedFile(configuration, path, DEFAULT_OUTPUT_DIR);
    }

    /**
     * Reads the merged SequenceFile and restores each entry as an individual
     * file in {@code outputDir}.
     *
     * @param configuration Hadoop configuration (determines the source filesystem)
     * @param path          SequenceFile to read
     * @param outputDir     local directory prefix (including trailing separator) for restored files
     */
    public void readMergedFile(Configuration configuration, Path path, String outputDir) throws Exception {
        Reader.Option file = Reader.file(path);
        // close the reader even if an entry fails to restore
        try (Reader reader = new Reader(configuration, file)) {
            Text key = new Text();
            BytesWritable value = new BytesWritable();
            while (reader.next(key, value)) {
                // keys are full source paths; keep only the final path component
                // (split on both Windows '\' and Unix '/' separators)
                String[] split = key.toString().split("[\\\\/]");
                String fileName = split[split.length - 1];
                System.out.println(key);
                byte[] bytes = value.copyBytes();
                String md5 = DigestUtils.md5Hex(bytes);
                String content = new String(bytes, StandardCharsets.UTF_8);
                try (FileOutputStream fileOutputStream = new FileOutputStream(outputDir + fileName)) {
                    fileOutputStream.write(bytes);
                }
                System.out.println("读取到文件：" + key + ",md5:" + md5 + ",content:" + content);
            }
        }
    }

    /**
     * Demo driver. Optional args: {@code args[0]} local input dir,
     * {@code args[1]} HDFS target path, {@code args[2]} local output dir;
     * the original hard-coded values remain the defaults.
     */
    public static void main(String[] args) throws Exception {
        // Configure the client to talk to HDFS.
        Configuration configuration = new Configuration();
        // "fs.defaultFS" is the current name of the deprecated "fs.default.name" key.
        configuration.set("fs.defaultFS", "hdfs://localhost:9001");
        // Identity used by the HDFS client ("root" must exist on the cluster).
        System.setProperty("HADOOP_USER_NAME", "root");

        SmallFilesMergeBySequenceFile msf = new SmallFilesMergeBySequenceFile();

        String input = args.length > 0 ? args[0] : "E:\\temp\\test";
        Path path = new Path(args.length > 1 ? args[1] : "/temp/text/3");
        String output = args.length > 2 ? args[2] : DEFAULT_OUTPUT_DIR;

        msf.addInputPath(input);
        msf.mergeFile(configuration, path);
        msf.readMergedFile(configuration, path, output);
    }
}


