package org.zjt.hdfs.demo;

import java.io.*;
import java.net.URI;

import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.conf.*;

/**
 * Demonstrates appending to a file on HDFS.
 * <p>
 * Set the {@code fs.defaultFS} parameter; without it the client fails with:
 * {@code java.lang.IllegalArgumentException: Wrong FS: hdfs://master:9000/user, expected: file:///}
 * Alternatively, copy the cluster's core-site.xml into this project so the
 * configuration loader can recognize the HDFS file system.
 * <p>
 * Sample local input file: /Users/zhangjuntao/Downloads/catalina.log
 * <p>
 * Cluster hosts:
 * 172.19.110.240 master
 * 172.19.110.243 slave1
 * 172.19.110.244 slave2
 */
public class HDFSWrite {

    /**
     * Appends the contents of a local log file to a file on HDFS,
     * creating the target directory and file when they do not exist.
     *
     * @param args unused
     * @throws Exception never propagated in practice; failures are caught and logged
     */
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // load default Hadoop configuration
        /*
         * Optional when the cluster's core-site.xml is on the classpath:
         * conf.set("fs.defaultFS", "hdfs://master:9000/");
         * conf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
         */

        // Fixed typo: the key was misspelled "mapred.jop.tracker", so the
        // setting was silently ignored by Hadoop.
        conf.set("mapred.job.tracker", "centos:9001");

        /*
         * Avoid pipeline-recovery failures on small (1-2 datanode) clusters.
         * Without these, append can fail with:
         *   java.io.IOException: Failed to replace a bad datanode on the existing
         *   pipeline due to no more good datanodes being available to try.
         * Setting these in hdfs-site.xml alone did not help; they must be set on
         * the client configuration.
         */
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
        conf.set("dfs.support.append", "true");

        Path userDir = new Path("hdfs://centos:9000/user/");
        Path dest = new Path("hdfs://centos:9000/user/apollo-configservice.log");

        // try-with-resources guarantees the FileSystem, streams are closed even
        // on failure (the original leaked the InputStream and everything on the
        // exception path).
        try (FileSystem fs = FileSystem.get(new URI("hdfs://centos:9000/"), conf, "root")) {
            if (!fs.exists(userDir)) {
                fs.mkdirs(userDir);
            }
            // Create the file when absent; append to it otherwise.
            try (FSDataOutputStream fos = fs.exists(dest) ? fs.append(dest) : fs.create(dest, true);
                 InputStream is = new BufferedInputStream(
                         new FileInputStream("/opt/logs/100003171/apollo-configservice.log.2"))) {
                // Copy raw bytes. The original decoded each buffer with
                // new String(buffer, 0, size) using the platform charset, which
                // corrupts multi-byte characters split across buffer boundaries.
                byte[] buffer = new byte[8192];
                int size;
                while ((size = is.read(buffer)) > 0) {
                    fos.write(buffer, 0, size);
                }
            }
        } catch (Exception e) {
            // The original printed "File not found" for ANY failure; report the
            // actual cause instead, on stderr.
            System.err.println("Failed to append to HDFS: " + e);
            e.printStackTrace();
        }
    }
}
