package com.dtkavin.hadoop;

import com.dtkavin.hadoop.iface.HdfsI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.StringUtils;

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.net.URI;

/**
 * Created by john on 10/1/16.
 */
public class HdfsComplexOperation implements HdfsI {

    // NOTE(review): NameNode address and proxy user are hard-coded; consider
    // reading them from the supplied Configuration instead.
    private static final String HDFS_URI = "hdfs://node01:9000";
    private static final String HDFS_USER = "hadoop";

    private final Configuration conf;

    public HdfsComplexOperation(Configuration conf) {
        this.conf = conf;
    }

    /**
     * Obtains a FileSystem handle for the configured NameNode, acting as the
     * configured user. The caller is responsible for closing the handle.
     *
     * @return an open {@link FileSystem} bound to {@value #HDFS_URI}
     * @throws Exception if the URI is malformed or the connection fails
     */
    private FileSystem getFileSystem() throws Exception {
        return FileSystem.get(new URI(HDFS_URI), conf, HDFS_USER);
    }

    /**
     * Uploads a local file to HDFS, overwriting any existing destination.
     *
     * @param srcPath local source file path
     * @param dstPath HDFS destination path
     */
    public void uploadFile(String srcPath, String dstPath) {
        // try-with-resources guarantees all three resources are closed,
        // even if the copy fails part-way through.
        try (FileSystem fs = getFileSystem();
             FileInputStream in = new FileInputStream(srcPath);
             FSDataOutputStream out = fs.create(new Path(dstPath), true)) {
            IOUtils.copyBytes(in, out, 4096, false);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Downloads an HDFS file to the local file system.
     *
     * @param srcPath HDFS source path
     * @param dstPath local destination file path
     */
    public void downloadFile(String srcPath, String dstPath) {
        try (FileSystem fs = getFileSystem();
             FSDataInputStream in = fs.open(new Path(srcPath));
             FileOutputStream out = new FileOutputStream(dstPath)) {
            IOUtils.copyBytes(in, out, 4096, false);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Random-access read of an HDFS file: seeks to {@code offset} and copies
     * the remainder of the file to a local destination.
     *
     * @param srcPath HDFS source file path
     * @param dstPath local destination file path
     * @param offset  byte offset to start reading from
     */
    public void randomReadFile(String srcPath, String dstPath, int offset) {
        try (FileSystem fs = getFileSystem();
             FSDataInputStream in = fs.open(new Path(srcPath));
             FileOutputStream out = new FileOutputStream(dstPath)) {
            in.seek(offset);
            IOUtils.copyBytes(in, out, 4096, false);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Prints status information (block size, owner, path, permission,
     * replication, symlink) for each entry under {@code srcPath}.
     *
     * @param srcPath HDFS path (file or directory)
     */
    public void printFileStatus(String srcPath) {
        // Fixed: the original opened an FSDataInputStream it never used or
        // closed, and silently swallowed every exception.
        try (FileSystem fs = getFileSystem()) {
            for (FileStatus status : fs.listStatus(new Path(srcPath))) {
                System.out.println("============status===========================");
                System.out.println("block size: " + status.getBlockSize());
                System.out.println("owner: " + status.getOwner());
                System.out.println("path: " + status.getPath());
                System.out.println("permission: " + status.getPermission());
                System.out.println("replications: " + status.getReplication());
                System.out.println("symlink: " + status.getSymlink());
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Prints block-location metadata (cached hosts, hosts, names, offset,
     * topology paths) for {@code srcPath}, then streams the contents of the
     * file's first block to stdout.
     *
     * @param srcPath HDFS file path
     */
    public void printBlockInfos(String srcPath) {
        try (FileSystem fs = getFileSystem();
             FSDataInputStream in = fs.open(new Path(srcPath))) {
            FileStatus fileStatus = fs.getFileStatus(new Path(srcPath));
            // Block locations covering the whole file: range [0, len).
            BlockLocation[] locations =
                    fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
            for (BlockLocation location : locations) {
                System.out.println("===============cachedhosts");
                printAll(location.getCachedHosts());
                System.out.println("===============hosts");
                printAll(location.getHosts());
                System.out.println("===============names");
                printAll(location.getNames());
                System.out.println("===============offset");
                System.out.println(location.getOffset());
                System.out.println("===============topologyPaths");
                printAll(location.getTopologyPaths());
            }
            // Guard against empty files, which have no blocks at all
            // (the original indexed locations[0] unconditionally).
            if (locations.length > 0) {
                System.out.println("========================read block");
                // Fixed: the original passed the block length as the BUFFER
                // size (so it copied the whole file, not one block) and used
                // close=true, which closed System.out. Copy exactly the first
                // block's bytes and leave stdout open.
                IOUtils.copyBytes(in, System.out, locations[0].getLength(), false);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** Prints each element of {@code items} on its own line, comma-suffixed. */
    private static void printAll(String[] items) {
        for (String item : items) {
            System.out.println(item + ",  ");
        }
    }
}
