package com.chenjj.bigdata.hbase.bulkload;

import java.net.URI;

import com.chenjj.bigdata.hbase.client.HbaseClient;
import com.chenjj.bigdata.hbase.client.impl.HbaseClientImpl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


/**
 * Bulk-loads text data into an existing HBase table. A MapReduce job first
 * converts the input into HFiles (via {@link HFileOutputFormat2}) under a
 * temporary HDFS directory, and {@link LoadIncrementalHFiles} then moves the
 * generated files into the table's regions.
 *
 * <p>Not thread-safe by design intent unknown; the class holds no mutable
 * state, so concurrent {@link #doBulkLoad(String[])} calls are independent.
 */
public class BulkLoadServie {

    private static final Logger logger = LoggerFactory.getLogger(BulkLoadServie.class);

    /** Comma-separated column list passed to the mapper through the custom "columns" job property. */
    private static final String COLUMNS =
            "acc_no,acc_name,opp_acc_no,opp_acc_name,opp_br_no,opp_br_name,curr,trx_code,trx_name,db_flag";

    /**
     * Generates HFiles from the input data and bulk-loads them into HBase.
     *
     * @param args [0] HBase table name, [1] HDFS input path, [2] temporary HFile
     *             output path (deleted and recreated by the job), [3] fully
     *             qualified mapper class name, [4] ZooKeeper host, [5] ZooKeeper port
     * @return 0 when the MapReduce job and the bulk load both succeed, 1 when the job fails
     * @throws Exception on job-setup, MapReduce, HDFS, or bulk-load failure
     */
    public int doBulkLoad(String[] args) throws Exception {
        if (args == null || args.length < 6) {
            throw new IllegalArgumentException(
                    "expected 6 arguments: tableName dataPath hfilePath mapperClassName zkHost zkPort");
        }
        String tableName = args[0];
        String dataPath = args[1];
        String hfilePath = args[2];
        String mapperClassName = args[3];
        String zkHost = args[4];
        String zkPort = args[5];

        // Push the recorded start time back one minute to absorb clock skew between cluster nodes.
        String startTime = (System.currentTimeMillis() - 1000 * 60) + "";
        logger.info("startTime : {}", startTime);

        HbaseClient hBaseService = new HbaseClientImpl(zkHost, zkPort);

        Configuration jobConf = new Configuration();
        jobConf.set("columns", COLUMNS); // custom parameter: column definitions for the mapper

        Job job = buildJob(jobConf, dataPath, mapperClassName);

        FsShell shell = new FsShell(jobConf);
        Connection conn = null;
        Table table = null;
        RegionLocator regionLocator = null;
        Admin admin = null;
        boolean res = false;
        try {
            // Remove any leftover HFile output from a previous run.
            try {
                shell.run(new String[]{"-rm", "-r", hfilePath});
                logger.info(">>>>>>>>>>>>删除临时目录：{}", hfilePath);
            } catch (Exception e) {
                logger.error(">>>>>>>>>>>>删除临时目录失败{}", hfilePath, e);
                throw e;
            }

            try {
                logger.info(">>>>>>>>>>>>>>>>>>>>>>>开始生成hfile<<<<<<<<<<<<<<<<<<<<<< {}", hfilePath);
                FileOutputFormat.setCompressOutput(job, true);                   // compress reduce output
                FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class); // gzip codec
                FileOutputFormat.setOutputPath(job, new Path(hfilePath));

                conn = hBaseService.getConnection();
                TableName hbaseTable = TableName.valueOf(tableName);
                table = conn.getTable(hbaseTable);
                regionLocator = conn.getRegionLocator(hbaseTable);
                // Explicit staging dir; without it the partitions file write can fail
                // with "Mkdirs failed to create".
                job.getConfiguration().set("hbase.fs.tmp.dir", "/tmp/blukload/hbase-staging");
                logger.info("{} 开始进行mapreduce", tableName);
                HFileOutputFormat2.configureIncrementalLoad(job, table, regionLocator);

                res = job.waitForCompletion(true);
                if (res) {
                    logger.info(">>>>>>>>>>>>>>>>>>>>>>>结束生成hfile<<<<<<<<<<<<<<<<<<<<<< {}", hfilePath);
                    logGeneratedHFiles(jobConf, hfilePath);
                }
            } catch (Exception e) {
                logger.error(">>>>>>>>>>>>hfile文件生成失败 {}", hfilePath, e);
                throw e;
            }

            if (!res) {
                logger.error(">>>>>>>>>>>>>>>loading failed.{}", hfilePath);
                return 1;
            }

            // Open up permissions so the HBase service user can move the files into place.
            try {
                shell.run(new String[]{"-chmod", "-R", "777", hfilePath});
            } catch (Exception e) {
                logger.error(">>>>>>>>>>>>>>Could not change the file permissions {}", hfilePath, e);
                throw e;
            }

            try {
                logger.info("{} 加载数据到hbase开始", tableName);
                LoadIncrementalHFiles loader = new LoadIncrementalHFiles(jobConf);
                admin = conn.getAdmin();
                loader.doBulkLoad(new Path(hfilePath), admin, table, regionLocator);
            } catch (Exception e) {
                logger.error("{} >>>>>>>>>>>加载数据到hbase异常<<<<<<<<<<<<", tableName, e);
                throw e;
            }
            logger.info("{} 加载数据到hbase完成", tableName);
            // NOTE(review): the original pushed the recorded end time forward one minute;
            // kept as-is — confirm this offset is intentional.
            String endTime = (System.currentTimeMillis() + 60 * 1000) + "";
            logger.info("endTime:{}", endTime);
            return 0;
        } finally {
            // Close in dependency order; the original leaked admin, regionLocator and
            // the FsShell, and closed the connection before the table.
            closeQuietly(admin, regionLocator, table, conn);
            try {
                shell.close();
            } catch (Exception e) {
                logger.warn("failed to close FsShell", e);
            }
        }
    }

    /** Configures the MapReduce job: input path, mapper class, and input/output formats. */
    private Job buildJob(Configuration jobConf, String dataPath, String mapperClassName)
            throws Exception {
        Job job = Job.getInstance(jobConf);
        job.setJarByClass(BulkLoadServie.class);

        logger.info("数据目录:{}", dataPath);
        FileInputFormat.setInputPaths(job, new Path(dataPath));
        // If there are many small input files, tune the split sizes here, e.g.:
        // FileInputFormat.setMinInputSplitSize(job, 100 * 1024 * 1024);
        // FileInputFormat.setMaxInputSplitSize(job, 10000);

        // The mapper class is supplied by name at runtime; the cast is unavoidable
        // and fails fast in setMapperClass if the class is not a Mapper.
        @SuppressWarnings("unchecked")
        Class<? extends Mapper> mapperClass =
                (Class<? extends Mapper>) Class.forName(mapperClassName);
        job.setJobName(mapperClassName);
        job.setMapperClass(mapperClass);
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(Put.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(HFileOutputFormat2.class);
        return job;
    }

    /** Logs the directory and file names generated under {@code hfilePath}. */
    private void logGeneratedHFiles(Configuration conf, String hfilePath) throws Exception {
        logger.info(">>>>>>>>>>>>>>>>>>>>>>>开始获取hfile文件名<<<<<<<<<<<<<<<<<<<<<< ");
        // One FileSystem instance serves the whole tree (same URI scheme for all
        // subdirectories); the original created — and leaked — one per directory.
        try (FileSystem hdfs = FileSystem.newInstance(URI.create(hfilePath), conf)) {
            for (FileStatus dirStatus : hdfs.listStatus(new Path(hfilePath))) {
                String dirName = hfilePath + "/" + dirStatus.getPath().getName();
                logger.info(">>>>>>>>>>>>>>>>>>>>>>>hfile目录名: {}", dirName);
                for (FileStatus fileStatus : hdfs.listStatus(dirStatus.getPath())) {
                    if (!fileStatus.isDirectory()) { // only report files
                        logger.info(">>>>>>>>>>>>>>>>>>>>>>>hfile文件名: {}/{}",
                                dirName, fileStatus.getPath().getName());
                    }
                }
            }
        }
    }

    /** Closes each non-null resource in order, logging (not propagating) close failures. */
    private void closeQuietly(AutoCloseable... resources) {
        for (AutoCloseable resource : resources) {
            if (resource != null) {
                try {
                    resource.close();
                } catch (Exception e) {
                    logger.warn("failed to close {}", resource.getClass().getSimpleName(), e);
                }
            }
        }
    }
}
