package com.itbaizhan.hbase2hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.io.*;
import java.util.ArrayList;
import java.util.List;

/**
 * @Author: hwh
 * @Date: 2023-02-28 16:23
 * @Description: com.itbaizhan.hbase2hdfs
 * @version: 1.0
 */
public class InsertSentence {
    // Connection to the HBase cluster; created in before(), released in close()
    private Connection connection;
    // DML handle for the HBase "sentence" table
    private Table table;

    /**
     * Opens the HBase connection and obtains the Table handle before each test.
     *
     * @throws Exception if connecting to the ZooKeeper quorum or fetching the table fails
     */
    @Before
    public void before() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ZooKeeper quorum HBase uses for cluster discovery
        conf.set("hbase.zookeeper.quorum", "node02,node03,node04");
        connection = ConnectionFactory.createConnection(conf);
        table = connection.getTable(TableName.valueOf("sentence"));
    }

    /**
     * Releases the table and the connection after each test.
     * try/finally guarantees the connection is closed even when closing
     * the table throws (the original sequential close would leak it).
     *
     * @throws IOException if either close fails
     */
    @After
    public void close() throws IOException {
        try {
            if (table != null) {
                table.close();
            }
        } finally {
            if (connection != null) {
                connection.close();
            }
        }
    }

    /**
     * Reads ./data/hello.txt line by line and writes each line as one row
     * into the "sentence" table (row key = 1-based line number, column cf:line).
     * One RPC per line — see {@link #insertData2()} for the batched version.
     *
     * @throws Exception if the file cannot be read or the put fails
     */
    @Test
    public void insertData() throws Exception {
        // try-with-resources: the reader is closed even if a put throws mid-loop
        try (BufferedReader reader = new BufferedReader(new FileReader("./data/hello.txt"))) {
            String line;
            int rowKey = 1; // sequential int row key, 1-based
            while ((line = reader.readLine()) != null) {
                Put put = new Put(Bytes.toBytes(rowKey));
                // Bytes.toBytes encodes as UTF-8 explicitly; String.getBytes()
                // would depend on the platform default charset
                put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("line"), Bytes.toBytes(line));
                table.put(put);
                rowKey++;
            }
        }
    }

    /**
     * Optimized insert: buffers Puts and flushes them to HBase in batches
     * of 1000 rows, then flushes the remainder after the loop.
     *
     * @throws IOException if the file cannot be read or a batch put fails
     */
    @Test
    public void insertData2() throws IOException {
        // BUGFIX: removed a stray "." that was concatenated after user.dir —
        // it produced an invalid segment like "project./data" on Unix
        // (Windows only tolerated it by stripping the trailing dot).
        String path = System.getProperty("user.dir")
                + File.separator + "data" + File.separator + "hello.txt";
        // try-with-resources: the reader is closed even if a put throws mid-loop
        try (BufferedReader reader = new BufferedReader(new FileReader(path))) {
            String line;
            int rowKey = 1; // sequential int row key, 1-based
            List<Put> batch = new ArrayList<>(1000);
            while ((line = reader.readLine()) != null) {
                Put put = new Put(Bytes.toBytes(rowKey));
                put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("line"), Bytes.toBytes(line));
                batch.add(put);
                // flush once per 1000 rows (checked before increment, so the
                // first flush carries exactly rows 1..1000)
                if (rowKey % 1000 == 0) {
                    table.put(batch);
                    batch.clear();
                }
                rowKey++;
            }
            // flush whatever is left over after the last full batch
            if (!batch.isEmpty()) {
                table.put(batch);
            }
        }
    }

    /**
     * Debug aid: prints the working directory and the platform file
     * separator used to build the path in {@link #insertData2()}.
     */
    public static void main(String[] args) {
        System.out.println(System.getProperty("user.dir"));
        System.out.println(File.separator);
    }
}
