package cn.doitedu.hbase.demo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;

import java.net.URI;
import java.util.concurrent.CompletableFuture;

/**
 * Environment smoke test: verifies connectivity to HDFS and HBase.
 *
 * <p>Checks, in order: (1) HDFS path existence against spark01:8020,
 * (2) HBase table existence via Admin, (3) timed synchronous single-row
 * puts into table {@code t1}, (4) async connection creation/teardown.
 *
 * <p>All cluster handles are closed via try-with-resources (the original
 * leaked the FileSystem and, on exceptions, the HBase handles).
 */
public class EnvTest {

    public static void main(String[] args) throws Exception {

        // Raise client-side logging to DEBUG to aid troubleshooting this env check.
        Logger.getLogger("org").setLevel(Level.DEBUG);

        // ---- HDFS connectivity check ------------------------------------
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://spark01:8020/");

        // try-with-resources: the original never closed the FileSystem handle.
        try (FileSystem fs = FileSystem.get(new URI("hdfs://spark01:8020"), conf, "root")) {
            boolean exists = fs.exists(new Path("/abc"));
            System.out.println(exists);
        }

        // ---- HBase connectivity + synchronous write timing --------------
        Configuration cf = HBaseConfiguration.create();
        cf.set("hbase.zookeeper.quorum", "spark01,spark02,spark03");

        try (Connection conn = ConnectionFactory.createConnection(cf)) {

            try (Admin admin = conn.getAdmin()) {
                boolean ex = admin.tableExists(TableName.valueOf("t1"));
                System.out.println(ex);
            }

            /**
             * Synchronous puts: 10k individual round trips, wall-clock timed.
             */
            try (Table t1 = conn.getTable(TableName.valueOf("t1"))) {
                long start = System.currentTimeMillis();
                for (int i = 0; i < 10000; i++) {
                    // Use Bytes.toBytes(..) consistently; String.getBytes() depends on
                    // the platform default charset (pre-Java 18) and mixed styles here.
                    t1.put(new Put(Bytes.toBytes(i))
                            .addColumn(Bytes.toBytes("f"), Bytes.toBytes("v"),
                                    Bytes.toBytes(String.valueOf(i))));
                }
                long end = System.currentTimeMillis();
                System.out.println((end - start));
            }
        }

        // ---- Async client check -----------------------------------------
        // BUG FIX: the original called createAsyncConnection() with no Configuration,
        // so it ignored the zookeeper quorum configured above, and it neither joined
        // the returned CompletableFuture nor closed the connection. Pass the config,
        // wait for the future, and close via try-with-resources.
        CompletableFuture<AsyncConnection> asc = ConnectionFactory.createAsyncConnection(cf);
        try (AsyncConnection asyncConn = asc.get()) {
            System.out.println(asyncConn.isClosed());
        }
    }
}
