package com.ihk.test;

import com.ihk.hadoop.hbase.HbaseTemplate;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.mapreduce.JobUtil;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;


import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.*;
import org.apache.spark.rdd.RDD;
import org.apache.spark.sql.*;
import org.apache.spark.sql.Row;
import org.junit.*;
import scala.Function1;
import scala.Tuple2;
import scala.runtime.BoxedUnit;

//import org.apache.hadoop.hbase.mapreduce.TableInputFormat;

import java.io.IOException;
import java.util.Iterator;
import java.util.List;

/**
 * Spark/HBase connectivity smoke test: reads an HBase table through Spark's
 * newAPIHadoopRDD, registers the rows as a temporary Spark SQL table, and
 * demonstrates several ways of iterating the results.
 *
 * Created by YANFA on 2017/5/4.
 */
public class ConnectionHbase {

    /**
     * Reads the HBase table "test" as an RDD of (rowkey, Result) pairs,
     * registers it as a temporary Spark SQL table named "study", and prints a
     * sample of the rows. Runs locally ("local" master).
     *
     * @param args optional overrides: args[0] = ZooKeeper quorum,
     *             args[1] = ZooKeeper client port, args[2] = znode parent.
     *             Defaults preserve the original hard-coded cluster settings.
     */
    public static void main(String[] args) {
        // Connection settings — overridable from the command line so the job
        // is not hard-wired to a single cluster.
        String quorum = args.length > 0 ? args[0] : "172.16.8.184";
        String port = args.length > 1 ? args[1] : "2181";
        String parent = args.length > 2 ? args[2] : "/hbase";

        SparkConf sparkConf = new SparkConf();
        sparkConf.setAppName("sparkHbase");
        sparkConf.setMaster("local");
        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
        SQLContext sqlContent = new SQLContext(jsc);

        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", quorum);
        conf.set("hbase.zookeeper.property.clientPort", port);
        conf.set("zookeeper.znode.parent", parent);
        conf.set(TableInputFormat.INPUT_TABLE, "test");

        // To narrow the scan (row range / family / column), configure a Scan
        // and serialize it into the job configuration, e.g.:
/*      Scan scan = new Scan();
        scan.setStartRow(Bytes.toBytes("195861-1035177490"));
        scan.setStopRow(Bytes.toBytes("195861-1072173147"));
        scan.addFamily(Bytes.toBytes("info"));
        scan.addColumn(Bytes.toBytes("info"), Bytes.toBytes("levelCode"));
        conf.set(TableInputFormat.SCAN, convertScanToString(scan));*/

        // Read HBase: each record is (row key, full Result for that row).
        JavaPairRDD<ImmutableBytesWritable, Result> hBaseRDD =
                jsc.newAPIHadoopRDD(conf, TableInputFormat.class, ImmutableBytesWritable.class, Result.class);
        // Each action below launches its own job; cache so the HBase scan is
        // not repeated for every job.
        System.out.println("分区数量："+hBaseRDD.getNumPartitions());
        hBaseRDD.cache();

        //-------------register as a temporary table: begin--------------
        JavaRDD<Study> map = hBaseRDD.map((Function<Tuple2<ImmutableBytesWritable, Result>, Study>) tuple -> {
            Result result = tuple._2();
            Study study = new Study();
            // Decode the cf1:name cell value into the bean.
            study.setName(Bytes.toString(result.getValue("cf1".getBytes(), "name".getBytes())));
            return study;
        });
        Encoder<Study> studyEncoder = Encoders.bean(Study.class);
        DataFrame df = sqlContent.createDataFrame(map, Study.class);
        df.registerTempTable("study");
        // BUGFIX: Spark SQL's LIMIT takes a single row count; the MySQL-style
        // "LIMIT 5,20" offset form fails to parse.
        DataFrame dd = sqlContent.sql("select * from study LIMIT 20");
        dd.show();
        // Converting a DataFrame to a typed Dataset only needs as(encoder).
        Dataset<Study> daftest = dd.as(studyEncoder);
        daftest.foreachPartition(new ForeachPartitionFunction<Study>() {
            @Override
            public void call(Iterator<Study> iterator) throws Exception {
                while (iterator.hasNext()) {
                    Study s = iterator.next();
                    System.out.println("s:"+s.getName());
                }
            }
        });
        // Per-element traversal of the typed Dataset.
        daftest.foreach((ForeachFunction<Study>) study -> {
            System.out.println("s1:"+study.getName());
        });
        //-------------register as a temporary table: end--------------

        // Second job: map each Result to a Study carrying its row key.
        JavaPairRDD<ImmutableBytesWritable, Study> rowKeyRdd =
                hBaseRDD.mapValues((Function<Result, Study>) result -> {
                    Study study = new Study();
                    // BUGFIX: getRow() returns byte[]; byte[].toString() yields an
                    // identity-hash string ("[B@..."), not the key. Decode it.
                    study.setName(Bytes.toString(result.getRow()));
                    return study;
                });

        rowKeyRdd.foreach(new VoidFunction<Tuple2<ImmutableBytesWritable, Study>>() {
            @Override
            public void call(Tuple2<ImmutableBytesWritable, Study> tuple) throws Exception {
                Study study = tuple._2();
                // Placeholder: e.g. insert each record into a database here.
            }
        });
        jsc.stop();
    }

    /**
     * Smoke test: creates table "test1" with a single column family via
     * HbaseTemplate. Failures are reported on stderr but do not abort the run.
     */
    @org.junit.Test
    public void getHbaseData() {
        HbaseTemplate hbaseTemplate = new HbaseTemplate();
        try {
            hbaseTemplate.createTable("test1", new String[]{"name"});
        } catch (IOException e) {
            // Best-effort in this manual smoke test; surface the failure only.
            e.printStackTrace();
        }
/*        try {
            Table table=hbaseTemplate.getTable("test");
            Scan s = new Scan();
            ResultScanner r = table.getScanner(s);
            for(Result t :r){
                System.out.println("-------------------:"+hbaseTemplate.getMap(t).toString());
            }

        } catch (IOException e) {
            e.printStackTrace();
        }*/
    }
}


