package cn.wangjie.spark.hbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Runs a word-count over a local text file and saves the result RDD
 * into an HBase table using the new Hadoop OutputFormat API.
 *
 * Target table design: htb_wordcount
 *   ROWKEY:       word
 *   ColumnFamily: info
 *   Column:       count
 */
object SparkWriteHBase {
	
	def main(args: Array[String]): Unit = {
		// Build the SparkContext instance.
		val sc: SparkContext = {
			// 1.a Create a SparkConf and set application properties: app name and run mode.
			val sparkConf = new SparkConf()
				.setAppName(this.getClass.getSimpleName.stripSuffix("$"))
				.setMaster("local[2]")
			// 1.b Get (or create) the context from the conf.
			SparkContext.getOrCreate(sparkConf)
		}
		
		// Read raw data from the local file system.
		val inputRDD: RDD[String] = sc.textFile("datas/wordcount/wordcount.data")
		
		// Word-count pipeline.
		val resultRDD: RDD[(String, Int)] = inputRDD
			// Drop null and blank lines before splitting.
			.filter(line => null != line && line.trim.nonEmpty)
			// Split each line into words on runs of whitespace.
			.flatMap(_.trim.split("\\s+"))
			// Emit (word, 1) for each occurrence.
			// (Plain map — the original mapPartitions wrapper added nothing.)
			.map(word => (word, 1))
			// Aggregate the counts per word.
			.reduceByKey(_ + _)
		
		// Convert the result RDD into the KeyValue shape required by TableOutputFormat:
		// Key -> ImmutableBytesWritable, Value -> Put.
		val putsRDD: RDD[(ImmutableBytesWritable, Put)] = resultRDD.map { case (word, count) =>
			// Row key is the word itself; encode once and reuse for key and Put.
			val rowKeyBytes = Bytes.toBytes(word)
			val rowKey = new ImmutableBytesWritable(rowKeyBytes)
			// One Put per word: info:count = count, stored as a string
			// (matches the original on-disk layout — do not switch to binary int).
			val put = new Put(rowKeyBytes)
			put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("count"), Bytes.toBytes(count.toString))
			rowKey -> put
		}
		
		// Writing to HBase needs the ZooKeeper connection info and the target table name.
		val conf: Configuration = HBaseConfiguration.create()
		// ZooKeeper quorum used by the HBase client.
		conf.set("hbase.zookeeper.quorum", "node1.itcast.cn")
		conf.set("hbase.zookeeper.property.clientPort", "2181")
		conf.set("zookeeper.znode.parent", "/hbase")
		// Target table name.
		conf.set(TableOutputFormat.OUTPUT_TABLE, "htb_wordcount")
		
		// Save via the new Hadoop OutputFormat API. TableOutputFormat ignores the
		// path argument; it is only required by the saveAsNewAPIHadoopFile signature.
		putsRDD.saveAsNewAPIHadoopFile(
			"datas/spark/hbase-write-001",
			classOf[ImmutableBytesWritable],
			classOf[Put],
			classOf[TableOutputFormat[ImmutableBytesWritable]],
			conf
		)
		
		// Application done — release resources.
		sc.stop()
	}
	
}
