package spark.hbase

import java.util

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce._
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, KeyValue, TableName}
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.rdd.RDD
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{Accumulator, SparkConf, SparkContext, sql}

import scala.collection.mutable.ListBuffer


/**
  * Creates an HBase table and demonstrates connection management
  * (single-row inserts are done by the sibling objects below).
  */
object CreateTable {
	var connection: Connection = _
	var tableName: TableName = _
	
	/**
	  * Open a connection using the default HBase configuration
	  * (hbase-site.xml on the classpath).
	  */
	def createConnection() = {
		val hConf: Configuration = HBaseConfiguration.create
		connection = ConnectionFactory.createConnection(hConf)
	}
	
	/**
	  * Create the table with "user" and "order" column families if it does
	  * not exist yet, then release the admin handle and the connection.
	  *
	  * @param tableNameString name of the table to create
	  */
	def createTable(tableNameString: String): Unit = {
		val admin: Admin = connection.getAdmin
		// BUG FIX: the table name was hard-coded to "user_info",
		// silently ignoring the tableNameString parameter.
		tableName = TableName.valueOf(tableNameString)
		if (!admin.tableExists(tableName)) {
			// Table descriptor carries the column-family layout.
			val descriptor: HTableDescriptor = new HTableDescriptor(tableName)
			descriptor.addFamily(new HColumnDescriptor("user"))
			descriptor.addFamily(new HColumnDescriptor("order"))
			admin.createTable(descriptor)
		}
		
		admin.close()
		connection.close()
	}
	
	def main(args: Array[String]): Unit = {
		createConnection()
		createTable("user_info")
	}
}
/**
  * Writes one user per Put, opening a brand-new HBase connection for
  * every single record — deliberately inefficient, kept for comparison
  * with the batched variants below.
  */
object SparkPutHBase {
	
	def main(args: Array[String]): Unit = {
		
		val users = List(("zhangsan","beijin"),("lisi","shandong"),("wangwu","henan"),("zhaoliu","shandong"))
		
		val sparkConf = new SparkConf().setAppName("SparkPutHBase").setMaster("local[6]")
		val context = new SparkContext(sparkConf)
		val usersRDD: RDD[(String, String)] = context.parallelize(users)
		
		// One connection per record: correct, but very slow.
		usersRDD.foreach { case (rowKey, name) =>
			val hc: Configuration = HBaseConfiguration.create()
			val conn: Connection = ConnectionFactory.createConnection(hc)
			val put = new Put(Bytes.toBytes(rowKey))
			put.addColumn(Bytes.toBytes("user"), Bytes.toBytes("name"), Bytes.toBytes(name))
			val tbl: HTable = conn.getTable(TableName.valueOf("user_info")).asInstanceOf[HTable]
			tbl.put(put)
			tbl.close()
			conn.close()
		}
	}
}

/**
  * One round-trip per partition: collect all Puts of a partition, then
  * issue a single HTable.put(List[Put]). No HFiles are produced — this
  * still goes through the regular write path.
  */
object SparkPutHBase1 {
	def main(args: Array[String]): Unit = {
		val sparkConf = new SparkConf().setAppName("SparkPutHBase1").setMaster("local[6]")
		val context = new SparkContext(sparkConf)
		val users = List(("zhangsan1","beijin1"),("lisi1","shandong1"),("wangwu1","henan1"),("zhaoliu1","shandong1"))
		val usersRDD: RDD[(String, String)] = context.parallelize(users)
		
		/**
		  * Table.put(List[Put]) — one batched call per partition.
		  */
		usersRDD.foreachPartition { rows =>
			val batch = new util.ArrayList[Put]()
			rows.foreach { case (rowKey, city) =>
				val put = new Put(Bytes.toBytes(rowKey))
				put.addColumn(Bytes.toBytes("user"), Bytes.toBytes("city"), Bytes.toBytes(city))
				batch.add(put)
			}
			val hc: Configuration = HBaseConfiguration.create()
			val conn: Connection = ConnectionFactory.createConnection(hc)
			val tbl: HTable = conn.getTable(TableName.valueOf("user_info")).asInstanceOf[HTable]
			tbl.put(batch)
			tbl.close()
			conn.close()
		}
	}
}

/**
  * Writes through the MapReduce TableOutputFormat path.
  * The table name and output classes are handed to TableOutputFormat via
  * the Configuration; no files are produced — rows go straight into the
  * HBase table. Each RDD element is a tuple (ImmutableBytesWritable, Put).
  */
object SparkPutHBase2 {
	def main(args: Array[String]): Unit = {
		// FIX: the app name was copied from SparkPutHBase1.
		val conf = new SparkConf().setAppName("SparkPutHBase2").setMaster("local[6]")
		val sc = new SparkContext(conf)
		val userList = List(("zhangsan2","beijin2"),("lisi2","shandong2"),("wangwu2","henan2"),("zhaoliu2","shandong2"))
		val userListRDD: RDD[(String, String)] = sc.parallelize(userList)
		/**
		  * Map every record to (rowKey, Put).
		  */
		val outHfile: RDD[(ImmutableBytesWritable, Put)] = userListRDD.map(list => {
			val outKey = new ImmutableBytesWritable()
			val put = new Put(Bytes.toBytes(list._1))
			put.addColumn(Bytes.toBytes("user"), Bytes.toBytes("city"), Bytes.toBytes(list._2))
			// Reuse the Put's row bytes as the output key.
			outKey.set(put.getRow())
			(outKey, put)
		})
		val hbaseConf: Configuration = HBaseConfiguration.create()
		// Target table for TableOutputFormat
		hbaseConf.set(TableOutputFormat.OUTPUT_TABLE,"user_info")
		// MRJobConfig: output format and key/value classes
		hbaseConf.set("mapreduce.job.outputformat.class",classOf[TableOutputFormat[ImmutableBytesWritable]].getName)
		hbaseConf.set("mapreduce.job.output.key.class",classOf[ImmutableBytesWritable].getName)
		hbaseConf.set("mapreduce.job.output.value.class",classOf[Put].getName)
		/**
		  * Which store the data lands in is decided entirely by the Configuration.
		  */
		println("改前分区数"+outHfile.getNumPartitions)
		// repartition shuffles, so the transferred classes must be serializable.
		val repartitioned: RDD[(ImmutableBytesWritable, Put)] = outHfile.repartition(6)
		// FIX: previously saved `outHfile`, leaving the repartitioned RDD unused.
		repartitioned.saveAsNewAPIHadoopDataset(hbaseConf)
	}
}

/**
  * Reads an ORC file via HiveContext, registers a temp view, runs SQL,
  * converts the resulting DataFrame to an RDD and writes it into HBase
  * through TableOutputFormat.
  */
object SparkPutHBase3 {
	def main(args: Array[String]): Unit = {
		val sparkConf = new SparkConf().setAppName("PartitionSparkPutHBase3").setMaster("local[6]")
		val sc = new SparkContext(sparkConf)
		
		// Wrap the SparkContext in a HiveContext to gain SQL support.
		val hivec = new HiveContext(sc)
		val orcPath = "D:\\user\\zhangjian\\input\\orc"
//		val orcPath = "D:\\user\\zhangjian\\input\\appsimilarityjob\\000399_0"
		val dataFrame: DataFrame = hivec.read.orc(orcPath)
		// Temp view only lives for this session.
		dataFrame.createOrReplaceTempView("temp_table")
		val df: DataFrame = hivec.sql("select pkgname, num from (select pkgname, count(1) as num from temp_table group by pkgname having num > 3000) limit 200 ")
		val rdd: RDD[Row] = df.rdd
		println("更改前分区数："+rdd.getNumPartitions)
		/**
		  * repartition always shuffles; it is coalesce(numPartitions, true).
		  */
		val partRDD: RDD[Row] = rdd.repartition(200)
		println("更改后分区数："+partRDD.getNumPartitions)
		// FIX: removed the unused coalesceRDD local and the dead `puts`
		// ArrayList that was filled but never written anywhere.
		
		// Accumulator used to verify exactly 200 rows are processed.
		val acc: Accumulator[Int] = sc.accumulator(0)
		
		val outHfile: RDD[(ImmutableBytesWritable, Put)] = partRDD.mapPartitions(it => {
			val outList = new ListBuffer[(ImmutableBytesWritable, Put)]()
			for (next <- it) {
				// FIX: count one per ROW (previously incremented once per
				// partition, so it could not verify the row count).
				acc.add(1)
				// A fresh key per row: tuples hold references, so reusing one
				// mutable instance would make every pair identical.
				val outKey = new ImmutableBytesWritable()
				val pkgname: String = next.getString(0)
				val num: Long = next.getLong(1)
				// NOTE(review): constant row key — every record lands in the
				// single row "test_family", one column per pkgname. Confirm
				// this is intentional.
				val put = new Put(Bytes.toBytes("test_family"))
				put.addColumn(Bytes.toBytes("order"),Bytes.toBytes(pkgname),Bytes.toBytes(num))
				outKey.set(put.getRow())
				outList += ((outKey, put))
			}
			outList.toIterator
		})
		
		val hbaseConf: Configuration = HBaseConfiguration.create()
		// Target table for TableOutputFormat
		hbaseConf.set(TableOutputFormat.OUTPUT_TABLE,"user_info")
		// MRJobConfig: output format and key/value classes
		hbaseConf.set("mapreduce.job.outputformat.class",classOf[TableOutputFormat[ImmutableBytesWritable]].getName)
		hbaseConf.set("mapreduce.job.output.key.class",classOf[ImmutableBytesWritable].getName)
		hbaseConf.set("mapreduce.job.output.value.class",classOf[Put].getName)
		/**
		  * Which store the data lands in is decided entirely by the Configuration.
		  */
		// FIX: previously built an unused repartition(6) of outHfile.
		outHfile.saveAsNewAPIHadoopDataset(hbaseConf)
		// Accumulator is only valid after the action above has run.
		println("rows written: " + acc.value)
	}
}

/**
  * Reads an ORC file, writes HFiles with HFileOutputFormat2, then loads
  * them into HBase with LoadIncrementalHFiles.doBulkLoad.
  */
class HbaseBlukLoad
object HbaseBlukLoad{
	def main(args: Array[String]): Unit = {
		val sparkConf = new SparkConf().setAppName("HbaseBlukLoad")
		
		// Register classes for Kryo serialization (list currently empty).
		sparkConf.registerKryoClasses(Array[Class[_]]())
		
		val sc = new SparkContext(sparkConf)
		val hivec = new HiveContext(sc)
		
		val orcPath:String = "/user/zhangjian/input/orc"
		/**
		  * Read the ORC file; every element is a Row, read via getString(i).
		  */
		val df: DataFrame = hivec.read.orc(orcPath)
		// FIX: removed a dead `df.select()` call whose result was discarded.
		val fdLimit: Dataset[Row] = df.limit(10000)
		val orcRDD: RDD[Row] = fdLimit.rdd
		/**
		  * Both the input and the output of mapPartitions are iterators.
		  */
		val hfileRdd: RDD[(ImmutableBytesWritable, KeyValue)] = orcRDD.mapPartitions(it => {
			val list = new ListBuffer[(ImmutableBytesWritable, KeyValue)]()
			for (r <- it) {
				// column index 1: package name (becomes the row key)
				val pkgname: String = r.getString(1)
				// column index 4: country
				val country: String = r.getString(4)
				val outKey = new ImmutableBytesWritable()
				outKey.set(Bytes.toBytes(pkgname))
				val outValue = new KeyValue(outKey.get(), Bytes.toBytes("order"), Bytes.toBytes("country"), Bytes.toBytes(country))
				list += ((outKey, outValue))
			}
			list.toIterator
		}).sortByKey() // HFiles must be sorted; ImmutableBytesWritable implements WritableComparable
		// NOTE(review): args(1) — confirm callers pass the task id as the SECOND argument.
		val outPath: String = "/user/zhangjian/output/task_"+args(1)
		import spark.util.MyPredef.delete
		outPath.delete()
		val hbaseConf: Configuration = HBaseConfiguration.create()
		hbaseConf.set(TableOutputFormat.OUTPUT_TABLE,"user_info")
		val job: Job = Job.getInstance(hbaseConf)
		job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
		job.setMapOutputValueClass(classOf[KeyValue])
		val connection: Connection = ConnectionFactory.createConnection(hbaseConf)
		val tableName: TableName = TableName.valueOf("user_info")
		val table: HTable = connection.getTable(tableName).asInstanceOf[HTable]
		// Enrich the job configuration for an incremental (bulk) load.
		HFileOutputFormat2.configureIncrementalLoad(job,table.getTableDescriptor,table.getRegionLocator)
		
		hfileRdd.saveAsNewAPIHadoopFile(outPath,classOf[ImmutableBytesWritable],classOf[KeyValue],classOf[HFileOutputFormat2],job.getConfiguration)
		
		val admin: Admin = connection.getAdmin
		try {
			val load = new LoadIncrementalHFiles(hbaseConf)
			load.doBulkLoad(new Path(outPath),admin,table,table.getRegionLocator)
		} finally {
			// FIX: release HBase resources (previously leaked on exit).
			admin.close()
			table.close()
			connection.close()
		}
	}
}







/**
  * Reads the HBase table and prints every row.
  * Only TableInputFormat.INPUT_TABLE is set — no Scan is configured,
  * so the whole table is read.
  */
object SparkPutHBase4 {
	def main(args: Array[String]): Unit = {
		val sparkConf = new SparkConf().setAppName("SparkScanHbase").setMaster("local[6]")
		val sc = new SparkContext(sparkConf)
		val hbaseConf: Configuration = HBaseConfiguration.create()
		hbaseConf.set(TableInputFormat.INPUT_TABLE,"user_info")
		// Without startRow/stopRow the scan covers every row.
		val hfileRDD: RDD[(ImmutableBytesWritable, Result)] =
			sc.newAPIHadoopRDD(hbaseConf, classOf[TableInputFormat], classOf[ImmutableBytesWritable], classOf[Result])
		println("读取的partitions:"+hfileRDD.getNumPartitions)
		hfileRDD.foreach { case (keyWritable, result) =>
			val rowKey = Bytes.toString(keyWritable.get())
			val num = Bytes.toString(result.getValue(Bytes.toBytes("user"), Bytes.toBytes("name")))
			println(s"rowKey:${rowKey},num:${num}")
		}
	}
}

/**
  * Reads HBase with an explicit Scan serialized into the configuration:
  * hbaseConf.set(TableInputFormat.SCAN, TableMapReduceUtil.convertScanToString(scan)),
  * then loads the table through newAPIHadoopRDD.
  */
object SparkScanHBase5 {
	def main(args: Array[String]): Unit = {
		val sparkConf = new SparkConf().setAppName("SparkScanHBase5").setMaster("local[6]")
		val sc = new SparkContext(sparkConf)
		val hbaseConf: Configuration = HBaseConfiguration.create()
		hbaseConf.set(TableInputFormat.INPUT_TABLE,"user_info")
		
		val scan = new Scan()
		scan.addFamily(Bytes.toBytes("user"))
		// Do not pollute the server-side block cache with this scan.
		scan.setCacheBlocks(false)
		// Maximum number of values (columns) returned per call.
		scan.setBatch(1000)
		scan.setStartRow(Bytes.toBytes("zhangsan"))
		scan.setStopRow(Bytes.toBytes("zhangsan2"))
		hbaseConf.set(TableInputFormat.SCAN,TableMapReduceUtil.convertScanToString(scan))
		/**
		  * The configuration now carries both the table name and the Scan;
		  * the resulting pairs are (ImmutableBytesWritable, Result).
		  */
		val hfileRDD: RDD[(ImmutableBytesWritable, Result)] =
			sc.newAPIHadoopRDD(hbaseConf, classOf[TableInputFormat], classOf[ImmutableBytesWritable], classOf[Result])
		println("扫描到数据的条数"+hfileRDD.count())
		println("读取的partitions:"+hfileRDD.getNumPartitions)
		hfileRDD.foreach { case (keyWritable, result) =>
			val rowKey = Bytes.toString(keyWritable.get())
			val num = Bytes.toString(result.getValue(Bytes.toBytes("user"), Bytes.toBytes("name")))
			println(s"rowKey:${rowKey},num:${num}")
		}
	}
}



/**
  * Joins the ORC data with per-country aggregates and dumps the joined
  * rows to a text file.
  */
object SparkPutHBase10 {
	def main(args: Array[String]): Unit = {
		val sparkConf = new SparkConf().setAppName("考试").setMaster("local[6]")
		val sc = new SparkContext(sparkConf)
		val hivec = new HiveContext(sc)
		// Read the ORC input and expose it as a session-scoped temp view.
		val source: DataFrame = hivec.read.orc("D:\\user\\zhangjian\\input\\orc")
		source.createOrReplaceTempView("temp_table")
		val joined: DataFrame = hivec.sql("select a.aid,b.country from (select aid,country from temp_table) a inner join (select distinct country,count(1) from temp_table group by country) b on a.country = b.country")
		joined.rdd.saveAsTextFile("D:\\user\\zhangjian\\output\\test\\kaoshi")
	}
}
