package spark.OrcReadAndWrite.Kryo

//import com.esotericsoftware.kryo.Kryo
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.ql.io.orc.{OrcNewInputFormat, OrcNewOutputFormat, OrcSerde, OrcStruct}
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector
import org.apache.hadoop.io.{NullWritable, Writable}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.serializer.{KryoRegistrator, KryoSerializer}
import org.apache.spark.{Accumulator, SparkConf, SparkContext}

import scala.collection.mutable
import scala.io.Source
//import spark.util.ORCUtil

/**
  * Broadcast variables must be serializable before they can be shipped to the
  * worker nodes — which is why the job below registers its classes with Kryo.
  * NOTE(review): this object is an empty placeholder (`App` body is blank).
  */
object TT extends App {

}
/**
  * Spark job: map-side join of ORC records against a small country dictionary.
  *
  * Reads ORC files, looks up each record's country code in a broadcast map,
  * keeps the records that resolve to a country name, prints a sample, and
  * writes the joined (code, name) pairs back out as ORC.
  */
object Kryo{
	
	def main(args: Array[String]): Unit = {
		val conf = new SparkConf().setMaster("local[6]").setAppName("Kryo")
		// Register every class Kryo will serialize; registrationRequired=true makes
		// Spark fail fast on any unregistered class instead of silently falling
		// back to writing fully-qualified class names with each object.
		conf.registerKryoClasses(Array[Class[_]](classOf[ORCUtil],classOf[StructObjectInspector],classOf[OrcStruct]))
		conf.set("spark.kryo.registrationRequired","true")
		
		val sc = new SparkContext(conf)
		val hadoopConf = new Configuration
		
		// Load the small dimension file (countryCode TAB countryName) into a map
		// that is then broadcast to all executors for the map-side join.
		import scala.collection.mutable.Map
		val mapCach: Map[String, String] = Map[String,String]()
		val dictSource = Source.fromFile("D:\\user\\zhangjian\\util\\country_dict.dat")
		try {
			for (lines <- dictSource.getLines()) {
				val line: Array[String] = lines.split("\t")
				// Guard against malformed lines: the original indexed line(1)
				// unconditionally and would throw ArrayIndexOutOfBoundsException.
				if (line.length >= 2) mapCach.put(line(0), line(1))
			}
		} finally {
			dictSource.close() // the original leaked this file handle
		}
		val mapCachBroad: Broadcast[Map[String, String]] = sc.broadcast(mapCach)

		// NOTE(review): ORCUtil is mutable and, once broadcast, shared by all tasks
		// on an executor; setORCtype/setRecord below mutate it inside map(). This is
		// only safe if tasks never run this stage concurrently on one executor —
		// TODO confirm, otherwise build one ORCUtil per partition (mapPartitions).
		val orcUtilRead = new ORCUtil()
		val orcUtilReadBroad: Broadcast[ORCUtil] = sc.broadcast(orcUtilRead)
		
		// Accumulators: count records that did / did not resolve a country name.
		val hasCountry: Accumulator[Int] = sc.accumulator(0)
		val notHasCountry: Accumulator[Int] = sc.accumulator(0)
		
		val orcInPath:String = "D:\\user\\zhangjian\\input\\orc"
		val orcRDD: RDD[(NullWritable, OrcStruct)] = sc.newAPIHadoopFile(orcInPath,classOf[OrcNewInputFormat],classOf[NullWritable],classOf[OrcStruct],hadoopConf)
		val filterRDD: RDD[String] = orcRDD.map(lines => {
			val smallTable: Map[String, String] = mapCachBroad.value
			val orcUtilBroad: ORCUtil = orcUtilReadBroad.value
			orcUtilBroad.setORCtype(ORCFormat.INS_STATUS)
			orcUtilBroad.setRecord(lines._2)
			val countryCode: String = orcUtilBroad.getData("country")
			val countryName: String = smallTable.getOrElse(countryCode,"")
			if (!"".equals(countryName)) {
				hasCountry.add(1)
				s"${countryCode}\t${countryName}"
			}else{
				notHasCountry.add(1)
				"" // sentinel for "no match"; dropped by the filter below
			}
		}).filter(!"".equals(_))
		
		// foreach, not map: we only want the printing side effect; the original's
		// .map(println) materialized a pointless Array[Unit].
		filterRDD.take(200).foreach(println)
		
		// Re-serialize the joined records as ORC. A fresh ORCUtil is created per
		// record here because the writer accumulates attributes internally.
		val orcOurRDD: RDD[(NullWritable, Writable)] = filterRDD.map(lines => {
			val oRCUtilWrite = new ORCUtil()
			oRCUtilWrite.setORCWriteType("struct<countrycode:string,countryname:string>")
			val strings: Array[String] = lines.split("\t")
			oRCUtilWrite.addAttr(strings(0)).addAttr(strings(1))
			(NullWritable.get(), oRCUtilWrite.serialize())
		})
		
		val outPath:String = "D:\\user\\zhangjian\\output\\spark"
		import spark.util.MyPredef.delete
		delete(outPath) // the output directory must not exist before saveAsNewAPIHadoopFile
		orcOurRDD.saveAsNewAPIHadoopFile(outPath,classOf[NullWritable],classOf[Writable],classOf[OrcNewOutputFormat],hadoopConf)
		
		// Accumulator values are only reliable on the driver after an action has run.
		println(notHasCountry)
		println(hasCountry)
		
		sc.stop() // release local Spark resources; the original never stopped the context
	}
}
