package spark.OrcReadAndWrite

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.ql.io.orc.{OrcNewInputFormat, OrcNewOutputFormat, OrcStruct}
import org.apache.hadoop.io.{NullWritable, Writable}
import org.apache.orc.OrcProto.CompressionKind
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.{Accumulator, SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.serializer.KryoSerializer
import spark.OrcReadAndWrite.Kryo.ORCUtil
import spark.util.ORCFormat

import scala.collection.mutable
import scala.collection.mutable.Map
import scala.io.Source

/**
  * Spark batch job: reads ORC records, enriches each record's country code
  * with a country name looked up from a local dictionary file (shipped to
  * executors as a broadcast variable), and writes the matched rows back out
  * as ORC. Match / miss totals are tracked with accumulators and printed.
  */
object OrcReadAndWrite {
	def main(args: Array[String]): Unit = {
		val sparkConf = new SparkConf().setAppName("OrcReadAndWrite").setMaster("local[6]")
		val sc = new SparkContext(sparkConf)
		val hadoopConf = new Configuration
		
		// Load the small dictionary file into an immutable map.
		// The Source is closed in a finally block to avoid a file-handle leak
		// (the original never closed it). Lines without a tab-separated
		// value are skipped instead of throwing ArrayIndexOutOfBoundsException.
		val source = Source.fromFile("D:\\user\\zhangjian\\util\\country_dict.dat")
		val dictionary: scala.collection.immutable.Map[String, String] =
			try {
				source.getLines()
					.map(_.split("\t"))
					.collect { case Array(code, name, _*) => code -> name }
					.toMap
			} finally {
				source.close()
			}
		// Broadcast the (immutable, therefore safely shared) lookup table.
		val countryBroadcast: Broadcast[scala.collection.immutable.Map[String, String]] =
			sc.broadcast(dictionary)
		// Modern accumulator API: sc.accumulator / Accumulator[Int] is
		// deprecated since Spark 2.0 in favor of sc.longAccumulator.
		val hasCountry = sc.longAccumulator("hasCountry")
		val notHasCountry = sc.longAccumulator("notHasCountry")
		
		// Input ORC files.
		val orcPath: String = "D:\\user\\zhangjian\\input\\orc"
		val orcFile: RDD[(NullWritable, OrcStruct)] =
			sc.newAPIHadoopFile(orcPath, classOf[OrcNewInputFormat], classOf[NullWritable], classOf[OrcStruct], hadoopConf)
		
		// Decode each ORC record and resolve the country name; an empty
		// string marks a miss and is filtered out below.
		val filterRDD: RDD[String] = orcFile.map(f => {
			val orcRead = new ORCUtil
			orcRead.setORCtype(ORCFormat.INS_STATUS)
			orcRead.setRecord(f._2)
			val countryCode: String = orcRead.getData("country")
			val countryName: String = countryBroadcast.value.getOrElse(countryCode, "")
			if (countryName.nonEmpty) {
				hasCountry.add(1)
				s"${countryCode}\t$countryName"
			} else {
				notHasCountry.add(1)
				""
			}
		}).filter(_.nonEmpty)
		// Cache so the map above (and its accumulator updates) runs at most
		// once per partition. Without this, take(200) plus the save action
		// recompute partitions and double-count the accumulators.
		filterRDD.cache()
		
		filterRDD.take(200).foreach(println)
//		hadoopConf.set("orc.compress",CompressionKind.SNAPPY.name())
//		hadoopConf.set("orc.create.index","true")
		
		// Re-serialize matched rows as ORC structs. Spark's
		// saveAsNewAPIHadoopFile expects (key, value) pairs, so the key is
		// NullWritable (MapReduce-style).
		val orcOutRDD: RDD[(NullWritable, Writable)] = filterRDD.map(f => {
			val orcWrite = new ORCUtil
			orcWrite.setORCWriteType("struct<country:string,countryname:string>")
			val fields: Array[String] = f.split("\t")
			orcWrite.addAttr(fields(0)).addAttr(fields(1))
			(NullWritable.get(), orcWrite.serialize())
		})
		
		// Delete any previous output directory before writing.
		val outPath: String = "D:\\user\\zhangjian\\output\\spark"
		import spark.util.MyPredef.delete
		delete(outPath)
	
		orcOutRDD.saveAsNewAPIHadoopFile(outPath, classOf[NullWritable], classOf[Writable], classOf[OrcNewOutputFormat], hadoopConf)
		
		println(hasCountry.value)
		println(notHasCountry.value)
		
		// Release Spark resources cleanly.
		sc.stop()
	}
}
