package cn.sheep.dolphin.report

import cn.sheep.dolphin.common.{DolphinAppComm, DolphinKpi, Jedispools}
import org.apache.commons.lang.StringUtils
import org.apache.spark.sql.SQLContext
import scalikejdbc._

/**
  * author: old sheep
  * QQ: 64341393 
  * Created 2018/12/1
  */
/**
  * Media KPI analysis job: reads request logs from parquet, resolves missing
  * app names via a Redis dictionary (hash "appdict"), aggregates KPI vectors
  * per (day, appName), and persists the results into MySQL table
  * r_app_analysis through scalikejdbc.
  */
object AppAnalysisRedis {

	def main(args: Array[String]): Unit = {

		// Require exactly one argument: the parquet input path.
		if (args.length != 1) {
			println(
				"""
				  |Usage: cn.sheep.dolphin.report.AppAnalysisRedis
				  |	<parquetInputPath>
				""".stripMargin)
			sys.exit(-1)
		}

		val Array(parquetInputPath) = args
		val sc = DolphinAppComm.createSparkContext("媒体指标分析")

		val sQLContext = new SQLContext(sc)
		// Read the parquet source file.
		val dataFrame = sQLContext.read.parquet(parquetInputPath)
		/*.filter("appid!=null or appid!='' or appname!=null")*/


		dataFrame.mapPartitions(iter => {
			// One Jedis connection per partition to avoid per-row connection churn.
			val jedis = Jedispools.getJedis()
			try {
				// BUGFIX: iter.map is lazy — the original closed the Jedis connection
				// before Spark consumed the iterator, so jedis.hget ran on a closed
				// client. Materialize the partition first, then release the connection.
				iter.map(row => {
					val day = row.getAs[String]("requestdate").substring(0, 10)

					var appName = row.getAs[String]("appname")
					if (StringUtils.isEmpty(appName)) {
						// appName is missing: resolve it from the app dictionary by appid.
						val appId = row.getAs[String]("appid")
						if (StringUtils.isNotEmpty(appId)) { // look up the name in redis
							val appNameRedis = jedis.hget("appdict", appId)
							if (StringUtils.isNotEmpty(appNameRedis)) {
								appName = appNameRedis
							} else appName = appId // dictionary miss: fall back to the raw id
						} else appName = "未知" // neither name nor id available
					}
					((day, appName), DolphinKpi.caculateKpi(row))
				}).toList.iterator
			} finally {
				// Always return the connection to the pool, even if a row fails.
				jedis.close()
			}
		})
			// Element-wise sum of KPI vectors sharing the same (day, appName) key.
			.reduceByKey((list1, list2) => list1 zip list2 map (tp => tp._1 + tp._2))
			.foreachPartition(iter => {

				// Persist each partition's aggregates in a single local transaction:
				// 2 key columns + 9 KPI columns per row.
				DB.localTx { implicit session =>
					val batchParams = iter.map(tp => Seq(tp._1._1, tp._1._2,
						tp._2.head,
						tp._2(1),
						tp._2(2),
						tp._2(3),
						tp._2(4),
						tp._2(5),
						tp._2(6),
						tp._2(7),
						tp._2(8)
					)).toSeq
					SQL("insert into r_app_analysis values(?,?,?,?,?,?,?,?,?,?,?)").batch(batchParams: _*).apply()
				}
			})


		sc.stop()

	}

}
