package com.fwmagic.spark.core.cases

import com.alibaba.fastjson.JSON
import com.fwmagic.spark.streaming.util.DBUtils
import java.sql.{Connection, PreparedStatement}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.slf4j.{Logger, LoggerFactory}
import scala.util.control.NonFatal

object CalCulateMoneyToMysql {

    /**
     * Batch job: reads order JSON lines from HDFS, sums the order amount per
     * category id, joins with an in-memory category dictionary, then writes
     * the resulting (categoryName, totalMoney) pairs to the console, to HDFS
     * and to a MySQL table (`t_cname_money`).
     */
    def main(args: Array[String]): Unit = {

        val logger: Logger = LoggerFactory.getLogger(CalCulateMoneyToMysql.getClass)

        // Access HDFS as the "hadoop" user.
        System.setProperty("HADOOP_USER_NAME", "hadoop")

        val conf: SparkConf = new SparkConf()
                .setAppName(this.getClass.getSimpleName)
                .setMaster("local[*]")

        val sc: SparkContext = new SparkContext(conf)

        // Dictionary data (category id -> category name) used for the join below.
        val rdd1: RDD[(Integer, String)] = sc.parallelize(List((1, "手机"), (2, "家具"), (3, "服装")))

        // Read raw order lines from HDFS.
        val lines: RDD[String] = sc.textFile("hdfs://hd1:9000/tmp/case/data/data.txt")

        // Parse each JSON line into an Order. Malformed lines are logged and
        // yield null; they are filtered out right after.
        val orderRDD: RDD[Order] = lines.map(line => {
            var order: Order = null
            try {
                order = JSON.parseObject(line, classOf[Order])
            } catch {
                // NonFatal: let OOM/interrupts propagate instead of swallowing them.
                case NonFatal(e) =>
                    logger.error("=======>异常数据:" + line, e)
                    println("=======>异常数据:" + line)
            }
            order
        })
        // Drop the lines that failed to parse.
        val orders: RDD[Order] = orderRDD.filter(order => order != null)
        // Build (categoryId, money) pairs.
        val tp: RDD[(Integer, Integer)] = orders.map(order => (order.cid, order.money))

        // Aggregate money per category id.
        val rdd2: RDD[(Integer, Integer)] = tp.reduceByKey(_ + _)

        // Join with the dictionary: (cid, (categoryName, totalMoney)).
        val joined: RDD[(Integer, (String, Integer))] = rdd1.join(rdd2)

        // Keep only (categoryName, totalMoney).
        val res: RDD[(String, Integer)] = joined.map(tp => tp._2)

        // Console output.
        res.collect().foreach(println)

        // Persist to HDFS.
        res.saveAsTextFile("hdfs://hd1:9000/tmp/case/res/cname_money")

        // Persist to MySQL. Open ONE connection/statement per partition and add
        // every row of the partition to a single batch. (Opening them per record,
        // as before, leaked a Connection/PreparedStatement for every row except
        // the last and executed only a one-row batch, losing the rest.)
        res.foreachPartition(iter => {
            // Skip empty partitions: nothing to write, no connection needed.
            if (iter.hasNext) {
                var con: Connection = null
                var ps: PreparedStatement = null
                val sql = "insert into t_cname_money(cname,money) values(?,?)"
                try {
                    con = DBUtils.getConnection()
                    ps = con.prepareStatement(sql)
                    iter.foreach(it => {
                        ps.setString(1, it._1)
                        ps.setInt(2, it._2)
                        ps.addBatch()
                    })
                    // One batched write per partition for efficiency.
                    // Note: for very large partitions, flush every 100/1000 rows
                    // (micro-batches) and once more at the end instead of one
                    // huge batch.
                    ps.executeBatch()
                } catch {
                    case NonFatal(e) =>
                        e.printStackTrace()
                        println(e.getLocalizedMessage)
                } finally {
                    // Always release JDBC resources, even on failure.
                    if (ps != null) ps.close()
                    if (con != null) con.close()
                }
            }
        })

        sc.stop()

    }

}
