package cn.doitedu.day04.demo

import java.sql.{DriverManager, PreparedStatement, ResultSet}

import cn.doitedu.day01.utils.SparkUtil
import org.apache.spark.rdd.{JdbcRDD, RDD}

/**
 * @Date 22.4.1
 * @Created by HANGGE
 * @Description
 */
object Demo02_订单02 {
  /**
   * Order log data (in a file), one order per line:
   *   orderId, category, amount, productId
   * 1001,办公用品,900,1
   * 1002,生活用品,88,2
   * 1003,服装,699,3
   * 1004,办公用品,800,4
   * 1005,补品,9.9,6
   * 1006,服装,899,5
   * 1007,补品,99.88,7
   *
   * Product reference data lives in MySQL:
   * create table tb_goods(
   *   pid int,
   *   name varchar(50),
   *   describe varchar(50)
   * );
   * insert into tb_goods values
   *   (1,"cherry机械键盘","just so so"),
   *   (2,"雕牌卫生纸","厚而不硬"),
   *   (3,"梦之蓝男装-格子衬衫","帅无边"),
   *   (4,"奥妙打印机","你猜不到的结果"),
   *   (5,"劲霸男装-短裤",""),
   *   (6,"九全大补丸","就差那么一点点"),
   *   (7,"多易牌黑枸杞","学大数, 不费劲");
   *
   * Tasks:
   * 1) total amount per product category
   * 2) enrich each order with the MySQL product name and description,
   *    then store the result on HDFS. Demonstrated techniques:
   *      - map operator (one connection per record — too expensive, shown commented out)
   *      - mapPartitions operator (one connection per partition)
   *      - building an RDD from MySQL (JdbcRDD) and joining
   *      - broadcast variable
   */
  def main(args: Array[String]): Unit = {

    val sc = SparkUtil.getSc
    // Load the order log
    val data = sc.textFile("data/order.txt")
    // Parse each line into (orderId, category, amount, productId)
    val rdd1 = data.map(line => {
      val arr = line.split(",")
      // e.g. 1005,补品,9.9,6
      (arr(0), arr(1), arr(2).toDouble, arr(3).toInt)
    })

    // Join order records with product data.
    // Naive idea: for every order, query MySQL by pid and stitch the result together.

    // Executed once PER RECORD — one JDBC connection per order; far too expensive.
    // Kept for illustration; the mapPartitions variant below is the optimization.
    /* rdd1.map(tp=>{
      val pid = tp._4
      pid
      // query MySQL and stitch the result — one connection per record; optimize with mapPartitions
    })*/

    /**
     * Approach 1: look up each order's pid in MySQL, one connection per partition.
     */
    val rdd2 = rdd1.mapPartitions(iters => {
      // One connection + one reusable prepared statement per partition
      val conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/doit30?characterEncoding=UTF-8", "root", "root")
      val ps: PreparedStatement = conn.prepareStatement("select * from tb_goods where pid = ?")
      // Materialize the partition BEFORE closing the JDBC resources:
      // iterators returned by mapPartitions are consumed lazily, so closing
      // the connection first would break the downstream traversal.
      // flatMap drops orders whose pid has no matching product row instead of
      // emitting null tuples (the original returned null for unmatched pids).
      val enriched = iters.flatMap(tp => { // one lookup per order
        val pid = tp._4
        ps.setInt(1, pid)
        val resultSet = ps.executeQuery()
        val row =
          if (resultSet.next()) {
            val name = resultSet.getString(2)
            val des = resultSet.getString(3)
            Some((tp._1, tp._2, tp._3, tp._4, name, des))
          } else {
            None
          }
        resultSet.close()
        row
      }).toList
      // Release JDBC resources now that the partition is fully processed
      ps.close()
      conn.close()
      enriched.iterator
    })
    println("==========================DOIT30=====================================")

    /**
     * Approach 2:
     *   order data        -> RDD1 keyed by pid
     *   MySQL table rows  -> RDD2 (JdbcRDD) keyed by pid
     *   RDD1 join RDD2 on the key
     */
    val orderRDD = data.map(line => {
      val arr = line.split(",")
      (arr(3).toInt, line)
    })

    /**
     * JdbcRDD constructor parameters:
     *   sc: SparkContext,
     *   getConnection: () => Connection,
     *   sql: String,                 // must contain exactly two '?' bound to lowerBound/upperBound
     *   lowerBound: Long,
     *   upperBound: Long,
     *   numPartitions: Int,
     *   mapRow: (ResultSet) => T = JdbcRDD.resultSetToObjectArray _
     */
    // Product RDD built directly from the MySQL table
    val goodsRDD = new JdbcRDD[(Int, String)](
      sc,
      () => DriverManager.getConnection("jdbc:mysql://localhost:3306/doit30?characterEncoding=UTF-8", "root", "root"),
      "select * from tb_goods where ? <= pid and pid <=?",
      1,    // lowerBound: smallest pid
      100,  // upperBound: largest pid
      1,    // numPartitions: a single partition queries the whole pid range
      (rs: ResultSet) => {
        val pid = rs.getInt(1)
        val name = rs.getString(2)
        val des = rs.getString(3)
        (pid, name + "," + des)
      }
    )
    // Inner join on pid: (pid, (orderLine, "name,description"))
    val joined: RDD[(Int, (String, String))] = orderRDD.join(goodsRDD)
    // Persist the enriched records on HDFS; saveAsTextFile is an action and triggers the job
    joined.values.saveAsTextFile("hdfs://linux01:8020/orders/")

  }

}
