package org.example

import org.apache.spark.{SparkConf, SparkContext}
import org.example.MySQLHelper.logInfo

/**
 * @author yangzhen14
 * @create 2021/4/26 10:07
 */
/**
 * Small local-mode Spark driver: groups sample 5-tuples by their first
 * field, converts each group to a list of string maps, and batch-inserts
 * the "a1" group into MySQL via [[MySQLHelper]].
 */
object mytest {

  def main(args: Array[String]): Unit = {

    // Point Hadoop at the local winutils install (Windows dev box).
    // BUGFIX: the original literal "C:\\\\Users\\..." produced doubled
    // backslashes at runtime (C:\\Users\\...), which is not a valid
    // hadoop.home.dir; one escaped backslash per separator is correct.
    System.setProperty("hadoop.home.dir", "C:\\Users\\yangzhen14\\hadoop-2.7.1")

    val conf = new SparkConf().setAppName("HelloWorld").setMaster("local[*]")
    val sc = new SparkContext(conf)

    try {
      // Sample records: (district, enterprise, originalMsg, vid, vin).
      val records = List(
        ("a1", "b2", "c2", "d2", "e2"),
        ("a1", "h1", "h1", "h1", "h1"),
        ("a3", "b3", "c3", "d3", "e3"),
        ("a4", "b4", "c4", "d4", "e4"),
        ("a4", "b5", "c5", "d5", "e5")
      )

      // Group by the first tuple field (treated here as a district key).
      val grouped = sc.parallelize(records).groupBy(_._1)

      // NOTE(review): this foreach runs in the executors, not the driver.
      // In local[*] the println output is visible on the console, but on a
      // real cluster it would land in executor logs, and MySQLHelper must
      // be serializable/reachable from every executor. Consider
      // foreachPartition + one connection per partition for real workloads.
      grouped.foreach { case (district, rows) =>
        println(rows)

        // Name the tuple fields instead of using _1.._5 accessors.
        val result = rows.map { case (d, enterprise, originalMsg, vid, vin) =>
          Map(
            "district" -> d,
            "enterprise" -> enterprise,
            "originalMsg" -> originalMsg,
            "vid" -> vid,
            "vin" -> vin
          )
        }.toList

        println(district)
        // Only the "a1" group is persisted (demo behaviour preserved).
        if (district == "a1") {
          MySQLHelper.insertBatch(result, "tt", "")
        }
      }
    } finally {
      // BUGFIX: the original never stopped the SparkContext; always release
      // it so the application shuts down cleanly.
      sc.stop()
    }
  }
}

