package com.pw.study.sql

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SaveMode, SparkSession}
import java.sql.{Connection, DriverManager, PreparedStatement}
import java.util.Properties

import com.pw.study.utils.SparkUtils

/**
 * Demo jobs that parse local text files with Spark and persist the results
 * to a local MySQL instance, either through the DataFrame JDBC sink
 * (`tb_tmp`) or through a hand-rolled batched PreparedStatement writer
 * (`tb_life_math`).
 */
object SparkSql {
  private val appName: String = "SparkSql"

  // --- Shared JDBC settings (previously duplicated as literals in every method). ---
  // NOTE(review): credentials belong in external config, not in source code.
  private val JdbcUrl      = "jdbc:mysql://localhost:3306/study?useSSL=false&characterEncoding=UTF-8"
  private val JdbcUser     = "root"
  private val JdbcPassword = "123456"
  private val TmpTable     = "tb_tmp"
  private val BatchSize    = 100

  // Use `appName` here instead of the previous hard-coded "spark" so the Spark UI
  // name matches what is passed to SparkUtils.getEnvBySql(appName) in main().
  val conf = new SparkConf()
    .setAppName(appName)
    .setMaster("local[4]")
    .set("spark.testing.memory", "4718592000")
  val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()

  import spark.implicits._

  /** Connection properties for the DataFrameWriter.jdbc sink. */
  private def jdbcProps: Properties = {
    val props = new Properties
    props.setProperty("user", JdbcUser)
    props.setProperty("password", JdbcPassword)
    props
  }

  def main(args: Array[String]): Unit = {
    SparkUtils.getEnvBySql(appName)

    //mkFrame1()
    //mkJdbc()
    //mkjdbc3()
    mk3()

    // Release the local session on exit (the original left `sc.stop()` commented out).
    spark.stop()
  }

  /**
   * Reads data/file/大学.txt (a full-width-comma separated list of names),
   * tags each name with "大学同学" and appends the (name, source) pairs to
   * MySQL table tb_tmp via the DataFrame JDBC sink.
   */
  def mk3(): Unit = {
    val df: DataFrame = spark.read.text("data/file/大学.txt")
    // Names are separated by the full-width comma "，".
    val rdd1 = df.rdd
      .flatMap(_.getString(0).split("，"))
      .map(name => (name, "大学同学"))

    rdd1.foreach(println(_)) // debug output (printed on executors; visible in local mode)

    val df2: DataFrame = rdd1.toDF("name", "source")
    // Original used space-call syntax `mode(SaveMode.Append) jdbc(...)`; explicit dots are safer.
    df2.write.mode(SaveMode.Append).jdbc(JdbcUrl, TmpTable, jdbcProps)
  }

  /**
   * Reads data/file/高等数学.txt (lines of "code title") and inserts
   * ("大学-高等数学", "code @@ title") rows into tb_life_math using the
   * shared batched JDBC writer.
   */
  def mkjdbc3(): Unit = {
    val df: DataFrame = spark.read.text("data/file/高等数学.txt")
    val rdd1 = df.rdd
      .map(_.getString(0))
      .filter(_.nonEmpty)
      .map(_.split(" "))
      .filter(_.length >= 2) // skip malformed lines instead of crashing on parts(1)
      .map(parts => ("大学-高等数学", parts(0), parts(1)))

    // One connection + one prepared statement per partition.
    rdd1.foreachPartition { it =>
      saveToLifeMath(it.map(t => (t._1, t._2 + " @@ " + t._3)))
    }

    rdd1.foreach(println(_)) // debug output
  }

  /**
   * Reads data/file/初中数学.txt (lines of "title x chapter") and appends
   * ("初中数学", title, chapter) rows to tb_tmp via the DataFrame JDBC sink.
   */
  def mkjdbc2(): Unit = {
    val df: DataFrame = spark.read.text("data/file/初中数学.txt")
    val rdd1 = df.rdd
      .map(_.getString(0).split(" "))
      .filter(_.length >= 3) // skip malformed lines instead of crashing on parts(2)
      .map(parts => ("初中数学", parts(0), parts(2)))

    rdd1.foreach(println(_)) // debug output

    val df2: DataFrame = rdd1.toDF("type", "title", "t3")
    df2.write.mode(SaveMode.Append).jdbc(JdbcUrl, TmpTable, jdbcProps)
  }

  /**
   * Reads data/file/高中数学.txt (lines of "x code title") and inserts
   * ("高中数学", "code @@ title") rows into tb_life_math using the
   * shared batched JDBC writer.
   */
  def mkJdbc(): Unit = {
    val df: DataFrame = spark.read.text("data/file/高中数学.txt")
    // The original mapped empty lines to a sentinel "hh" and then filtered on it,
    // which would also have silently dropped a genuine "hh" line. Filtering on
    // non-empty directly is equivalent for the intended purpose and has no collision.
    val rdd1 = df.rdd
      .map(_.getString(0))
      .filter(_.nonEmpty)
      .map(_.split(" "))
      .filter(_.length >= 3) // skip malformed lines instead of crashing on parts(2)
      .map(parts => (parts(1), "高中数学", parts(2)))

    rdd1.foreachPartition { it =>
      saveToLifeMath(it.map(t => (t._2, t._1 + " @@ " + t._3)))
    }

    rdd1.foreach(println(_)) // debug output
    df.show()
    // Removed dead code from the original: an unused `toDF` result and a discarded `collect()`.
  }

  /**
   * Batched insert of (type, title) pairs into tb_life_math.
   *
   * Opens one connection per call (intended to be invoked once per partition from
   * `foreachPartition`), executes a batch every [[BatchSize]] rows plus one final
   * batch for the remainder. Shared by mkJdbc and mkjdbc3, which previously
   * carried two copy-pasted versions of this loop.
   *
   * Error handling is best-effort like the original: exceptions are logged and the
   * remaining rows of the partition are lost.
   */
  private def saveToLifeMath(rows: Iterator[(String, String)]): Unit = {
    var connection: Connection = null
    var statement: PreparedStatement = null
    try {
      connection = DriverManager.getConnection(JdbcUrl, JdbcUser, JdbcPassword)
      statement = connection.prepareStatement("INSERT INTO tb_life_math(type,title) VALUES(?,?)")

      var count = 0
      rows.foreach { case (tpe, title) =>
        statement.setString(1, tpe)
        statement.setString(2, title)
        statement.addBatch()
        count += 1
        // Flush a full batch. (The original checked `i % 100 == 0` *before*
        // incrementing, so it fired after the very first row and produced
        // batches of 1, 100, 100, ... instead of uniform batches of 100.)
        if (count % BatchSize == 0) {
          statement.executeBatch()
          statement.clearBatch()
        }
      }
      // Flush the final partial batch (< BatchSize rows).
      statement.executeBatch()
    } catch {
      case e: Exception => e.printStackTrace()
    } finally {
      if (statement != null)
        statement.close()
      if (connection != null)
        connection.close()
    }
  }

  /** Demo: parse data/file/user.txt ("id name age" per line) into a DataFrame and show it. */
  def mkFrame1(): Unit = {
    val df: DataFrame = spark.read.text("data/file/user.txt")
    val rdd1: RDD[(String, String, String)] = df.rdd.map { row =>
      val parts = row.getString(0).split(" ")
      (parts(0), parts(1), parts(2))
    }
    val df2: DataFrame = rdd1.toDF("id", "name", "age")
    df2.show()
    df.show()
  }
}
