package test

import java.util.Properties
import org.apache.spark.sql.types._
import org.apache.spark.sql.Row
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import scala.collection.JavaConverters._

object CountAvgBySql extends App {
  // NOTE(review): `extends App` with an empty constructor body means running this
  // object directly does nothing; callers invoke countBySex()/countByWork().
  // A plain `def main(args: Array[String]): Unit` entry point would be safer
  // (the App trait has initialization-order pitfalls).
  //  def main(args: Array[String]): Unit = {
  //    val dataMap = countBySex()
  //    dataMap.foreach(m => println(m._1 + "->" + m._2))
  //  }

  /** JDBC URL of the local MySQL database holding the MovieLens tables. */
  private val JdbcUrl = "jdbc:mysql://localhost:3306/spark?serverTimezone=GMT"

  /** Name of the column produced by Dataset.avg("rating"). */
  private val AvgRatingCol = "avg(rating)"

  /** Connection credentials and driver class for the MySQL JDBC source. */
  private def connectionProps(): Properties = {
    val prop = new Properties
    prop.setProperty("user", "root")
    prop.setProperty("password", "000000")
    prop.setProperty("driver", "com.mysql.jdbc.Driver")
    prop
  }

  /**
   * Creates (or reuses) the SparkContext, reads the ratings/movies/users tables
   * over JDBC, and registers them as the temp views rateTable, movieTable and
   * userTable so subsequent SQL can join them.
   *
   * Partitioned reads on the numeric key column (lower bound, upper bound,
   * partition count) parallelize ingestion. `SparkContext.getOrCreate` avoids
   * the "only one SparkContext may be running" error when both public methods
   * are called in the same JVM (the original `new SparkContext` would throw).
   */
  private def initTables(): org.apache.spark.sql.SQLContext = {
    val conf = new SparkConf().setAppName("MovieSql").setMaster("local[2]")
    val sc = SparkContext.getOrCreate(conf)
    val sqlContext = new org.apache.spark.sql.SQLContext(sc)
    val prop = connectionProps()
    sqlContext.read.jdbc(JdbcUrl, "ratings", "userId", 0, 10000, 6, prop).createOrReplaceTempView("rateTable")
    sqlContext.read.jdbc(JdbcUrl, "movies", "movieId", 0, 3883, 3, prop).createOrReplaceTempView("movieTable")
    sqlContext.read.jdbc(JdbcUrl, "users", "userId", 0, 6040, 4, prop).createOrReplaceTempView("userTable")
    sqlContext
  }

  /**
   * Collects a grouped-average DataFrame into a Java map keyed by
   * "genre+groupValue" with the average rating formatted to two decimals.
   *
   * @param result   the output of groupBy("genres", groupCol).avg("rating")
   * @param groupCol name of the second grouping column ("gender" or "occupation")
   */
  private def collectAverages(result: org.apache.spark.sql.DataFrame,
                              groupCol: String): java.util.Map[String, String] =
    result.rdd.map { row =>
      val key = row.getAs[String]("genres") + "+" + row.getAs[String](groupCol)
      // BUG FIX: avg("rating") yields a Double column. The original read it with
      // getAs[String] (ClassCastException at runtime) and then applied the
      // floating-point format "%.2f" via the deprecated `formatted` to a String
      // (IllegalFormatConversionException). Read it as Double and format it.
      val avg = "%.2f".format(row.getAs[Double](AvgRatingCol))
      key -> avg
    }.collect().toMap.asJava

  /**
   * Average rating of each movie genre broken down by viewer gender.
   *
   * @return map of "genre+gender" -> average rating formatted to two decimals
   */
  def countBySex(): java.util.Map[String, String] = {
    val sqlContext = initTables()
    // Explode the pipe-separated genres column so each (rating, genre) pair
    // becomes its own row, then join ratings to users and movies.
    val dataSex = sqlContext.sql("select gender,t.genres,rating " +
      "from rateTable,userTable,movieTable lateral view explode(split( genres,'[|]'))t as genres " +
      "where rateTable.userId=userTable.userId and rateTable.movieId = movieTable.movieId")
    collectAverages(dataSex.groupBy("genres", "gender").avg("rating"), "gender")
  }

  /**
   * Average rating of each movie genre broken down by viewer occupation.
   * The numeric MovieLens occupation code is mapped to a readable label in SQL.
   *
   * @return map of "genre+occupation" -> average rating formatted to two decimals
   */
  def countByWork(): java.util.Map[String, String] = {
    val sqlContext = initTables()
    val dataWork = sqlContext.sql("select (case occupation when 1 then 'academic/educator' when 2 then 'artist' " +
      "when 3 then 'clerical/admin' when 4 then 'college/grad student' when 5 then 'customer service' " +
      "when 6 then 'doctor/health care' when 7 then 'executive/managerial' when 8 then 'farmer' when 9 then 'homemaker' " +
      "when 10 then 'K-12 student' when 11 then 'lawyer' when 12 then 'programmer' when 13 then 'retired' when 14 then 'sales/marketing' " +
      "when 15 then 'scientist' when 16 then 'self-employed' when 17 then 'technician/engineer' when 18 then 'tradesman/craftsman' " +
      "when 19 then 'unemployed' when 20 then 'writer' else 'other' end) as occupation,t.genres,rating " +
      "from rateTable,userTable,movieTable lateral view explode(split( genres,'[|]'))t as genres " +
      "where rateTable.userId=userTable.userId and rateTable.movieId = movieTable.movieId")
    collectAverages(dataWork.groupBy("genres", "occupation").avg("rating"), "occupation")
  }
}
