package com.etc.datefrme

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{Dataset, Row, SQLContext, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

object HelloDataFrames {

  // age is kept as a raw String because the input file is split without any
  // validation; convert downstream if a numeric age is needed.
  case class Person(name: String, age: String)

  def main(args: Array[String]): Unit = {

    //    val spark = SparkSession
    //      .builder()
    //      .appName("HelloDataFrames")
    //      .master("local")
    //      .getOrCreate()
    //
    //    import spark.implicits._
    //    val frame = spark.read.json("D:\\qq下载\\第一阶段(1)\\第一阶段\\第73讲-Spark SQL：DataFrame的使用\\文档\\students.json")
    // Show the file contents
    //        frame.show()
    // Show the schema (metadata)
    //    frame.printSchema()
    // Select all names
    //    frame.select("name").show()
    // Select all names with age incremented by one
    //    frame.select($"name",$"age" + 1).show()
    // Filter rows with age greater than 17
    //    frame.filter($"age" >  17 ).show()
    // Group by age and count occurrences
    //    frame.groupBy("age").count().show()

    //        frame.createOrReplaceTempView("user")
    //
    //        spark.sql("select * from user").show()
    //
    //    spark.stop()
    // Create a Dataset from a case class
    /*    val caseClassDS = Seq(Person("Andy", 32)).toDS()
        caseClassDS.show()
        //
        val unit = Seq(1,2,3).toDS()
        unit.map(_ + 1).collect()*/

    // SQLContext is deprecated since Spark 2.0 but retained here to
    // demonstrate the explicit SparkContext -> SQLContext construction path.
    val conf = new SparkConf().setAppName("sparkSql").setMaster("local")
    val sc = new SparkContext(conf)
    // Renamed from `sparkSql`: the original local shadowed the file-level
    // `sparkSql` object.
    val sqlContext = new SQLContext(sc)

    // Each input line is expected to be "name age" separated by one space.
    val people = sc.textFile("e:\\b.txt")
      .map(line => {
        val fields = line.split(" ")
        Person(fields(0), fields(1))
      })

    import sqlContext.implicits._
    val df = people.toDF
    df.createOrReplaceTempView("person")
    // Fix: the original discarded the result of the SQL query and showed the
    // raw DataFrame instead; show the query result so the SQL path is
    // actually exercised.
    sqlContext.sql("select * from person").show()
    sc.stop()
  }
}

object sparkSql {
  /** Builds a DataFrame from a text file using a programmatic (StructType)
    * schema instead of case-class reflection, then queries it via SQL. */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("sparkSql").master("local").getOrCreate()
    val lines = spark.sparkContext.textFile("e:\\uv.txt")

    // Each line is "data,id"; the second field must parse as an Int.
    val schema = StructType(Array(
      StructField("data", StringType, true),
      StructField("id", IntegerType, true)
    ))
    val rows = lines.map { line =>
      val parts = line.split(",")
      Row(parts(0), parts(1).toInt)
    }

    val df = spark.createDataFrame(rows, schema)
    df.createOrReplaceTempView("person")
    spark.sql("select * from person").show()
    spark.stop()
  }
}

object DateSet {

  case class Person(name: String, age: Int)

  /** Reads "name age" lines into a typed Dataset[Person] and queries it via SQL. */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("sparkSql").master("local").getOrCreate()
    // read.textFile yields a Dataset[String], one element per line.
    val context = spark.sqlContext.read.textFile("e:\\a.txt")
    import spark.implicits._
    // Fix: split each line once instead of twice.
    val wordData: Dataset[Person] = context.map { line =>
      val fields = line.split(" ")
      Person(fields(0), fields(1).toInt)
    }
    wordData.createOrReplaceTempView("t_word")
    val frame = spark.sql("select * from t_word ")
    frame.show()
    // Fix: the session was never stopped, leaking the underlying SparkContext.
    spark.stop()
  }
}


object DataFramodemo {
  /** Demonstrates RDD[Row] + programmatic StructType schema -> DataFrame. */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("sparkSql").master("local").getOrCreate()

    val value: RDD[String] = spark.sparkContext.textFile("e:\\a.txt")

    // 1. Turn each line into a Row matching the schema below.
    // Fix: the original emitted empty Row() objects, which cannot satisfy a
    // two-field schema once an action materializes the DataFrame.
    val datarow: RDD[Row] = value.map(line => {
      val fields = line.split(" ")
      Row(fields(0), fields(1).toInt)
    })

    // 2. Programmatic schema ("table header"): name string, age int.
    val schema = StructType(Array(
      StructField("name", StringType, true),
      StructField("age", IntegerType, true)
    ))

    val dfd = spark.createDataFrame(datarow, schema)
    // Trigger an action so the conversion above is actually exercised
    // (the original never used `dfd`).
    dfd.show()

    spark.stop()
  }


  object DataframeDemo2 {

    case class Person(name: String, age: Int)

    /** Demonstrates RDD -> DataFrame via case-class reflection (toDF). */
    def main(args: Array[String]): Unit = {
      val spark = SparkSession.builder().appName("sparkSql").master("local").getOrCreate()

      val value: RDD[String] = spark.sparkContext.textFile("e:\\a.txt")

      import spark.implicits._

      // Fix: parse each line instead of emitting a hard-coded placeholder
      // Person("", 1) for every record.
      val personrdd: RDD[Person] = value.map(line => {
        val fields = line.split(" ")
        Person(fields(0), fields(1).toInt)
      })
      val frame = personrdd.toDF()

      // DataFrame -> Dataset[String]: project each row back to its name
      // column. (The original lambda `lien => ("")` discarded its input and
      // produced only empty strings.)
      val valued: Dataset[String] = frame.map(row => row.getString(0))

      valued.toDF()
      spark.stop()
    }
  }


}

// Three ways to create a Dataset
object dateSetTest {

  // Case-class parameters are vals by default; the explicit `val` was redundant.
  case class Person(name: String, age: Int)

  /** Shows three ways to build a Dataset; only the List-based path is live. */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("sparkSql").master("local").getOrCreate()

    import spark.implicits._

    // 1. Create from an RDD
    /**
      * Sample input (e:\a.txt):
      *
      * 12 wangcc
      * 11 lizhe
      * 2 wangfei
      *
      *
      * _2   _3  _1
      */

    /*      val value: RDD[Person] = spark.sparkContext.textFile("e:\\a.txt").map(a => Person(a.split(" ")(0),a.split(" ")(1).toInt))

          val datadataset: Dataset[Person] = spark.createDataset(value)

          val frame = datadataset.toDF("name","age")
          frame.show()*/
    // 2. Create from a Seq

    /*        val seqdata:Dataset[String] = spark.createDataset(Seq("jiege","18"))
            val frame = seqdata.toDF()
            frame.show()*/
    // 3. Create from a List
    val wordsDs: Dataset[String] = spark.createDataset(List("d", "dfs"))
    val wordsDf = wordsDs.toDF
    wordsDf.show()
    spark.stop()

  }
}

// Placeholder for data load/save examples; intentionally empty so far.
object dataLoadAndSave {

}

// Data sources
// Kind 1: parquet
object datasource {

  // Read data from JSON (with notes on writing/reading parquet).
  def jsonDemo(): Unit = {

    // How to convert data to parquet
    val spark = SparkSession.builder().appName("sparkSql").master("local").getOrCreate()
    //    val frame = spark.read.json("e:\\a.json")

    // Save modes: SaveMode.ErrorIfExists, SaveMode.Append, SaveMode.Overwrite, SaveMode.Ignore
    //          frame.write.mode(SaveMode.Append).parquet("e:\\person.parquet")
    //      val value = spark.read.parquet("e:\\person.parquet")
    val value = spark.read.json("e:\\students.json")
    val frame = value.toDF()
    frame.createOrReplaceTempView("student")
    spark.sql("select * from student").show()

    spark.stop()


  }

  // Read data over JDBC.
  def jdbcDemo(): Unit = {
    val spark = SparkSession.builder().appName("sparkSql").master("local").getOrCreate()
    // NOTE(review): credentials are hard-coded; move them to configuration
    // before using this outside a local demo.
    val jdbcDF = spark.read
      .format("jdbc")
      .option("url", "jdbc:mysql://localhost:3306/linyujie")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("dbtable", "user")
      .option("user", "root")
      .option("password", "123456")
      .load()

    val frame = jdbcDF.toDF()
    //    frame.write.mode(SaveMode.Append).parquet("e:\\parquet")
    frame.createOrReplaceTempView("user")
    spark.sql("select * from user where achievement > 60").show()
    spark.stop()
  }

  // Read data from CSV.
  def csvDemo(): Unit = {
    val spark = SparkSession.builder().appName("csvDemo").master("local").getOrCreate()
    val peopleDFCsv = spark.read.format("csv")
      .option("sep", ";")
      .option("inferSchema", "true")
      .option("header", "true")
      .load("e:\\abc.csv")
    val frame = peopleDFCsv.toDF()
    frame.createOrReplaceTempView("user")
    spark.sql("select * from user").show()
    spark.stop()
  }

  // Read data from parquet.
  def parquetDemo(): Unit = {
    val spark = SparkSession.builder().appName("parquetDemo").master("local").getOrCreate()
    val value = spark.read.parquet("e:\\parquet")
    val frame = value.toDF()
    frame.createOrReplaceTempView("user")
    spark.sql("select * from user").show()
    spark.stop()
  }

  // Read data from HBase. TODO: not implemented yet.
  def hBase(): Unit = {
    val spark = SparkSession.builder().appName("hBase").master("local").getOrCreate()
    // Fix: stop the session so this stub does not leak a SparkContext.
    spark.stop()
  }

  // Read data from Hive.
  def hiveDemo(): Unit = {
    val spark = SparkSession.builder().appName("d").master("local[*]").enableHiveSupport().getOrCreate()
    spark.sql("use linyujie")
//    spark.sql("create table user(id int,name string,sex string)row format delimited fields terminated by ',' ")
//    spark.sql("load data local inpath'/opt/a.txt' into table user")
    spark.sql("select * from user").show()

    spark.stop()
  }

  // Unique visitors per day, computed in Hive SQL.
  def uv(): Unit ={
    val spark = SparkSession.builder().appName("uv").master("local[*]").enableHiveSupport().getOrCreate()
//    spark.sql("create table uv(data string,id int)row format delimited fields terminated by ',' ")
    spark.sql("use linyujie")
//    spark.sql("load data local inpath 'E:\\uv.txt' into table uv")
    spark.sql("select data,count(DISTINCT(id))  from uv group by data").show()
    spark.stop()
  }

  // Unique visitors per day, computed with the DataFrame API instead of Hive.
  def uvTest(): Unit ={

    val spark = SparkSession
      .builder()
      .appName("uvTest")
      .master("local")
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    val unit = spark.sparkContext.textFile("e:\\uv.txt")

    val structType = StructType(Array(
      StructField("data", StringType, true),
      StructField("id", IntegerType, true)
    ))

    val RowAdd = unit.map(_.split(",")).map(a => Row(a(0), a(1).toInt))

    val frame = spark.createDataFrame(RowAdd,structType)
//    frame.dropDuplicates("id").groupBy($"data").count().as("logcount").show()
    frame.groupBy("data").agg(countDistinct('id)).show()

    // Fix: the session was never stopped.
    spark.stop()
  }

  // Page views. TODO: not implemented — the original body was a dead, bare
  // `SparkSession` expression with no effect.
  def pv(): Unit ={
  }

  // Top-3 products per category by sales, via a row_number() window.
  def top3(): Unit ={
    // Fix: Hive support is required for "use linyujie" (consistent with
    // hiveDemo/uv); without it the in-memory catalog has no such database.
    val spark = SparkSession.builder().appName("top3").master("local").enableHiveSupport().getOrCreate()
    spark.sql("use linyujie")
    spark.sql("select commodity,product,sales from (select commodity,product,sales,row_number()over(partition by product order by sales desc)as rank from sales)tmp where rank<=3").show()
    spark.stop()
  }

  // Entry point: uncomment the demo to run.
  def main(args: Array[String]): Unit = {
//        jsonDemo()
//        jdbcDemo()
//        csvDemo()
//        hiveDemo()
//        parquetDemo()
        uv()
//        uvTest()
//    top3()

  }
}
