package demo.spark.sql

import demo.spark.utils.{SparkCore, SparkSql}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Encoders, Row, SQLContext, SparkSession, functions}
import org.apache.spark.storage.StorageLevel

import scala.collection.mutable


/**
 * The Spark SQL module builds DataFrames from in-application memory, files,
 * or external data sources (Hive / HDFS / HBase). A DataFrame maps the
 * underlying data onto a structured, JDBC/ODBC-style relational template, so
 * users can query resilient distributed datasets as if they were SQL tables.
 * The SQL is ultimately parsed into transform/action operators (as in the
 * SparkCore module) that are executed and merged on the worker nodes.
 */
object SqlDemo {

  // Case-class templates: Spark derives DataFrame/Dataset schemas from their fields via reflection.
  case class UserInfo(name: String, id: Long)
  case class UserInfoFull(name: String, id: Long, age: Long, hobby: Array[String])
  case class UserInfoFull2(name: String, id: Long, age: Long, hobby: Array[String], week: scala.collection.Map[String, String])

  val SPARK_SQL: String = "spark-sql/"
  val SPARK_CORE: String = "spark-core/"
  // NOTE(review): machine-specific absolute path; consider externalizing to configuration.
  val FILE_ROOT_PATH: String = "/Users/icasue/Desktop/lbs-server-plugins/icasue-plugins-demos/icasue-demo-spark/local_text/"

  val sparkContext: SparkContext = SparkCore.getContext("SqlDemo")
  val sparkSession: SparkSession = SparkSql.getSession("SqlDemo")
  val sqlContext: SQLContext = sparkSession.sqlContext

  /**
   * Reads a JSON file (one JSON object per line, as Spark expects) into a DataFrame.
   *
   * @param jsonPath path of the JSON file to read
   * @return the resulting DataFrame
   */
  def getDataFrameByJson(jsonPath: String): DataFrame = {
    sparkSession.read.json(jsonPath)
  }

  /**
   * Reads a comma-separated text file into a DataFrame, projects (name, age * 2)
   * and sorts by age descending, then name ascending.
   *
   * @param sparkContext context used to read the raw text file
   * @param sqlContext   context providing the toDF/column implicits
   * @return the projected, sorted DataFrame
   */
  def getStudentInfo(sparkContext: SparkContext = sparkContext, sqlContext: SQLContext = sqlContext): DataFrame = {
    import sqlContext.implicits._
    val stuInfoDF: DataFrame = sparkContext
      // Read the file as RDD[String], one element per line.
      .textFile(FILE_ROOT_PATH + SPARK_SQL + "parquet_student_info.txt", 1)
      // Split each line ONCE into an (id, name, age) tuple. All three fields stay
      // strings here; the "$\"age\" * 2" below relies on Spark's implicit
      // string-to-numeric cast. (The original comment claimed (Int,String,Int).)
      .map { line =>
        val cols = line.split(",")
        (cols(0), cols(1), cols(2))
      }
      // Convert the RDD to a DataFrame, naming the columns explicitly.
      .toDF("id", "name", "age")
      // Project name and (age * 2), aliased back to "age".
      .select($"name", $"age" * 2 as "age")
      // Sort by age descending, then name ascending.
      .sort($"age".desc, $"name".asc)
    stuInfoDF
  }


  /**
   * Reads a JSON file into a DataFrame, round-trips it through an RDD of
   * [[UserInfo]] (dropping the week/hobby columns) and back to a DataFrame
   * sorted by name. Demonstrates case-class-template reflection on the way back.
   *
   * @param sparkSession session used for reading and for the toDF implicits
   * @return DataFrame with columns (name, id), sorted by name ascending
   */
  def getUserInfo(sparkSession: SparkSession = sparkSession): DataFrame = {
    import sparkSession.sqlContext.implicits._
    val userInfoDF: DataFrame = sparkSession
      // Read the JSON file into a DataFrame.
      .read.json(FILE_ROOT_PATH + SPARK_SQL + "user.json.txt")
      // Drop week and hobby.
      .select("id", "name", "age")
      // Down to RDD[Row].
      .rdd
      // Access fields by name (order-safe, unlike the positional getAs the
      // original used) and build the case-class template directly.
      .map(row => UserInfo(row.getAs[String]("name"), row.getAs[Long]("id")))
      // Back to a DataFrame; column names come from the UserInfo fields.
      .toDF()
      // Project and sort.
      .select($"name", $"id" as "id")
      .sort($"name".asc)
    userInfoDF
  }

  /**
   * Demonstrates temp-view scoping. A plain temp view is bound to one session;
   * a global temp view (registered under the reserved `global_temp` database)
   * is visible from every session of the same application. Global views help
   * when complex cross-session join queries need shared intermediate results.
   *
   * NOTE: the final query intentionally fails (AnalysisException), because the
   * new session's scope has no "student" view.
   */
  def tempViewValidArea(): Unit = {
    val stuInfoDF: DataFrame = getStudentInfo(sqlContext = sqlContext)
    stuInfoDF.createOrReplaceTempView("student")
    stuInfoDF.createOrReplaceGlobalTempView("glo_student")

    sqlContext.sql("select * from student order by age asc").show()
    sqlContext.sql("select * from global_temp.glo_student order by age asc").show()

    /*
      Creating a SparkSession from another thread turned out to be ineffective:
      the cached session is reused. See SparkSession.Builder.getOrCreate().
      Original experiment kept for reference:
      var session:SparkSession = null;
      val countDownLatch:CountDownLatch = new CountDownLatch(1);
      new Thread(new Runnable {
        override def run(): Unit = {
          session = SparkSql.getSession("NewSession");
          countDownLatch.countDown();
        }
      }).start();
      countDownLatch.await();
     */

    val session: SQLContext = sqlContext.newSession()
    // Works: glo_student was registered as a global view.
    session.sql("select * from global_temp.glo_student order by age desc").show()
    // Fails: the new session's scope has no "student" view.
    session.sql("select * from student order by age asc").show()
  }

  /** Creates a DataFrame from an in-memory sequence of case-class instances. */
  def createUserInfoDF(): DataFrame = {
    import sqlContext.implicits._
    Seq(UserInfo("qiaohang", 1), UserInfo("zhouzhou", 2), UserInfo("qiyan", 3)).toDF()
    //List(UserInfo("qiaohang",1),UserInfo("zhouzhou",2),UserInfo("qiyan",3)).toDF()
  }

  /** Creates a typed Dataset from an in-memory sequence of case-class instances. */
  def createUserInfoDS(): Dataset[UserInfo] = {
    import sqlContext.implicits._
    Seq(UserInfo("qiaohang", 1), UserInfo("zhouzhou", 2), UserInfo("qiyan", 3)).toDS()
    //List(UserInfo("qiaohang",1),UserInfo("zhouzhou",2),UserInfo("qiyan",3)).toDS()
  }

  /**
   * Reads a JSON file and maps the DataFrame onto a Dataset template via .as[UserInfoFull].
   *
   * Notes on JSON mapping: for data such as
   *   {"name": "qiaohang", "hobby":["one","two"], "week":{"mon":"apple"}}
   * the keys (name, hobby, week) become column headers and the values become rows.
   * Arrays map cleanly, but there is no built-in Encoder for the map-shaped
   * "week" value, so the template class cannot declare that column explicitly;
   * the week column is still carried through untyped and remains usable in show().
   */
  def readJsonFrameTODataSet(): Dataset[UserInfoFull] = {
    import sqlContext.implicits._
    sqlContext.read.json(FILE_ROOT_PATH + SPARK_SQL + "user.json.txt").as[UserInfoFull]

    /*
      readJsonFrameTODataSet().show();
      readJsonFrameTODataSet().toDF("col1","col2","col3","col4","col5").show();
      readJsonFrameTODataSet().toDF()
        .select("name","age","hobby","week")
        .limit(2)
        .show()
     */
  }


  /**
   * Demonstrates supplying extra implicit Encoders (Kryo) for types that have
   * no built-in Encoder, then mapping rows onto [[UserInfoFull2]] and using
   * Column.withField.
   */
  def implicitImprove(): Unit = {
    import sqlContext.implicits._
    // Kryo encoders for types without a built-in Encoder; implicit so they can
    // be resolved where the default product encoder does not apply.
    implicit val mapEncoder = Encoders.kryo[mutable.Map[String, String]]
    implicit val anyEncoder = Encoders.kryo[Any]
    sparkSession.read.json(FILE_ROOT_PATH + SPARK_SQL + "user.json.txt")
      .select($"id", $"age" * 2 as "DoubleAge", $"name".as("Xname"), $"hobby", $"week")
      .map(row => UserInfoFull2(
        row.getAs[String]("Xname"),
        row.getAs[Long]("id"),
        row.getAs[Long]("DoubleAge"),
        // FIX: Spark returns array columns as Seq (WrappedArray), not List;
        // the original getAs[List[String]] cast threw ClassCastException.
        row.getAs[Seq[String]]("hobby").toArray,
        row.getMap[String, String](4)
      ))
      // FIX: withField is only valid on struct columns; the original applied it
      // to the StringType "name" column, which fails analysis. Wrapping the
      // column in a struct preserves the withField demonstration.
      .select(functions.struct($"name").withField("real", functions.lit(true)) as "XXname")
      .toDF()
      .show()
  }

  /**
   * Three ways to map an RDD onto a DataFrame:
   *  1. reflection over a case-class template;
   *  2. reflection with explicitly ordered column names via toDF(schemas: String*);
   *  3. a hand-built schema (StructType) — this variant can project the same
   *     row RDD through different schema "views" for different roles.
   */
  def rddToDataFrame(): Unit = {
    // Type 1: case-class reflection.
    import sqlContext.implicits._
    sparkContext.textFile(FILE_ROOT_PATH + SPARK_SQL + "parquet_student_info.txt", 1)
      .map { line =>
        val cols = line.split(",")
        UserInfo(cols(1), cols(0).toInt)
      }
      .toDF()
      .show()

    // Type 2: explicit column names.
    sparkContext.textFile(FILE_ROOT_PATH + SPARK_SQL + "parquet_student_info.txt", 1)
      .map { line =>
        val cols = line.split(",")
        (cols(1), cols(2).toInt)
      }
      .toDF("name", "age")
      .show()

    // Type 3: hand-built StructType schemas.
    import org.apache.spark.sql.types._
    // Schema view 1, e.g. for regular users.
    val schemaDef: String = "id,name,age"
    val schemaFields: Array[StructField] = schemaDef.split(",").map(col => StructField(col, StringType, nullable = true))
    val schemaType: StructType = StructType(schemaFields)
    // Schema view 2, e.g. for admins.
    val schemaDef2: String = "idX,nameX,ageX"
    val schemaFields2: Array[StructField] = schemaDef2.split(",").map(col => StructField(col, StringType, nullable = true))
    val schemaType2: StructType = StructType(schemaFields2)

    // Cache the row RDD — it is reused by both schema views below.
    val rowRDD: RDD[Row] = sparkContext.textFile(FILE_ROOT_PATH + SPARK_SQL + "parquet_student_info.txt", 1)
      .map { line =>
        val cols = line.split(",")
        Row(cols(0), cols(1), cols(2))
      }
      .persist(StorageLevel.MEMORY_ONLY)

    val schema1DF: DataFrame = sqlContext.createDataFrame(rowRDD, schemaType)
    schema1DF.select($"name", $"age" * 2 as "DoubleAge").show()

    val schema2DF: DataFrame = sqlContext.createDataFrame(rowRDD, schemaType2)
    schema2DF.select($"nameX", $"idX" + 1 as "Xid").show()
  }


}
