package com.study.sql

import com.study.utils.SparkUtils
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.functions.{col, expr}

import scala.collection.parallel.{ForkJoinTaskSupport, ParSeq}

/**
 * @program: spark2.3.2
 * @author: Zhoujian
 * @date: 2022-06-22 11:38
 * @version: 1.0
 * @description: Demo of DataFrame union: folds several DataFrames into one,
 *               writes the result as ORC, and shows simple sorts; also defines
 *               a Seq-to-parallel-collection helper with a sized thread pool.
 * */
object Union {

	def main(args: Array[String]): Unit = {

		// Enrich Seq with `parN`: converts it to a parallel collection backed by
		// a ForkJoinPool of caller-chosen size instead of the default global pool.
		// NOTE(review): unused in this demo; kept for experimentation.
		// NOTE(review): `scala.concurrent.forkjoin.ForkJoinPool` is the Scala 2.11
		// alias (matches Spark 2.3.2); on Scala 2.12+ `ForkJoinTaskSupport` takes
		// `java.util.concurrent.ForkJoinPool` instead.
		implicit class PoolParallelCollection[T](val col: Seq[T]) {
			def parN(poolSize: Int): ParSeq[T] = {
				val colPar: ParSeq[T] = col.par
				val taskSupport = new ForkJoinTaskSupport(new scala.concurrent.forkjoin.ForkJoinPool(poolSize))
				colPar.tasksupport = taskSupport
				colPar
			}
		}

		val spark = SparkUtils.sparkSessionBuild()

		// Read back a previously written ORC dataset and display it.
		val df = spark.read.orc("out/orc")
		df.show()

		import spark.implicits._
		val df1 = Seq(("1", 2, 3.01), ("1", 2, 4.01)).toDF("col1", "col2", "col3")

		val df2 = Seq(("1", 3, 3.01), ("2", 2, 3.01)).toDF("col1", "col2", "col3")

		val df3 = Seq(("4", 5, 6.01), ("5", 5, 8.01), ("5", 5, 9.01)).toDF("col1", "col2", "col3")

		// Empty seed frame sharing df1's schema: makes the fold below safe even
		// if `frames` were empty.
		val frame = spark.createDataFrame(spark.sparkContext.emptyRDD[Row], df1.schema)

		val frames = Array(df1, df2, df3)

		// Union every frame onto the empty seed and collapse to one partition.
		// FIX: the original selected expr("date_sub(col1,rn) as date_diff"), but no
		// column `rn` exists in the unioned schema (col1/col2/col3) — that select
		// fails analysis at runtime. Replaced with a valid derived column that still
		// demonstrates the three column-reference styles plus expr().
		val frame1: DataFrame = frames.foldLeft(frame)((acc, next) => acc.union(next))
			.coalesce(1)
			.select(col("col1"), $"col2", 'col3, expr("col2 + col3 as col_sum"))
		// FIX: overwrite so the demo can be re-run without "path already exists".
		frame1.write.mode("overwrite").orc("out/orc-1")

		// FIX: the original computed these and discarded the results; print them
		// so the demo actually shows something.
		println(frame1.collect().map(_.mkString(",")).mkString("\n"))
		println(frame.schema.fieldNames.mkString(","))

		val dfSort1 = Seq(true, false, true, false, true).toDF("col1")
		val dfSort2 = Seq(false, true, false, false, true).toDF("col1")

		// FIX: sort returns a NEW DataFrame (DataFrames are immutable); the
		// original dropped the result, so nothing was observable. Show them.
		dfSort1.sort('col1).show()
		dfSort2.sort($"col1").show()


		val df4: DataFrame = Seq(("zhangsan", 2), ("lishi", 2)).toDF("name", "age")
//		df4.printSchema()
//		println(df4.schema)
//		df4.show()
//		val value: Dataset[Person] = df4.as[Person]
//		value.printSchema()
//		println(value.schema)
//		value.show()


	}

	/** Target type for the (commented-out) typed Dataset conversion above. */
	case class Person(name: String, age: Int)

}
