package spark.SparkServer

import java.sql.{Connection, DriverManager, ResultSet, Statement}

import com.mysql.jdbc.Driver
import org.apache.hive.jdbc.HiveDriver
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{DataTypes, StructField, StructType}
import org.apache.spark.sql._
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer

/**
  * Queries HiveServer2 over plain JDBC and prints per-country install counts.
  *
  * Fix: the original created the Statement and ran the query OUTSIDE the
  * try block, so any failure leaked the Statement and Connection, and the
  * ResultSet was never closed at all. All resource use now happens inside
  * try, and every resource is released in finally.
  */
object Jdbchive {
	def main(args: Array[String]): Unit = {
//		classOf[HiveDriver]
		val connection: Connection = DriverManager.getConnection("jdbc:hive2://nn2.hadoop:20000/hainiu","","")
		var statement: Statement = null
		var result: ResultSet = null
		try{
			statement = connection.createStatement()
			// lower the shuffle partition count for this session before querying
			statement.execute("set spark.sql.shuffle.partitions=20")
			result = statement.executeQuery("select country,count(1) num from user_install_status_other group by country limit 100")
			while(result.next()){
				val country: String = result.getString(1)   // column 1: country
				val num: String = result.getString("num")   // aliased count column
				println(country+","+num)
			}
		}catch {
			case e:Exception => e.printStackTrace()
		}finally {
			// close in reverse order of acquisition; guard against early failure
			if(result != null) result.close()
			if(statement != null) statement.close()
			connection.close()
		}
	}
}

/**
  * Builds a SQLContext from a SparkContext and loads JSON with
  * SQLContext.read.json(path), which returns a DataFrame.
  */
object SqlJson{

	def main(args: Array[String]): Unit = {
		// spark.sql.shuffle.partitions defaults to 200; pin it to 1 for this demo
		val sparkConf = new SparkConf().setMaster("local[6]").setAppName("SqlJson").set("spark.sql.shuffle.partitions","1")
		val sparkContext = new SparkContext(sparkConf)
		val sqlContext: SQLContext = new SQLContext(sparkContext)
		val jsonPath = "D:\\user\\zhangjian\\input\\json"

		// JSON loading infers column types (String, long, ...)
		val frame: DataFrame = sqlContext.read.json(jsonPath)
		frame.show()
		println("打印前1行")
		frame.show(1)
		println("打印前2行")
		frame.show(2)
		println("打印前3行")
		frame.show(3)
		println("打印Schema")
		frame.printSchema()
		println("count:"+frame.count())
		frame.select(frame.col("country")).show()
		frame.select(frame.col("country"),frame.col("num").plus(100).alias("num_add")).show()
		val countryCounts: DataFrame = frame.groupBy(frame.col("country")).count()
		val rows: RDD[Row] = countryCounts.rdd
		val outPath = "D:\\user\\zhangjian\\output\\json"
		import spark.util.MyPredef.delete
		// remove any previous output directory before saving
		outPath.delete()
		rows.saveAsTextFile(outPath)
//		saved files look like:
//		[cn,2]
//		[us,1]
	}
}

/**
  * Builds a DataFrame from an RDD[Row] plus an explicit schema, then filters it.
  * def createDataFrame(rowRDD: RDD[Row], schema: StructType): DataFrame
  */
object CreateSQLContext{
	def main(args: Array[String]): Unit = {
		val sparkConf = new SparkConf().setAppName("CreateSQLContext").setMaster("local[6]").set("spark.sql.shuffle.partitions","1")
		val sparkContext = new SparkContext(sparkConf)
		val lines: RDD[String] = sparkContext.textFile("D:\\user\\zhangjian\\input\\json").repartition(1)
		// turn each text line into a two-column Row: the raw line plus a prefixed copy
		val rows: RDD[Row] = lines.map(line => RowFactory.create(line,"new_"+line))
		/**
		  * DataTypes is the factory class for Spark SQL types.
		  * Schema: two nullable string columns, "line1" and "line2".
		  */
		val fields = Array(
			DataTypes.createStructField("line1",DataTypes.StringType,true),
			DataTypes.createStructField("line2",DataTypes.StringType,true))
		val tableSchema: StructType = DataTypes.createStructType(fields)
		// assemble the DataFrame from rows + schema
		val sqlContext = new SQLContext(sparkContext)
		val frame: Dataset[Row] = sqlContext.createDataFrame(rows,tableSchema)
		println("过滤前行数："+frame.count())
		val filtered: Dataset[Row] = frame.filter(frame.col("line1").like("%gameloft%"))
		filtered.printSchema()
		filtered.show()
	}
}
/**
  * def createDataFrame(rdd: RDD[_], beanClass: Class[_]): DataFrame
  * Builds a DataFrame by reflecting on a JavaBean-style class (Person).
  *
  * Fixes: removed the stray argument-less `sc.broadcast()` call (broadcast
  * requires a value to broadcast and the result was unused), and replaced
  * the side-effect-only `collect.map` with `foreach`.
  */
object CreateSQLContext1{
	def main(args: Array[String]): Unit = {
		val conf = new SparkConf().setAppName("CreateSQLContext").setMaster("local[6]").set("spark.sql.shuffle.partitions","2")
		val sc = new SparkContext(conf)
		val rdd: RDD[String] = sc.textFile("D:\\user\\zhangjian\\input\\json_person").repartition(1)

		// parse "name:as" lines into Person beans
		// NOTE(review): assumes every line contains ':' — line(1) throws otherwise
		val personsRDD: RDD[Person] = rdd.map(lines => {
			val line: Array[String] = lines.split(":")
			new Person(line(0), line(1))
		})

		val sqlc = new SQLContext(sc)
		// schema is derived from Person's getters via reflection
		val df: DataFrame = sqlc.createDataFrame(personsRDD,classOf[Person])
		df.printSchema()
		val value: RDD[(String, Int)] = df.rdd.map(person => (person.toString(),1))
		val collect: collection.Map[String, Int] = value.collectAsMap()
		// foreach, not map: we only want the printing side effect
		collect.foreach(lines => println(lines._1+","+lines._2))
	}
}

/**
  * JavaBean-style holder (getters/setters) so Spark can reflect a schema
  * from it via createDataFrame(rdd, classOf[Person]).
  *
  * Fix: setAs previously assigned `this.name` instead of `this.as`,
  * silently corrupting the name field and never updating `as`.
  */
class Person(var name:String,var as:String) extends Serializable {
	def getName() : String = {
		name
	}
	def setName(name:String): Unit ={
		this.name = name
	}
	def getAs() : String = {
		as
	}
	def setAs(name:String): Unit ={
		this.as = name
	}
	override def toString: String = s"${name},${as}"
}

/**
  * SQLContext.read.orc — read ORC data, aggregate, and write back as ORC/JSON.
  */
object SQLContextOrc{
	def main(args: Array[String]): Unit = {
		val sparkConf = new SparkConf().setMaster("local[*]").setAppName("SQLContextOrc").set("spark.sql.shuffle.partitions","1")
		val sparkContext = new SparkContext(sparkConf)
		val sqlContext = new SQLContext(sparkContext)
		val frame: DataFrame = sqlContext.read.orc("\\user\\zhangjian\\input\\orc")
		frame.printSchema()
		frame.show()
//		columns: aid | pkgname | uptime | type | country | gpcategory
		frame.select(frame.col("country")).limit(100).show(5) // show the first 5 rows
		frame.select(frame.col("country").as("local")).show(6)
		// groupBy(...).count() yields a DataFrame again
		val countryCounts: DataFrame = frame.groupBy(frame.col("country")).count()
		countryCounts.printSchema()
		val renamed: DataFrame = countryCounts.select(countryCounts.col("country"),countryCounts.col("count").alias("num"))
		val smallCountries: Dataset[Row] = renamed.filter(renamed.col("num").lt(3000)).limit(8)
		// cache before writing twice so the plan is not recomputed per sink
		val cached: Dataset[Row] = smallCountries.persist(StorageLevel.MEMORY_ONLY)

		cached.write.mode(SaveMode.Overwrite).format("orc").save("\\user\\zhangjian\\output\\sql_orc")
		cached.write.mode(SaveMode.Append).format("json").save("\\user\\zhangjian\\output\\sql_json")
	}
}

/**
  * HiveContext.read.orc + HiveContext.sql — register a temp view over ORC
  * data and run HiveQL against it.
  */
object SqlHiveOrc{
	def main(args: Array[String]): Unit = {
		val sparkConf = new SparkConf().set("spark.sql.shuffle.partitions","1").setAppName("SqlHiveOrc").setMaster("local[*]")
		val sparkContext = new SparkContext(sparkConf)
		val hiveContext = new HiveContext(sparkContext)
		val frame: DataFrame = hiveContext.read.orc("\\user\\zhangjian\\input\\orc")
		frame.createOrReplaceTempView("temp_table")
		// the query must collapse each row to a single string column,
		// because the "text" sink below accepts exactly one column
		val concatenated: DataFrame = hiveContext.sql("select concat(a.country,\"\t\",a.n) as concatString from (select country , count(1) n from temp_table group by country ) a where a.n < 300 limit 10")
		concatenated.show()
		concatenated.printSchema()
		concatenated.write.mode(SaveMode.Overwrite).format("text").save("\\user\\zhangjian\\output\\sql_orc_hive")
	}
}

/**
  * HiveContext: create a database and table, load data into it, and query it.
  *
  * Fixes: column type `long` replaced with `bigint` — `long` is not a valid
  * Hive DDL type (only Spark's own SQL parser tolerates it as an alias), so
  * `bigint` keeps the DDL portable with identical 8-byte semantics; appName
  * corrected from the copy-pasted "SqlHiveOrc"; removed the unused `df` local.
  */
object SqlHiveHql {
	def main(args: Array[String]): Unit = {
		val conf = new SparkConf().set("spark.sql.shuffle.partitions", "1").setAppName("SqlHiveHql").setMaster("local[*]")
		val sc = new SparkContext(conf)
		val hivec = new HiveContext(sc)
		// NOTE(review): Hadoop Path parsing tolerates the redundant slashes, but
		// "file:///D:/user/zhangjian/input/orc" is the canonical form — confirm before simplifying
		val orcPath = "file://///////D:/user/zhangjian/input/orc"
//		hivec.sql("drop database if exists manager")
		hivec.sql("create database if not exists manager")
		hivec.sql("use manager")
		val createTableSql =
			"""
			  |create table my (
			  |`aid` string,
			  |`pkgname` string,
			  |`uptime` bigint,
			  |`type` int,
			  |`country` string,
			  |`gpcategory` string)
			  |stored as orc
			  |tblproperties('orc.compress'='snappy')
			""".stripMargin
		println(createTableSql)
		hivec.sql("drop table if exists my")
		hivec.sql(createTableSql)
		// NOTE(review): loads the ORC files as-is; the table's ORC storage must match the source layout
		hivec.sql("load data local inpath \'"+orcPath+"\' into table my")
		val limit: DataFrame = hivec.sql("select * from my limit 50")
		limit.printSchema()
		limit.show()
		// collapse to a single string column so the "text" sink below accepts it
		val select: DataFrame = hivec.sql("select concat(a.country,\"\t\",a.n) as concatString from (select country , count(1) n from my group by country ) a where a.n < 300 limit 10")
		select.show()
		select.printSchema()
		select.write.mode(SaveMode.Overwrite).format("text").save("\\user\\zhangjian\\output\\sql_orc_hql")
	}
}

/**
  * Joins two MySQL tables through Spark SQL over JDBC.
  *
  * Fixes: JDBC host "127.0.1" corrected to the loopback address "127.0.0.1";
  * appName corrected from the copy-pasted "SqlHiveOrc".
  */
object SqlMysql{
	def main(args: Array[String]): Unit = {
		val conf = new SparkConf().set("spark.sql.shuffle.partitions", "1").setAppName("SqlMysql").setMaster("local[*]")
		val sc = new SparkContext(conf)
		val sqlc = new SQLContext(sc)
		// NOTE(review): SQLContext.jdbc is deprecated in Spark 2.x;
		// sqlc.read.jdbc / read.format("jdbc") is the replacement
		val df2: DataFrame = sqlc.jdbc("jdbc:mysql://192.168.88.195:3306/hainiutest?user=hainiu&password=12345678","yingxiong")
		val df1: DataFrame = sqlc.jdbc("jdbc:mysql://127.0.0.1:3306/hainiu?user=root&password=123456","wanjia")
		df1.createOrReplaceTempView("wanjia")
		df2.createOrReplaceTempView("yingxiong")
		val select: DataFrame = sqlc.sql("select * from wanjia w inner join yingxiong y on w.w_age = y.age")
		select.show()
		select.printSchema()
	}
}

/**
  * Connects to MySQL through SparkSession, registers a temp view, and queries it.
  *
  * Fix: JDBC host "127.0.1" corrected to the loopback address "127.0.0.1".
  */
object SqlSparkSession{
	def main(args: Array[String]): Unit = {
		val conf = new SparkConf().setMaster("local[*]")
		val session: SparkSession = SparkSession.builder().config(conf).appName("SqlSparkSession").getOrCreate()
		// the explicit driver class pins the MySQL Connector/J driver
		val df: DataFrame = session.read.format("jdbc")
				.option("driver", classOf[Driver].getName)
				.option("url", "jdbc:mysql://127.0.0.1:3306/hainiu")
				.option("dbtable", "wanjia")
				.option("user", "root")
				.option("password", "123456")
				.load()
		df.createOrReplaceTempView("temp_table")
		val select: DataFrame = session.sql("select * from temp_table")
		select.show()
		select.printSchema()
	}
}
