package cn.lsh.spark.sql;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

import java.io.Serializable;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

public class SqlTest {

	/**
	 * Entry point. Currently only the Hive demo runs; the commented-out block
	 * below shows how the SQLContext-based demos in this class were driven.
	 *
	 * @param args unused command-line arguments
	 * @throws AnalysisException declared for the (commented-out) testJson demo
	 */
	public static void main(String[] args) throws AnalysisException {
		readHive();

		/*SparkConf conf = new SparkConf();
		// conf.setMaster("spark://node02:7077").setAppName("SQL TEST");
		conf.setMaster("local").setAppName("SQL TEST");
		// Number of Spark SQL shuffle partitions; defaults to 200
		// conf.set("spark.sql.shuffle.partitions", "1");

		JavaSparkContext jsc = new JavaSparkContext(conf);
		SQLContext sqlContext = new SQLContext(jsc);

		// testJson(sqlContext);
		// reflectCreateDataFrame(sqlContext, jsc);
		// dynamicCreateDataFrame(sqlContext, jsc);
		// crateParquet(sqlContext);
		// readParquet(sqlContext);
		// readMySql(sqlContext);
		// createMySql(sqlContext);

		jsc.stop();*/
	}

	/**
	 * Demonstrates loading a JSON file into a Dataset, querying it through the
	 * typed column API and through raw SQL against a temp view, and finally
	 * converting the Dataset back to a JavaRDD.
	 *
	 * Note: a Dataset loaded from JSON has its columns sorted by the ASCII
	 * order of their names.
	 */
	public static void testJson(SQLContext sqlContext) throws AnalysisException {
		// Load the JSON file; resulting columns are ordered by name.
		Dataset<Row> dataset = sqlContext.read().format("json").load("file:/bigdata/hadoop-test/input/spark/user_json.txt");
		// Dataset<Row> dataset = sqlContext.read().json("file:/bigdata/hadoop-test/input/spark/user_json.txt");

		// show() prints the first 20 rows by default; printSchema() dumps the schema tree.
		dataset.show();
		dataset.printSchema();

		// Column-API query: project a few columns and filter on id.
		Column idMatches = dataset.col("id").equalTo("1111");
		Dataset<Row> matched = dataset.select("id", "name", "sex", "age").where(idMatches);
		// .where("id = 1111");
		matched.show();

		// Register the Dataset as a temp view. The view lives neither in
		// memory nor on disk — it is only a pointer to the Dataset.
		dataset.createTempView("users");
		// Columns produced through SQL keep their query order instead of
		// being ASCII-sorted.
		Dataset<Row> byId = sqlContext.sql("select * from users where id = 1111");
		byId.show();

		// Convert the Dataset into a JavaRDD of Rows.
		JavaRDD<Row> rows = dataset.javaRDD();
		rows.foreach(r -> r.getAs("name"));
	}

	/**
	 * Builds a DataFrame by reflecting over the User bean class.
	 *
	 * The template User created on the driver is captured by the map closure
	 * and serialized to the executors; per the User class notes, its transient
	 * {@code password} and static {@code work} fields are not serialized.
	 *
	 * Bug fixed: the original code mutated one shared User instance for every
	 * input line, so all Rows in a partition referenced the same object and
	 * could end up carrying the last line's values. A fresh User is now
	 * created per record.
	 */
	public static void reflectCreateDataFrame(SQLContext sqlContext, JavaSparkContext jsc) {
		JavaRDD<String> lines = jsc.textFile("file:/bigdata/hadoop-test/input/spark/user.txt");
		// Driver-side template, kept to demonstrate how transient/static
		// fields behave when the closure is shipped to executors.
		User template = new User();
		template.setPassword("123456");
		template.setWork("moyu");
		JavaRDD<User> userRdd = lines.map(l -> {
			String[] fields = l.split(",");
			// One User per input line — never reuse a single mutable instance
			// across records.
			User user = new User();
			user.setPassword(template.getPassword());
			user.setWork(template.getWork());
			user.setId(Integer.parseInt(fields[0]));
			user.setName(fields.length > 1 ? fields[1] : "");
			user.setSex(fields.length > 2 ? Integer.parseInt(fields[2]) : 0);
			user.setAge(fields.length > 3 ? Integer.parseInt(fields[3]) : 0);
			return user;
		});
		// Create the DataFrame by reflecting on User's bean properties.
		Dataset<Row> dataFrame = sqlContext.createDataFrame(userRdd, User.class);
		dataFrame.show();
		dataFrame.printSchema();
	}

	/**
	 * Builds a DataFrame from a programmatically constructed schema
	 * (StructType) instead of bean reflection.
	 */
	public static void dynamicCreateDataFrame(SQLContext sqlContext, JavaSparkContext jsc) {
		JavaRDD<String> lines = jsc.textFile("file:/bigdata/hadoop-test/input/spark/user.txt");
		JavaRDD<Row> rowRdd = lines.map(line -> {
			String[] parts = line.split(",");
			// Missing trailing fields default to "" / 0.
			String name = parts.length > 1 ? parts[1] : "";
			int sex = parts.length > 2 ? Integer.parseInt(parts[2]) : 0;
			int age = parts.length > 3 ? Integer.parseInt(parts[3]) : 0;
			return RowFactory.create(Integer.parseInt(parts[0]), name, sex, age);
		});
		// The DataFrame displays its columns in the order declared here.
		StructType schema = DataTypes.createStructType(Arrays.asList(
				DataTypes.createStructField("id", DataTypes.IntegerType, false),
				DataTypes.createStructField("name", DataTypes.StringType, true),
				DataTypes.createStructField("sex", DataTypes.IntegerType, true),
				DataTypes.createStructField("age", DataTypes.IntegerType, true)
		));
		Dataset<Row> dataFrame = sqlContext.createDataFrame(rowRdd, schema);
		dataFrame.show();
		dataFrame.printSchema();
	}

	/**
	 * Loads the JSON sample data and rewrites it as a Parquet dataset,
	 * overwriting any previous output.
	 */
	public static void crateParquet(SQLContext sqlContext) {
		Dataset<Row> source = sqlContext.read().format("json").load("file:/bigdata/hadoop-test/input/spark/user_json.txt");
		// parquet(path) is shorthand for format("parquet").save(path).
		source.write().mode(SaveMode.Overwrite).parquet("file:/bigdata/hadoop-test/input/spark/user_json_parquet");
	}

	/** Reads the Parquet dataset written by crateParquet and dumps its rows and schema. */
	public static void readParquet(SQLContext sqlContext) {
		Dataset<Row> fromParquet = sqlContext.read().parquet("file:/bigdata/hadoop-test/input/spark/user_json_parquet");
		fromParquet.show();
		fromParquet.printSchema();
	}

	/**
	 * Reads a MySQL table through the JDBC data source. Three equivalent ways
	 * of configuring the reader are shown; only the first is active.
	 *
	 * Fixed: removed an unused local Dataset that performed dead JSON file
	 * I/O and was never referenced.
	 */
	public static void readMySql(SQLContext sqlContext) {
		System.out.println("**********第一种方式*********");
		String url = "jdbc:mysql://127.0.0.1:3306/test";
		String table = "user";
		Properties properties = new Properties();
		properties.setProperty("user", "root");
		properties.setProperty("password", "admin");
		properties.setProperty("driver", "com.mysql.jdbc.Driver");

		Dataset<Row> jdbc = sqlContext.read().jdbc(url, table, properties);
		/*System.out.println("**********第二种方式*********");
		Map<String, String> options = new HashMap<>();
		options.put("driver", "com.mysql.jdbc.Driver");
		options.put("url", "jdbc:mysql://localhost:3306/test");
		options.put("user", "root");
		options.put("password", "admin");
		options.put("dbtable", "user");
		Dataset<Row> jdbc = sqlContext.read().format("jdbc").options(options).load();*/

		/*System.out.println("**********第三种方式*********");
		DataFrameReader jdbcReader = sqlContext.read().format("jdbc");
		jdbcReader.option("driver", "com.mysql.jdbc.Driver")
				.option("url", "jdbc:mysql://localhost:3306/test")
				.option("user", "root")
				.option("password", "admin")
				.option("dbtable", "user");
		Dataset<Row> jdbc = jdbcReader.load();*/
		jdbc.show(10);
		jdbc.printSchema();
	}

	/**
	 * Appends the JSON sample data to a MySQL table via JDBC. Spark creates
	 * the target table when it does not exist yet.
	 */
	public static void createMySql(SQLContext sqlContext) {
		Properties connectionProps = new Properties();
		connectionProps.setProperty("user", "root");
		connectionProps.setProperty("password", "admin");
		connectionProps.setProperty("driver", "com.mysql.jdbc.Driver");

		Dataset<Row> source = sqlContext.read().format("json").load("file:/bigdata/hadoop-test/input/spark/user_json.txt");
		// Append mode: adds rows to the target table, creating it if absent.
		source.write().mode(SaveMode.Append).jdbc("jdbc:mysql://127.0.0.1:3306/test", "person", connectionProps);
	}

	/**
	 * Attempts to read a Hive table through the HiveServer2 JDBC driver.
	 *
	 * Known issue (kept from the original notes): the connection succeeds,
	 * but no data comes back.
	 *
	 * @param sqlContext context used to build the JDBC reader
	 */
	public static void jdbcReadHive(SQLContext sqlContext) {
		Dataset<Row> jdbc = sqlContext.read().format("jdbc")
				.option("driver", "org.apache.hive.jdbc.HiveDriver")
				.option("url", "jdbc:hive2://node00:10000/test")
				.option("user", "root")
				.option("password", "11111")
				.option("dbtable", "spark_test")
				.load();
		jdbc.show(10);
		jdbc.printSchema();
	}

	/**
	 * Reads from an external Hive metastore through SparkSession.
	 *
	 * The metastore address may be supplied here via "hive.metastore.uris",
	 * in hive-site.xml, or on the session builder; without it, SparkSession
	 * starts a local metastore instead of connecting to the remote one.
	 *
	 * Fixed: the session is now stopped in a finally block so its resources
	 * are released even if a query fails.
	 */
	public static void readHive() {
		SparkConf conf = new SparkConf()
				.setMaster("local")
				// Point at the remote metastore service; otherwise a local
				// metastore is created.
				.set("hive.metastore.uris", "thrift://node00:9083")
				// .set("hive.metastore.uris", "jdbc:hive2://node00:10000/test")
				.setAppName("SQL TEST");
		SparkSession sparkSession = SparkSession.builder().config(conf)
				// Enable support for an external Hive installation.
				.enableHiveSupport()
				// The metastore URI can equally be configured here:
				// .config("hive.metastore.uris", "thrift://node00:9083")
				.getOrCreate();
		try {
			// sparkSession.sql("use test");
			// Dataset<Row> users = sparkSession.sql("select id, name, sex, age, address from users");
			// Dataset<Row> users = sparkSession.table("users");
			Dataset<Row> users = sparkSession.sql("show databases");

			/* Equivalent pre-2.0 approach via HiveContext:
			JavaSparkContext jsc = new JavaSparkContext(conf);
			HiveContext hc = new HiveContext(jsc);
			Dataset<Row> users = hc.sql("show databases");
			// hc.sql("use test");
			// Dataset<Row> users = hc.sql("select id, name, sex, age, address from users");
			*/
			users.show();
			users.printSchema();
		} finally {
			// Release session/context resources.
			sparkSession.stop();
		}
	}

	/**
	 * Bean used by the reflection-based DataFrame demo.
	 *
	 * Classes shipped between nodes (created on the driver, referenced on an
	 * executor) must implement Serializable. Fields that do NOT travel:
	 * 1. transient fields are skipped by serialization (the field itself is
	 *    still visible via reflection; only its value is lost);
	 * 2. static fields belong to the class, not the instance, so they are
	 *    never serialized;
	 * 3. a serialVersionUID mismatch between writer and reader breaks
	 *    deserialization (hence the explicit UID below);
	 * 4. if a subclass is Serializable but its superclass is not, the
	 *    superclass fields are not serialized; the reverse (serializable
	 *    superclass, plain subclass) serializes fine.
	 */
	public static class User implements Serializable {

		// Explicit UID so writer/reader class versions stay compatible.
		private static final long serialVersionUID = 1L;

		private int id;

		private String name;

		private int sex;

		private int age;

		// transient: value is dropped when the instance is serialized.
		private transient String password;

		// static: belongs to the class, never serialized with an instance.
		private static String work;

		public String getWork() {
			return work;
		}

		public void setWork(String work) {
			// Assign through the class, not `this` — the field is static.
			User.work = work;
		}

		public String getPassword() {
			return password;
		}

		public void setPassword(String password) {
			this.password = password;
		}

		public int getId() {
			return id;
		}

		public void setId(int id) {
			this.id = id;
		}

		public String getName() {
			return name;
		}

		public void setName(String name) {
			this.name = name;
		}

		public int getSex() {
			return sex;
		}

		public void setSex(int sex) {
			this.sex = sex;
		}

		public int getAge() {
			return age;
		}

		public void setAge(int age) {
			this.age = age;
		}

		@Override
		public String toString() {
			return "User{" +
					"id=" + id +
					", name='" + name + '\'' +
					", sex=" + sex +
					", age=" + age +
					'}';
		}
	}
}
