package com.hw.spark.service;

import java.io.Serializable;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.scheduler.JobResult;
import org.apache.spark.scheduler.SparkListener;
import org.apache.spark.scheduler.SparkListenerApplicationEnd;
import org.apache.spark.scheduler.SparkListenerApplicationStart;
import org.apache.spark.scheduler.SparkListenerJobEnd;
import org.apache.spark.scheduler.SparkListenerJobStart;
import org.apache.spark.scheduler.SparkListenerTaskEnd;
import org.apache.spark.scheduler.SparkListenerTaskStart;
import org.apache.spark.sql.DataFrameReader;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;

import com.hw.spark.model.User;

import scala.Tuple2;

@Service
public class SparkSqlService implements Serializable {

	private static final Logger log = LoggerFactory.getLogger(SparkSqlService.class);

	/**
	 * Word-count demo over a local text file using the RDD API on top of a
	 * {@link SparkSession}. Reads {@code D:\wordcount.txt}, splits each line on
	 * the literal pipe character, counts occurrences and logs the result.
	 */
	public void sqlFromText() {
		// Create the Spark SQL session (entry point since Spark 2.0).
		SparkSession spark = SparkSession.builder().master("local[4]").appName("spark-sql-text").getOrCreate();
		try {
			JavaRDD<String> lines = spark.read().textFile("D:\\wordcount.txt").javaRDD();

			// FIX: String.split takes a regex. A bare "|" matches the empty string
			// and splits the line into single characters; escape it to split on
			// the literal pipe delimiter.
			JavaRDD<String> words = lines.flatMap(s -> Arrays.asList(s.split("\\|")).iterator());

			JavaPairRDD<String, Integer> ones = words.mapToPair(s -> new Tuple2<>(s, 1));

			JavaPairRDD<String, Integer> counts = ones.reduceByKey(Integer::sum);

			log.info("demo3---------------------------------------------");
			List<Tuple2<String, Integer>> output = counts.collect();
			for (Tuple2<?, ?> tuple : output) {
				// Parameterized logging instead of string concatenation.
				log.info("{}: {}", tuple._1(), tuple._2());
			}
		} finally {
			// Always release the session, even if the job fails.
			spark.stop();
		}
	}

	/**
	 * Joins two JSON files ({@code F:\name.json}, {@code F:\score.json}) via
	 * temporary views and Spark SQL, printing the inputs and the join result.
	 */
	public void sqlFromJson() {
		// SparkSession is the recommended entry point since Spark 2.0; it
		// replaces the older SparkConf/JavaSparkContext/SQLContext combination.
		SparkSession sparkSession = SparkSession.builder().master("local[4]").appName("spark-sql-json")
				.config("spark.sql.shuffle.partitions", 1).getOrCreate();
		try {
			Dataset<Row> nameInfo = sparkSession.read().json("F:\\name.json");
			Dataset<Row> scoreInfo = sparkSession.read().json("F:\\score.json");

			nameInfo.show();
			scoreInfo.show();

			// Register temp views (replaces the deprecated registerTempTable).
			// FIX: view names now match the case used in the SQL below; the
			// original relied on Spark SQL's default case-insensitivity.
			nameInfo.createOrReplaceTempView("t_name");
			scoreInfo.createOrReplaceTempView("t_score");

			Dataset<Row> result = sparkSession.sql(
					"select t_name.name,t_name.age,t_score.score " + "from t_name,t_score where t_name.name=t_score.name");

			result.show();
		} finally {
			// close() delegates to stop(); calling it once is sufficient
			// (the original called both, which was redundant).
			sparkSession.close();
		}
	}

	/**
	 * Reads the {@code admin_user} table from a local MySQL instance via the
	 * JDBC data source, runs a SQL query over it, writes the result to CSV and
	 * maps the rows into typed {@code User} beans which are logged.
	 */
	public void sqlFromDb() {
		SparkSession sparkSession = SparkSession.builder()
				.master("local[4]")
				.appName("spark-sql-jdbc")
				// Register a custom listener for application/job/task events.
				.config("spark.extraListeners", "com.hw.spark.listener.SparkSQLListener")
				.getOrCreate();
		try {
			// NOTE(review): hard-coded JDBC URL and root credentials; these
			// should come from external configuration, not source code.
			String conUrl = "jdbc:mysql://127.0.0.1:3306/test?serverTimezone=UTC";
			String dbTable = "admin_user";

			// FIX: was a raw Map (unchecked assignment); parameterize it.
			Map<String, String> conMap = new HashMap<>();
			conMap.put("user", "root");
			conMap.put("password", "root");
			conMap.put("driver", "com.mysql.jdbc.Driver");

			DataFrameReader dfr = sparkSession.read().format("jdbc").option("url", conUrl)
					.option("dbtable", dbTable).options(conMap);

			Dataset<Row> users = dfr.load();
			users.show();

			users.createOrReplaceTempView("user");
			Dataset<Row> one = sparkSession.sql(""
					+ "select user.id,user.name,user.addr,user.email "
					+ "from user where user.name='何伟'");
			one.write().csv("F:\\user.csv");

			// Encoder describing how to (de)serialize User beans.
			Encoder<User> uEncoder = Encoders.bean(User.class);
			// Convert untyped Rows into a typed Dataset<User>.
			// NOTE(review): the positional getString indices assume a fixed
			// column order in admin_user — confirm against the table schema;
			// getAs("column") by name would be more robust.
			Dataset<User> userSet = users.map((MapFunction<Row, User>) r -> {
				User user = new User();
				user.setId(10);
				user.setName(r.getString(4));
				user.setAddr(r.getString(0));
				user.setCreateTime(r.getString(1));
				user.setEmail(r.getString(2));
				user.setPhone(r.getString(5));
				user.setPwd(r.getString(6));
				return user;
			}, uEncoder);

			List<User> userList = userSet.toJavaRDD().collect();
			userList.forEach(u -> log.info("{}", u));

			// Batch insert back into MySQL (if needed):
			// dataset.write().mode(SaveMode.Append).jdbc(conUrl, dbTable, props);
		} finally {
			// close() delegates to stop(); the original redundantly called both.
			sparkSession.close();
		}
	}

}
