package com.mango.ch04;

import java.io.IOException;
import java.net.URISyntaxException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.fs.Path;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.Optional;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFunction;

import com.mango.HDFSTools.HDFSAPI;

import scala.Tuple2;

/**
 * 使用spark的高层API实现 左外连接
 * 
 * @author Mango
 *
 *         2018年8月3日-下午2:40:15
 */
/**
 * Left outer join implemented with Spark's built-in high-level API
 * ({@link JavaPairRDD#leftOuterJoin}).
 *
 * <p>Pipeline: read users (userId,location) and transactions (…,product,userId,…)
 * from HDFS, left-outer-join them on userId, re-key by product, group, and emit
 * for each product the distinct set of locations plus its count.
 */
public class Org_Spark_Left_Out_Join_Job {
	static Path transactions = new Path("/MR_Left_Out_Join_Job/input/transactions.txt");// input
	static Path locations = new Path("/MR_Left_Out_Join_Job/input/users.txt"); // input
	static Path outPath = new Path("/MR_Left_Out_Join_Job/ori_spark_output");
	static Path inputPath = new Path("/MR_Left_Out_Join_Job/input");

	@SuppressWarnings("serial")
	public static void main(String[] args) {
		// Prepare HDFS: ensure the input directory exists and remove any
		// previous output directory so Spark's save calls do not fail.
		try {
			HDFSAPI hdfs = new HDFSAPI();
			hdfs.createDirectory(inputPath);
			hdfs.delDirectory(outPath);
			// hdfs.orpOver();
		} catch (IOException e1) {
			e1.printStackTrace();
			System.out.println("----------文件操作失败");
		} catch (InterruptedException e) {
			// Restore the interrupt status instead of swallowing it.
			Thread.currentThread().interrupt();
			e.printStackTrace();
		} catch (URISyntaxException e) {
			e.printStackTrace();
		}

		JavaSparkContext jsc = new JavaSparkContext();
		JavaRDD<String> location_lines = jsc.textFile(locations.toString(), 1);
		JavaRDD<String> product_lines = jsc.textFile(transactions.toString(), 1);

		// users file: CSV lines "userId,locationId,..." -> (userId, locationId)
		JavaPairRDD<String, String> usersRDD = location_lines.mapToPair(new PairFunction<String, String, String>() {

			@Override
			public Tuple2<String, String> call(String t) throws Exception {
				String[] tokens = t.split(",");
				// userid locationid
				return new Tuple2<String, String>(tokens[0], tokens[1]);
			}
		});

		// transactions file: CSV lines where field[1] is the product and
		// field[2] is the userId -> (userId, product)
		JavaPairRDD<String, String> productsRDD = product_lines.mapToPair(new PairFunction<String, String, String>() {

			@Override
			public Tuple2<String, String> call(String t) throws Exception {
				String[] tokens = t.split(",");
				String product = tokens[1];
				return new Tuple2<String, String>(tokens[2], product);
			}
		});

		/**
		 * Built-in left outer join. Result type is
		 * (key, (left value, Optional of right value)).
		 *
		 * NOTE: for keys present only in the left table the right side is
		 * absent, so the Optional must be checked before reading it. Choose
		 * the left table carefully: if table A's keys are a superset of B's,
		 * A is usually the better right table.
		 */
		JavaPairRDD<String, Tuple2<String, Optional<String>>> joined = usersRDD.leftOuterJoin(productsRDD);
		// Dump intermediate join output for inspection.
		joined.saveAsTextFile(outPath.toString() + "/1");

		// Re-key to (product, location). The right side of a left outer join
		// may be absent; calling get() unguarded would throw NoSuchElementException
		// for users with no transactions, so substitute a sentinel instead.
		JavaPairRDD<String, String> productLocation = joined
				.mapToPair(new PairFunction<Tuple2<String, Tuple2<String, Optional<String>>>, String, String>() {

					@Override
					public Tuple2<String, String> call(Tuple2<String, Tuple2<String, Optional<String>>> t)
							throws Exception {
						String location = t._2._1;
						Optional<String> maybeProduct = t._2._2;
						String product = maybeProduct.isPresent() ? maybeProduct.get() : "UNKNOWN";
						// (product, location)
						return new Tuple2<String, String>(product, location);
					}
				});

		// Group locations by product key.
		JavaPairRDD<String, Iterable<String>> groupRDD = productLocation.groupByKey();
		groupRDD.saveAsTextFile(outPath.toString() + "/2");

		// Deduplicate locations per product and attach the distinct count.
		JavaPairRDD<String, Tuple2<Set<String>, Integer>> productLocationUnique = groupRDD
				.mapValues(new Function<Iterable<String>, Tuple2<Set<String>, Integer>>() {

					@Override
					public Tuple2<Set<String>, Integer> call(Iterable<String> v1) throws Exception {
						// Renamed from "locations" to avoid shadowing the
						// static Path field of the same name.
						Set<String> uniqueLocations = new HashSet<>();
						for (String location : v1) {
							uniqueLocations.add(location);
						}
						return new Tuple2<Set<String>, Integer>(uniqueLocations, uniqueLocations.size());
					}
				});

		// Persist and print the final result.
		productLocationUnique.saveAsTextFile(outPath.toString() + "/3");
		List<Tuple2<String, Tuple2<Set<String>, Integer>>> results = productLocationUnique.collect();
		System.out.println("-----------the result--------");
		for (Tuple2<String, Tuple2<Set<String>, Integer>> t : results) {
			String produtID = t._1;
			System.out.println("产品ID： " + produtID + "  对应的地址信息：" + t._2.toString() + "  地址数量  " + t._2._2);
		}
		jsc.close();
		System.exit(0);
	}
}
