package com.mango.ch04;

import java.io.IOException;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.fs.Path;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFlatMapFunction;
import org.apache.spark.api.java.function.PairFunction;

import com.mango.HDFSTools.HDFSAPI;

import scala.Tuple2;

/**
 * 使用传统Spark方式实现左外连接
 * 
 * @author Mango
 *
 */
public class Spark_Left_Out_Join_Job {
	static Path transactions = new Path("/MR_Left_Out_Join_Job/input/transactions.txt");// input
	static Path locations = new Path("/MR_Left_Out_Join_Job/input/users.txt"); // input
	static Path outPath = new Path("/MR_Left_Out_Join_Job/spark_output");
	static Path inputPath = new Path("/MR_Left_Out_Join_Job/input");

	/**
	 * Entry point: performs a left outer join of transactions against user
	 * locations, then reports, per product, the set of distinct locations it
	 * was sold in (products with no matching user get location "undefined").
	 * Intermediate RDDs are saved to numbered subdirectories of {@code outPath}
	 * for inspection.
	 *
	 * @param args unused
	 */
	@SuppressWarnings("serial")
	public static void main(String[] args) {
		// Prepare the HDFS input directory and clear any previous output.
		try {
			HDFSAPI hdfs = new HDFSAPI();
			hdfs.createDirectory(inputPath);
			// Delete a pre-existing output directory so saveAsTextFile won't fail.
			hdfs.delDirectory(outPath);
		} catch (InterruptedException e) {
			// Restore the interrupt status so callers/thread pools can see it.
			Thread.currentThread().interrupt();
			e.printStackTrace();
			System.out.println("----------File operation failed");
		} catch (IOException | URISyntaxException e) {
			e.printStackTrace();
			System.out.println("----------File operation failed");
		}
		JavaSparkContext jsc = new JavaSparkContext();
		// Create one RDD per input file (users/locations and transactions).
		JavaRDD<String> location_lines = jsc.textFile(locations.toString(), 1);
		JavaRDD<String> product_lines = jsc.textFile(transactions.toString(), 1);
		// Map each file into (userID, tagged-value) pairs; the "L"/"P" tag
		// records which side of the join each value came from.
		JavaPairRDD<String, Tuple2<String, String>> users = location_lines
				.mapToPair(new PairFunction<String, String, Tuple2<String, String>>() {

					@Override
					public Tuple2<String, Tuple2<String, String>> call(String t) throws Exception {
						String[] tokens = t.split(",");
						Tuple2<String, String> location = new Tuple2<String, String>("L", tokens[1]);
						// Emit [userID, ("L", locationID)]
						return new Tuple2<String, Tuple2<String, String>>(tokens[0], location);
					}
				});
		JavaPairRDD<String, Tuple2<String, String>> products = product_lines
				.mapToPair(new PairFunction<String, String, Tuple2<String, String>>() {

					@Override
					public Tuple2<String, Tuple2<String, String>> call(String t) throws Exception {
						String[] tokens = t.split(",");
						Tuple2<String, String> product = new Tuple2<String, String>("P", tokens[1]);
						// Emit [userID, ("P", productID)] — userID is column 2 here.
						return new Tuple2<String, Tuple2<String, String>>(tokens[2], product);
					}
				});
		users.saveAsTextFile(outPath.toString() + "/1");
		products.saveAsTextFile(outPath.toString() + "/2");
		// Union both tagged pair RDDs so one groupByKey can gather, per user,
		// the location (if any) together with all of that user's products.
		JavaPairRDD<String, Tuple2<String, String>> allPairs = users.union(products);
		allPairs.saveAsTextFile(outPath.toString() + "/3");
		// Group by userID; values are an Iterable of tagged ("L"/"P") tuples.
		JavaPairRDD<String, Iterable<Tuple2<String, String>>> groupRDD = allPairs.groupByKey();
		groupRDD.saveAsTextFile(outPath.toString() + "/4");
		// Re-key to (productID, location). Users without a location row keep
		// the "undefined" default — this is what makes the join LEFT OUTER.
		JavaPairRDD<String, String> productLocationsRDD = groupRDD.flatMapToPair(
				new PairFlatMapFunction<Tuple2<String, Iterable<Tuple2<String, String>>>, String, String>() {

					@Override
					public Iterator<Tuple2<String, String>> call(Tuple2<String, Iterable<Tuple2<String, String>>> t)
							throws Exception {
						// The userID (t._1) itself is no longer needed.
						Iterable<Tuple2<String, String>> pairs = t._2;
						String location = "undefined";
						List<String> products = new ArrayList<>();
						for (Tuple2<String, String> tup : pairs) {
							if (tup._1.equals("L")) {
								location = tup._2;
							} else {
								// Tagged ("P", productID) pair.
								products.add(tup._2);
							}
						}
						// Emit one (productID, location) pair per product.
						List<Tuple2<String, String>> kvList = new ArrayList<>();
						for (String product : products) {
							kvList.add(new Tuple2<String, String>(product, location));
						}
						return kvList.iterator();
					}
				});
		productLocationsRDD.saveAsTextFile(outPath.toString() + "/5");
		// Group locations by product so duplicates can be collapsed and counted.
		JavaPairRDD<String, Iterable<String>> productByLocations = productLocationsRDD.groupByKey();
		productByLocations.saveAsTextFile(outPath.toString() + "/6");
		JavaPairRDD<String, Tuple2<Set<String>, Integer>> plUnique = productByLocations
				.mapValues(new Function<Iterable<String>, Tuple2<Set<String>, Integer>>() {

					@Override
					public Tuple2<Set<String>, Integer> call(Iterable<String> v1) throws Exception {
						// Deduplicate locations via a Set.
						Set<String> uniqueLocations = new HashSet<>();
						for (String location : v1) {
							uniqueLocations.add(location);
						}
						// Result shape: (productID, (locationSet, distinctLocationCount))
						return new Tuple2<Set<String>, Integer>(uniqueLocations, uniqueLocations.size());
					}
				});
		plUnique.saveAsTextFile(outPath.toString() + "/7");
		// Collect and print the final result to the driver console.
		System.out.println("-----------the result--------");
		List<Tuple2<String, Tuple2<Set<String>, Integer>>> result = plUnique.collect();
		for (Tuple2<String, Tuple2<Set<String>, Integer>> t : result) {
			String productID = t._1;
			System.out.println("产品ID： " + productID + "对应的地址信息：" + t._2.toString());
		}
		jsc.close();
		System.exit(0);
	}
}
