package com.hw.spark.service;

import java.io.Serializable;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.PairFunction;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import scala.Tuple2;

@Service
public class SparkDemo implements Serializable {

	/** Spark configuration; transient because SparkConf is not serializable. */
	private transient SparkConf sf;
	// FIX: the original pattern was "|", which in a regex means "empty OR empty"
	// and splits between every character. A literal space is what "SPACE" intends.
	private static final Pattern SPACE = Pattern.compile(" ");
	@Autowired
	// transient: default Java serialization skips transient fields. This class is
	// Serializable only so Spark can ship its closures; the context must not travel.
	private transient JavaSparkContext jsc;

	public SparkDemo() {
		super();
//		this.sf = new SparkConf().setMaster("local[4]").setAppName("sparkDemo");
//		this.jsc = new JavaSparkContext(this.sf);
	}

	/**
	 * Sums the integers 0..9 by reducing a 2-partition RDD and prints the result.
	 */
	public void demo() {
		JavaRDD<Integer> jrint = jsc.parallelize(Arrays.asList(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), 2);
		Integer sum = jrint.reduce((a, b) -> a + b);
		System.out.println("0,1,2,3,4,5,6,7,8,9 之和等于：" + sum);
	}

	/**
	 * Maps each digit 0..9 to a (Chinese numeral, digit) pair and prints the pairs.
	 * Digits outside 1..9 fall through to ("零", 0).
	 */
	public void demo1() {
		JavaRDD<Integer> jrint = jsc.parallelize(Arrays.asList(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), 2);
		// Transformation: digit -> (numeral, digit). A lambda replaces the original
		// anonymous PairFunction, matching the lambda style used elsewhere in this class.
		JavaPairRDD<String, Integer> si = jrint.mapToPair(t -> {
			switch (t) {
			case 1:
				return new Tuple2<>("一", 1);
			case 2:
				return new Tuple2<>("二", 2);
			case 3:
				return new Tuple2<>("三", 3);
			case 4:
				return new Tuple2<>("四", 4);
			case 5:
				return new Tuple2<>("五", 5);
			case 6:
				return new Tuple2<>("六", 6);
			case 7:
				return new Tuple2<>("七", 7);
			case 8:
				return new Tuple2<>("八", 8);
			case 9:
				return new Tuple2<>("九", 9);
			default:
				return new Tuple2<>("零", 0);
			}
		});
		List<Tuple2<String, Integer>> pairlist = si.collect();
		// Parameterized loop variable instead of the raw Tuple2 the original used.
		for (Tuple2<String, Integer> t : pairlist) {
			System.out.println(t._1.toString() + "" + t._2.toString());
		}
	}

	/**
	 * Classic word count: read a local text file, split each line on spaces,
	 * count occurrences per word, print and save the totals.
	 */
	public void demo2() {
		// 1. Read the input file (local path; HDFS variant kept below for reference).
		// JavaRDD<String>
		// frd=jsc.textFile("hdfs://127.0.0.1:9000/hw/wordcount.txt", 1);
		JavaRDD<String> frd = jsc.textFile("D:\\wordcount.txt", 1);
		// 2. Split each line into words. flatMap returns many values per input
		// element, whereas map returns exactly one.
		frd.foreach(f -> System.out.println(f));
		// FIX: the original split on the regex "|" (matches the empty string),
		// which produced one "word" per character. Use the precompiled SPACE
		// pattern so lines are split on actual spaces.
		JavaRDD<String> words = frd.flatMap(f -> Arrays.asList(SPACE.split(f)).iterator());

		// 3. Pair every word with an initial count of 1.
		JavaPairRDD<String, Integer> wordPairs = words.mapToPair(word -> {
			System.out.println(word);
			return new Tuple2<>(word, 1);
		});

//		 wordPairs.persist(StorageLevel.MEMORY_ONLY());
		// 4. Sum the counts per word. The two-argument lambda is a Function2;
		// Spark's functional interfaces put the return type last in the generics.
		JavaPairRDD<String, Integer> wordcount = wordPairs.reduceByKey((c1, c2) -> c1 + c2);

		// 5. Collect and print the totals on the driver.
		System.out.println("demo2---------------------------------------------");
		List<Tuple2<String, Integer>> wordcounts = wordcount.collect();
		for (Tuple2<String, Integer> w : wordcounts) {
			System.out.println(w._1.toString() + " " + w._2.toString());
		}

		// saveAsTextFile writes a directory of part files; fails if it already exists.
		wordcount.saveAsTextFile("F:\\wordout.txt");
		// wordcount.saveAsTextFile("hdfs://127.0.0.1:9000/hw/wordout.txt");

	}

}
