package com.hw.spark.service;

import java.io.Serializable;
import java.util.Iterator;
import java.util.List;

import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.storage.StorageLevel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.JiebaSegmenter.SegMode;
import com.huaban.analysis.jieba.SegToken;
import com.hw.spark.model.WordCount;

import scala.Tuple2;

@Service
public class SparkService implements Serializable {

	/** Default input file read when the no-arg overload is used. */
	private static final String DEFAULT_INPUT_PATH = "D:\\wordcount.txt";
	/** Default output directory written when the no-arg overload is used. */
	private static final String DEFAULT_OUTPUT_PATH = "F:\\wordCountOut";

	// transient: Spark serializes closures, and the SparkContext itself must
	// never be shipped to executors, so it is excluded from serialization.
	@Autowired
	private transient JavaSparkContext javaSparkContext;

	/*
	 * Spark transformations execute lazily on executor JVMs, where Spring's
	 * dependency injection never ran — an @Autowired instance field would be
	 * null (NPE) inside a transformation. Workarounds: a static field (each
	 * executor JVM initializes it on class load) or a local variable declared
	 * inside the Function implementation.
	 */
	private static final JiebaSegmenter jbs = new JiebaSegmenter();
	private static final Logger log = LoggerFactory.getLogger(SparkService.class);

	/**
	 * Runs the word-count job against the default input file and writes the
	 * result to the default output directory.
	 *
	 * @return one {@link WordCount} per distinct token found in the input
	 */
	public List<WordCount> doWordCount() {
		return doWordCount(DEFAULT_INPUT_PATH, DEFAULT_OUTPUT_PATH);
	}

	/**
	 * Tokenizes every line of {@code inputPath} with jieba (SEARCH mode),
	 * counts occurrences of each token, saves the counts as text to
	 * {@code outputPath} and returns them to the driver.
	 *
	 * @param inputPath  file to read (local path or HDFS URI), one record per line
	 * @param outputPath directory to write the counts to; must not already
	 *                   exist (Hadoop output semantics — the save throws otherwise)
	 * @return one {@link WordCount} per distinct token found in the input
	 */
	public List<WordCount> doWordCount(String inputPath, String outputPath) {
//		1. Read the input file as an RDD of lines (at least 1 partition).
		JavaRDD<String> file = javaSparkContext.textFile(inputPath, 1);
//		2. Split each line into tokens.
		JavaRDD<SegToken> words = file.flatMap(new FlatMapFunction<String, SegToken>() {
			@Override
			public Iterator<SegToken> call(String line) throws Exception {
				// An Iterator is single-use: once consumed (e.g. by logging it),
				// hasNext() returns false — so hand it to Spark untouched.
				return jbs.process(line, SegMode.SEARCH).iterator();
			}

		});
//		3. Pair each token with an initial count of 1.
		JavaPairRDD<String, Long> wordpair = words.mapToPair(
				// Returning key/value pairs requires PairFunction.
				new PairFunction<SegToken, String, Long>() {
					@Override
					public Tuple2<String, Long> call(SegToken v) throws Exception {
						// Parameterized logging avoids string concat when the level is off.
						log.info("转换：{}", v.word);
						return new Tuple2<String, Long>(v.word, 1L);
					}

				}
		);
//		4. Sum the counts per token. Cache the result: it feeds two downstream
//		actions (save + collect) and must not be recomputed for each of them.
		JavaPairRDD<String, Long> wordcount = wordpair.reduceByKey((count1, count2) -> count1 + count2);
		wordcount.cache();
//		5. Wrap each (word, count) tuple in a WordCount DTO.
		JavaRDD<WordCount> wordcounts = wordcount.map((tuple2) -> new WordCount(tuple2._1, tuple2._2));
//		Save BEFORE collecting so a write failure (e.g. the output directory
//		already exists) surfaces before results are handed back to the caller.
		wordcounts.saveAsTextFile(outputPath);
		List<WordCount> result = wordcounts.collect();
//		Log on the driver — an RDD foreach would print on executor JVMs instead.
		result.forEach(w -> log.info("{}:{}", w.getWord(), w.getCount()));
		return result;
	}

}
