package com.song.sparkstudy;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;

public class JavaJsonTest {

	/**
	 * Demonstrates basic Spark RDD operations: flatMap over comma- and
	 * tab-delimited in-memory data, and reading a text file from HDFS.
	 * Intended to be launched via spark-submit (no master set here).
	 *
	 * @param args command-line arguments (unused)
	 */
	public static void main(String[] args) {
		SparkConf conf = new SparkConf().setAppName("hdfssave");
		JavaSparkContext context = new JavaSparkContext(conf);
		try {
			// Arrays.asList replaces the double-brace ArrayList initializer,
			// which creates a needless anonymous subclass per call site.
			List<String> lines = Arrays.asList("key,1,23", "key,2,23");
			JavaRDD<String> rddString = context.parallelize(lines);
			// Split each CSV line into its individual tokens.
			JavaRDD<String> flatMapRdd = rddString.flatMap(new FlatMapFunction<String, String>() {
				@Override
				public Iterable<String> call(String t) throws Exception {
					return Arrays.asList(t.split(","));
				}
			});

			for (String t : flatMapRdd.collect()) {
				System.out.print(" " + t + " ");
			}

			List<String> nlines = Arrays.asList("key	1	23", "key	2	23", "key	4	23", "key	7	23");

			JavaRDD<String> trdd = context.parallelize(nlines);
			// Build a composite key "<col0>_<col1>" from each tab-delimited line.
			JavaRDD<String> tokrdd = trdd.flatMap(new FlatMapFunction<String, String>() {
				@Override
				public Iterable<String> call(String str) throws Exception {
					// Split once instead of twice per line.
					String[] fields = str.split("\t");
					String key = fields[0] + "_" + fields[1];
					return Arrays.asList(key);
				}
			});
			for (String t : tokrdd.collect()) {
				System.out.print(" " + t + " ");
			}

			/**
			 * Spark loads HDFS or local files into an RDD line by line,
			 * so the RDD's elements are individual lines of the file.
			 */
			JavaRDD<String> hdfsstr = context.textFile("hdfs://song-dinfo:9000/sparkdata");
			for (String t : hdfsstr.collect()) {
				System.out.println("  hdfs str is :" + t);
			}
		} finally {
			// Always release the SparkContext, even if a job above throws.
			context.close();
		}
	}

}
