package spark;

import com.google.common.collect.Lists;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.mllib.feature.HashingTF;

import java.util.ArrayList;
import java.util.Iterator;

/**
 * Author: LDL
 * Purpose: Spark TF-IDF demo — parallelizes a tiny tokenized corpus and prints its tokens.
 * Created: 2015/7/6 13:30
 */
public class SparkTfIdf {

    /**
     * Entry point: builds a small in-memory corpus of three tokenized
     * sentences, distributes it across 2 partitions, and prints every token
     * to stdout.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        // Local-mode context for this demo.
        JavaSparkContext sc = new JavaSparkContext("local", "JavaTfIdfSuite");
        try {
            @SuppressWarnings("unchecked")
            JavaRDD<ArrayList<String>> documents = sc.parallelize(Lists.newArrayList(
                    Lists.newArrayList("this is a sentence".split(" ")),
                    Lists.newArrayList("this is another sentence".split(" ")),
                    Lists.newArrayList("this is still a sentence".split(" "))), 2);

            // toLocalIterator() streams the RDD back to the driver one
            // partition at a time instead of collecting everything at once.
            Iterator<ArrayList<String>> d = documents.toLocalIterator();
            while (d.hasNext()) {
                for (String token : d.next()) {
                    System.out.println(token);
                }
            }

            // TODO(review): the actual TF-IDF computation this class is named
            // for was left as commented-out dead code (HashingTF.transform ->
            // IDF.fit -> transform -> print weight of "this"); it referenced
            // Vector/IDF/List without imports and could not compile, so it
            // was removed. Re-add with the proper org.apache.spark.mllib
            // imports if the TF-IDF demo is needed.
        } finally {
            // Fix: the original never stopped the context, leaking the local
            // Spark backend and its threads until JVM exit.
            sc.stop();
        }
    }

}
