package crp.logic.clustering;

import java.io.File;

import crp.pipe.TokenSequenceRemoveNonTopicWords;

import cc.mallet.pipe.CharSequence2TokenSequence;
import cc.mallet.pipe.FeatureSequence2FeatureVector;
import cc.mallet.pipe.Input2CharSequence;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.SerialPipes;
import cc.mallet.pipe.TokenSequence2FeatureSequence;
import cc.mallet.pipe.TokenSequenceLowercase;

import cc.mallet.pipe.iterator.FileIterator;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;

public class RunNeoClustering {

	/**
	 * Builds a MALLET pipeline that reads documents from the "test" directory,
	 * tokenizes and lowercases them, keeps only tokens that appear in the
	 * keyword files under "keywords", converts each document to a feature
	 * vector, and prints the resulting instances.
	 *
	 * @param args command-line arguments (unused)
	 */
	public static void main(String[] args) {
		File[] directories = { new File("test") };
		File kwp = new File("keywords");
		File[] kwps = kwp.listFiles();
		// listFiles() returns null when "keywords" is missing or is not a
		// directory; fail fast with a clear message instead of an NPE later
		// inside the pipe.
		if (kwps == null) {
			System.err.println("Keyword directory not found or not readable: "
					+ kwp.getAbsolutePath());
			return;
		}

		Pipe instancePipe = new SerialPipes (new Pipe[] {
				new Input2CharSequence (),          // File -> CharSequence
				new CharSequence2TokenSequence (),  // Data String -> TokenSequence
				new TokenSequenceLowercase (),      // TokenSequence words lowercased
				new TokenSequenceRemoveNonTopicWords(kwps), // Keep only topic keywords
				new TokenSequence2FeatureSequence(),// Replace each Token with a feature index
				new FeatureSequence2FeatureVector(),// Collapse word order into a "feature vector"
			});

		InstanceList ilist = new InstanceList (instancePipe);
		// null filter: accept every file found under the given directories
		ilist.addThruPipe (new FileIterator (directories, null));
		for (int i = 0; i < ilist.size(); i++) {
			Instance inst = ilist.get(i);
			FeatureVector fv = (FeatureVector) inst.getData ();
			// Original loop left fv unused and printed blank lines; show the
			// instance name and its feature vector instead.
			System.out.println(inst.getName() + ": " + fv);
		}
		System.out.println("Processed " + ilist.size() + " instances.");
	}

}
