package clusteringDemo;

/*
 * Source code for Listing 14.1
 * Example from the "Mahout in Action" book: builds a term feature vector
 * from raw text using a Lucene analyzer.
 */

import java.io.IOException;
import java.io.StringReader;
import java.util.Iterator;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.Version;
import org.apache.mahout.math.RandomAccessSparseVector;
import org.apache.mahout.math.SequentialAccessSparseVector;
import org.apache.mahout.math.Vector;
import org.apache.mahout.vectorizer.encoders.FeatureVectorEncoder;
import org.apache.mahout.vectorizer.encoders.StaticWordValueEncoder;

public class TokenizingAndVectorizingText {

	/** Default dimensionality of the produced feature vectors. */
	private static final int DEFAULT_CARDINALITY = 100;

	/**
	 * Tokenizes the given text with Lucene's {@code StandardAnalyzer} and
	 * encodes each token into a sparse feature vector of the default
	 * cardinality ({@value #DEFAULT_CARDINALITY}).
	 *
	 * @param text the raw text to tokenize and vectorize
	 * @return a sequential-access sparse vector of the encoded token features
	 * @throws IOException if the token stream fails while reading the text
	 */
	public SequentialAccessSparseVector TransformToVector(String text) throws IOException {
		return TransformToVector(text, DEFAULT_CARDINALITY);
	}

	/**
	 * Tokenizes the given text with Lucene's {@code StandardAnalyzer} and
	 * encodes each token into a sparse feature vector of the requested
	 * cardinality.
	 *
	 * <p>Tokens are accumulated into a {@code RandomAccessSparseVector}
	 * (efficient for random writes) and converted to a
	 * {@code SequentialAccessSparseVector} (efficient for sequential reads,
	 * e.g. distance computations) once encoding is complete.
	 *
	 * @param text        the raw text to tokenize and vectorize
	 * @param cardinality the dimensionality of the resulting vector
	 * @return a sequential-access sparse vector of the encoded token features
	 * @throws IOException if the token stream fails while reading the text
	 */
	public SequentialAccessSparseVector TransformToVector(String text, int cardinality) throws IOException {
		FeatureVectorEncoder encoder = new StaticWordValueEncoder("text");
		Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_31);
		TokenStream ts = analyzer.tokenStream("body", new StringReader(text));
		TermAttribute termAtt = ts.addAttribute(TermAttribute.class);

		// Accumulate into a random-access vector; writes land at arbitrary
		// indices, which is slow on a sequential-access vector.
		Vector accumulator = new RandomAccessSparseVector(cardinality);
		try {
			// TokenStream consumer contract: reset() before the first
			// incrementToken(), end() after the last one, then close().
			ts.reset();
			while (ts.incrementToken()) {
				String word = new String(termAtt.termBuffer(), 0, termAtt.termLength());
				encoder.addToVector(word, 1, accumulator);
			}
			ts.end();
		} finally {
			// Release analyzer/tokenizer resources even if tokenization fails.
			ts.close();
			analyzer.close();
		}
		return new SequentialAccessSparseVector(accumulator);
	}

}
