package clusteringTest;

import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HTablePool;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;




import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.ObjectWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.Reducer.Context;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.mahout.math.SequentialAccessSparseVector;
import org.apache.mahout.math.Vector;
import org.apache.mahout.math.VectorWritable;

import test.ReaderFromHDFS;

public class HbaseToVector {

	/**
	 * Reads rows from an HBase table, converts each row's "seg" column text into a
	 * sparse Mahout vector, and writes (url, vector) pairs into an HDFS
	 * SequenceFile for later clustering. A plain-text id/vector dictionary is
	 * also written to HDFS.
	 *
	 * @author dxl
	 */
	public static Configuration configuration;
	static {
		// HBaseConfiguration.create() picks up hbase-site.xml from the classpath;
		// uncomment and adjust the settings below to target a specific cluster.
		configuration = HBaseConfiguration.create();
		//configuration.set("hbase.zookeeper.property.clientPort", "2181");
		//configuration.set("hbase.zookeeper.quorum", "192.168.1.100");
		//configuration.set("hbase.master", "192.168.1.100:600000");
	}

	/** Platform line separator used when building the textual dictionary. */
	final static String CRLN = System.getProperty("line.separator");
	/** Shared text-to-vector transformer. */
	public static VectorizingText VT = new VectorizingText();

	/**
	 * Scans {@code tableName} keeping only rows whose "date" column equals
	 * {@code date}, vectorizes each row's "seg" column content and appends
	 * (url, vector) pairs to {@code .../clustering/points/file1}. An
	 * "id vector" dictionary line per row is additionally written to
	 * {@code .../clustering/dic}.
	 *
	 * @param tableName name of the HBase table to scan
	 * @param date      exact value the "date" column family must match
	 * @throws IOException on HBase or HDFS access failure
	 */
	public void TransformToVector(String tableName, String date) throws IOException
	{
		ReaderFromHDFS RF = new ReaderFromHDFS();

		Configuration conf = new Configuration();
		String uri = "hdfs://localhost:9000/user/tuxinhui/clustering/points/";
		Path path = new Path("hdfs://localhost:9000/user/tuxinhui/clustering/points/file1");
		FileSystem fs = FileSystem.get(URI.create(uri), conf);

		HTablePool pool = new HTablePool(configuration, 1000);
		HTable table = (HTable) pool.getTable(tableName);

		// StringBuilder instead of StringBuffer: single-threaded use, no need
		// for the synchronized variant.
		StringBuilder sb = new StringBuilder();

		Scan scan = new Scan();
		scan.setCaching(500);
		scan.setCacheBlocks(false); // recommended for full scans to avoid polluting the block cache

		// Keep only rows whose "date" column value equals the requested date.
		Filter filter1 = new SingleColumnValueFilter(Bytes.toBytes("date"), null, CompareOp.EQUAL, Bytes.toBytes(date));
		scan.setFilter(filter1);

		try {
			SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf, path, Text.class, VectorWritable.class);
			ResultScanner rs = table.getScanner(scan);
			try {
				for (Result r : rs) {
					System.out.println("row key: " + new String(r.getRow()));
					String id = null;
					Text ID = new Text();
					SequentialAccessSparseVector point = null;
					for (KeyValue keyValue : r.list()) {
						// Compute the family name once per KeyValue instead of twice.
						String family = Bytes.toString(keyValue.getFamily());
						if ("url".equals(family)) {
							id = Bytes.toString(keyValue.getValue());
							System.out.println("id: " + id);
							ID.set(id);
						}
						if ("seg".equals(family)) {
							String content = Bytes.toString(keyValue.getValue());
							System.out.println("content: " + content);
							point = VT.TransformToVector(content);
							System.out.println("Vector  " + point.toString());
						}
					}
					// BUGFIX: a row without a "seg" column used to leave point == null
					// and crash with a NullPointerException below; skip such rows.
					if (point == null) {
						continue;
					}
					VectorWritable vec = new VectorWritable();
					vec.set(point); // the url is the key, the vector is the value
					writer.append(ID, vec);
					sb.append(id).append(' ').append(point.toString()).append(CRLN);
				}
			} finally {
				// BUGFIX: close the scanner and writer even when the loop throws,
				// so scanner leases and HDFS file handles are always released.
				rs.close();
				writer.close();
			}
			String result = sb.toString().trim();
			RF.WriterHDFS(result, "hdfs://localhost:9000/user/tuxinhui/clustering/dic"); // part-r-00000
		} finally {
			// BUGFIX: the pooled table was never released and fs only closed on
			// the happy path; release both unconditionally.
			table.close();
			fs.close();
		}
	}

	public static void main(String[] args) throws Exception {
		HbaseToVector hv = new HbaseToVector();
		hv.TransformToVector("blog_page", "2010-12-08");
	}
}