package wordCount;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.*;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import clusteringTest.RemoveStopWord;


/**
 * Reads pre-segmented text from an HBase table, runs a MapReduce word count
 * over rows matching a given page date, then sorts the counts in descending
 * order and writes them to a local result file.
 *
 * @author dxl
 */
public class WordCount {

	/** Platform line separator used when writing the sorted result file. */
	final static String CRLN = System.getProperty("line.separator");

	/**
	 * Mapper: for each HBase row, tokenizes every cell value in the "seg"
	 * column family (after stop-word removal) and emits (word, 1) pairs.
	 */
	public static class TokenizerMapper extends TableMapper<Text, IntWritable> {
		private final static IntWritable one = new IntWritable(1);
		private Text word = new Text();
		public RemoveStopWord rw = new RemoveStopWord();

		public void map(ImmutableBytesWritable row, Result values, Context context)
				throws IOException, InterruptedException {
			// Tokenize inside the loop so that EVERY "seg" cell contributes.
			// The original kept one 'term' variable outside the loop, which
			// dropped all but the last matching cell and threw a
			// NullPointerException when the row had no "seg" cell at all.
			for (KeyValue kv : values.list()) {
				if ("seg".equals(Bytes.toString(kv.getFamily()))) {
					String term = rw.RemoveStop(Bytes.toString(kv.getValue()));
					if (term == null) {
						continue; // guard: stop-word removal may leave nothing
					}
					StringTokenizer itr = new StringTokenizer(term);
					while (itr.hasMoreTokens()) {
						word.set(itr.nextToken());
						context.write(word, one);
					}
				}
			}
		}
	}

	/**
	 * Reducer (also used as combiner): sums the 1-counts for each word and
	 * emits (word, total).
	 */
	public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
		private IntWritable result = new IntWritable();

		public void reduce(Text key, Iterable<IntWritable> values, Context context)
				throws IOException, InterruptedException {
			int sum = 0;
			for (IntWritable val : values) {
				sum += val.get();
			}
			result.set(sum);
			context.write(key, result);
		}
	}

	/**
	 * Runs the word-count job over the given HBase table, restricted to rows
	 * whose "date" family value equals {@code page_date}, then sorts the
	 * counts descending and writes "word count" lines (UTF-8) to
	 * {@code wordcount_result}/{@code page_date}.
	 *
	 * @param page_title       name of the HBase table holding the pages
	 * @param page_date        date value a row's "date" family must equal
	 * @param wordcount_result local directory that receives the result file
	 * @throws Exception if the MapReduce job fails or local file I/O fails
	 */
	public void WordCount(String page_title, String page_date, String wordcount_result) throws Exception {
		Configuration conf = new Configuration();
		conf.set("hadoop.job.ugi", "tuxinhui,tuxinhui");
		conf = HBaseConfiguration.create(conf);

		Job job = new Job(conf, "word count");
		job.setJarByClass(WordCount.class);
		job.setCombinerClass(IntSumReducer.class);
		job.setReducerClass(IntSumReducer.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);

		Scan scan = new Scan();
		scan.setCaching(500);       // larger scanner batches for MR throughput
		scan.setCacheBlocks(false); // full scans should not pollute the block cache

		// Only rows whose "date" family value equals page_date are counted.
		Filter filter = new SingleColumnValueFilter(Bytes.toBytes("date"), null,
				CompareOp.EQUAL, Bytes.toBytes(page_date));
		scan.setFilter(filter);

		TableMapReduceUtil.initTableMapperJob(page_title, scan,
				WordCount.TokenizerMapper.class, Text.class, IntWritable.class, job);

		Path temp = new Path(wordcount_result + "/temp/" + page_date);
		FileOutputFormat.setOutputPath(job, temp);

		// Fail fast: the original ignored the job result and then crashed with
		// a confusing FileNotFoundException when the part file was missing.
		if (!job.waitForCompletion(true)) {
			throw new IOException("word count job failed for table " + page_title);
		}

		// Read the reducer output ("word<TAB>count" per line) back in.
		HashMap<String, Integer> counts = new HashMap<String, Integer>();
		BufferedReader br = new BufferedReader(new InputStreamReader(
				new FileInputStream(wordcount_result + "/temp/" + page_date + "/part-r-00000"), "UTF-8"));
		try {
			for (String line = br.readLine(); line != null; line = br.readLine()) {
				String[] split = line.split("\t");
				if (split.length < 2) {
					continue; // skip malformed lines instead of throwing AIOOBE
				}
				counts.put(split[0], Integer.parseInt(split[1]));
			}
		} finally {
			br.close(); // close even when a parse error is thrown
		}

		if (!counts.isEmpty()) {
			// Sort entries by count, descending (typed — the original used raw
			// types and unchecked Map.Entry casts).
			List<Map.Entry<String, Integer>> sorted = new LinkedList<Map.Entry<String, Integer>>(counts.entrySet());
			Collections.sort(sorted, new Comparator<Map.Entry<String, Integer>>() {
				public int compare(Map.Entry<String, Integer> o1, Map.Entry<String, Integer> o2) {
					return o2.getValue().compareTo(o1.getValue());
				}
			});

			// Write "word count" per line. Use UTF-8 explicitly so the output
			// encoding matches the UTF-8 used to read the part file above
			// (FileWriter would use the platform default charset).
			String resultfile = wordcount_result + "/" + page_date;
			BufferedWriter bw = new BufferedWriter(
					new OutputStreamWriter(new FileOutputStream(resultfile), "UTF-8"));
			try {
				StringBuilder sb = new StringBuilder();
				for (Map.Entry<String, Integer> entry : sorted) {
					sb.append(entry.getKey()).append(' ').append(entry.getValue());
					sb.append(CRLN);
				}
				bw.write(sb.toString());
				bw.flush();
			} finally {
				bw.close();
			}
		}

		// Remove the intermediate MR output now. fs.delete is immediate,
		// unlike the original deleteOnExit which only fired when the
		// FileSystem instance was closed.
		FileSystem fs = FileSystem.get(conf);
		fs.delete(new Path(wordcount_result + "/temp"), true);
	}

	/**
	 * Entry point: counts words for one hard-coded page table and date.
	 */
	public static void main(String[] args) {
		String page_title = "blog_page";  // HBase table name
		String page_date = "2010-12-10";  // page date used to filter rows
		String wordcount_result = "/home/tuxinhui/workspace/analyzer/AnalyzerResluts/wordcountresult"; // result directory
		WordCount testwordcount = new WordCount();
		try {
			testwordcount.WordCount(page_title, page_date, wordcount_result);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}