package com.xinlang.cluster;

import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.apache.commons.cli.BasicParser;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Options;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.Writer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.ToolRunner;
import org.apache.mahout.clustering.kmeans.KMeansDriver;
import org.apache.mahout.text.SequenceFilesFromDirectory;
import org.apache.mahout.utils.SequenceFileDumper;
import org.apache.mahout.utils.clustering.ClusterDumper;
import org.apache.mahout.vectorizer.SparseVectorsFromSequenceFiles;

import com.xinlang.excel.Table;
import com.xinlang.excel.TableUtil;
import com.xinlang.util.HadoopUtil;
import com.xinlang.util.HadoopUtil.RUN_MODE;

/**
 * Command-line driver that runs a full k-means text-clustering pipeline with
 * Apache Mahout: local text file -> SequenceFile on HDFS -> TF-IDF sparse
 * vectors -> k-means -> cluster dump -> Excel report.
 */
public class KMeansCluster {

	static Configuration conf = null;
	static FileSystem fs = null;

	// True = Mahout drivers run sequentially (local mode); initCluster() sets
	// this to false when running against a real cluster (MapReduce).
	static boolean runSequential = true;

	/**
	 * Initializes the Hadoop {@link Configuration} and {@link FileSystem}.
	 *
	 * @param runmode "cluster" for MapReduce execution; any other value selects local mode
	 * @throws IOException if the file system cannot be obtained
	 */
	static void initCluster(String runmode) throws IOException {
		RUN_MODE mode = RUN_MODE.LOCAL;
		if ("cluster".equals(runmode)) {
			mode = RUN_MODE.CLUSTER;
			runSequential = false;
		}
		conf = HadoopUtil.getConf(mode);
		fs = FileSystem.get(conf);
	}

	/** Builds the command-line options accepted by this tool. */
	private final static Options buildOptions() {
		Options options = new Options();
		options.addOption("intype", true,
				"[optional] txt(default)");
		options.addOption("in", true, "[required] input original data");
		options.addOption("nkluster", true,
				"[required] number of clusters. eg: 20");
		options.addOption("out", true, "[required] output directory path");
		options.addOption("outtype", true, "[optional] xls(default)");
		options.addOption("runmode", true, "[optional] cluster(default) or local");
		return options;
	}

	/** Prints the usage/help text for this tool. */
	private final static void printUsage(Options options) {
		HelpFormatter help = new HelpFormatter();
		help.printHelp("KMeansCluster", options);
	}

	public static void main(String[] args) throws Exception {
		Options options = buildOptions();
		BasicParser parser = new BasicParser();
		CommandLine commands = parser.parse(options, args);
		// BUG FIX: all three options are required, so usage must be shown when
		// ANY of them is missing. The original used "&&", which only triggered
		// when all were absent and then crashed with an NPE on the null value.
		if (!commands.hasOption("in") || !commands.hasOption("out") || !commands.hasOption("nkluster")) {
			printUsage(options);
			return;
		}
		String input = commands.getOptionValue("in");
		if (!input.endsWith(".txt")) {
			printUsage(options);
			return;
		}
		String output = commands.getOptionValue("out");
		if (output.endsWith("/")) {
			// Strip the trailing slash so path concatenation below stays clean.
			output = output.substring(0, output.length() - 1);
		}
		int nkluster;
		try {
			nkluster = Integer.parseInt(commands.getOptionValue("nkluster"));
		} catch (NumberFormatException e) {
			// A non-numeric -nkluster is a usage error, not an internal failure.
			printUsage(options);
			return;
		}
		// 0. Initialize the environment. Defaults to "cluster" (the previous
		// hard-coded behavior); pass -runmode local to run everything locally.
		System.out.println("Start step 0. Init environment.");
		initCluster(commands.getOptionValue("runmode", "cluster"));
		// 1. Convert the local input file into a SequenceFile on HDFS.
		System.out.println("Start step 1. Local file send to HDFS.");
		writeToSequenceFile(output, input);
		// 2. Vectorize the text with TF-IDF weighting.
		System.out.println("Start step 2. Sequence file to vectors.");
		seqToSparse(output + "/original-seqdir", output + "/original-seqdir-sparse-tfidf");
		// 3. Run the k-means clustering algorithm.
		System.out.println("Start step 3. Use k-means cluster algorithm.");
		kmeansCluster(output + "/original-seqdir-sparse-tfidf", nkluster);
		// 4. Export a clustering summary via the cluster dumper.
		System.out.println("Start step 4. Export result using cluster dumper.");
		clusterDumper(output + "/original-seqdir-sparse-tfidf");
		// 5. Generate an Excel report so the result is easy to inspect.
		System.out.println("Start step 5. Generate final report to excel.");
		List<Table> tables = KMeansClusterOutput.output(output + "/original-seqdir-sparse-tfidf/kmeans-output", "clusterdump.json", input);
		TableUtil.dumpToExcel(tables, "result.xls");

		System.out.println("All done!");
	}

	/*
	 * Equivalent command line:
	 *
	 * ./bin/mahout clusterdump
	 * -i /user/root/kmeans/original-kmeans/clusters-2-final
	 * -o clusterdump.json -of JSON
	 * -d /user/root/kmeans/original-out-seqdir-sparse-tfidf/dictionary.file-0
	 * -dt sequencefile
	 * -b 100 -n 20 --evaluate
	 * -dm org.apache.mahout.common.distance.CosineDistanceMeasure
	 * -sp 0
	 * --pointsDir /user/root/kmeans/original-kmeans/clusteredPoints
	 */
	/**
	 * Dumps the final clusters to clusterdump.json in JSON format.
	 *
	 * @param outDir parent directory containing kmeans-output and dictionary.file-0
	 */
	public static void clusterDumper(String outDir) throws Exception {
		// NOTE(review): "clusters-2-final" assumes k-means converged after
		// iteration 2; with other data the final-cluster directory name may
		// differ — confirm against the actual kmeans-output contents.
		String[] args = {"-i", outDir + "/kmeans-output/clusters-2-final",
				"-o", "clusterdump.json",
				"-of", "JSON",
				"-d", outDir + "/dictionary.file-0",
				"-dt", "sequencefile",
				"-b", "100", "-n", "20", "--evaluate",
				"-dm", "org.apache.mahout.common.distance.CosineDistanceMeasure",	// distance measure
				"-sp", "0",
				"--pointsDir", outDir + "/kmeans-output/clusteredPoints"};
		ToolRunner.run(conf, new ClusterDumper(), args);
	}

	/*
	 * Equivalent command line:
	 *
	 * ./bin/mahout kmeans
	 * -i /user/root/kmeans/original-out-seqdir-sparse-tfidf/tfidf-vectors
	 * -c /user/root/kmeans/original-kmeans-clusters
	 * -o /user/root/kmeans/original-kmeans
	 * -dm org.apache.mahout.common.distance.CosineDistanceMeasure
	 * -x 10
	 * -k 20
	 * -ow -cl
	 */
	/**
	 * Runs Mahout k-means over the TF-IDF vectors under {@code outDir}.
	 *
	 * @param outDir   directory holding tfidf-vectors; results go to outDir/kmeans-output
	 * @param nCluster k — number of clusters; initial centers are picked randomly
	 */
	public static void kmeansCluster(String outDir, int nCluster) throws Exception {
		String[] args = {"-i", outDir + "/tfidf-vectors",
				"-c", outDir + "/clusters",
				"-o", outDir + "/kmeans-output",
				"-dm", "org.apache.mahout.common.distance.CosineDistanceMeasure",	// distance measure
				"-x", "10",	// maximum number of iterations
				"-k", nCluster + "",	// -k makes RandomSeedGenerator pick the initial centers
				"-cl",	// also run the clustering (classification) step
				"-ow",
				// CONSISTENCY FIX: honor the runSequential flag set by initCluster()
				// instead of always forcing sequential execution.
				"-xm", runSequential ? "sequential" : "mapreduce"};
		ToolRunner.run(conf, new KMeansDriver(), args);
	}

	/*
	 * Equivalent command line:
	 *
	 * ./bin/mahout seq2sparse
	 * -i /user/root/kmeans/original-out-seqdir
	 * -o /user/root/kmeans/original-out-seqdir-sparse-tfidf
	 * -lnorm -nv -wt tfidf
	 * -a org.wltea.analyzer.lucene.IKAnalyzer -ow
	 */
	/**
	 * Converts a SequenceFile of documents into TF-IDF sparse vectors,
	 * tokenizing with the IKAnalyzer (Chinese text analyzer).
	 */
	public static void seqToSparse(String inDir, String outDir) throws Exception {
		String[] args = {"-i", inDir,
				"-o", outDir,
				"-lnorm", "-nv", "-ow",
				"-wt", "tfidf",
				"-a", "org.wltea.analyzer.lucene.IKAnalyzer"};
		ToolRunner.run(conf, new SparseVectorsFromSequenceFiles(), args);
	}

	/*
	 * Dumps a sequence file as text, equivalent to:
	 * hadoop fs -text part-m-00000 | more
	 *
	 * mahout seqdumper
	 * -i sogou-seq/part-m-00000
	 * -o sogou-seq-dir/part-m-00000
	 */
	public static void seqDumper(String outDir) throws Exception {
		String[] args = {"-i", outDir + "/original-seqdir/part-m-00000",
				"-o", outDir + "/part-m-00000.txt"
				};
		ToolRunner.run(conf, new SequenceFileDumper(), args);
	}

	/**
	 * Writes the original key/value records into a Text/Text SequenceFile that
	 * serves as the M/R input (inDir/original-seqdir/part-m-00000).
	 *
	 * @param inDir    target directory root for the sequence file
	 * @param original path of the original input file read by KMeansClusterOutput
	 * @throws IOException on any HDFS write failure
	 */
	public static void writeToSequenceFile(String inDir, String original) throws IOException {
		Path path = new Path(inDir + "/original-seqdir/part-m-00000");
		SequenceFile.Writer writer = SequenceFile.createWriter(
				conf,
				new SequenceFile.Writer.Option[] {
						SequenceFile.Writer.file(path),
						Writer.keyClass(Text.class),
						Writer.valueClass(Text.class) });
		try {
			Map<String, String> map = KMeansClusterOutput.readOriginalFile(original);
			// Iterate over entries to avoid a second map lookup per key.
			for (Map.Entry<String, String> entry : map.entrySet()) {
				writer.append(new Text(entry.getKey()), new Text(entry.getValue()));
			}
		} finally {
			// RESOURCE-LEAK FIX: close the writer even if append() throws.
			writer.close();
		}
	}

	/*
	 * Converts a directory of documents (e.g. microblog posts) into a
	 * sequence file, equivalent to:
	 *
	 * mahout seqdirectory
	 * -i data
	 * -o reuters-out-seqdir
	 * -c UTF-8 -chunk 5
	 */
	public static void toSequenceFile(String inDir) throws Exception {
		String[] args = {"-i", inDir + "/original",
				"-o", inDir + "/original-seqdir",
				"-c", "UTF-8",
				"-chunk", "5"};
		ToolRunner.run(conf, new SequenceFilesFromDirectory(), args);
	}
}
