package com.xinlang.cluster;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.Reader.Option;
import org.apache.mahout.clustering.classify.WeightedPropertyVectorWritable;
import org.apache.mahout.clustering.kmeans.Kluster;

import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.xinlang.excel.Table;
import com.xinlang.excel.TableUtil;
import com.xinlang.util.HadoopUtil;
import com.xinlang.util.HadoopUtil.RUN_MODE;

/**
 * Post-processes Mahout k-means output: joins the clustered points with the
 * original weibo texts and the cluster-dumper JSON, and assembles the result
 * into {@link Table} instances suitable for Excel export.
 */
public class KMeansClusterOutput {

	static Configuration conf = null;
	static FileSystem fs = null;

	// Whether Mahout jobs should run sequentially (true for local mode).
	static boolean runSequential = true;

	/**
	 * Initializes the shared Hadoop {@link Configuration} and {@link FileSystem}.
	 *
	 * @param runmode "cluster" selects cluster mode (and non-sequential runs);
	 *                any other value selects local mode
	 * @throws IOException if the file system cannot be obtained
	 */
	static void initCluster(String runmode) throws IOException {
		RUN_MODE mode = RUN_MODE.LOCAL;
		if ("cluster".equals(runmode)) {
			mode = RUN_MODE.CLUSTER;
			runSequential = false;
		}
		conf = HadoopUtil.getConf(mode);
		fs = FileSystem.get(conf);
	}

	/**
	 * Reads the clustered points produced by k-means together with the
	 * cluster-dumper JSON and the original weibo data, and builds the output
	 * tables: a summary table, one table listing every record, and one table
	 * per cluster.
	 *
	 * @param outDir            k-means output directory (contains the clusteredPoints subdirectory)
	 * @param clusterDumperJson path of the cluster-dumper JSON file (one JSON object per line)
	 * @param originalDir       path of the original data file ("id&lt;TAB&gt;text" lines)
	 * @return the assembled tables, summary table first
	 * @throws IOException on any HDFS or local read failure
	 */
	public static List<Table> output(String outDir, String clusterDumperJson, String originalDir) throws IOException {
		initCluster("cluster");
		Path path = new Path(outDir + "/" + Kluster.CLUSTERED_POINTS_DIR + "/part-m-0");
		SequenceFile.Reader reader = new SequenceFile.Reader(conf,
				new Option[] { SequenceFile.Reader.file(path.makeQualified(fs.getUri(), fs.getWorkingDirectory())) });
		IntWritable key = new IntWritable();
		WeightedPropertyVectorWritable value = new WeightedPropertyVectorWritable();
		// Switch back to local mode for the remaining plain-file reads.
		initCluster("local");
		Table table = new Table("聚类数据");
		Table tableSummary = new Table("汇总信息", "类别号");
		// Summary sheet: list the highest-weighted terms of each cluster.
		List<String> lines = readFileByLines(clusterDumperJson);
		for (String line : lines) {
			if (line.contains("top_terms")) {
				// FIX: the original called JSONObject.fromObject()/getInt(), which is the
				// net.sf.json API; the imported fastjson library uses parseObject()/getIntValue().
				JSONObject jsonObj = JSONObject.parseObject(line);
				int clusterIdNum = jsonObj.getIntValue("cluster_id");
				JSONArray jArray = jsonObj.getJSONArray("top_terms");
				StringBuilder keywords = new StringBuilder();
				for (int i = 0; i < jArray.size(); i++) {
					// Keep the original leading-space join format.
					keywords.append(' ').append(jArray.getJSONObject(i).getString("term"));
				}
				tableSummary.put("" + clusterIdNum, "关键词", keywords.toString());
			}
		}

		// One table per cluster id, holding that cluster's weibo records.
		Map<String, Table> clusterTableMap = new HashMap<String, Table>();
		// Original weibo texts keyed by weibo id.
		Map<String, String> map = readOriginalFile(originalDir);
		int index = 0;
		Table tableCluster = null;
		try {
			while (reader.next(key, value)) {
				index++;
				String clusterId = key.toString();
				String valueString = value.getVector().toString();
				// A named vector prints as "<name>:{index:weight,...}"; the weibo id is
				// everything before the ':' that precedes the first '{'.
				int brace = valueString.indexOf('{');
				if (brace <= 0) {
					continue; // FIX: unnamed/malformed vector — no id to join on, skip it
				}
				String weiboId = valueString.substring(0, brace - 1);
				// Sheet listing every clustered record.
				String id = String.format("%6d", index);
				table.put(id, "微博ID", weiboId);
				table.put(id, "类别", clusterId);
				table.put(id, "微博文本", map.get(weiboId));
				// Per-cluster sheet, created lazily on first record of the cluster.
				tableCluster = clusterTableMap.get(clusterId);
				if (tableCluster == null) {
					tableCluster = new Table("类别-" + clusterId, "序号");
					clusterTableMap.put(clusterId, tableCluster);
				}
				tableCluster.put(weiboId, "类别", clusterId);
				tableCluster.put(weiboId, "微博文本", map.get(weiboId));
				tableSummary.increase(clusterId, "微博数", 1);
			}
		} finally {
			// FIX: close the sequence file even if reading fails part-way through.
			reader.close();
		}

		List<Table> tables = new ArrayList<Table>();
		tables.add(tableSummary);
		tables.add(table);
		tables.addAll(clusterTableMap.values());

		return tables;
	}

	/**
	 * Reads a tab-separated file of "id&lt;TAB&gt;text" lines into a map.
	 *
	 * @param filePath path of the UTF-8 data file
	 * @return id -&gt; text map (later duplicate ids overwrite earlier ones)
	 */
	public static Map<String, String> readOriginalFile(String filePath) {
		Map<String, String> map = new HashMap<String, String>();
		List<String> lines = readFileByLines(filePath);
		for (String line : lines) {
			// Limit 2 keeps any tabs inside the text part of the line intact.
			String[] temp = line.split("\t", 2);
			if (temp.length < 2) {
				continue; // FIX: skip malformed lines instead of ArrayIndexOutOfBounds
			}
			map.put(temp[0], temp[1]);
		}
		return map;
	}

	/**
	 * Reads every file directly under a directory, mapping each file name to
	 * the file's first line. Empty files are skipped.
	 *
	 * @param filePath directory to scan (non-recursive)
	 * @return file name -&gt; first line map; empty if the path is not a directory
	 */
	public static Map<String, String> readOriginalFiles(String filePath) {
		Map<String, String> map = new HashMap<String, String>();
		File[] files = new File(filePath).listFiles();
		if (files == null) {
			return map; // FIX: listFiles() returns null for non-directories — avoid NPE
		}
		for (File file : files) {
			List<String> lines = readFileByLines(file.getPath());
			if (!lines.isEmpty()) {
				map.put(file.getName(), lines.get(0));
			}
		}
		return map;
	}

	/**
	 * Reads a UTF-8 text file line by line; commonly used for line-oriented
	 * formatted files.
	 *
	 * @param fileName path of the file to read
	 * @return the file's lines; possibly partial/empty on I/O error (the error is printed)
	 */
	public static List<String> readFileByLines(String fileName) {
		List<String> lines = new ArrayList<String>();
		// try-with-resources replaces the original's double close() and empty catch.
		try (BufferedReader reader = new BufferedReader(
				new InputStreamReader(new FileInputStream(fileName), StandardCharsets.UTF_8))) {
			String line;
			while ((line = reader.readLine()) != null) {
				lines.add(line);
			}
		} catch (IOException e) {
			// Preserve the original best-effort contract: log and return what we have.
			e.printStackTrace();
		}
		return lines;
	}

	public static void main(String[] args) throws IOException {
		initCluster("local");

		List<Table> tables = output(".", "clusterdump.json", "original.txt");
		TableUtil.dumpToExcel(tables, "test0404.xls");

		System.out.println("All done!");
	}
}
