import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Hashtable;
import java.util.List;

import vo.Status;
import Service.StatusService;
import Utils.FileUtil;
import Utils.HashUtil;
import Utils.POSTool;
import Utils.StringUtils;

public class NLPTools {
	// Count adjacent word pairs whose POS tags exactly match the given
	// (pos1, pos2) tag pair; keys are "word1_word2".
	public static Hashtable<String, Integer> pairCount(String content, String pos1, String pos2) {
		Hashtable<String, Integer> counts = new Hashtable<String, Integer>();
		String[] tagged = POSTool.POSSentence(content).split(" ");
		String want1 = "/" + pos1;
		String want2 = "/" + pos2;
		for (int i = 0; i + 1 < tagged.length; i++) {
			int slash1 = tagged[i].indexOf("/");
			int slash2 = tagged[i + 1].indexOf("/");
			// Skip tokens that carry no POS tag at all.
			if (slash1 < 0 || slash2 < 0)
				continue;
			// Both tag suffixes must match exactly, otherwise skip the pair.
			if (!tagged[i].substring(slash1).equals(want1) || !tagged[i + 1].substring(slash2).equals(want2))
				continue;
			String pattern = tagged[i].substring(0, slash1) + "_" + tagged[i + 1].substring(0, slash2);
			counts = HashUtil.countHash(counts, pattern);
		}
		return counts;
	}

	// Count the distribution of words that carry a given POS tag.
	public static Hashtable<String, Integer> customizeItemCount(String content, String pos) {
		Hashtable<String, Integer> word_count = new Hashtable<String, Integer>();

		// POS-tag the text and walk every token.
		String[] tokens = POSTool.POSSentence(content).split(" ");
		String wanted = "/" + pos;

		for (String token : tokens) {
			int slash = token.indexOf("/");
			// Untagged tokens cannot match.
			if (slash < 0)
				continue;
			// Keep only tokens whose tag suffix matches exactly.
			if (!token.substring(slash).equals(wanted))
				continue;

			// Count the word portion of the token.
			word_count.merge(token.substring(0, slash), 1, Integer::sum);
		}
		return word_count;
	}

	// Count adjacent word pairs in the segmented text; keys are "word1_word2".
	// Bug fix: the guard used "length() < 0", which is never true, so empty
	// tokens produced by consecutive separators were counted; skip them now.
	public static Hashtable<String, Integer> pairCount(String content) {
		Hashtable<String, Integer> word_count = new Hashtable<String, Integer>();
		String items[] = POSTool.SegmentSentence(content).split(" ");
		for (int i = 0; i < items.length - 1; i++) {
			if (items[i].isEmpty() || items[i + 1].isEmpty())
				continue;
			word_count.merge(items[i] + "_" + items[i + 1], 1, Integer::sum);
		}
		return word_count;
	}

	// Count word frequencies in content and persist them to the user's term file.
	public static void frequentWords(String content, String userId, String userName) {
		Hashtable<String, Integer> counts = new Hashtable<String, Integer>();
		for (String token : POSTool.SegmentSentence(content).split(" ")) {
			counts.merge(token, 1, Integer::sum);
		}
		HashUtil.writeHash(counts, Test.root + "term/" + userId + "_" + userName + ".txt", -1);
	}

	// Remove stopwords from every user's term file and rewrite each file in place.
	// Fixes: build output with StringBuilder instead of O(n^2) string concat, and
	// split each line once instead of three times.
	public static void removeStops(String ids[], String names[]) {
		// Load the stopword list, one word per line.
		ArrayList<String> stops = new ArrayList<String>();
		String stopContent = FileUtil.readFile(Test.stopPath, "utf-8");
		for (String line : stopContent.split(Test.LINE_SEPARATOR)) {
			stops.add(line);
		}
		System.out.println("stops:" + stops.toString());

		// Filter each user's "word<sep>count" file, keeping non-stopword lines.
		for (int i = 0; i < ids.length; i++) {
			String fileName = Test.root + "term/" + ids[i] + "_" + names[i] + ".txt";
			String content = FileUtil.readFile(fileName, "utf-8");

			// NOTE(review): reads split on "\n" while writes use
			// Test.LINE_SEPARATOR — confirm these are the same separator.
			StringBuilder newContent = new StringBuilder();
			for (String line : content.split("\n")) {
				String parts[] = line.split(Test.TERM_SEPARATOR);
				if (parts.length != 2)
					continue;
				// Keep only words that are not in the stopword list.
				if (!stops.contains(parts[0]))
					newContent.append(parts[0]).append(Test.TERM_SEPARATOR).append(parts[1]).append(Test.LINE_SEPARATOR);
			}
			FileUtil.write(fileName, newContent.toString());
		}

	}

	// Compute document frequency: for each term in the global term list, how many
	// users' term files contain it. Writes "term<sep>count" lines to df.txt.
	// Fix: build the result with StringBuilder instead of O(n^2) string concat.
	public static void compDf(String ids[], String names[]) {
		// Load every user's term file into memory.
		ArrayList<String> contents = new ArrayList<String>();
		for (int i = 0; i < ids.length; i++) {
			contents.add(FileUtil.readFile(Test.root + "term/" + ids[i] + "_" + names[i] + ".txt", "utf-8"));
		}

		String termContent = FileUtil.readFile(Test.termPath + "terms.txt", "utf-8");
		StringBuilder result = new StringBuilder();
		for (String line : termContent.split(Test.LINE_SEPARATOR)) {
			int count = 0;
			// NOTE(review): contains() is a substring match, so a term also hits
			// inside longer words — confirm this over-count is intended.
			for (String content : contents)
				if (content.contains(line))
					count++;
			result.append(line).append(Test.TERM_SEPARATOR).append(count).append(Test.LINE_SEPARATOR);
		}
		FileUtil.write(Test.dfPath + "df.txt", result.toString());
	}

	// Compute document frequency restricted to one POS category: for each term in
	// the pos-specific term list, how many users' pos-specific files contain it.
	// Fix: build the result with StringBuilder instead of O(n^2) string concat.
	public static void compDf(String ids[], String names[], String pos) {
		// Load every user's pos-specific term file.
		ArrayList<String> contents = new ArrayList<String>();
		for (int i = 0; i < ids.length; i++) {
			contents.add(FileUtil.readFile(Test.root + pos + "/" + ids[i] + "_" + names[i] + ".txt", "utf-8"));
		}

		String posContent = FileUtil.readFile(Test.root + pos + "_terms.txt", "utf-8");
		StringBuilder result = new StringBuilder();
		for (String line : posContent.split(Test.LINE_SEPARATOR)) {
			int count = 0;
			// NOTE(review): contains() is a substring match and may over-count.
			for (String content : contents)
				if (content.contains(line))
					count++;
			result.append(line).append(Test.TERM_SEPARATOR).append(count).append(Test.LINE_SEPARATOR);
		}

		FileUtil.write(Test.dfPath + pos + "_df.txt", result.toString());
	}

	// Compute a tf-idf-like score (user term frequency / global document
	// frequency) for every user's terms and write one score file per user.
	// Fixes: skip malformed df lines instead of crashing on parseInt/indexing;
	// skip words missing from the df table (previously a NullPointerException);
	// split each line once; build output with StringBuilder.
	public static void compTfidf(String ids[], String names[]) {
		// Load the global df table: "word<sep>df" per line.
		Hashtable<String, Integer> word_df = new Hashtable<String, Integer>();
		String dfContent = FileUtil.readFile(Test.dfPath + "df.txt", "utf-8");
		for (String dfStr : dfContent.split(Test.LINE_SEPARATOR)) {
			String parts[] = dfStr.split(Test.TERM_SEPARATOR);
			if (parts.length != 2)
				continue;
			word_df.put(parts[0], Integer.parseInt(parts[1]));
		}

		// Load the global term list, used to emit 0-scores for terms the user lacks.
		ArrayList<String> terms = new ArrayList<String>();
		String termContent = FileUtil.readFile(Test.termPath + "terms.txt", "utf-8");
		for (String line : termContent.split(Test.LINE_SEPARATOR)) {
			terms.add(line);
		}

		for (int i = 0; i < ids.length; i++) {
			ArrayList<String> userTerms = new ArrayList<String>();
			StringBuilder tfidfStr = new StringBuilder();
			String content = FileUtil.readFile(Test.root + "term/" + ids[i] + "_" + names[i] + ".txt", "utf-8");

			// Score every term present in the user's own table.
			for (String line : content.split(Test.LINE_SEPARATOR)) {
				String parts[] = line.split(Test.TERM_SEPARATOR);
				if (parts.length != 2)
					continue;
				String word = parts[0];
				Integer df = word_df.get(word);
				// Word missing from the df table: previously an NPE; skip it.
				if (df == null)
					continue;
				double tfidf = (double) Integer.parseInt(parts[1]) / df;
				tfidfStr.append(word).append(Test.TERM_SEPARATOR).append(tfidf).append(Test.LINE_SEPARATOR);
				userTerms.add(word);
			}

			// Emit 0 for global terms absent from the user's table.
			for (String item : terms) {
				if (!userTerms.contains(item))
					tfidfStr.append(item).append(Test.TERM_SEPARATOR).append(0).append(Test.LINE_SEPARATOR);
			}

			// One score file per user.
			FileUtil.write(Test.tfidfPath + ids[i] + "_" + names[i] + ".txt", tfidfStr.toString());

		}

	}

	// Same as compTfidf(ids, names) but restricted to one POS category: reads the
	// pos-specific df table and term list, scores each user's pos-specific file,
	// and writes "<id>_<name>_<pos>.txt" per user.
	// Fixes: skip malformed df lines; skip words missing from the df table
	// (previously a NullPointerException); split once; use StringBuilder.
	public static void compTfidf(String ids[], String names[], String pos) {
		// Load the pos-specific df table: "word<sep>df" per line.
		Hashtable<String, Integer> word_df = new Hashtable<String, Integer>();
		String dfContent = FileUtil.readFile(Test.dfPath + pos + "_df.txt", "utf-8");
		for (String dfStr : dfContent.split(Test.LINE_SEPARATOR)) {
			String parts[] = dfStr.split(Test.TERM_SEPARATOR);
			if (parts.length != 2)
				continue;
			word_df.put(parts[0], Integer.parseInt(parts[1]));
		}

		// Load the pos-specific term list, used to emit 0-scores for missing terms.
		ArrayList<String> terms = new ArrayList<String>();
		String termContent = FileUtil.readFile(Test.root + pos + "_terms.txt", "utf-8");
		for (String line : termContent.split(Test.LINE_SEPARATOR)) {
			terms.add(line);
		}

		for (int i = 0; i < ids.length; i++) {
			ArrayList<String> userTerms = new ArrayList<String>();
			StringBuilder tfidfStr = new StringBuilder();
			String fileName = Test.root + pos + "/" + ids[i] + "_" + names[i] + ".txt";
			String content = FileUtil.readFile(fileName, "utf-8");

			// Score every term present in the user's own table.
			for (String line : content.split(Test.LINE_SEPARATOR)) {
				String parts[] = line.split(Test.TERM_SEPARATOR);
				if (parts.length != 2)
					continue;
				String word = parts[0];
				Integer df = word_df.get(word);
				// Word missing from the df table: previously an NPE; skip it.
				if (df == null)
					continue;
				double tfidf = (double) Integer.parseInt(parts[1]) / df;
				tfidfStr.append(word).append(Test.TERM_SEPARATOR).append(tfidf).append(Test.LINE_SEPARATOR);
				userTerms.add(word);
			}

			// Emit 0 for list terms absent from the user's table.
			for (String item : terms) {
				if (!userTerms.contains(item))
					tfidfStr.append(item).append(Test.TERM_SEPARATOR).append(0).append(Test.LINE_SEPARATOR);
			}

			// One score file per user and POS category.
			FileUtil.write(Test.tfidfPath + ids[i] + "_" + names[i] + "_" + pos + ".txt", tfidfStr.toString());

		}

	}

	// Demo: repeatedly find the keyword "买" (buy) in a hard-coded sample text
	// and print, for each occurrence, the last noun-tagged token (/n) appearing
	// before the next punctuation-tagged token (/w) — the presumed object being
	// bought. NOTE(review): the remaining text is re-POS-tagged on every
	// iteration, which is quadratic in the number of occurrences — confirm
	// acceptable for this demo.
	public static void extractBehavior() {
		String string = "我买了这套书，非常漂亮，值得买回家和女儿一起看;火速买了火车票;我只是买了一个精华三支唇膏两个面膜;逛超市买了把儿童牙刷送了块牛掰爆了的手表;发完肥胖围脖以后义无反顾的买了薯片啊擦;刚买了小米它就出1s了;终于买了新衣服，好像有点晒红了？;今儿买了一对很漂亮的耳环给老妈!!!高兴!;再去买了一堆烘焙原料;自从买了这部车，亲戚们都不怎么喜欢.";
		String word = "买";
		while (string.length() > 0) {
			int index = string.indexOf(word);
			// No further occurrence of the keyword: done.
			if (index < 0)
				break;
			// Drop everything up to and including this occurrence, then re-tag
			// the remainder.
			string = string.substring(index + word.length(), string.length());
			String pos = POSTool.POSSentence(string);
			String[] items = pos.split(" ");
			String target = null;
			for (String item : items) {
				// Stop at the first /w tag (punctuation, presumably — confirm
				// against POSTool's tagset): end of the current clause.
				if (item.indexOf("/w") > 0)
					break;
				// Track the most recent /n (noun) token seen in this clause.
				if (item.indexOf("/n") > 0)
					target = item;
			}
			if (target != null)
				System.out.println("word(target):" + word + "(" + target + ")");
		}
	}

	// Count the segmented-word distribution of the file at src and write the
	// counts to dest.
	// Bug fix: the guard used "trim().length() < 0", which is never true, so
	// blank tokens were counted; skip them now. Also removed the dead local
	// "result".
	public static void itemCount(String src, String dest) {
		Hashtable<String, Integer> word_count = new Hashtable<String, Integer>();
		String content = FileUtil.readFile(src, "utf-8");
		String items[] = POSTool.SegmentSentence(content).split(" ");
		for (String item : items) {
			if (item == null || item.trim().isEmpty())
				continue;
			word_count = HashUtil.countHash(word_count, item);
		}
		HashUtil.writeHash(word_count, dest, -1);
	}

	// public static void pairCount(String src, String dest, String key) {
	// Hashtable<String, Integer> word_count = new Hashtable<String, Integer>();
	// String content = FileUtils.readFile(src, "utf-8");
	// String items[] = POSTool.SegmentSentence(content).split(" ");
	// for (String item1 : items) {
	// for (String item2 : items) {
	// if (word_count.containsKey(item1 + "_" + item2))
	// word_count.put(item1 + "_" + item2, word_count.get(item1 + "_" + item2) +
	// 1);
	// else
	// word_count.put(item1 + "_" + item2, 1);
	// }// if(item==null||item.trim().length()<0)
	// // continue;
	// }
	// String result = "";
	// Map.Entry[] maps = SortUtils.sortedHashtableByValue(word_count, -1);
	// for (int i = 0; i < maps.length; i++)
	// result += maps[i].getKey() + ":" + maps[i].getValue() + "\n";
	// FileUtils.write(dest, result);
	// }

	// Count the word/POS token distribution over every user's statuses and
	// write the result to fileName.
	public static void compAllItemPOSCount(String fileName) {
		Hashtable<String, Integer> item_count = new Hashtable<String, Integer>();
		for (String id : Data.getUserIds()) {
			// Fold one user's statuses into the global table.
			for (Status status : StatusService.getByUserId(id)) {
				for (String itempos : POSTool.POSSentence(status.getContent()).split(" "))
					item_count = HashUtil.countHash(item_count, itempos);
			}
		}
		HashUtil.writeHash(item_count, fileName, -1);
	}

	// Count the segmented-word distribution over every user's statuses and
	// write the result to fileName.
	public static void compAllItemCount(String fileName) {
		Hashtable<String, Integer> item_count = new Hashtable<String, Integer>();
		for (String id : Data.getUserIds()) {
			// Fold one user's statuses into the global table.
			for (Status status : StatusService.getByUserId(id)) {
				for (String item : POSTool.SegmentSentence(status.getContent()).split(" "))
					item_count = HashUtil.countHash(item_count, item);
			}
		}
		HashUtil.writeHash(item_count, fileName, -1);
	}

	// Count adjacent word/POS token pairs; keys are "item1_item2".
	// Bug fix: the guard used "length() < 0", which is never true, so empty
	// tokens produced by consecutive separators were counted; skip them now.
	public static Hashtable<String, Integer> pairPOSCount(String content) {
		Hashtable<String, Integer> word_count = new Hashtable<String, Integer>();
		String items[] = POSTool.POSSentence(content).split(" ");
		for (int i = 0; i < items.length - 1; i++) {
			if (items[i].isEmpty() || items[i + 1].isEmpty())
				continue;
			word_count.merge(items[i] + "_" + items[i + 1], 1, Integer::sum);
		}
		return word_count;
	}

	// Count every word/POS token of the tagged text.
	public static Hashtable<String, Integer> extractPOS(String content) {
		Hashtable<String, Integer> counts = new Hashtable<String, Integer>();
		for (String token : POSTool.POSSentence(content).split(" ")) {
			counts.merge(token, 1, Integer::sum);
		}
		return counts;
	}

	// Count the (left POS, right POS) context pairs around tokens that contain
	// the given key; keys look like "/x_/y".
	public static Hashtable<String, Integer> LRPOSPairCount(String content, String key) {
		Hashtable<String, Integer> pair_count = new Hashtable<String, Integer>();
		String items[] = POSTool.POSSentence(content).split(" ");
		// Only interior tokens have both neighbours.
		for (int i = 1; i + 1 < items.length; i++) {
			if (items[i].indexOf(key) < 0)
				continue;
			String left = items[i - 1];
			String right = items[i + 1];
			// Both neighbours must carry a POS tag.
			if (left.indexOf("/") < 0 || right.indexOf("/") < 0)
				continue;
			String leftPOS = left.substring(left.indexOf("/"));
			String rightPOS = right.substring(right.indexOf("/"));
			pair_count = HashUtil.countHash(pair_count, leftPOS + "_" + rightPOS);
		}
		return pair_count;
	}

	// Count the (left POS, right POS) context pairs around tokens that match any
	// term in termlist; keys look like "/x_/y".
	public static Hashtable<String, Integer> LRPOSPairCount(String content, List<String> termlist) {
		Hashtable<String, Integer> pair_count = new Hashtable<String, Integer>();
		String items[] = POSTool.POSSentence(content).split(" ");
		// Only interior tokens have both neighbours.
		for (int i = 1; i + 1 < items.length; i++) {
			if (!StringUtils.contains(items[i], termlist))
				continue;
			String left = items[i - 1];
			String right = items[i + 1];
			// Both neighbours must carry a POS tag.
			if (left.indexOf("/") < 0 || right.indexOf("/") < 0)
				continue;
			String leftPOS = left.substring(left.indexOf("/"));
			String rightPOS = right.substring(right.indexOf("/"));
			pair_count = HashUtil.countHash(pair_count, leftPOS + "_" + rightPOS);
		}
		return pair_count;
	}

	// Extract and count middle words whose own POS and both neighbours' POS
	// contain the given tags (e.g. left "r", middle "n", right "v").
	// Fix: the return value of HashUtil.countHash was discarded here, unlike
	// every other call site in this class; assign it back so counts are kept
	// even if countHash returns a fresh table.
	public static Hashtable<String, Integer> customizedItemCount(String content, String leftPOS, String middlePOS, String rightPOS) {
		Hashtable<String, Integer> item_count = new Hashtable<String, Integer>();
		// POS-tag the text.
		String pos = POSTool.POSSentence(content);
		String items[] = pos.split(" ");
		for (int i = 0; i < items.length; i++) {
			// Only interior tokens have both neighbours.
			if (i + 1 >= items.length || i - 1 < 0)
				continue;
			String item = items[i];
			String left = items[i - 1];
			String right = items[i + 1];

			// Count the middle word when all three POS patterns are present.
			if (left.indexOf("/" + leftPOS) >= 0 && item.indexOf("/" + middlePOS) >= 0 && right.indexOf("/" + rightPOS) >= 0)
				item_count = HashUtil.countHash(item_count, item.substring(0, item.indexOf("/")));
		}
		return item_count;
	}

	// Expand the seed vocabulary with similar words learned from the corpus.
	// pairThreshold: cutoff passed to HashUtil.cutHash for POS context pairs
	//   (presumably a minimum count — confirm against HashUtil).
	// termThreshold: cutoff passed to HashUtil.cutHash for candidate terms.
	// list: the seed vocabulary whose contexts are learned.
	// contents: the corpus, pre-chunked so each piece is short enough to POS-tag.
	public static void expandTermList(List<String> contents, List<String> list, int pairThreshold, int termThreshold) {
		// Step 1: collect the (left POS, right POS) context pairs of every seed term.
		Hashtable<String, Integer> pair_count = new Hashtable<String, Integer>();
		for (String content : contents)
			for (String term : list)
				pair_count = HashUtil.mergeHash(pair_count, LRPOSPairCount(content, term));
		pair_count = HashUtil.cutHash(pair_count, pairThreshold);
		HashUtil.printHash(pair_count);

		// Step 2: count nouns occurring in the surviving contexts as expansion
		// candidates.
		Hashtable<String, Integer> term_count = new Hashtable<String, Integer>();
		for (String content : contents) {
			Enumeration<String> en = pair_count.keys();
			while (en.hasMoreElements()) {
				// Context keys look like "/x_/y"; strip the leading slashes to
				// recover the raw POS tags.
				String pair = (String) en.nextElement();
				String poses[] = pair.split("_");
				String left = poses[0].substring(poses[0].indexOf("/") + 1, poses[0].length());
				String right = poses[1].substring(poses[1].indexOf("/") + 1, poses[1].length());
				// NOTE(review): the middle POS is hard-coded to "n" (nouns only).
				term_count = HashUtil.mergeHash(term_count, customizedItemCount(content, left, "n", right));
			}
		}
		System.out.println("词汇词汇嗷");
		term_count = HashUtil.cutHash(term_count, termThreshold);
		HashUtil.printHash(term_count);
	}

	// Return the POS-tag portion of a tagged item such as "word/pos".
	// An item without "/" is returned unchanged (indexOf yields -1, so the
	// substring starts at 0), matching the original behavior.
	public static String parsePOS(String item) {
		return item.substring(item.indexOf("/") + 1);
	}

	// Return the word portion of a tagged item such as "word/pos".
	// Bug fix: an item without "/" used to throw StringIndexOutOfBoundsException
	// (substring(0, -1)); such items are now returned unchanged. Several callers
	// (e.g. extractRByTerms, extractLPOS) invoke this without checking for "/".
	public static String parseTerm(String item) {
		int index = item.indexOf("/");
		return index < 0 ? item : item.substring(0, index);
	}

	// Count what appears immediately LEFT of tokens whose POS is in poses.
	// type=1: count the left word; type=2: count the left POS.
	public static Hashtable<String, Integer> extractLByPOS(String sentence, List<String> poses, int type) {
		Hashtable<String, Integer> pos_count = new Hashtable<String, Integer>();
		String items[] = POSTool.POSSentence(sentence).split(" ");
		// Only interior tokens have a left neighbour (and, as in the original,
		// a right neighbour is also required).
		for (int i = 1; i + 1 < items.length; i++) {
			String left = items[i - 1];
			String middlePOS = parsePOS(items[i]);

			// Does the middle token's POS match any of the wanted tags?
			boolean hit = false;
			for (String pos : poses) {
				if (middlePOS.equals(pos))
					hit = true;
			}
			if (hit) {
				if (type == 1)
					HashUtil.countHash(pos_count, parseTerm(left));
				if (type == 2)
					HashUtil.countHash(pos_count, parsePOS(left));
			}
		}
		return pos_count;
	}

	// Collect the distinct words (type=1) or POS tags (type=2) of tokens whose
	// POS matches one of the given tags.
	// Bug fixes: (1) for type=2 the code added parsePOS(parseTerm(middle)),
	// which is just the word again, not the token's POS; (2) the first and last
	// tokens were skipped although no neighbours are used here; (3) untagged
	// tokens are now skipped explicitly.
	public static List<String> extractHits(String sentence, List<String> poses, int type) {
		List<String> result = new ArrayList<String>();
		String posSentence = POSTool.POSSentence(sentence);
		String items[] = posSentence.split(" ");
		for (int i = 0; i < items.length; i++) {
			String middle = items[i];
			if (middle.indexOf("/") < 0)
				continue;

			// Does the token's POS match any wanted tag?
			boolean hitMiddlePOS = false;
			for (String pos : poses) {
				if (parsePOS(middle).equals(pos))
					hitMiddlePOS = true;
			}
			if (hitMiddlePOS) {
				String value = (type == 1) ? parseTerm(middle) : parsePOS(middle);
				if ((type == 1 || type == 2) && !result.contains(value))
					result.add(value);
			}
		}
		return result;
	}

	// Count what appears immediately RIGHT of tokens whose POS is in poses.
	// type=1: count the right word; type=2: count the right POS.
	// Prints the tagged sentence and the resulting table (debug output kept).
	public static Hashtable<String, Integer> extractRByPOS(String sentence, List<String> poses, int type) {
		Hashtable<String, Integer> pos_count = new Hashtable<String, Integer>();
		String posSentence = POSTool.POSSentence(sentence);
		System.out.println(posSentence);
		String items[] = posSentence.split(" ");
		// Only interior tokens are considered, as in the original.
		for (int i = 1; i + 1 < items.length; i++) {
			String middle = items[i];
			String right = items[i + 1];

			// Does the middle token's POS match any of the wanted tags?
			boolean hit = false;
			for (String pos : poses) {
				if (parsePOS(middle).equals(pos))
					hit = true;
			}
			if (hit) {
				if (type == 1)
					HashUtil.countHash(pos_count, parseTerm(right));
				if (type == 2)
					HashUtil.countHash(pos_count, parsePOS(right));
			}
		}
		HashUtil.printHash(pos_count);
		return pos_count;
	}

	// Count the (left, right) context around tokens whose POS is in poses.
	// type=1: count word pairs "leftWord_rightWord"; type=2: count POS pairs.
	// Prints the tagged sentence and the resulting table (debug output kept).
	public static Hashtable<String, Integer> extractLRByPOS(String sentence, List<String> poses, int type) {
		Hashtable<String, Integer> pos_count = new Hashtable<String, Integer>();
		String posSentence = POSTool.POSSentence(sentence);
		System.out.println(posSentence);
		String items[] = posSentence.split(" ");
		// Only interior tokens have both neighbours.
		for (int i = 1; i + 1 < items.length; i++) {
			String left = items[i - 1];
			String middle = items[i];
			String right = items[i + 1];

			// Does the middle token's POS match any of the wanted tags?
			boolean hit = false;
			for (String pos : poses) {
				if (parsePOS(middle).equals(pos))
					hit = true;
			}
			if (hit) {
				if (type == 1)
					HashUtil.countHash(pos_count, parseTerm(left) + "_" + parseTerm(right));
				if (type == 2)
					HashUtil.countHash(pos_count, parsePOS(left) + "_" + parsePOS(right));
			}
		}
		HashUtil.printHash(pos_count);
		return pos_count;
	}

	// Count the POS tags that appear immediately left of the given word.
	// Prints the tagged sentence and the resulting table (debug output kept).
	public static Hashtable<String, Integer> extractLPOS(String sentence, String term) {
		Hashtable<String, Integer> pos_count = new Hashtable<String, Integer>();
		String posSentence = POSTool.POSSentence(sentence);
		System.out.println(posSentence);
		String items[] = posSentence.split(" ");
		// Only interior tokens are considered, as in the original.
		for (int i = 1; i + 1 < items.length; i++) {
			// When the middle word matches, count the POS of its left neighbour.
			if (parseTerm(items[i]).equals(term)) {
				HashUtil.countHash(pos_count, parsePOS(items[i - 1]));
			}
		}
		HashUtil.printHash(pos_count);
		return pos_count;
	}

	// Count what appears immediately left of the given word.
	// type=1: count the left word; type=2: count the left POS.
	public static Hashtable<String, Integer> extractLByTerm(String sentence, String term, int type) {
		Hashtable<String, Integer> pos_count = new Hashtable<String, Integer>();
		String items[] = POSTool.POSSentence(sentence).split(" ");
		// Only interior tokens are considered, as in the original.
		for (int i = 1; i + 1 < items.length; i++) {
			String middle = items[i];
			// Untagged tokens cannot be matched safely.
			if (middle.indexOf("/") < 0)
				continue;
			if (parseTerm(middle).equals(term)) {
				String left = items[i - 1];
				if (type == 1)
					HashUtil.countHash(pos_count, parseTerm(left));
				else if (type == 2)
					HashUtil.countHash(pos_count, parsePOS(left));
			}
		}
		return pos_count;
	}

	// Count what appears immediately left of any word in termlist.
	// type=1: count the left word; type=2: count the left POS.
	public static Hashtable<String, Integer> extractLByTerms(String sentence, List<String> termlist, int type) {
		Hashtable<String, Integer> pos_count = new Hashtable<String, Integer>();
		String items[] = POSTool.POSSentence(sentence).split(" ");
		// Only interior tokens are considered, as in the original.
		for (int i = 1; i + 1 < items.length; i++) {
			String middle = items[i];
			// Untagged tokens can never match a term (check hoisted out of the
			// term loop — same effect as the original per-term "continue").
			if (middle.indexOf("/") < 0)
				continue;
			String left = items[i - 1];

			for (String term : termlist) {
				// Count once per matching term, as before.
				if (parseTerm(middle).equals(term)) {
					if (type == 1)
						HashUtil.countHash(pos_count, parseTerm(left));
					else if (type == 2)
						HashUtil.countHash(pos_count, parsePOS(left));
				}
			}
		}
		return pos_count;
	}

	// Count what appears immediately right of any word in termlist.
	// type=1: count the right word; type=2: count the right POS.
	// Fix: skip tokens without a "/" tag before calling parseTerm, matching
	// extractLByTerms; parseTerm(middle) used to throw
	// StringIndexOutOfBoundsException on untagged tokens.
	public static Hashtable<String, Integer> extractRByTerms(String sentence, List<String> termlist, int type) {
		Hashtable<String, Integer> pos_count = new Hashtable<String, Integer>();
		String posSentence = POSTool.POSSentence(sentence);
		System.out.println(posSentence);
		String items[] = posSentence.split(" ");
		for (int i = 0; i < items.length; i++) {
			if (i + 1 >= items.length || i - 1 < 0)
				continue;
			String middle = items[i];
			String right = items[i + 1];
			if (middle.indexOf("/") < 0)
				continue;

			for (String term : termlist) {
				if (parseTerm(middle).equals(term)) {
					if (type == 1)
						HashUtil.countHash(pos_count, parseTerm(right));
					else if (type == 2)
						HashUtil.countHash(pos_count, parsePOS(right));
				}
			}
		}
		return pos_count;
	}

	// Count the (left, right) context around any word in termlist.
	// type=1: count word pairs "leftWord_rightWord"; type=2: count POS pairs.
	// Fix: skip tokens without a "/" tag before calling parseTerm, matching
	// extractLByTerms; parseTerm(middle) used to throw on untagged tokens.
	public static Hashtable<String, Integer> extractLRByTerms(String sentence, List<String> termlist, int type) {
		Hashtable<String, Integer> pos_count = new Hashtable<String, Integer>();
		String posSentence = POSTool.POSSentence(sentence);
		String items[] = posSentence.split(" ");
		for (int i = 0; i < items.length; i++) {
			if (i + 1 >= items.length || i - 1 < 0)
				continue;
			String left = items[i - 1];
			String middle = items[i];
			String right = items[i + 1];
			if (middle.indexOf("/") < 0)
				continue;

			for (String term : termlist) {
				if (parseTerm(middle).equals(term)) {
					if (type == 1)
						HashUtil.countHash(pos_count, parseTerm(left) + "_" + parseTerm(right));
					else if (type == 2)
						HashUtil.countHash(pos_count, parsePOS(left) + "_" + parsePOS(right));
				}
			}
		}
		return pos_count;
	}

	// Count what appears immediately right of the given word.
	// type=1: count the right word; type=2: count the right POS.
	// Fix: skip tokens without a "/" tag before calling parseTerm, for
	// consistency with extractLByTerm; parseTerm(middle) used to throw
	// StringIndexOutOfBoundsException on untagged tokens.
	public static Hashtable<String, Integer> extractRByTerm(String sentence, String term, int type) {
		Hashtable<String, Integer> pos_count = new Hashtable<String, Integer>();
		String posSentence = POSTool.POSSentence(sentence);
		String items[] = posSentence.split(" ");
		for (int i = 0; i < items.length; i++) {
			if (i + 1 >= items.length || i - 1 < 0)
				continue;
			String middle = items[i];
			String right = items[i + 1];
			if (middle.indexOf("/") < 0)
				continue;
			if (parseTerm(middle).equals(term)) {
				if (type == 1)
					HashUtil.countHash(pos_count, parseTerm(right));
				else if (type == 2)
					HashUtil.countHash(pos_count, parsePOS(right));
			}
		}
		return pos_count;
	}

	// Count the (left, right) context around the given word.
	// type=1: count word pairs "leftWord_rightWord"; type=2: count POS pairs.
	// Fix: skip tokens without a "/" tag before calling parseTerm, for
	// consistency with extractLByTerm; parseTerm(middle) used to throw
	// StringIndexOutOfBoundsException on untagged tokens.
	public static Hashtable<String, Integer> extractLRByTerm(String sentence, String term, int type) {
		Hashtable<String, Integer> pos_count = new Hashtable<String, Integer>();
		String posSentence = POSTool.POSSentence(sentence);
		String items[] = posSentence.split(" ");
		for (int i = 0; i < items.length; i++) {
			if (i + 1 >= items.length || i - 1 < 0)
				continue;
			String left = items[i - 1];
			String middle = items[i];
			String right = items[i + 1];
			if (middle.indexOf("/") < 0)
				continue;
			if (parseTerm(middle).equals(term)) {
				if (type == 1)
					HashUtil.countHash(pos_count, parseTerm(left) + "_" + parseTerm(right));
				else if (type == 2)
					HashUtil.countHash(pos_count, parsePOS(left) + "_" + parsePOS(right));
			}
		}
		return pos_count;
	}

	// Scratchpad of usage examples for the utilities in this class. Every call
	// is commented out, so running main is currently a no-op.
	public static void main(String args[]) {
		// String src = "c:/weibo/test/source.txt";
		// String dest = "c:/weibo/test/destination.txt";
		// pairCount(src, dest, "");
		// String content = "我爱吃饭，我爱唱歌，我爱读书";
		// eg:content, "v", "n"
		// itemCount(src, dest);
		// Hashtable<String, Integer> pattern_count =
		// customizeItemCount(content, "v");
		// HashUtils.printHash(pattern_count);

		// itemCount(src, dest);
		// compAllItemCount("c:/weibo/test/item_count");

		// Extract the left/right POS context of a given word
		// String content =
		// "我喜欢吃苹果，我喜欢吃桃子，我喜欢吃西红柿，我喜欢吃西瓜，我喜欢喝牛奶，我喜欢玩篮球,我喜欢妈妈，我喜欢爸爸，我喜欢好朋友，我喜欢书，我喜欢苹果,我爱吃苹果，我爱吃橘子，我爱学习，我爱家乡，我爱香蕉";
		// Hashtable<String, Integer> ht1=LRPOSPairCount(content, "喜欢");
		// HashUtil.printHash(ht1);
		// System.out.println();

		// Extract the left/right collocations of a term list
		// String content =
		// "我热爱桃子，我热爱苹果，我讨厌品苹果，我讨厌香蕉，我讨厌西瓜，我喜欢吃苹果，我喜欢吃桃子，我喜欢吃西红柿，我喜欢吃西瓜，我喜欢喝牛奶，我喜欢玩篮球,我喜欢妈妈，我喜欢爸爸，我喜欢好朋友，我喜欢书，我喜欢苹果,我爱吃苹果，我爱吃橘子，我爱学习，我爱家乡，我爱香蕉";
		// List<String> termlist = new ArrayList<String>();
		// termlist.add("喜欢");
		// termlist.add("爱");
		// Hashtable<String, Integer> ht2=LRPOSPairCount(content, termlist);
		// HashUtil.printHash(ht2);

		// Extract middle words whose left/right neighbours match given POS tags
		// String content =
		// "我喜欢吃苹果，我喜欢吃桃子，我喜欢吃西红柿，我喜欢吃西瓜，我喜欢喝牛奶，我喜欢玩篮球,我喜欢妈妈，我喜欢爸爸，我喜欢好朋友，我喜欢书，我喜欢苹果,我爱吃苹果，我爱吃橘子，我爱学习，我爱家乡，我爱香蕉";
		// customizedItemCount(content,"r", "n");

		// Expand a term set from a corpus
		// List<String> contents = Data.getContents();
		// List<String> termlist = new ArrayList<String>();
		// termlist.add("北京大学");
		// termlist.add("清华大学");
		// termlist.add("北大");
		// termlist.add("清华");
		// expandTermList(contents, termlist, 1, 20);

		// Parse the term and the POS out of a tagged item
		// String item = "我/r";
		// System.out.println(parsePOS(item));
		// System.out.println(parseTerm(item));

		// Extract the POS appearing left of a given POS
		// String sentence =
		// "我是一个中国人，你是一个韩国人,他是一个日本人,他是一个青岛人，他是一个吉林人，他是一个上海人，他是一个加州人,我觉得中国很美,我觉得日本很美,我觉得韩国很美";
		// List<String> poses = new ArrayList<String>();
		// poses.add("ns");
		// List<String> terms = new ArrayList<String>();
		// terms.add("北京");
		// terms.add("上海");
		// terms.add("中国");

		// Extract the context of a given POS
		// extractLByPOS(sentence, poses, 2);
		// extractRByPOS(sentence, poses, 1);
		// extractRByPOS(sentence, poses, 2);
		// extractLRByPOS(sentence, poses, 1);
		// extractLRByPOS(sentence, poses, 2);

		// Extract the common context of a term list
		// Hashtable<String,Integer> lTermByTerms=extractLByTerms(sentence,
		// terms, 1);
		// extractRByTerms(sentence, terms, 1);
		// extractRByTerms(sentence, terms, 2);
		// extractLRByTerms(sentence, terms, 1);
		// extractLRByTerms(sentence, terms, 2);

		// Extract the common context of a single term
		// extractLByTerm(sentence, "一个", 1);
		// extractLByTerm(sentence, "一个", 2);
		// extractRByTerm(sentence, "一个", 1);
		// extractRByTerm(sentence, "一个", 2);
		// extractLRByTerm(sentence, "一个", 1);
		// extractLRByTerm(sentence, "一个", 2);

	}
}
