package com.zhenyi.api;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.StringReader;
import java.util.List;

import org.lionsoul.jcseg.extractor.impl.TextRankKeyphraseExtractor;
import org.lionsoul.jcseg.extractor.impl.TextRankKeywordsExtractor;
import org.lionsoul.jcseg.tokenizer.core.ADictionary;
import org.lionsoul.jcseg.tokenizer.core.DictionaryFactory;
import org.lionsoul.jcseg.tokenizer.core.ISegment;
import org.lionsoul.jcseg.tokenizer.core.IWord;
import org.lionsoul.jcseg.tokenizer.core.JcsegException;
import org.lionsoul.jcseg.tokenizer.core.JcsegTaskConfig;
import org.lionsoul.jcseg.tokenizer.core.SegmentFactory;

import cn.hutool.core.lang.Singleton;

/**
 * Chinese word-segmentation ("fenci") helper built on the Jcseg tokenizer.
 *
 * <p>Wraps Jcseg's TextRank keyword/keyphrase extractors and loads two custom
 * lexicon files ({@code lexicon/lex-fname.lex}, {@code lexicon/lex-yitai.lex})
 * on top of the singleton dictionary. Obtain the reusable instance via
 * {@link #instance()}; the static methods are standalone console-oriented
 * variants that build their own dictionary per call.
 */
public class FenciUtils {
	private JcsegTaskConfig config;
	private ADictionary dic;

	/**
	 * Returns the process-wide shared instance (lazily created by Hutool's
	 * {@code Singleton} cache via the no-arg constructor).
	 */
	public static FenciUtils instance() {
		return Singleton.get(FenciUtils.class);
	}

	/**
	 * Builds the task config and loads the custom lexicon files into the
	 * singleton dictionary.
	 *
	 * @throws FileNotFoundException if a lexicon file is missing
	 * @throws IOException           if a lexicon file cannot be read
	 */
	public FenciUtils() throws NumberFormatException, FileNotFoundException, IOException {
		config = new JcsegTaskConfig(true);
		dic = loadDictionary(config);
	}

	/**
	 * Creates the Jcseg singleton dictionary for {@code config} and loads the
	 * two project lexicon files into it. Centralizes the setup that was
	 * previously duplicated across the constructor and the static helpers.
	 *
	 * @param config the task config the dictionary is bound to
	 * @return the loaded dictionary
	 * @throws IOException if a lexicon file is missing or unreadable
	 */
	private static ADictionary loadDictionary(JcsegTaskConfig config) throws IOException {
		ADictionary dic = DictionaryFactory.createSingletonDictionary(config, true);
		dic.load(new File("lexicon/lex-fname.lex"));
		dic.load(new File("lexicon/lex-yitai.lex"));
		return dic;
	}

	/**
	 * Extracts up to 15 TextRank keywords from {@code text} using SEARCH mode.
	 *
	 * @param text the text to segment and rank
	 * @return the keywords joined with {@code ','} (including a trailing comma,
	 *         matching the historical format), or {@code ""} on any failure
	 */
	public String run(String text) {
		try {
			ISegment seg = SegmentFactory.createJcseg(JcsegTaskConfig.SEARCH_MODE, new Object[] { config, dic });
			// Feed the text to the segmenter; the extractor reads its own reader below.
			seg.reset(new StringReader(text));
			TextRankKeywordsExtractor extractor = new TextRankKeywordsExtractor(seg);
			extractor.setMaxIterateNum(100); // max PageRank iterations (optional; default is fine)
			extractor.setWindowSize(5); // TextRank co-occurrence window size (optional)
			extractor.setKeywordsNum(15); // max keywords to return (default is 10)
			List<String> keywords = extractor.getKeywords(new StringReader(text));
			// StringBuilder instead of O(n^2) String concatenation; trailing comma kept
			// deliberately so the output format is unchanged for existing callers.
			StringBuilder result = new StringBuilder();
			for (String keyword : keywords) {
				result.append(keyword).append(',');
			}
			return result.toString();
		} catch (Exception e) {
			// Was a silent swallow; surface the failure (no logging framework is
			// available in this class) before degrading to the empty result.
			e.printStackTrace();
			return "";
		}
	}

	/**
	 * Standalone variant of {@link #run(String)} with a caller-chosen segmentation
	 * mode; also echoes each keyword to stdout.
	 *
	 * @param str  the text to segment and rank
	 * @param type a Jcseg segmentation mode constant (e.g. {@code JcsegTaskConfig.SEARCH_MODE})
	 * @return the keywords joined with {@code ','} (including a trailing comma)
	 * @throws JcsegException if the segmenter cannot be created
	 * @throws IOException    if a lexicon file cannot be read
	 */
	public static String fenci(String str, int type) throws JcsegException, IOException {
		JcsegTaskConfig config = new JcsegTaskConfig(true);
		ADictionary dic = loadDictionary(config);
		ISegment seg = SegmentFactory.createJcseg(type, new Object[] { config, dic });
		// Feed the text to the segmenter
		seg.reset(new StringReader(str));
		TextRankKeywordsExtractor extractor = new TextRankKeywordsExtractor(seg);
		extractor.setMaxIterateNum(100); // max PageRank iterations (optional; default is fine)
		extractor.setWindowSize(5); // TextRank co-occurrence window size (optional)
		extractor.setKeywordsNum(15); // max keywords to return (default is 10)
		List<String> keywords = extractor.getKeywords(new StringReader(str));
		StringBuilder result = new StringBuilder();
		for (String keyword : keywords) {
			System.out.println(keyword + " √");
			result.append(keyword).append(',');
		}
		return result.toString();
	}

	/**
	 * Diagnostic dump to stdout: prints every token produced by the segmenter,
	 * then the TextRank keywords, then the TextRank keyphrases for {@code str}.
	 *
	 * @param str  the text to analyze
	 * @param type a Jcseg segmentation mode constant
	 * @throws JcsegException if the segmenter cannot be created
	 * @throws IOException    if a lexicon file cannot be read
	 */
	public static void wordByType(String str, int type) throws JcsegException, IOException {
		JcsegTaskConfig config = new JcsegTaskConfig(true);
		ADictionary dic = loadDictionary(config);
		ISegment seg = SegmentFactory.createJcseg(type, new Object[] { config, dic });

		// Feed the text to the segmenter
		seg.reset(new StringReader(str));
		// Print every segmentation token
		IWord word = null;
		while ((word = seg.next()) != null) {
			System.out.println(word.getValue());
		}
		System.out.println("------------");

		TextRankKeywordsExtractor extractor = new TextRankKeywordsExtractor(seg);
		extractor.setMaxIterateNum(100); // max PageRank iterations (optional; default is fine)
		extractor.setWindowSize(5); // TextRank co-occurrence window size (optional)
		extractor.setKeywordsNum(15); // max keywords to return (default is 10)
		List<String> keywords = extractor.getKeywords(new StringReader(str));
		for (String string : keywords) {
			System.out.println(string + " √");
		}
		System.out.println("************");
		TextRankKeyphraseExtractor extractor2 = new TextRankKeyphraseExtractor(seg);
		extractor2.setMaxIterateNum(100); // max PageRank iterations (optional; default is fine)
		extractor2.setWindowSize(5); // TextRank co-occurrence window size (optional)
		extractor2.setKeywordsNum(10); // max keyphrases to return (default is 10)
		extractor2.setMaxWordsNum(5);
		List<String> keyphrases = extractor2.getKeyphrase(new StringReader(str));
		for (String string : keyphrases) {
			System.out.println(string);
		}
		System.out.println("============\n");

	}

	/**
	 * Prints up to 10 TextRank keywords for {@code str} using a stopword-filtered
	 * config (no custom lexicons, no synonym expansion, unknown words dropped).
	 *
	 * @param str  the text to analyze
	 * @param type a Jcseg segmentation mode constant
	 * @throws JcsegException if the segmenter cannot be created
	 * @throws IOException    if dictionary loading fails
	 */
	public static void keywords(String str, int type) throws JcsegException, IOException {
		JcsegTaskConfig config = new JcsegTaskConfig(true);
		config.setClearStopwords(true); // filter out stopwords
		config.setAppendCJKSyn(false); // do not append CJK synonyms
		config.setKeepUnregWords(false); // drop unrecognized words
		ADictionary dic = DictionaryFactory.createSingletonDictionary(config);
		ISegment seg = SegmentFactory.createJcseg(type, new Object[] { config, dic });

		// Build the TextRank keyword extractor
		TextRankKeywordsExtractor extractor = new TextRankKeywordsExtractor(seg);
		extractor.setMaxIterateNum(100); // max PageRank iterations (optional; default is fine)
		extractor.setWindowSize(5); // TextRank co-occurrence window size (optional)
		extractor.setKeywordsNum(10); // max keywords to return (default is 10)

		// Extract the keywords from a fresh reader over the input
		List<String> keywords = extractor.getKeywords(new StringReader(str));
		for (String string : keywords) {
			System.out.println(string);
		}
		System.out.println("------------");
	}
}
