package cn.edu.scut.suggestion.corpus;

/**
 * Builds the local corpus and inserts the derived term data into the
 * related database tables.
 * @author Tian.yuchen
 */

import java.util.ArrayList;
import java.util.List;
import java.util.Vector;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermEnum;

public class BuildMain {
	/** Default cap on the number of terms extracted from the index. */
	public static int defaultNumTerms = 500000;

	/** Matches a run of CJK unified ideographs (common Chinese characters). Compiled once. */
	private static final Pattern CHINESE = Pattern.compile("[\u4e00-\u9fa5]+");

	/*
	 * Commented out to guard against accidentally rebuilding the corpus;
	 * restore only when a rebuild is really intended.

	public static void main(String[] args) throws Exception {

		String path = "/home/search/data/TenUniversities/univerSite-index-data4/uestc/index";
		File indexFile = new File(path);
		Directory dir = FSDirectory.open(indexFile);
		TermInfo[] terms = getTerms(IndexReader.open(dir), defaultNumTerms,
				new String[] { "content" });

		FileDBWriter fwDbWriter = new FileDBWriter(new File(
				"/home/tian.yuchen/querysuggestion_data/corpus.db"), terms);
		terms = fwDbWriter.write();

		MysqlHelper mysqlHelper = new MysqlHelper();
		mysqlHelper.clearDB();
		int numDoc = getNumDocs(IndexReader.open(dir));
		int maxfreq = getMaxFreq(terms);
		for (int i = 0; i < terms.length; i++) {
			terms[i].normaldocfreq = (double) terms[i].docFreq/(double) maxfreq;
			// NOTE(review): integer division truncates before the log is taken;
			// when re-enabling, use Math.log((double) numDoc / terms[i].docFreq).
			terms[i].idf = Math.log(numDoc / terms[i].docFreq);
			mysqlHelper.insert(terms[i].term.text(), terms[i].docFreq,
					terms[i].normaldocfreq, terms[i].idf, terms[i].pos);

		}
	}
	*/

	/**
	 * Returns the number of documents reported by {@code reader.numDocs()}.
	 *
	 * @param reader an open index reader; must not be null
	 * @return the document count of the index
	 */
	public static int getNumDocs(IndexReader reader) {
		return reader.numDocs();
	}

	/**
	 * Returns the largest {@code docFreq} among the given terms,
	 * or 0 when the array is empty.
	 *
	 * @param terms term records to scan; must not be null
	 * @return the maximum document frequency found
	 */
	public static int getMaxFreq(TermInfo[] terms) {
		int max = 0;
		for (TermInfo termInfo : terms) {
			if (termInfo.docFreq > max) {
				max = termInfo.docFreq;
			}
		}
		return max;
	}

	/**
	 * Collects up to {@code numTerms} terms from the given fields of the index,
	 * recording each term's document frequency and the ids of the documents it
	 * occurs in. Terms containing no Chinese character are skipped
	 * (see {@link #isIllegaleWord(String)}).
	 *
	 * @param reader   open index reader; when null, this method returns null
	 * @param numTerms maximum number of terms to collect
	 * @param fields   accepted field names; when null, this method returns null;
	 *                 when empty, terms from every field are accepted
	 * @return the collected terms, or null when {@code reader} or {@code fields} is null
	 * @throws Exception if reading the index fails
	 */
	public static TermInfo[] getTerms(IndexReader reader, int numTerms,
			String[] fields) throws Exception {
		if (reader == null || fields == null)
			return null;
		List<TermInfo> collected = new ArrayList<TermInfo>();
		TermEnum terms = reader.terms();
		try {
			// Bound checked each iteration, before any filtering, to match the
			// original early-break semantics.
			while (terms.next() && collected.size() < numTerms) {
				// An empty fields array disables field filtering entirely.
				if (fields.length > 0 && !containsField(fields, terms.term().field()))
					continue;
				// Keep only terms with at least one Chinese character.
				if (isIllegaleWord(terms.term().text()))
					continue;
				Vector<Integer> docIds = new Vector<Integer>();
				TermDocs tDocs = reader.termDocs(terms.term());
				try {
					while (tDocs.next()) {
						docIds.add(tDocs.doc());
					}
				} finally {
					// Close the postings enumerator even if iteration throws.
					tDocs.close();
				}
				collected.add(new TermInfo(terms.term(), terms.docFreq(), docIds));
			}
		} finally {
			// Close the term enumerator even if collection throws.
			terms.close();
		}
		return collected.toArray(new TermInfo[collected.size()]);
	}

	/**
	 * Returns true when {@code fields} contains {@code field} (exact match).
	 */
	private static boolean containsField(String[] fields, String field) {
		for (String candidate : fields) {
			if (field.equals(candidate)) {
				return true;
			}
		}
		return false;
	}

	/**
	 * Returns true when the word contains no Chinese character at all,
	 * i.e. when it should be excluded from the corpus.
	 *
	 * @param word the term text to test; must not be null
	 * @return true when no character in {@code [\u4e00-\u9fa5]} occurs in the word
	 */
	public static boolean isIllegaleWord(String word) {
		return !CHINESE.matcher(word).find();
	}
}
