package com.yaoandw.lucene;

import java.io.File;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogDocMergePolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import com.chenlb.mmseg4j.analysis.MMSegAnalyzer;
import com.yaoandw.crawl.MyCrawl;

public class PageIndexer {

	private IndexWriter writer;

	// NOTE: SimpleDateFormat is not thread-safe; this indexer must not be shared
	// across threads without external synchronization. Kept (rather than migrated
	// to DateTimeFormatter) to preserve the existing package-visible field.
	SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");

	/**
	 * Opens (or creates) a Lucene index at the given directory, configured with
	 * the MMSeg Chinese analyzer.
	 *
	 * @param indexDir file-system path of the index directory; created if absent
	 * @throws IOException if the directory cannot be created or the index opened
	 */
	public PageIndexer(String indexDir) throws IOException {
		File file = new File(indexDir);
		// Fail fast if the directory cannot be created instead of letting
		// FSDirectory.open produce a less obvious error later.
		if (!file.exists() && !file.mkdirs()) {
			throw new IOException("Could not create index directory: " + indexDir);
		}
		Directory dir = FSDirectory.open(file);

		IndexWriterConfig writerConfig = new IndexWriterConfig(Version.LUCENE_42,
				new MMSegAnalyzer());
		writerConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
		LogDocMergePolicy mergePolicy = new LogDocMergePolicy();
		mergePolicy.setMergeFactor(100);  // merge segments once 100 have accumulated
		mergePolicy.setMinMergeDocs(100); // raising these two improves throughput at the cost of RAM
		writerConfig.setMergePolicy(mergePolicy);
		writer = new IndexWriter(dir, writerConfig);

		// Commit immediately so that a brand-new index directory gets its
		// segments/version files written even before the first document is added.
		writer.commit();
	}

	/** Closes the underlying {@link IndexWriter}, committing pending changes. */
	public void close() throws IOException {
		writer.close();
	}

	/**
	 * Adds the given page to the index.
	 *
	 * @return the writer's document count after the add
	 */
	public int index(Page page) throws Exception {
		Document doc = getDocument(page);
		writer.addDocument(doc);
		return writer.numDocs();
	}

	/**
	 * Deletes all documents whose "fullpath" field exactly equals {@code url}.
	 * Requires "fullpath" to be indexed (see {@link #getDocument(Page)}).
	 */
	public void deleteIndex(String url) throws CorruptIndexException, IOException {
		writer.deleteDocuments(new Term("fullpath", url));
	}

	/**
	 * Replaces any existing document with the same URL by the freshly built one.
	 *
	 * @return the writer's document count after the update
	 */
	public int updateIndex(Page page) throws Exception {
		Document doc = getDocument(page);
		writer.updateDocument(new Term("fullpath", page.getUrl()), doc);
		return writer.numDocs();
	}

	/** Formats a date as "yyyy-MM-dd HH:mm:ss", or "" for {@code null}. */
	private String format(Date date) {
		if (date != null)
			return sdf.format(date);
		return "";
	}

	/**
	 * Builds the Lucene document for a page.
	 * <p>
	 * "fullpath" uses {@link StringField#TYPE_STORED} (indexed as a single
	 * untokenized term, and stored) because {@link #deleteIndex(String)} and
	 * {@link #updateIndex(Page)} match an exact {@link Term} on this field.
	 */
	protected Document getDocument(Page page) throws Exception {
		Document doc = new Document();
		doc.add(new Field("contents", page.getContent(), TextField.TYPE_NOT_STORED));
		doc.add(new Field("title", page.getTitle(), TextField.TYPE_STORED));
		// Stored for display only; not searchable.
		doc.add(new Field("crawltime", format(page.getCrawlTime()), StoredField.TYPE));
		// BUG FIX: was StoredField.TYPE (stored but NOT indexed), which made the
		// Term-based delete/update on "fullpath" a silent no-op.
		doc.add(new Field("fullpath", page.getUrl(), StringField.TYPE_STORED));
		if (page instanceof TxtPage) {
			// Stored for retrieval only; not searchable.
			doc.add(new Field("txtpath", ((TxtPage) page).getTxtFileUrl(), StoredField.TYPE));
		}
		return doc;
	}

	public static void main(String[] args) throws IOException {
		PageIndexer pageIndexer = new PageIndexer(MyCrawl.indexDir);
		try {
			pageIndexer.deleteIndex("http://192.168.1.186/test/index.html");
		} finally {
			// Ensure the index lock is released even if the delete fails.
			pageIndexer.close();
		}
	}
}
