/**
 * 
 */
package cn.jhz.filesharingsystem.dao;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.springframework.stereotype.Repository;

import cn.jhz.filesharingsystem.model.FileArticle;
import cn.jhz.filesharingsystem.model.Pager;
import cn.jhz.filesharingsystem.model.SharedFile;
import cn.jhz.filesharingsystem.model.SystemContext;
import cn.jhz.filesharingsystem.util.LuceneUtil;

/**
 * IndexDao的实例,通过LuceneUtil工具类获取IndexWriter实例实现对索引库的增加,删除,修改
  *  声明为Spring的Repository组件
 * @author asus
 */
@Repository("indexDao")
public class IndexDaoImpl implements IndexDao {

	/**
	 * Adds one document to the Lucene index for the given shared file.
	 *
	 * @param index the shared file to index
	 * @throws IOException if the index writer fails
	 */
	@Override
	public void addIndex(SharedFile index) throws IOException {
		IndexWriter writer = LuceneUtil.getIndexWriter();
		writer.addDocument(buildDocument(index));
		writer.commit();
	}

	/**
	 * Deletes every document whose {@code articleId} term equals the given id.
	 *
	 * @param id the articleId of the document(s) to remove
	 * @throws IOException if the index writer fails
	 */
	@Override
	public void deleteIndex(String id) throws IOException {
		IndexWriter writer = LuceneUtil.getIndexWriter();
		writer.deleteDocuments(new Term("articleId", id));
		writer.commit();
	}

	/**
	 * Replaces the indexed document matching this file's {@code articleId}
	 * with a freshly built one (Lucene delete-then-add semantics).
	 *
	 * @param index the shared file whose index entry is refreshed
	 * @throws IOException if the index writer fails
	 */
	@Override
	public void updateIndex(SharedFile index) throws IOException {
		IndexWriter writer = LuceneUtil.getIndexWriter();
		writer.updateDocument(new Term("articleId", index.getIndex()), buildDocument(index));
		writer.commit();
	}

	/**
	 * Runs the query, highlights matches, and returns one page of results
	 * wrapped in a {@code Pager<FileArticle>}.
	 *
	 * <p>Paging window comes from {@link SystemContext} via {@link #setPager};
	 * {@code pager.getOffset()} is used directly as the starting row index
	 * into the hit array.
	 *
	 * @param query         the parsed Lucene query
	 * @param indexSearcher searcher over the index
	 * @return a pager holding the current page of highlighted articles
	 * @throws IOException                  if the search or doc fetch fails
	 * @throws InvalidTokenOffsetsException if highlighting fails
	 */
	@Override
	public Pager<FileArticle> getSearchPage(Query query, IndexSearcher indexSearcher)
			throws IOException, InvalidTokenOffsetsException {

		// Cap at 1000 hits; total is reported from this capped result set.
		TopDocs topDocs = indexSearcher.search(query, 1000);
		ScoreDoc[] scoreDocs = topDocs.scoreDocs;

		Pager<FileArticle> pager = new Pager<>();
		// Must run BEFORE offset/size are read below — the original computed
		// the page window from the pager's uninitialized defaults.
		setPager(pager);
		pager.setTotal(scoreDocs.length);

		// End of the current page, clamped so we never index past the hit
		// array. NOTE(review): offset is treated as a row offset (it is the
		// loop's start index), so the window is [offset, offset + size).
		int endSize = Math.min(pager.getOffset() + pager.getSize(), scoreDocs.length);

		Analyzer analyzer = LuceneUtil.getAnalyzer();
		// Fragment sizes: 31 chars for titles, 80 for notes.
		Highlighter titleHighlighter = LuceneUtil.getHighlighter(query, 31);
		Highlighter contentHighlighter = LuceneUtil.getHighlighter(query, 80);

		List<FileArticle> articles = new ArrayList<>();
		for (int i = pager.getOffset(); i < endSize; i++) {
			Document document = indexSearcher.doc(scoreDocs[i].doc);

			String fileName = document.get("fileName");
			String notes = document.get("notes");

			// Append the ellipsis before highlighting so a truncated fragment
			// still carries the "..." marker.
			String title = titleHighlighter.getBestFragment(analyzer, "fileName",
					fileName.length() > 31 ? fileName + "..." : fileName);
			// was "> 31" — must match the 80-char fragment size used for notes
			String content = contentHighlighter.getBestFragment(analyzer, "notes",
					notes.length() > 80 ? notes + "..." : notes);

			FileArticle fileArticle = new FileArticle();
			fileArticle.articleId = document.get("articleId");
			// getBestFragment returns null when the field has no match — fall
			// back to a plain truncated value.
			fileArticle.fileName = title != null ? title : truncate(fileName, 31);
			fileArticle.size = document.get("size");
			fileArticle.notes = content != null ? content : truncate(notes, 80);
			fileArticle.type = document.get("type");
			fileArticle.provider = document.get("provider");
			fileArticle.link = document.get("link");

			articles.add(fileArticle);
		}

		pager.setRows(articles);
		return pager;
	}

	/**
	 * Builds the Lucene document for one shared file. Shared by
	 * {@link #addIndex} and {@link #updateIndex} (the original duplicated
	 * this code in both methods).
	 *
	 * <p>{@code articleId} is a {@code StringField} (not tokenized) so it can
	 * be matched exactly by delete/update terms; all other fields are
	 * tokenized {@code TextField}s. {@code Store.YES} keeps the original value
	 * retrievable from search results.
	 */
	private Document buildDocument(SharedFile index) {
		Document doc = new Document();
		doc.add(new StringField("articleId", index.getIndex(), Store.YES));
		doc.add(new TextField("fileName", stripExtension(index.getFileName()), Store.YES));
		doc.add(new TextField("size", String.valueOf(index.getFileSize()), Store.YES));
		doc.add(new TextField("type", index.getFileType(), Store.YES));
		doc.add(new TextField("provider", index.getProvider().getUsername(), Store.YES));
		doc.add(new TextField("notes", index.getNotes(), Store.YES));
		doc.add(new TextField("link", "/filesharingsystem/files?id=" + index.getId(), Store.YES));
		return doc;
	}

	/**
	 * Drops the trailing ".ext" from a file name. Returns the name unchanged
	 * when it contains no dot (the original threw
	 * {@code StringIndexOutOfBoundsException} in that case).
	 */
	private static String stripExtension(String fileName) {
		int dot = fileName.lastIndexOf('.');
		return dot < 0 ? fileName : fileName.substring(0, dot);
	}

	/** Truncates {@code s} to {@code max} chars, appending "..." when cut. */
	private static String truncate(String s, int max) {
		return s.length() <= max ? s : s.substring(0, max) + "...";
	}

	/**
	 * Copies the thread-bound paging parameters from {@link SystemContext}
	 * into the pager, substituting defaults (offset 0, page size 10) when
	 * unset or negative.
	 *
	 * @param pager the pager to initialize
	 */
	private void setPager(Pager<FileArticle> pager) {
		Integer pageSize = SystemContext.getPageSize();
		Integer pageOffset = SystemContext.getPageOffset();

		if (pageOffset == null || pageOffset < 0) {
			pageOffset = 0; // default starting offset
		}
		if (pageSize == null || pageSize < 0) {
			pageSize = 10; // no page size configured — default to 10 rows
		}
		pager.setOffset(pageOffset);
		pager.setSize(pageSize);
	}

}
