package com.wing.lucene;

import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.StringReader;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.html.dom.HTMLDocumentImpl;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.Version;
import org.cyberneko.html.parsers.DOMFragmentParser;
import org.w3c.dom.DocumentFragment;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.wltea.analyzer.lucene.IKAnalyzer;
import org.wltea.analyzer.lucene.IKQueryParser;
import org.wltea.analyzer.lucene.IKSimilarity;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;

import com.wing.common.util.ApplicationPath;

/**
 * 管理文章时，对索引的操作
 * 
 * @author wyf
 * 
 */
public class ArticleIndex {

	/**
	 * 
	 * 添加索引
	 * 
	 */
	public void addIndex(String articleid, String art_name, String summary,
			String keyword, String author, String content, String siteId,boolean isOptimize) {
		try {
			File indexpath = new File(ApplicationPath.getParameter("appRoot")
					+ "/lucene/articleIndex");
			Directory directory = FSDirectory.open(indexpath);
			Analyzer analyzer = new IKAnalyzer(false);
			IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_33,
					analyzer);
			if (!indexpath.exists()) {
				iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
			} else {
				iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
			}
			IndexWriter writer;
			writer = new IndexWriter(directory, iwc);
			writer.addDocument(buildDoc(articleid, art_name, summary, keyword,
					author, extractTextFromHTML(content), siteId));
			writer.commit();
			if(isOptimize){
				writer.optimize();
			}
			writer.close();
		} catch (CorruptIndexException e) {
			e.printStackTrace();
		} catch (LockObtainFailedException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		}

	}

	public void deleteIndexByArtId(String articleid) {
		try {
			Term term = new Term("articleid", articleid);
			File indexpath = new File(ApplicationPath.getParameter("appRoot")
					+ "/lucene/articleIndex");
			if(!indexpath.exists())return;
			Directory directory = FSDirectory.open(indexpath);
			IndexReader reader = IndexReader.open(directory, false);
			reader.deleteDocuments(term);
			reader.close();

		} catch (CorruptIndexException e) {
			e.printStackTrace();
		} catch (LockObtainFailedException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		}
	}
	
	public void deleteIndexBySiteId(String siteId) {
		try {
			Term term = new Term("siteId", siteId);
			File indexpath = new File(ApplicationPath.getParameter("appRoot")
					+ "/lucene/articleIndex");
			if(!indexpath.exists())return;
			Directory directory = FSDirectory.open(indexpath);
			IndexReader reader = IndexReader.open(directory, false);
			reader.deleteDocuments(term);
			reader.close();

		} catch (CorruptIndexException e) {
			e.printStackTrace();
		} catch (LockObtainFailedException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	public Map searchArticle(String keyword, int pageNo, int pageSize,
			String siteId) throws IOException, ParseException {
		Map mapResult = new HashMap();
		List<String> sl = new ArrayList<String>();
		if (keyword == null || "".equals(keyword)) {
			return mapResult;
		}
		IndexSearcher searcher = new IndexSearcher(FSDirectory.open(new File(
				ApplicationPath.getParameter("appRoot")
						+ "/lucene/articleIndex")));
		searcher.setSimilarity(new IKSimilarity());

		System.out.println("索引里的文章数"+searcher.maxDoc());
		
		// 多字段，单条件查询
		Analyzer aStandardAnalyzer = new IKAnalyzer();
		
		QueryParser parser = new QueryParser(Version.LUCENE_33, "art_name",
				aStandardAnalyzer);
		parser.setDefaultOperator(QueryParser.OR_OPERATOR);// 设置检索的条件.OR_OPERATOR表示"或"
		Query query0 = parser.parse(keyword);
		
		QueryParser querysummary = new QueryParser(Version.LUCENE_33,
				"summary", aStandardAnalyzer);// 检索content列
		querysummary.setDefaultOperator(QueryParser.OR_OPERATOR);// 设置检索的条件.OR_OPERATOR表示"或"
		Query query1 = querysummary.parse(keyword);
 
		QueryParser querykeyword = new QueryParser(Version.LUCENE_33,
				"keyword", aStandardAnalyzer);// 检索content列
		querykeyword.setDefaultOperator(QueryParser.OR_OPERATOR);// 设置检索的条件.OR_OPERATOR表示"或"
		Query query2 = querykeyword.parse(keyword);
		
		QueryParser queryauthor = new QueryParser(Version.LUCENE_33,
				"author", aStandardAnalyzer);// 检索content列
		queryauthor.setDefaultOperator(QueryParser.OR_OPERATOR);// 设置检索的条件.OR_OPERATOR表示"或"
		Query query3 = queryauthor.parse(keyword);
		
		QueryParser querycontent = new QueryParser(Version.LUCENE_33,
				"content", aStandardAnalyzer);// 检索content列
		querycontent.setDefaultOperator(QueryParser.OR_OPERATOR);// 设置检索的条件.OR_OPERATOR表示"或"
		Query query4 = querycontent.parse(keyword);
		

		BooleanQuery m_BooleanQuery = new BooleanQuery();// 得到一个组合检索对象

		BooleanQuery bq = new BooleanQuery();
		bq.add(query0, BooleanClause.Occur.SHOULD);
		bq.add(query1, BooleanClause.Occur.SHOULD);
		bq.add(query2, BooleanClause.Occur.SHOULD);
		bq.add(query3, BooleanClause.Occur.SHOULD);
		bq.add(query4, BooleanClause.Occur.SHOULD);

		m_BooleanQuery.add(bq, BooleanClause.Occur.MUST);
		m_BooleanQuery.add(IKQueryParser.parse("siteId",siteId), BooleanClause.Occur.MUST);

		// 执行查询 。Filter: 用来过虑搜索结果的对象 n指的是最多返回的Document的数量 。 //搜索相似度最高的n条记录
		TopDocs ts = searcher.search(m_BooleanQuery, 500);
		int totalHits = ts.totalHits; // 获取命中数
		//System.out.println("命中数：" + totalHits);
		// 获取命中的文档信息对象 查询结果信息 。 它包括符合条件的Document的内部编号(doc)及评分(score) 。
		ScoreDoc[] hits = ts.scoreDocs; // 老版本中 Hits --length
		mapResult.put("count", hits.length);
		for (int i = 0; i < hits.length; i++) {
			if(i < pageSize*(pageNo-1))continue;
			if(i >= pageSize*pageNo)break;
			// 根据命中的文档的内部编号获取该文档 老版本中 Hits --doc(n)
			Document hitDoc = searcher.doc(hits[i].doc);
			// 输出该文档指定域的值 老版本中 Hits --id(n)
			sl.add(hitDoc.get("articleid"));
			//System.out.println(hitDoc.get("articleid") + "^^^^^^^^"+ hitDoc.get("art_name"));
			// 以上的方法需要在文档还没有被放入缓存之前，就将其从索引中读取出来。因此我们建议你，
			// 若非真正需要显示或者访问这些文档，就不要调用这些方法。
		}
		searcher.close();
		mapResult.put("artids", sl);
		return mapResult;
	}

	/**
	 * 从html中抽取纯文本
	 * 
	 * @param content
	 * @return
	 * @throws UnsupportedEncodingException
	 */
	private String extractTextFromHTML(String content)
			throws UnsupportedEncodingException {
		if (content == null || content.trim().length() == 0) {
			return "";
		}
		content = content.replaceAll("&nbsp;", "");
		DOMFragmentParser parser = new DOMFragmentParser();
		DocumentFragment node = new HTMLDocumentImpl().createDocumentFragment();
		InputStream is = new ByteArrayInputStream(content.getBytes());
		try {
			parser.parse(new InputSource(is), node);
		} catch (IOException e) {
			e.printStackTrace();
		} catch (SAXException se) {
			se.printStackTrace();
		}

		StringBuffer newContent = new StringBuffer();
		this.getText(newContent, node);

		String str = (new String(
				newContent.toString().getBytes("Windows-1252"), "GBK"));
		return str;
	}

	private void getText(StringBuffer sb, Node node) {
		if (node.getNodeType() == Node.TEXT_NODE) {
			sb.append(node.getNodeValue());
		}
		NodeList children = node.getChildNodes();
		if (children != null) {
			int len = children.getLength();
			for (int i = 0; i < len; i++) {
				getText(sb, children.item(i));
			}
		}
	}

	private Document buildDoc(String articleid, String art_name,
			String summary, String keyword, String author, String content,
			String siteId) {
		Document doc = new Document();
		doc.add(new Field("articleid", articleid, Field.Store.YES,
				Field.Index.NOT_ANALYZED));
		doc.add(new Field("siteId", siteId, Field.Store.YES,
				Field.Index.NOT_ANALYZED));
		doc.add(new Field("art_name", art_name, Field.Store.NO,
				Field.Index.ANALYZED));
		doc.add(new Field("summary", summary, Field.Store.NO,
				Field.Index.ANALYZED));
		doc.add(new Field("keyword", keyword, Field.Store.NO,
				Field.Index.ANALYZED));
		doc.add(new Field("author", author, Field.Store.NO,
				Field.Index.ANALYZED));
		doc.add(new Field("content", content, Field.Store.NO,
				Field.Index.ANALYZED));
		return doc;
	}

	public static void main(String[] args) {
		ArticleIndex index = new ArticleIndex();
//		 index.addIndex("1", "中华人民共和国", "是一个结合词典分词和文法分词", "", "", "", "1");
//		 index.addIndex("2", "中华人民共和国", "是一个结合词典分词和文法分词帝国真好", "", "", "","1");
//		 index.addIndex("3", "中华人民共和国", "是一个结合词典分词和文法分词真好大厦", "", "", "", "1");
//		 index.addIndex("4", "中华人民共和国", "是一个结合词典分词和文法分词大厦", "", "", "", "1");
//		 index.addIndex("5", "中华人民共和国", "啊", "", "", "", "1");
//		 index.addIndex("6", "中华人民共和国", "", "", "", "", "1");
//		 index.addIndex("7", "中华人民共和国", "", "", "", "", "1");
//		 index.addIndex("8", "中华人民共和国", "", "", "", "", "1");
//		 index.addIndex("9", "中华人民共和国", "", "", "", "", "1");
		 
//		 index.addIndex("1", "中华人民共和国", "", "", "", "", "00000001a0013225c35aa8");
//		 index.addIndex("2", "", "中华人民共和国万岁", "", "", "","00000001a0013225c35aa8");
//		 index.addIndex("3", "", "", "中华人民共和国万岁", "", "", "00000001a0013225c35aa8");
//		 index.addIndex("4", "", "", "", "中华人民共和国万岁", "", "00000001a0013225c35aa8");
//		 index.addIndex("5", "", "", "", "", "中华人民共和国万岁", "00000001a0013225c35aa8");
//		 index.addIndex("6", "中华人民共和国万岁", "", "", "", "", "0000000200132244fe4f");
//		 index.addIndex("6", "", "中华人民共和国万岁", "", "", "", "0000000200132244fe4f");
		try {
			Map li = index.searchArticle("朋友", 1, 15, "0000000500130c066bb0");
			System.out.println(li.get("count"));
		} catch (Exception e) {
			// TODO Auto-generated catch block
			e.printStackTrace();
		}

	}
}
