/*******************************************************************************
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *******************************************************************************/
package org.ofbiz.lucenetools;

import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.ofbiz.base.util.Debug;
import org.ofbiz.base.util.UtilMisc;
import org.ofbiz.entity.Delegator;
import org.ofbiz.entity.GenericValue;
import org.ofbiz.entity.model.ModelEntity;
import org.ofbiz.lucenetools.fire.DocumentIndexer;
import org.ofbiz.service.DispatchContext;
import org.ofbiz.service.ServiceUtil;

/**
 * SearchServices Class
 */
public class LuceneOppoServices {

    public static final String module = LuceneOppoServices.class.getName();
    public static final String resource = "ContentUiLabels";

    

//    public static Map<String, Object> indexLuceneProduct(DispatchContext dctx, Map<String, ? extends Object> context) {
//        Delegator delegator = dctx.getDelegator();
//        String productId = (String) context.get("productId");
//        DocumentIndexer indexer = DocumentIndexer.getInstance(delegator, "products");
//        indexer.queue(new ProductDocument(productId));
//        return ServiceUtil.returnSuccess();
//    }
    //
    
    public static Map<String, Object> reIndexAllLuceneOpportunity(DispatchContext dctx, Map<String, ? extends Object> context) {
    	
  	  Map result = ServiceUtil.returnSuccess();
  	  Delegator delegator = dctx.getDelegator();
  	
		IndexWriter indexWriter = null;
		try
		{
			Directory directory =  FSDirectory.open(new File(DocumentIndexer.getIndexPath("opportunity")).toPath());
			
			Analyzer analyzer = new StandardAnalyzer();
			IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
			indexWriter = new IndexWriter(directory, indexWriterConfig);
			indexWriter.deleteAll();// 清除以前的index
			List<GenericValue> dbColtOpportunityInfo =	delegator.findAll("ColtOpportunity", true);
     		for(GenericValue dbColtOpportunity:dbColtOpportunityInfo) {
     			//String dataSourceId=dbColtOpportunity.getString("dataSourceId");
     			
     			Document document = new Document();
    			//document.add(new Field("dataSourceId" , dataSourceId, TextField.TYPE_STORED));
    			//https://gitee.com/Myzhang/luceneplus/blob/master/src/main/java/com/ld/lucenex/core/MyDocument.java
    			 //TODO 判断字段类型  例如数字 还有日期
    			 for(String fieldName:dbColtOpportunity.getAllKeys()) {
    				 String fieldValue =(String) dbColtOpportunity.get(fieldName);
    				 document.add(new Field(fieldName, fieldValue, TextField.TYPE_STORED));
    			 }
    			
    			indexWriter.addDocument(document);
     		
     		
     		}
			
			
		}
		catch (Exception e)
		{
			e.printStackTrace();
			Debug.logError(e.getMessage(), module);
		}
		finally
		{
			try
			{
				if(indexWriter != null) indexWriter.close();
			}
			catch (Exception e)
			{
				e.printStackTrace();
				Debug.logError(e.getMessage(), module);
			}
		}
		return result;
	}
    public static Map<String, Object> indexLuceneOpportunity(DispatchContext dctx, Map<String, ? extends Object> context) {
	
    	  Map result = ServiceUtil.returnSuccess();
    	  Delegator delegator = dctx.getDelegator();
    	  String dataSourceId = (String) context.get("dataSourceId");
		IndexWriter indexWriter = null;
		try
		{
			Directory directory =  FSDirectory.open(new File(DocumentIndexer.getIndexPath("opportunity")).toPath());
			
			Analyzer analyzer = new StandardAnalyzer();
			IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer);
			indexWriter = new IndexWriter(directory, indexWriterConfig);
			//indexWriter.deleteAll();// 清除以前的index
			
			
			Document document = new Document();
			
			document.add(new Field("dataSourceId" , dataSourceId , Field.Store.YES , Field.Index.NOT_ANALYZED , TermVector.YES));

//			document.add(new Field("id", "1", TextField.TYPE_STORED));
		
			
			 ModelEntity modelEntity = delegator.getModelEntity("ColtOpportunity");
			 List<String> noPkFieldNames =modelEntity.getNoPkFieldNames();
			 //TODO 判断字段类型  例如数字 还有日期
			 for(String fieldName:noPkFieldNames) {
				 String fieldValue =(String) context.get(fieldName);
				 document.add(new Field(fieldName, fieldValue, TextField.TYPE_STORED));
				 //document.add(new Field("title", "android 上海 中国", TextField.TYPE_STORED));
			 }
			Term id=new Term("dataSourceId",dataSourceId);
            indexWriter.updateDocument(id,document);
			//indexWriter.addDocument(document);
			
		}
		catch (Exception e)
		{
			e.printStackTrace();
			Debug.logError(e.getMessage(), module);
		}
		finally
		{
			try
			{
				if(indexWriter != null) indexWriter.close();
			}
			catch (Exception e)
			{
				e.printStackTrace();
				Debug.logError(e.getMessage(), module);
			}
		}
		return result;
	}
	
	/**
	 * 搜索
	 */
    public static Map<String, Object> searchLuceneOpportunity(DispatchContext dctx, Map<String, ? extends Object> context) {
        Delegator delegator = dctx.getDelegator();
      //  String productId = (String) context.get("productId");
        Map result = ServiceUtil.returnSuccess();
//        String pageIndex = (String) context.get("pageIndex");
//        String pageSize = (String) context.get("pageSize");
        
        String keyWord = (String) context.get("keyWord");
		DirectoryReader directoryReader = null;
		try
		{
			// 1、创建Directory
			Directory directory =  FSDirectory.open(new File(DocumentIndexer.getIndexPath("products")).toPath());
			// 2、创建IndexReader
			directoryReader = DirectoryReader.open(directory);
			// 3、根据IndexReader创建IndexSearch
			IndexSearcher indexSearcher = new IndexSearcher(directoryReader);
			// 4、创建搜索的Query
			 Analyzer analyzer = new StandardAnalyzer();
			
			// 简单的查询，创建Query表示搜索域为content包含keyWord的文档
			//Query query = new QueryParser("content", analyzer).parse(keyWord);
			
			String[] fields = {"title", "content", "tag"};
			// MUST 表示and，MUST_NOT 表示not ，SHOULD表示or
			BooleanClause.Occur[] clauses = {BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD};
			// MultiFieldQueryParser表示多个域解析， 同时可以解析含空格的字符串，如果我们搜索"上海 中国" 
			Query multiFieldQuery = MultiFieldQueryParser.parse(keyWord, fields, clauses, analyzer);
			
			// 5、根据searcher搜索并且返回TopDocs
			TopDocs topDocs = indexSearcher.search(multiFieldQuery, 200); // 搜索前200条结果
			Debug.log("共找到匹配处：" + topDocs.totalHits);
			// 6、根据TopDocs获取ScoreDoc对象
			ScoreDoc[] scoreDocs = topDocs.scoreDocs;
			Debug.log("共找到匹配文档数：" + scoreDocs.length);
			List nfsList=new ArrayList();
			for (ScoreDoc scoreDoc : scoreDocs)
			{
				// 7、根据searcher和ScoreDoc对象获取具体的Document对象
				Document document = indexSearcher.doc(scoreDoc.doc);
//				System.out.println("文章标题："+document.get("title"));
//				System.out.println("文章内容：" + document.get("content"));

				String productId = document.get("productId");
				GenericValue letProduct =delegator.findOne("LetProduct", true, UtilMisc.toMap("productId", productId));
				nfsList.add(letProduct);
				// 8、根据Document对象获取需要的值
			}
			result.put("nfsList", nfsList);
		}
		catch (Exception e)
		{
			e.printStackTrace();
			Debug.logError(e.getMessage(), module);
		}
		finally
		{
			try
			{
				if(directoryReader != null) directoryReader.close();
			}
			catch (Exception e)
			{
				e.printStackTrace();
				Debug.logError(e.getMessage(), module);
			}
		}
		
		
		return result;
	}

   
    

}
