package com.sduty.lucene.index;

import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.StringReader;
import java.nio.file.FileSystems;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.IndexableFieldType;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.store.FSDirectory;
import org.wltea.analyzer.lucene.IKAnalyzer;

import com.sduty.lucene.FileRW;

import opennlp.tools.ml.TrainerFactory.TrainerType;

/**
 * Builds a Lucene index for crawled documents, using the IK Chinese analyzer.
 * <p>
 * The constructor registers one {@link IndexableFieldType} per known field name
 * (id, title, content, stock metadata, ...); {@code createIndex(Map)} turns
 * key/value pairs into Lucene {@link Document}s using those registered types.
 * <p>
 * NOTE(review): index paths are hard-coded Windows paths — confirm before
 * deploying anywhere else. Not thread-safe: each call opens its own writer,
 * but concurrent calls against the same directory will contend for the lock.
 *
 * @author ygsong.abcft
 */
public class IndexHandler {

	/** Per-field index configuration, keyed by field name. */
	final Map<String, IndexableFieldType> field;

	public IndexHandler() {
		field = new HashMap<>();
		field.put("id", stored(IndexOptions.DOCS));
		field.put("title", fullText());
		field.put("publish_at", stored(IndexOptions.DOCS));
		field.put("industry_id", storedDocValues(DocValuesType.SORTED_SET));
		field.put("industry", stored(IndexOptions.DOCS));
		field.put("industry_last", storedDocValues(DocValuesType.SORTED_SET));
		field.put("stockcode", storedDocValues(DocValuesType.SORTED));
		field.put("stockname", storedDocValues(DocValuesType.SORTED));
		field.put("file_type", storedDocValues(DocValuesType.SORTED));
		field.put("file_size", stored(IndexOptions.NONE));
		// BUG FIX: page_count was configured but never registered, so any
		// "page_count" entry passed to createIndex(Map) hit a null field type.
		field.put("page_count", stored(IndexOptions.NONE));
		field.put("category", storedDocValues(DocValuesType.SORTED_SET));
		field.put("category_id", storedDocValues(DocValuesType.SORTED_SET));
		field.put("content", fullText());
		field.put("src_id", stored(IndexOptions.NONE));
		field.put("tag", storedDocValues(DocValuesType.SORTED_SET));
		field.put("crawler_type", stored(IndexOptions.NONE));
		field.put("oss_path", stored(IndexOptions.NONE));
		field.put("create_at", stored(IndexOptions.NONE));
		field.put("url", stored(IndexOptions.NONE));
	}

	/** Stored field with the given index options and no doc values. */
	private static FieldType stored(IndexOptions options) {
		FieldType type = new FieldType();
		type.setIndexOptions(options);
		type.setStored(true);
		return type;
	}

	/** Stored, DOCS-indexed field that also carries doc values for sorting/faceting. */
	private static FieldType storedDocValues(DocValuesType docValuesType) {
		FieldType type = new FieldType();
		type.setIndexOptions(IndexOptions.DOCS);
		type.setStored(true);
		type.setDocValuesType(docValuesType);
		return type;
	}

	/**
	 * Stored, tokenized full-text field with positions/offsets and full term
	 * vectors — used for title and content so highlighting is possible.
	 */
	private static FieldType fullText() {
		FieldType type = new FieldType();
		type.setTokenized(true);
		type.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
		type.setStored(true);
		type.setStoreTermVectors(true);
		type.setStoreTermVectorOffsets(true);
		type.setStoreTermVectorPositions(true);
		type.setStoreTermVectorPayloads(true);
		return type;
	}

	/**
	 * Rebuilds the product index at {@code E:\search\lucene\index\product}.
	 * <p>
	 * NOTE(review): this method looks unfinished — {@code id} is unused, and
	 * the document list is sized from a directory listing but never populated,
	 * so the net effect today is deleteAll() followed by an empty commit
	 * (wiping the index). Confirm intent before relying on it.
	 *
	 * @param id currently unused; presumably a document/source id — TODO confirm
	 */
	public void createIndex(int id) {
		// try-with-resources closes both the directory and the writer
		// (the original leaked the FSDirectory).
		try (FSDirectory directory = FSDirectory.open(
				FileSystems.getDefault().getPath("E:\\search\\lucene\\index\\product"))) {
			Analyzer analyzer = new IKAnalyzer(); // IK Chinese tokenizer
			IndexWriterConfig config = new IndexWriterConfig(analyzer);
			config.setOpenMode(OpenMode.CREATE_OR_APPEND);
			try (IndexWriter indexWriter = new IndexWriter(directory, config)) {
				File sourceDir = new File("D:\\Mallet\\sample-data\\web\\de");
				// listFiles() returns null when the path is missing or not a
				// directory — guard against the NPE the original would throw.
				File[] children = sourceDir.listFiles();
				List<Document> docList = new ArrayList<>(children == null ? 0 : children.length);
				indexWriter.deleteAll(); // clear any previous index contents
				// BUG FIX: the original added docList twice (a per-document
				// loop followed by addDocuments), which would double-index
				// every document once the list is actually populated.
				indexWriter.addDocuments(docList);
				indexWriter.commit();
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Indexes the given key/value pairs into {@code E:/log/data}.
	 * <p>
	 * NOTE(review): each map entry becomes its OWN single-field Document —
	 * if the map represents one logical record, a single Document with all
	 * fields was probably intended; preserved as-is, confirm with callers.
	 *
	 * @param data field name → field value; every key must be registered in
	 *             the constructor's field-type map
	 * @throws IllegalArgumentException if a key has no registered field type
	 */
	public void createIndex(Map<String, String> data) {
		try (FSDirectory directory = FSDirectory.open(
				FileSystems.getDefault().getPath("E:/log/data"))) {
			Analyzer analyzer = new IKAnalyzer(); // IK Chinese tokenizer
			IndexWriterConfig config = new IndexWriterConfig(analyzer);
			config.setOpenMode(OpenMode.CREATE_OR_APPEND);
			try (IndexWriter indexWriter = new IndexWriter(directory, config)) {
				List<Document> docList = new ArrayList<>(data.size());
				for (Entry<String, String> entry : data.entrySet()) {
					IndexableFieldType fieldType = this.field.get(entry.getKey());
					if (fieldType == null) {
						// Fail with a clear message instead of the opaque NPE
						// the original threw inside the Field constructor.
						throw new IllegalArgumentException(
								"No index configuration for field: " + entry.getKey());
					}
					Document doc = new Document();
					doc.add(new Field(entry.getKey(), entry.getValue(), fieldType));
					docList.add(doc);
				}
				indexWriter.addDocuments(docList);
				indexWriter.commit();
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/** Ad-hoc entry point: loads a crawled log file and indexes its fields. */
	public static void main(String[] args) {
		FileRW rw = new FileRW();
		Map<String, String> data = rw.load("huanqiu.log");
		IndexHandler handler = new IndexHandler();
		handler.createIndex(data);
	}
}
