package edu.unika.aifb.graphindex.searcher.keyword;

/**
 * Copyright (C) 2009 Lei Zhang (beyondlei at gmail.com)
 * Copyright (C) 2009 Günter Ladwig (gla at aifb.uni-karlsruhe.de)
 * 
 * This file is part of the graphindex project.
 *
 * graphindex is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2
 * as published by the Free Software Foundation.
 * 
 * graphindex is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * 
 * You should have received a copy of the GNU General Public License
 * along with graphindex.  If not, see <http://www.gnu.org/licenses/>.
 */

import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.TreeMap;

import joptsimple.OptionParser;
import joptsimple.OptionSet;

import org.apache.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.KeywordAnalyzer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.document.SetBasedFieldSelector;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermFreqVector;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import edu.unika.aifb.graphindex.index.IndexDirectory;
import edu.unika.aifb.graphindex.model.IAttribute;
import edu.unika.aifb.graphindex.model.IEntity;
import edu.unika.aifb.graphindex.model.INamedConcept;
import edu.unika.aifb.graphindex.model.IRelation;
import edu.unika.aifb.graphindex.model.impl.Attribute;
import edu.unika.aifb.graphindex.model.impl.Entity;
import edu.unika.aifb.graphindex.model.impl.NamedConcept;
import edu.unika.aifb.graphindex.model.impl.Relation;
import edu.unika.aifb.graphindex.searcher.keyword.model.Constant;
import edu.unika.aifb.graphindex.searcher.keyword.model.KeywordElement;
import edu.unika.aifb.graphindex.searcher.keyword.model.KeywordSegment;
import edu.unika.aifb.graphindex.storage.StorageException;
import edu.unika.aifb.graphindex.util.StringSplitter;
import edu.unika.aifb.graphindex.util.TypeUtil;

/**
 * Searches a Lucene keyword index for schema elements (concepts, relations,
 * attributes) and a Lucene value index for entities whose data values match a
 * set of user-supplied keywords. Results are grouped into
 * {@link KeywordSegment}s, i.e. the subsets of the query keywords that a group
 * of elements matches.
 */
public class KeywordSearcher {
	// reader/searcher pair for the schema keyword index
	private IndexReader reader;
	private IndexSearcher searcher;

	// reader/searcher pair for the entity value index
	private IndexReader valueReader;
	private IndexSearcher valueSearcher;

	// highest document score seen during the current search (reset per query)
	private double maxScore = 1.0;
	// whether schema elements (concepts/relations/attributes) are searched at all
	private boolean m_searchSchema = true;

	public static final double ENTITY_THRESHOLD = 0.8;
	// minimum Lucene score for a schema hit to be accepted
	public static final double SCHEMA_THRESHOLD = 1.8;
	// maximum number of hits retrieved per Lucene search (adjustable via setCutoff)
	public static int MAX_KEYWORDRESULT_SIZE = 1000;

	private static final Logger log = Logger.getLogger(KeywordSearcher.class);

	/**
	 * Opens the keyword and value indexes below the given index directory.
	 *
	 * @param idxReader provides the locations of the keyword and value indexes
	 * @throws StorageException if either index cannot be opened
	 */
	public KeywordSearcher(edu.unika.aifb.graphindex.index.IndexReader idxReader) throws StorageException {
		try {
			reader = IndexReader.open(FSDirectory.open(idxReader.getIndexDirectory().getDirectory(IndexDirectory.KEYWORD_DIR)));
			searcher = new IndexSearcher(reader);

			valueReader = IndexReader.open(FSDirectory.open(idxReader.getIndexDirectory().getDirectory(IndexDirectory.DB_IDX_DIR)));
			valueSearcher = new IndexSearcher(valueReader);
		} catch (IOException e) {
			// Fail fast: swallowing the exception here would leave the
			// reader/searcher fields null and cause NPEs on first use.
			throw new StorageException(e);
		}
	}

	/** Sets the maximum number of hits retrieved per Lucene search. */
	public static void setCutoff(int cutoff) {
		MAX_KEYWORDRESULT_SIZE = cutoff;
	}

	/** Enables or disables searching for schema elements. */
	public void enableSearchSchema(boolean searchSchema) {
		m_searchSchema = searchSchema;
	}

	/**
	 * Main entry point: maps the query keywords to keyword elements, grouped
	 * by the keyword segment they match.
	 *
	 * <p>Note: {@code queries} is modified in place — keywords that match
	 * schema elements are removed before the value search (see
	 * {@link #parseQueries}).
	 *
	 * @param queries the list of query keywords (mutated by this call)
	 * @return a map from keyword segments to the elements matching them
	 * @throws StorageException if a Lucene search fails
	 */
	public Map<KeywordSegment, Collection<KeywordElement>> searchKeywordElements(List<String> queries) throws StorageException, IOException {
		resetMaxScore();
		Map<String, Collection<KeywordElement>> conceptsAndRelations = new HashMap<String, Collection<KeywordElement>>();
		Map<String, Collection<KeywordElement>> attributes = new HashMap<String, Collection<KeywordElement>>();

		try {
			parseQueries(queries, conceptsAndRelations, attributes);
		} catch (IOException e) {
			throw new StorageException(e);
		}

		Map<KeywordSegment, Collection<KeywordElement>> segments2Entities = new TreeMap<KeywordSegment, Collection<KeywordElement>>();

		try {
			searchEntitiesByValues(queries, segments2Entities);
		} catch (IOException e) {
			throw new StorageException(e);
		}

		if (m_searchSchema) {
			mergeIntoSegments(conceptsAndRelations, segments2Entities);
			mergeIntoSegments(attributes, segments2Entities);
		}

		return segments2Entities;
	}

	/**
	 * Merges schema matches (keyed by the raw keyword) into the segment map,
	 * creating single-keyword segments as needed and (re)tagging every element
	 * of the affected segment with it.
	 */
	private void mergeIntoSegments(Map<String, Collection<KeywordElement>> keyword2Elements,
			Map<KeywordSegment, Collection<KeywordElement>> segments2Entities) {
		for (String keyword : keyword2Elements.keySet()) {
			KeywordSegment ks = new KeywordSegment(keyword);
			Collection<KeywordElement> uris = segments2Entities.get(ks);
			if (uris == null) {
				uris = new HashSet<KeywordElement>();
				segments2Entities.put(ks, uris);
			}
			uris.addAll(keyword2Elements.get(keyword));
			for (KeywordElement keywordElement : uris)
				keywordElement.setKeywordSegment(ks);
		}
	}

	/**
	 * Looks up schema elements for the keywords and removes every keyword that
	 * matched a schema element from {@code queries}, so only the remaining
	 * keywords take part in the entity value search.
	 */
	private void parseQueries(List<String> queries,
			Map<String, Collection<KeywordElement>> conceptsAndRelations,
			Map<String, Collection<KeywordElement>> attributes) throws IOException {

		if (m_searchSchema)
			searchSchema(queries, conceptsAndRelations, attributes);

		for (String attributeKeyword : attributes.keySet())
			queries.remove(attributeKeyword);

		for (String conceptsAndRelationsKeyword : conceptsAndRelations.keySet())
			queries.remove(conceptsAndRelationsKeyword);
	}

	/**
	 * Searches the schema index and sorts the hits into attribute matches and
	 * concept/relation matches per keyword.
	 *
	 * @return the set of keywords that produced at least one schema hit
	 */
	private Collection<String> searchSchema(List<String> queries,
			Map<String, Collection<KeywordElement>> conceptsAndRelations,
			Map<String, Collection<KeywordElement>> attributes) throws IOException {

		Map<String, Collection<KeywordElement>> keywordElements = getSchemaKeywordElements(queries);
		log.debug("schema keyword elements " + keywordElements);
		Set<String> queriesWithResults = new HashSet<String>();
		for (String keyword : queries) {
			Collection<KeywordElement> hits = keywordElements.get(keyword);
			if (hits == null)
				continue;

			queriesWithResults.add(keyword);

			for (KeywordElement resource : hits) {
				if (resource.getType() == KeywordElement.ATTRIBUTE) {
					Collection<KeywordElement> coll = attributes.get(keyword);
					if (coll == null) {
						coll = new HashSet<KeywordElement>();
						attributes.put(keyword, coll);
					}
					coll.add(resource);
				}
				else if (resource.getType() == KeywordElement.CONCEPT || resource.getType() == KeywordElement.RELATION) {
					Collection<KeywordElement> coll = conceptsAndRelations.get(keyword);
					if (coll == null) {
						coll = new HashSet<KeywordElement>();
						conceptsAndRelations.put(keyword, coll);
					}
					coll.add(resource);
				}
			}
		}

		return queriesWithResults;
	}

	/**
	 * Runs one schema-index query per keyword and collects the matching
	 * elements; keywords without results are omitted from the returned map.
	 */
	private Map<String, Collection<KeywordElement>> getSchemaKeywordElements(Collection<String> queries) throws IOException {
		StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
		QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, Constant.SCHEMA_FIELD, analyzer);
		parser.setDefaultOperator(QueryParser.AND_OPERATOR);

		Map<String, Collection<KeywordElement>> keywords = new HashMap<String, Collection<KeywordElement>>();
		for (String keyword : queries) {
			Query q;
			try {
				q = parser.parse(keyword);
			} catch (ParseException e) {
				// an unparsable keyword simply yields no schema matches
				log.warn("could not parse schema query for keyword '" + keyword + "'", e);
				continue;
			}

			Collection<KeywordElement> tmp = searchSchemaWithClause(q, keyword);

			if (tmp != null && tmp.size() > 0)
				keywords.put(keyword, tmp);
		}

		return keywords;
	}

	/**
	 * Executes a schema query and converts hits above {@link #SCHEMA_THRESHOLD}
	 * into keyword elements; per element type only the top-scoring hit is kept.
	 */
	private Collection<KeywordElement> searchSchemaWithClause(Query clause, String keyword) throws IOException {
		Collection<KeywordElement> result = new HashSet<KeywordElement>();
		TopScoreDocCollector collector = TopScoreDocCollector.create(MAX_KEYWORDRESULT_SIZE, false);

		searcher.search(clause, collector);
		ScoreDoc[] hits = collector.topDocs().scoreDocs;

		for (int i = 0; i < hits.length; i++) {
			int docId = hits[i].doc;
			Document doc = searcher.doc(docId);

			float score = hits[i].score;

			// hits are sorted by descending score, so once a hit falls below
			// the threshold all remaining ones do as well
			if (score < SCHEMA_THRESHOLD)
				break;

			String type = doc.get(Constant.TYPE_FIELD);
			if (type == null) {
				log.error("type is null!");
				continue;
			}

			KeywordElement ele = null;
			if (type.equals(TypeUtil.CONCEPT)) {
				INamedConcept con = new NamedConcept(pruneString(doc.get(Constant.URI_FIELD)), doc.get(Constant.EXTENSION_FIELD));
				ele = new KeywordElement(con.getExtension(), KeywordElement.CONCEPT, score);
				ele.concepts.add(con);
			}
			else if (type.equals(TypeUtil.RELATION)) {
				IRelation rel = new Relation(pruneString(doc.get(Constant.URI_FIELD)));
				ele = new KeywordElement(rel.getUri(), KeywordElement.RELATION, score);
				ele.relations.add(rel);
			}
			else if (type.equals(TypeUtil.ATTRIBUTE)) {
				IAttribute attr = new Attribute(pruneString(doc.get(Constant.URI_FIELD)));
				ele = new KeywordElement(attr.getUri(), KeywordElement.ATTRIBUTE, score);
				ele.attributes.add(attr);
			}

			if (ele != null) {
				// keep at most one element per type: the one with the best score
				KeywordElement topScoredElement = null;
				for (KeywordElement keywordElement : result) {
					if (keywordElement.getType() == ele.getType()) {
						topScoredElement = keywordElement;
					}
				}

				if (topScoredElement != null) {
					if (topScoredElement.getMatchingScore() < ele.getMatchingScore()) {
						result.remove(topScoredElement);
						result.add(ele);
					}
				} else {
					result.add(ele);
				}
			}
		}

		return result;
	}

	/**
	 * Builds one OR query over all keywords against the value field and
	 * delegates to {@link #searchEntitiesWithClause}.
	 */
	private void searchEntitiesByValues(List<String> keywords, Map<KeywordSegment, Collection<KeywordElement>> segments2Entities) throws IOException {
		StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
		try {
			QueryParser queryParser = new QueryParser(Version.LUCENE_CURRENT, Constant.DPO_FIELD, analyzer);
			queryParser.setDefaultOperator(QueryParser.OR_OPERATOR);
			StringBuilder query = new StringBuilder();
			for (String keyword : keywords)
				query.append(keyword).append(' ');
			BooleanQuery booleanQuery = new BooleanQuery();
			booleanQuery.add(queryParser.parse(query.toString()), Occur.SHOULD);
			searchEntitiesWithClause(keywords, booleanQuery, segments2Entities);
		} catch (ParseException e) {
			// an unparsable keyword set simply yields no value matches
			log.error("could not parse value query", e);
		}
	}

	/**
	 * Executes the value query and groups the matching entities by the subset
	 * of keywords each hit matches (its segment). Entities of the same
	 * extension, attribute and segment are collapsed into one keyword element
	 * carrying the best score.
	 */
	private void searchEntitiesWithClause(List<String> keywords, BooleanQuery query, Map<KeywordSegment, Collection<KeywordElement>> segments2Entities) throws IOException {
		ScoreDoc[] docHits = getTopValueDocuments(query, MAX_KEYWORDRESULT_SIZE);

		if (docHits.length == 0 || docHits[0] == null)
			return;

		// hits are sorted by descending score, so the first one is the maximum
		maxScore = Math.max(maxScore, docHits[0].score);

		Map<String, Collection<KeywordElement>> segment2Elements = new TreeMap<String, Collection<KeywordElement>>();
		Map<String, Set<String>> ext2inProperties = new HashMap<String, Set<String>>();
		Map<String, Set<String>> ext2outProperties = new HashMap<String, Set<String>>();
		Map<String, KeywordElement> ext2Element = new HashMap<String, KeywordElement>();

		for (int i = 0; i < docHits.length; i++) {
			Document valueDoc = valueReader.document(docHits[i].doc);

			String uri = valueDoc.getFieldable(Constant.ENT_FIELD).stringValue();
			String attributeUri = valueDoc.getFieldable(Constant.TYPE_FIELD).stringValue();
			String ext = valueDoc.getFieldable(Constant.EXT_FIELD).stringValue();

			float score = docHits[i].score;

			// parse the in/out property lists only once per extension and cache them
			Set<String> inProperties = ext2inProperties.get(ext);
			Set<String> outProperties = ext2outProperties.get(ext);

			if (inProperties == null || outProperties == null) {
				inProperties = new HashSet<String>();
				addNewlineSeparatedValues(valueDoc.getFieldable(Constant.IN_PROPERTIES_FIELD), inProperties);
				outProperties = new HashSet<String>();
				addNewlineSeparatedValues(valueDoc.getFieldable(Constant.OUT_PROPERTIES_FIELD), outProperties);

				ext2inProperties.put(ext, inProperties);
				// BUG FIX: the out-properties were previously put into
				// ext2inProperties, overwriting the in-properties entry and
				// leaving ext2outProperties empty, which defeated the cache.
				ext2outProperties.put(ext, outProperties);
			}

			TermFreqVector termFreqVector = valueReader.getTermFreqVector(docHits[i].doc, Constant.DPO_FIELD);
			IEntity entity = new Entity(pruneString(uri), ext, termFreqVector.getTerms(), termFreqVector.getTermFrequencies());
			List<String> terms = Arrays.asList(entity.getTerms());

			// the segment key encodes which of the query keywords this document matches
			StringBuilder segmentBuilder = new StringBuilder();
			for (String keyword : keywords) {
				if (terms.contains(keyword.toLowerCase()))
					segmentBuilder.append(':').append(keyword);
			}
			String segment = segmentBuilder.toString();

			String elementKey = ext + "__" + attributeUri + segment;
			KeywordElement schemaElement = ext2Element.get(elementKey);
			if (schemaElement == null) {
				schemaElement = new KeywordElement(ext, KeywordElement.ENTITY, (double)score);
				schemaElement.setElementType(attributeUri);
				ext2Element.put(elementKey, schemaElement);
				schemaElement.addOutProperties(outProperties);
				schemaElement.addInProperties(inProperties);
			} else {
				// keep the best score seen for this (extension, attribute, segment)
				schemaElement.setMatchingScore(Math.max(schemaElement.getMatchingScore(), (double)score));
			}

			schemaElement.entities.add(entity);

			Collection<KeywordElement> elements = segment2Elements.get(segment);
			if (elements == null) {
				elements = new HashSet<KeywordElement>();
				segment2Elements.put(segment, elements);
			}
			elements.add(schemaElement);
		}

		for (String segment : segment2Elements.keySet()) {
			// re-split the ":"-joined keywords into a KeywordSegment
			StringTokenizer st = new StringTokenizer(segment, ":");
			Collection<String> segmentKeywords = new HashSet<String>();
			while (st.hasMoreTokens())
				segmentKeywords.add(st.nextToken());
			KeywordSegment keywordSegment = new KeywordSegment(segmentKeywords);
			segments2Entities.put(keywordSegment, segment2Elements.get(segment));
			for (KeywordElement keywordElement : segment2Elements.get(segment))
				keywordElement.setKeywordSegment(keywordSegment);
		}
	}

	/**
	 * Loads the in/out property lists of the given entity from the keyword
	 * index into the supplied sets. No-op if the entity is not indexed.
	 *
	 * @param entity the entity URI to look up
	 * @param inProperties receives the entity's incoming property URIs
	 * @param outProperties receives the entity's outgoing property URIs
	 * @throws StorageException if the index cannot be read
	 */
	public void getProperties(String entity, Set<String> inProperties, Set<String> outProperties) throws StorageException {
		Set<String> loadFieldNames = new HashSet<String>();
		loadFieldNames.add(Constant.IN_PROPERTIES_FIELD);
		loadFieldNames.add(Constant.OUT_PROPERTIES_FIELD);
		Set<String> lazyFieldNames = new HashSet<String>();
		SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(loadFieldNames, lazyFieldNames);

		try {
			TermDocs td = reader.termDocs(new Term(Constant.URI_FIELD, entity));
			try {
				if (!td.next())
					return;

				Document doc = reader.document(td.doc(), fieldSelector);

				addNewlineSeparatedValues(doc.getFieldable(Constant.IN_PROPERTIES_FIELD), inProperties);
				addNewlineSeparatedValues(doc.getFieldable(Constant.OUT_PROPERTIES_FIELD), outProperties);
			} finally {
				// TermDocs holds index resources and must always be released
				td.close();
			}
		} catch (CorruptIndexException e) {
			throw new StorageException(e);
		} catch (IOException e) {
			throw new StorageException(e);
		}
	}

	/**
	 * Splits the field's newline-separated value and adds the trimmed tokens
	 * to {@code target}. A null field is treated as "no values".
	 */
	private void addNewlineSeparatedValues(Fieldable field, Set<String> target) {
		if (field == null)
			return;
		String s = field.stringValue();
		StringSplitter splitter = new StringSplitter(s, "\n");
		while ((s = splitter.next()) != null)
			target.add(s.trim());
	}

	/** Removes all double quotes from the stored string value. */
	private String pruneString(String str) {
		return str.replaceAll("\"", "");
	}

	/** Runs the query against the value index and returns the top hits. */
	private ScoreDoc[] getTopValueDocuments(Query q, int top) throws IOException {
		TopScoreDocCollector collector = TopScoreDocCollector.create(top, true);
		valueSearcher.search(q, collector);
		ScoreDoc[] docs = collector.topDocs().scoreDocs;
		log.debug(q + " " + docs.length);
		return docs;
	}

	private void resetMaxScore() {
		maxScore = 1.0;
	}

	/**
	 * Interactive console client: reads keyword lines from stdin and prints
	 * the matching elements per segment.
	 */
	public static void main(String[] args) throws Exception {
		OptionParser op = new OptionParser();
		op.accepts("o", "output directory")
			.withRequiredArg().ofType(String.class).describedAs("directory");

		OptionSet os = op.parse(args);

		if (!os.has("o")) {
			op.printHelpOn(System.out);
			return;
		}

		String directory = (String)os.valueOf("o");

		// open the indexes once, instead of re-opening them for every input line
		edu.unika.aifb.graphindex.index.IndexReader ir = new edu.unika.aifb.graphindex.index.IndexReader(new IndexDirectory(directory));
		KeywordSearcher searcher = new KeywordSearcher(ir);

		Scanner scanner = new Scanner(System.in);
		while (true) {
			System.out.println("Please input the keywords:");
			String line = scanner.nextLine();

			Map<KeywordSegment, Collection<KeywordElement>> results = searcher.searchKeywordElements(getKeywordList(line));
			int i = 0;
			for (KeywordSegment ks : results.keySet()) {
				System.out.println(++i + ": " + ks);
				for (KeywordElement ke : results.get(ks)) {
					System.out.println(ke.getUri() + " | " + ke.getMatchingScore());
				}
				System.out.println();
			}
		}
	}

	/**
	 * Tokenizes an input line into keywords. Whitespace separates keywords,
	 * except inside double quotes, which group a multi-word phrase into a
	 * single keyword (the quotes themselves are stripped).
	 *
	 * @param line the raw input line
	 * @return the keywords in input order
	 */
	public static LinkedList<String> getKeywordList(String line) {
		LinkedList<String> keywords = new LinkedList<String>();

		boolean inQuotes = false;
		StringBuilder current = new StringBuilder();
		for (int i = 0; i < line.length(); i++) {
			char c = line.charAt(i);
			if (c == '"') {
				if (inQuotes) {
					// closing quote: emit the quoted phrase (even if empty,
					// matching the original behavior)
					keywords.add(current.toString());
					current.setLength(0);
				}
				inQuotes = !inQuotes;
			} else if (c == ' ' && !inQuotes) {
				// unquoted space terminates the current keyword, if any
				if (current.length() > 0) {
					keywords.add(current.toString());
					current.setLength(0);
				}
			} else {
				current.append(c);
			}
		}
		if (current.length() > 0)
			keywords.add(current.toString());

		return keywords;
	}
}
