package com.morphiastic.tests.fieldbridges;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.lang.reflect.AccessibleObject;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.Map;

import org.elasticsearch.common.xcontent.XContentBuilder;

import com.morphiastic.ElasticStore;
import com.morphiastic.IndexedFieldBridge;
import com.morphiastic.annotations.Analyze;
import com.morphiastic.annotations.Store;
import com.morphiastic.annotations.TermVector;

public class GeonameFeatureCodeExpanderFieldBridge extends  IndexedFieldBridge{

	
public static String featureNameFieldPrefix = "feat";
	
	
	//hash map in the form <String:featurecode, HashMap<String:language, String:name in language>>
	public static LinkedHashMap<String, HashMap<String,String>> featureNames = new  LinkedHashMap<String, HashMap<String,String>>();
	
	static LinkedList<String> langCodes = new LinkedList<String>();
	static LinkedList<String> analyzers = new LinkedList<String>();
	
	static{
		
	}
	
	
	
	public GeonameFeatureCodeExpanderFieldBridge(){
		System.out.println("GeonameFeatureCodeExpanderFieldBridge constructor hit ");
		try {
			//loads the geonameFeatureCodeNames.csv file into the HashMap for easy retrieval
			
			String basePath = "E:\\dataimports\\";
			String featureCodesFilePath = basePath+"geonames_featureCode_intl.csv";
			BufferedReader fh;
		
				fh = new BufferedReader(new InputStreamReader(
						new FileInputStream(featureCodesFilePath), "UTF8"));
			
			
			// Escape the first line
			String langNames = fh.readLine(); //not used for now
			String langCodesRaw = fh.readLine();
			String analyzerUsedRaw = fh.readLine();
			
			String[] langCodesSplit = langCodesRaw.split("\t");
			for(int i = 1;i<langCodesSplit.length;i++){
				langCodes.add(langCodesSplit[i]);
			}
			
			String[] analyzersSplit = analyzerUsedRaw.split("\t");
			for(int i = 1;i<analyzersSplit.length;i++){
				analyzers.add(analyzersSplit[i]);
			}
			
			
			if(langCodes.size() != analyzers.size())throw new IllegalArgumentException("analyzers and lang codes dont match");
			
			String s;
			while ((s = fh.readLine()) != null) {
				String[] lineSplit = s.split("\t");
				
				String code = lineSplit[0];
				
				HashMap<String,String> categoryTranslations = new HashMap<String, String>();
				for(int i = 1; i<lineSplit.length; i++){
					String langCode = langCodes.get(i-1);
					categoryTranslations.put(langCode, lineSplit[i]);
				}
				
				featureNames.put(code, categoryTranslations);
			}
		} catch (UnsupportedEncodingException e) {
			// TODO Auto-generated catch block
			e.printStackTrace();
		} catch (FileNotFoundException e) {
			// TODO Auto-generated catch block
			e.printStackTrace();
		} catch (IOException e) {
			// TODO Auto-generated catch block
			e.printStackTrace();
		}
	}
	
	
 

	
	
	public static void main(String[] args){
		
		for (Map.Entry<String, HashMap<String,String>> feature: featureNames.entrySet()) {
			System.out.println(feature.getKey() + " ----  " + feature.getValue());
		}
	}


	 


	@Override
	public void addIndexFieldSettings(XContentBuilder mappingBuilder)
			throws IOException {
		 //Since we want to expand the original field to different tranlations, we want one index field 
		//per translated language. Each field might have a different analyzer
		System.out.println("GeonameFeatureCodeExpanderFieldBridge  addIndexFieldSettings fired :  "   );
		
		//iterate over langCodes and create a new field for each of them
		for(int i = 0 ; i<langCodes.size(); i++){
			
			
			String langCode = langCodes.get(i);
			String indexFieldName = featureNameFieldPrefix + langCode;
			String analyzerName = analyzers.get(i);
			
			System.out.println("doing -->  " + langCode + " indexFieldName -----> " + indexFieldName + " analyzerName ----> " + analyzerName );
			
			mappingBuilder.startObject(indexFieldName);
			mappingBuilder.field("type", "string");
			mappingBuilder.field("store", Store.no.toString());
			mappingBuilder.field("index", Analyze.analyzed.toString());		
			mappingBuilder.field("term_vector", TermVector.no); // term vector is set to NO for now but we should see what we could do with it == maybe find similar docs
			mappingBuilder.field("analyzer", analyzerName);
			mappingBuilder.endObject();
			
			
		}
	}


	
	@Override
	public void addFieldSettingsMap(HashMap<String, Object> props){
		for(int i = 0 ; i<langCodes.size(); i++){
			
			Map<String,Object> m = new HashMap<String, Object>();
			
			String langCode = langCodes.get(i);
			String indexFieldName = featureNameFieldPrefix + langCode;
			String analyzerName = analyzers.get(i);
		
			
			m.put("type","string");
			m.put("store", false);
			m.put("index", Analyze.analyzed.toString());
			m.put("term_vector", TermVector.no);
			m.put("analyzer", analyzerName);
			
			
			//filed mapping is built, add it to the main props map
			props.put(indexFieldName, m );
			
		}
	}
	
	
	@Override
	public XContentBuilder writeIndexedField(XContentBuilder mappingBuilder,
			String fieldName, Object o, ElasticStore es,
			Integer maxCircularRefDepth,
			IdentityHashMap<AccessibleObject, Integer> known)
			throws IOException {
		
		
		//write each language field 
		
		if(o==null)return mappingBuilder;
		
		String featureCode = (String)o;
		HashMap<String, String> namez = featureNames.get(featureCode);
		for(int i = 0 ; i<langCodes.size(); i++){
			String langCode = langCodes.get(i);
			String indexFieldName = featureNameFieldPrefix + langCode;
			String name = namez.get(langCode);
			mappingBuilder.field(indexFieldName, name);
		}
		 
		return mappingBuilder;
	}



	@Override
	public void init(ElasticStore es, HashMap<?, ?> params) {
		
	}

	
	
	
	
}
