package sis.ra.rapbe;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;

import org.tartarus.snowball.ext.porterStemmer;

import sis.ra.sap.nameentity.ExtractEntity;
import sis.ra.sap.nameentity.InxEntity;
import sis.ra.utility.Config;
import sis.ra.utility.Utils;

public class RelationIdentification {

	/** Root of the INXIGHT LinguistX installation used for tokenization/tagging. */
	private static final String LINGUISTX_PATH =
			"c:\\Program Files\\INXIGHT\\LinguistxPlatform3.7\\lx-3\\";
	/** Platform-specific binary directory under the LinguistX root. */
	private static final String PLATFORM_DIR =
			LINGUISTX_PATH + "windows-2003-i686-msvc-7.1-32\\";
	/**
	 * Scratch file the input text is written to before invoking the tokenizer.
	 * Fix: the original wrote to "test.txt " (trailing space) while the command
	 * read "test.txt"; that only worked because Windows strips trailing spaces.
	 */
	private static final String INPUT_FILE = PLATFORM_DIR + "test.txt";

	/**
	 * Builds the external LinguistX tokenizer command line for the given input format.
	 * Shared by {@link #pos} and {@link #sentence}, which previously duplicated it.
	 * @param htformat input-format switch passed to test_platform.exe (e.g. "text")
	 * @return the complete command string
	 */
	private static String buildTokenizeCommand(String htformat) {
		return "\"" + PLATFORM_DIR + "test_platform.exe\"" +
				" -k 79565b494953591a735442535d524e1a7653595f54495f49 " +
				" -tokenize " +
				"-d " + "\"" + LINGUISTX_PATH + "lang\"" +
				" -" + htformat + " " + "\"" + INPUT_FILE + "\" ";
	}

	/** Closes a reader, swallowing any exception; for use in finally blocks only. */
	private static void closeQuietly(BufferedReader r) {
		if (r != null) {
			try {
				r.close();
			} catch (IOException ignored) {
				// best effort: nothing useful to do if close fails
			}
		}
	}

	/**
	 * Part-of-speech tagging via the external LinguistX tokenizer: extracts each
	 * token as token-start-offset -&gt; wordindex (word + POS tag + sentence number).
	 * Expected tokenizer output per token:
	 * <pre>
	 *   In	  (alphabetic)	start=0	end=2
	 *   a	  (alphabetic)	start=3	end=4
	 * </pre>
	 * @param text     the raw text to tag (written to a scratch file first)
	 * @param htformat input format switch for the tokenizer (e.g. "text")
	 * @return map of token start offset to its wordindex; empty on failure
	 */
	public static HashMap<Integer, wordindex> pos (String text, String htformat)
	{
		Utils.writeFile(INPUT_FILE, text);
		HashMap<Integer, wordindex> sentenceindex = new HashMap<Integer, wordindex>();
		ArrayList<wordindex> sentenceindexArray = new ArrayList<wordindex>();
		String cmd = buildTokenizeCommand(htformat);
		BufferedReader stdInput = null;
		BufferedReader stdError = null;
		try {
			System.out.println(cmd);
			Process p = Runtime.getRuntime().exec(cmd);
			int sentenceStart = 0;
			// Index into sentenceindexArray while consuming the tags section; the
			// tokenizer emits tags in the same order it emitted tokens.
			int tagCount = 0;
			stdInput = new BufferedReader(new InputStreamReader(p.getInputStream()));
			stdError = new BufferedReader(new InputStreamReader(p.getErrorStream()));
			String s;
			while ((s = stdInput.readLine()) != null)
			{
				// ----- token section: word + character offsets -----
				if (s.contains("segment tokenization -- tokens:"))
				{
					// Null-guard added: the original dereferenced readLine() directly
					// and would NPE if the stream ended before the closing marker.
					while ((s = stdInput.readLine()) != null
							&& !s.contains("segment tokenization -- normalized tokens:"))
					{
						// Sentence header, e.g. "sentence:\tstart=12 end=98":
						// remember the start offset as the sentence id for its tokens.
						if (s.trim().startsWith("sentence:	start="))
						{
							int end = s.trim().indexOf("end=") - 1;
							String t = s.trim().substring(16, end).trim();
							sentenceStart = Integer.parseInt(t);
						}
						// Token lines are indented exactly two spaces and carry offsets.
						if (s.startsWith("  ") && s.contains("start=") && s.contains("end=")
								&& !s.startsWith("     "))
						{
							String[] t = s.split("\t");
							if (t.length < 4) continue;
							// t[2] is "start=NNN"; strip the 6-char prefix.
							int startOffset = Integer.parseInt(t[2].substring(6).trim());
							wordindex wi = new wordindex("", startOffset, "", "", t[0].trim());
							wi.setSentNo(sentenceStart + "");
							sentenceindex.put(startOffset, wi);
							sentenceindexArray.add(wi);
						}
					}
					if (s == null) break; // EOF inside the token section
				}

				// ----- tags section: POS tag per token, in token order -----
				if (s.contains("segment tokenization -- tags:"))
				{
					while ((s = stdInput.readLine()) != null
							&& !s.contains("The character encoding of this segment is"))
					{
						if (s.startsWith("  ") && s.trim().length() > 2)
						{
							// Bounds guards added: the original could throw
							// IndexOutOfBounds on extra tag lines or a missing tab.
							String[] t = s.split("\t");
							if (tagCount < sentenceindexArray.size() && t.length > 1)
							{
								sentenceindexArray.get(tagCount).setPos(t[1].trim());
							}
							tagCount++;
						}
					}
					if (s == null) break; // EOF inside the tags section
				}
			}
			// Drain stderr so the child process cannot block on a full pipe
			// (the original created the reader but never read from it).
			while (stdError.readLine() != null) { /* discard */ }
		}
		catch (Exception e) {
			e.printStackTrace();
		}
		finally {
			closeQuietly(stdInput);
			closeQuietly(stdError);
		}
		return sentenceindex;
	}

	/**
	 * Extracts sentence boundaries via the external tokenizer.
	 * @param text     the raw text (written to a scratch file first)
	 * @param htformat input format switch for the tokenizer (e.g. "text")
	 * @return map of sentence start offset to its end offset
	 * @throws IOException if the external process cannot be started or read
	 */
	public static HashMap<Integer, Integer > sentence (String text, String htformat) throws IOException
	{
		Utils.writeFile(INPUT_FILE, text);
		HashMap<Integer, Integer> sentenceindex = new HashMap<Integer, Integer>();
		String cmd = buildTokenizeCommand(htformat);
		System.out.println(cmd);
		Process p = Runtime.getRuntime().exec(cmd);
		BufferedReader stdInput = new BufferedReader(new InputStreamReader(p.getInputStream()));
		BufferedReader stdError = new BufferedReader(new InputStreamReader(p.getErrorStream()));
		try {
			String s;
			while ((s = stdInput.readLine()) != null)
			{
				// Boundary lines look like "sentence:\tstart=12 end=98".
				if (s.trim().startsWith("sentence:	start="))
				{
					String st = s.substring(s.indexOf("start=") + 6, s.indexOf("end=")).trim();
					// trim() added: trailing whitespace would break parseInt.
					String ed = s.substring(s.indexOf("end=") + 4).trim();
					sentenceindex.put(Integer.parseInt(st), Integer.parseInt(ed));
				}
			}
			// Drain stderr so the child process cannot block on a full pipe.
			while (stdError.readLine() != null) { /* discard */ }
		} finally {
			closeQuietly(stdInput);
			closeQuietly(stdError);
		}
		return sentenceindex;
	}

	/**
	 * Groups named entities by the sentence that contains them.
	 * Result structure: sentence start offset -&gt; (entity offset -&gt; "surface\tTYPE").
	 * @param content  the document text to run entity extraction on
	 * @param sentence sentence start offset -&gt; end offset (from {@link #sentence})
	 * @return per-sentence entity maps; entities of uninteresting types are skipped
	 */
	public static  HashMap<Integer, HashMap<Integer, String>>  entities (String content, HashMap<Integer, Integer > sentence )
	{
		HashMap<Integer, HashMap<Integer, String>> entitiess =
				new HashMap<Integer, HashMap<Integer, String>>();
		String xmlne = ExtractEntity.getNE(content);
		ArrayList<InxEntity> entities = ExtractEntity.parseIMSXMLContentList(xmlne);
		for (int i = 0; i < entities.size(); i++)
		{
			InxEntity entity = entities.get(i);
			String type = entity.getEntityType();
			// Only company / location / misc-proper entities participate in relations.
			if (!type.equals("COMPANY") && !type.equals("CITY") && !type.equals("COUNTRY")
					&& !type.equals("STATE") && !type.equals("PROP_MISC"))
				continue;
			String surface = entity.getSurface();
			Integer offset = Integer.parseInt(entity.getOffset());
			// Find the sentence containing this entity by character offset.
			// NOTE(review): the comparison is strict (offset > start), so an entity
			// sitting exactly at a sentence start lands in the default bucket 0 --
			// preserved from the original; confirm whether that is intended.
			Integer sentno = 0;
			for (Iterator<Integer> sts = sentence.keySet().iterator(); sts.hasNext();)
			{
				Integer start = sts.next();
				Integer end = sentence.get(start);
				if (offset < end && offset > start)
				{
					sentno = start;
				}
			}
			// Get-or-create the per-sentence map; the original's remove()+put() of
			// the same reference was redundant and has been dropped.
			HashMap<Integer, String> en = entitiess.get(sentno);
			if (en == null)
			{
				en = new HashMap<Integer, String>();
				entitiess.put(sentno, en);
			}
			en.put(offset, surface + "\t" + type);
		}
		return entitiess;
	}

	/**
	 * Extracts all verbs from the content using POS tagging ({@link #pos}).
	 * Result structure: sentence number -&gt; (token offset -&gt; verb surface form).
	 * @param content the document text
	 * @return per-sentence verb maps
	 */
	public static HashMap<Integer, HashMap<Integer, String>>  verbExtraction (String content)
	{
		HashMap<Integer, HashMap<Integer, String>> verbs =
				new HashMap<Integer, HashMap<Integer, String>>();
		HashMap<Integer, wordindex> words = pos(content, "text");
		for (Iterator<Integer> it = words.keySet().iterator(); it.hasNext();)
		{
			Integer position = it.next();
			wordindex w = words.get(position);
			// Any tag starting with "V" counts as a verb (e.g. VB, VBD, VBZ ...).
			if (w.getPos().startsWith("V"))
			{
				// Parse the sentence number once (the original parsed it four times).
				Integer sentNo = Integer.parseInt(w.getSentNo());
				HashMap<Integer, String> keyele = verbs.get(sentNo);
				if (keyele == null)
				{
					keyele = new HashMap<Integer, String>();
					verbs.put(sentNo, keyele);
				}
				keyele.put(position, w.getWord());
			}
		}
		return verbs;
	}

	/**
	 * Decomposes a document into sentences, each carrying its entities and verbs:
	 * 1. extract all verbs; 2. extract all entities; 3. identify sentence starts;
	 * 4. merge verbs + entities per sentence.
	 * Result structure: sentence id -&gt; (offset -&gt; verb word or "surface\tTYPE").
	 * @param content the document text
	 * @return per-sentence merged maps of verbs and entities
	 * @throws IOException if sentence boundary detection fails
	 */
	public static HashMap<Integer, HashMap<Integer, String>> sentenceExtraction (String content) throws IOException
	{
		// Start from the verb map and merge the entity lists into it.
		HashMap<Integer, HashMap<Integer, String>> sentences = verbExtraction(content);
		HashMap<Integer, Integer> sentence = sentence(content, "text");
		HashMap<Integer, HashMap<Integer, String>> ens = entities(content, sentence);

		for (Iterator<Integer> enit = ens.keySet().iterator(); enit.hasNext();)
		{
			Integer position = enit.next();
			HashMap<Integer, String> lists = ens.get(position);
			HashMap<Integer, String> list = sentences.get(position);
			if (list != null)
			{
				// In-place merge; the original's remove()+put() of the same
				// reference was redundant.
				list.putAll(lists);
			}
			else
			{
				sentences.put(position, lists);
			}
		}
		return sentences;
	}

	/**
	 * Loads a pattern-weight model file and returns stemmed term -&gt; weight.
	 * Each line of the file is "term\tscore".
	 * @param relation kept for API compatibility; not used by the lookup
	 * @param patten   "cldw" selects the CL model, anything else the CP model
	 * @return map of Porter-stemmed term to its weight
	 */
	public static HashMap<String, Double> patternWeight(String relation, String patten)
	{
		HashMap<String, Double> patternWeight = new HashMap<String, Double>();
		String weightfile = Config.getProjPath() + "\\src\\sis\\ra\\rapbe\\model\\cp.dw";
		if (patten.equals("cldw"))
			weightfile = Config.getProjPath() + "\\src\\sis\\ra\\rapbe\\model\\cl.dw";
		String content = Utils.readFile(weightfile);
		String[] lines = content.split("\n");
		// Hoisted out of the loop: one stemmer instance serves every line.
		porterStemmer ps = new porterStemmer();
		for (int i = 0; i < lines.length; i++)
		{
			String[] parts = lines[i].split("\t");
			// Skip blank/malformed lines (e.g. a trailing newline); the original
			// threw ArrayIndexOutOfBounds here.
			if (parts.length < 2) continue;
			Double score = Double.parseDouble(parts[1]);
			ps.setCurrent(parts[0]);
			ps.stem();
			patternWeight.put(ps.getCurrent(), score);
		}
		return patternWeight;
	}

	/** @return the Porter stem of the given verb. */
	private static String stem(String verb)
	{
		porterStemmer ps = new porterStemmer();
		ps.setCurrent(verb);
		ps.stem();
		return ps.getCurrent();
	}

	/**
	 * @param entry a sentence-map value: either a bare verb word or "surface\tTYPE"
	 * @return the entity type, or "V" when the entry is a bare verb
	 */
	private static String entryType(String entry)
	{
		String[] parts = entry.split("\t");
		return parts.length > 1 ? parts[1] : "V";
	}

	/** @return the surface form of a sentence-map entry (text before the first tab). */
	private static String entrySurface(String entry)
	{
		return entry.split("\t")[0];
	}

	/**
	 * Decides whether one sentence contains a COMPANY + VERB + ENTITY2 pattern.
	 * Scans tokens in document order: for each COMPANY, looks for a following verb,
	 * then a following second entity. PROP_MISC yields a "CP" relation scored by the
	 * CP model; CITY/COUNTRY/STATE/LOCATION yields "CL" scored by the CL model.
	 * A match whose stemmed verb is absent from the model is skipped.
	 * @param lists one sentence's map: offset -&gt; verb word or "surface\tTYPE"
	 * @return the first scored Relation found, or null when the sentence has none
	 */
	public static Relation isRelation(HashMap<Integer, String> lists)
	{
		HashMap<String, Double> cp = patternWeight("relation", "cpdw");
		HashMap<String, Double> cl = patternWeight("relation", "cldw");
		Relation relation = new Relation();

		// Sort offsets so tokens are visited in document order.
		Object[] key = lists.keySet().toArray();
		Arrays.sort(key);

		for (int i = 0; i < key.length; i++)
		{
			String entry = lists.get(Integer.parseInt(key[i].toString()));
			if (!entryType(entry).equals("COMPANY")) continue;
			String company = entrySurface(entry);

			// Scan forward for a verb following the company mention.
			for (int ii = i; ii < key.length; ii++)
			{
				String verbEntry = lists.get(Integer.parseInt(key[ii].toString()));
				if (!entryType(verbEntry).equals("V")) continue;
				String verb = entrySurface(verbEntry);

				// Scan forward for the second entity following the verb.
				for (int iii = ii; iii < key.length; iii++)
				{
					String tailEntry = lists.get(Integer.parseInt(key[iii].toString()));
					String tailType = entryType(tailEntry);
					String tail = entrySurface(tailEntry);

					if (tailType.equals("PROP_MISC"))
					{
						relation.setEntity1(company);
						relation.setEntity2(tail);
						relation.setRelation("CP");
						String term = stem(verb);
						// Verb not in the CP model: keep scanning for another match.
						if (!cp.containsKey(term)) continue;
						relation.setSig(cp.get(term));
						return relation;
					}

					if (tailType.equals("CITY") || tailType.equals("COUNTRY")
							|| tailType.equals("STATE") || tailType.equals("LOCATION"))
					{
						relation.setEntity1(company);
						relation.setEntity2(tail);
						relation.setRelation("CL");
						String term = stem(verb);
						// Verb not in the CL model: keep scanning for another match.
						if (!cl.containsKey(term)) continue;
						relation.setSig(cl.get(term));
						return relation;
					}
				}
			}
		}
		return null;
	}

	/**
	 * Aggregates per-sentence relations over the whole document: relations with the
	 * same (entity1, entity2, relation) triple have their significance scores summed.
	 * @param sentence per-sentence maps from {@link #sentenceExtraction}
	 * @return Relation objects mapped to their aggregated score
	 */
	public static HashMap <Relation, Double> relations(HashMap<Integer, HashMap<Integer, String>> sentence)
	{
		// Accumulate by "entity1\tentity2\trelation" key so duplicates sum.
		HashMap<String, Double> relations = new HashMap<String, Double>();
		HashMap<Relation, Double> rels = new HashMap<Relation, Double>();
		for (Iterator<Integer> enit = sentence.keySet().iterator(); enit.hasNext();)
		{
			Integer sentno = enit.next();
			Relation relation = isRelation(sentence.get(sentno));
			if (relation == null) continue;
			// Build the composite key once (the original rebuilt it three times).
			String key = relation.getEntity1() + "\t" + relation.getEntity2()
					+ "\t" + relation.getRelation();
			Double prev = relations.get(key);
			relations.put(key, prev == null ? relation.getSig() : prev + relation.getSig());
		}

		// Materialize aggregated scores back into Relation objects.
		// NOTE(review): Relation instances are used as HashMap keys; confirm the
		// class overrides equals/hashCode, otherwise every instance is distinct
		// (same as the original behavior).
		for (Iterator<String> it = relations.keySet().iterator(); it.hasNext();)
		{
			String cur = it.next();
			Double score = relations.get(cur);
			String[] parts = cur.split("\t");
			Relation rel = new Relation();
			rel.setEntity1(parts[0]);
			rel.setEntity2(parts[1]);
			rel.setRelation(parts[2]);
			rel.setSig(score);
			rels.put(rel, score);
		}
		return rels;
	}

	/**
	 * @return the classpath location of this class's package.
	 *         NOTE(review): getResource("") may return null when loaded from a jar
	 *         without directory entries -- callers would then see an NPE (preserved).
	 */
	public String getPath()
	{
		return this.getClass().getResource("").getPath();
	}

	/**
	 * Smoke test: extracts and prints all scored relations from a sample document.
	 */
	public static void main(String[] args) throws IOException {
		String testingfile = Config.getProjPath() + "/src/sis/ra/rapbe/Translation.txt";
		String content = Utils.readFile(testingfile);
		HashMap<Integer, HashMap<Integer, String>> sent = sentenceExtraction(content);
		HashMap<Relation, Double> relations = relations(sent);
		System.out.println(relations.size());
		for (Iterator<Relation> it = relations.keySet().iterator(); it.hasNext();)
		{
			Relation cur = it.next();
			Double score = relations.get(cur);
			System.out.println(cur.getEntity1() + "\t" + cur.getEntity2()
					+ "\t" + cur.getRelation() + "\t" + score);
		}
	}
}
