package relations;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

import nlp.PortugueseTokenizer;
import nlp.PortugueseVerbNormalizer;

import com.google.common.collect.Collections2;
import com.google.common.collect.HashMultimap;

public class Disambiguate {
	
	//Set of parsed tuples: (id, ent1, rel_phrase, ent2, rel_phrase_normalized)
	static Set<Relation> relations = new HashSet<Relation>();
		
	//sentence id -> sentence text
	static HashMap<Integer,String> sentences = new HashMap<Integer, String>();

	//rel_phrase_normalized -> sentences where it occurs	
	static HashMultimap<String, String> rel_phrase_sentences = HashMultimap.create();
	
	//tokenizer based on Linguateca regular expressions
	static PortugueseTokenizer tokenizer = new PortugueseTokenizer();
	
	//Portuguese stopwords
	static Set<String> stopwords = new HashSet<String>();
	
	/**
	 * Reads tab-separated relation tuples from inputFile and populates
	 * {@link #relations} and {@link #rel_phrase_sentences}.
	 *
	 * Expected columns: id, entity1, relation, entity2, rel_phrase with
	 * "token/POS-tag" pairs separated by whitespace. Verb tokens are
	 * lower-cased, stripped of reflexive clitics and normalized to their
	 * infinitive via PortugueseVerbNormalizer; other tokens are kept as-is.
	 *
	 * NOTE(review): assumes {@link #readSentences(String)} ran first, so
	 * sentences.get(id) is non-null — confirm with callers.
	 *
	 * @param inputFile path to the tab-separated relations file
	 * @throws IOException if the file cannot be read
	 */
	public static void readRelations(String inputFile) throws IOException{		
		BufferedReader br = new BufferedReader(new FileReader(new File(inputFile)));
		try {
			String line;
			int count = 0;
			while ((line = br.readLine()) != null) {
				String parts[] = line.split("\t");
				Integer id = Integer.parseInt(parts[0]);	// sentence id
				String ent1 = parts[1];						// entity1
				String relation = parts[2];					// relation
				String ent2 = parts[3];						// entity2
				String[] rel_phrase_pos = parts[4].split("\\s");
				
				//some "verb" POS classifications are systematically wrong, e.g.:
				//mesmo, ainda, hoje, também, ontem, ao, mais, já, depois,
				//igualmente, agora, apenas, actualmente, amanhã, assim, muito, não
				
				//StringBuilder avoids O(n^2) string concatenation in the loop
				StringBuilder normalizedPhrase = new StringBuilder();
				for (int i = 0; i < rel_phrase_pos.length; i++) {
					String token_tag[] = rel_phrase_pos[i].split("/");				
					if (token_tag[1].endsWith("verb")) {
						//lower-case and strip reflexive clitics: -me, -te, -se, -nos, -vos
						String verb = token_tag[0].toLowerCase().replaceAll("-me$", "").replaceAll("-te$", "").replaceAll("-se$", "").replaceAll("-nos$", "").replaceAll("-vos$", "").replaceAll("-l?os?$", "").replaceAll("-se-á$", "");
						String normalized = PortugueseVerbNormalizer.normalize(verb);
						//fall back to the original token when normalization fails
						if (normalized != null) normalizedPhrase.append(' ').append(normalized);
						else normalizedPhrase.append(' ').append(token_tag[0]);
					}
					else normalizedPhrase.append(' ').append(token_tag[0]);
				}			
				String rel_phrase_normalized = normalizedPhrase.toString();
				rel_phrase_sentences.put(rel_phrase_normalized, sentences.get(id));		
				relations.add(new Relation(id, ent1, relation, ent2, rel_phrase_normalized));
				if (count % 50000 == 0) System.out.print(".");
				count++;
			}
		} finally {
			//close even if a malformed line throws mid-loop
			br.close();
		}
		System.out.println();
		System.out.println(relations.size() + " relations");
		System.out.println(rel_phrase_sentences.keySet().size() + " normalized relations");
	}
	
	/**
	 * Jaccard similarity between two sentences: tokens are lower-cased and
	 * stopwords removed before computing |intersection| / |union|.
	 *
	 * @param s1 first sentence
	 * @param s2 second sentence
	 * @return similarity in [0,1]; 0 when both sentences reduce to no tokens
	 *         (the original returned NaN from 0/0 in that case)
	 */
	public static float similarity(String s1, String s2){	
		Set<String> sentence1_tokens = new HashSet<String>(Arrays.asList(tokenizer.tokenize(s1)));
		Set<String> sentence2_tokens = new HashSet<String>(Arrays.asList(tokenizer.tokenize(s2)));
		
		Set<String> sentence1_tokens_lower = new HashSet<String>();
		Set<String> sentence2_tokens_lower = new HashSet<String>();
		
		for (String string : sentence1_tokens) sentence1_tokens_lower.add(string.toLowerCase());
		for (String string : sentence2_tokens) sentence2_tokens_lower.add(string.toLowerCase());
		
		sentence1_tokens_lower.removeAll(stopwords);
		sentence2_tokens_lower.removeAll(stopwords);
		
		Set<String> union = new HashSet<String>(sentence1_tokens_lower);
		union.addAll(sentence2_tokens_lower);
		
		//guard against 0/0 -> NaN when both sentences are empty after filtering
		if (union.isEmpty()) return 0f;
		
		Set<String> intersection = new HashSet<String>(sentence1_tokens_lower);
		intersection.retainAll(sentence2_tokens_lower);
		
		return (float) intersection.size() / (float) union.size();
	}
	 
	
	/**
	 * Loads the id -> sentence map from a tab-separated file (id TAB sentence).
	 * Blank lines are skipped; a progress dot is printed every 250000 lines.
	 *
	 * @param sentencesFile path to the sentences file
	 * @throws IOException if the file cannot be read
	 */
	public static void readSentences(String sentencesFile) throws IOException{
		BufferedReader br = new BufferedReader(new FileReader(new File(sentencesFile)));
		int count = 0;
		try {
			String line;
			while ((line = br.readLine()) != null) {
				if (line.length() != 0) {
					String parts[] = line.split("\\t");
					int id = Integer.parseInt(parts[0]);
					sentences.put(id, parts[1]);
					if (count % 250000 == 0) System.out.print(".");
					count++;
				}
			}
		} finally {
			//close even if a malformed line throws mid-loop
			br.close();
		}
		System.out.println( " " + count + " sentences loaded");
	}
	
	/**
	 * Loads stopwords (one per line, lower-cased and trimmed) into
	 * {@link #stopwords}.
	 *
	 * @param inputFile path to the stopwords file
	 * @throws IOException if the file cannot be read
	 */
	public static void loadStopWords(String inputFile) throws IOException {
		BufferedReader br = new BufferedReader(new FileReader(new File(inputFile)));
		try {
			String word;
			while ((word = br.readLine()) != null) {
				stopwords.add(word.toLowerCase().trim());
			}
		} finally {
			//original leaked this reader: it was never closed
			br.close();
		}
	}
	
	//an idea: if entityA occurs many times with entityB and the rel_phrases that connect them are similar (but different verbs) then probably the relations are synonyms
	//each rel_phrase is represented by the sentences where it occurs; apply clustering algorithms
	/**
	 * Entry point: loads resources, aggregates sentences by normalized
	 * relational phrase, and prints phrases occurring in more than 20 sentences
	 * (clustering itself is still a TODO).
	 *
	 * @param sentences     path to the id/sentence file
	 * @param relations     path to the relations file
	 * @param stopWordsFile path to the stopwords file
	 * @throws IOException if any input file cannot be read
	 */
	public static void cluster(String sentences, String relations, String stopWordsFile) throws IOException {
		
		PortugueseVerbNormalizer.initialize();
		tokenizer = new PortugueseTokenizer();
		loadStopWords(stopWordsFile);
		
		/*
		String s1 = "Desde os anos 1920 que se sabia que Universo está a expandir-se em consequência do Big Bang, a explosão cataclísmica primordial que criou o tempo e o espaço há cerca de 15 mil milhões de anos.";		
		String s2 = "A expansão iria mesmo continuar para sempre, até tudo acabar numa imensidão de rochas mortas e geladas?";		
		System.out.println(similarity(s1, s2));
		*/
		
		//aggregate sentences by normalized relationship
		System.out.print("Reading sentences ");
		readSentences(sentences);
		System.out.print("Reading relations ");
		readRelations(relations);
		
		//hierarchical agglomerative clustering
		//TODO: build the all-pairs comparison matrix
		// - for relational phrases with several sentences, use all tokens
		// - what is the ideal threshold: 0.7 ?
		// see this package: http://sape.inf.usi.ch/hac
		
		for (String rel_phrase : rel_phrase_sentences.keySet()) {
			if (rel_phrase_sentences.get(rel_phrase).size()>20) {
				System.out.println(rel_phrase + '\t' + rel_phrase_sentences.get(rel_phrase).size());
				//
			}			
		}		
	}	
}

























