package entityopinions;

import com.aliasi.chunk.Chunk;
import com.aliasi.chunk.Chunker;
import com.aliasi.chunk.Chunking;
import com.aliasi.classify.BaseClassifierEvaluator;
import com.aliasi.classify.Classification;
import com.aliasi.classify.Classified;
import com.aliasi.classify.ConditionalClassification;
import com.aliasi.classify.DynamicLMClassifier;
import com.aliasi.classify.JointClassification;
import com.aliasi.classify.JointClassifier;
import com.aliasi.classify.JointClassifierEvaluator;
import com.aliasi.classify.RankedClassifierEvaluator;
import com.aliasi.classify.ScoredClassifier;
import com.aliasi.classify.ScoredClassifierEvaluator;
import com.aliasi.lm.NGramProcessLM;
import com.aliasi.tokenizer.EnglishStopTokenizerFactory;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.TokenLengthTokenizerFactory;
import com.aliasi.util.AbstractExternalizable;
import com.aliasi.util.BoundedPriorityQueue;
import com.aliasi.util.Files;
import com.aliasi.util.ScoredObject;
import com.sleepycat.je.DatabaseException;
import entityopinions.ComputePMI.Kernel;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.net.URL;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import telex.LEXEvaluator;
import telex.QueryExtractor;
import telex.QueryRouter;
import telex.QueryRunner;
import telex.QuerySorter;

/**
 * Entity recognition combined with opinion/polarity classification.
 * Uses LingPipe character language-model classifiers for polarity and
 * subjectivity, an external n-gram query pipeline (telex.*) for unsupervised
 * entity recognition, and PMI-based polarity assignment via ComputePMI.
 */
public class EntityOpinion {

    File mPolarityDir;          // root of the polarity training data (one sub-directory per category)
    File mSubjectivityDir;      // root of the subjectivity training data ("plot"/"quote" sentence files)
    String[] mCategories;       // polarity category names (presumably the sub-dirs of mPolarityDir — see commented-out ctor code)
    DynamicLMClassifier<NGramProcessLM> mClassifier;             // character n-gram LM classifier for polarity
    DynamicLMClassifier<NGramProcessLM> mSubjectivityClassifier; // character n-gram LM classifier for subjectivity
    Cache cache;                // count cache handed to ComputePMI in entitiesAndSentiment
    
    // No-arg constructor: leaves all classifiers and the cache uninitialised.
    EntityOpinion(){
    }

    // Constructs an instance around an externally created count cache.
    EntityOpinion(Cache c) throws DatabaseException{
        cache = c;
    }

    /*
     * Constructor used by main(): opens the count cache from a hard-coded
     * path. NOTE(review): the cache directory is machine-specific — TODO make
     * it configurable. The polarity/subjectivity training setup below is
     * currently disabled (commented out).
     */
    EntityOpinion(String args[]) throws ClassNotFoundException, Exception, IOException {
        File f = new File("/home/dsantos/entityopinionsdropbox/entityTesting/cache");
        cache = new Cache(f, 100);
        //mPolarityDir = new File(args[0]);
        //mSubjectivityDir = new File(args[1]);
        //mCategories = mPolarityDir.list();
        int nGram = 8; // n-gram order for the (currently disabled) polarity model
        /* builds the polarity classifier over the categories of mPolarityDir
           using a character language model of up to nGram-grams */
        //mClassifier = DynamicLMClassifier.createNGramProcess(mCategories,nGram);
        //builds the subjectivity classifier
        //mSubjectivityClassifier = trainSubjectivity();
    }

    /**
     * Tells whether a dataset file belongs to the training split.
     * Files whose third name character is '9' (the cv9xx cross-validation
     * fold) are reserved for testing; everything else trains.
     *
     * @param file dataset file to test
     * @return true if the file is part of the training split
     */
    boolean isTrainingFile(File file) {
        String name = file.getName();
        // a name shorter than three characters cannot be a cv9xx test file;
        // treat it as training data instead of throwing StringIndexOutOfBoundsException
        return name.length() < 3 || name.charAt(2) != '9';
    }

    /*
     * Trains the subjectivity classifier from the sentence files of the two
     * categories "plot" (objective) and "quote" (subjective), using a
     * character language model of up to nGram-grams. Only the first 90% of
     * each file's sentences are used for training.
     */
    DynamicLMClassifier<NGramProcessLM> trainSubjectivity() throws IOException {
        int numTrainingChars = 0;
        int nGram = 8;
        System.out.println("\nTraining Subjectivity Model.");
        String mCategories2[] = {"plot", "quote"};
        // subjectivity classifier over the two categories
        DynamicLMClassifier<NGramProcessLM> mc = DynamicLMClassifier.createNGramProcess(mCategories2, nGram);
        for (String category : mCategories2) {
            Classification classification = new Classification(category);
            File file = new File(mSubjectivityDir, category + ".tok.gt9.5000");
            String data = Files.readFromFile(file, "ISO-8859-1");
            // one sentence per line
            String[] sentences = data.split("\n");
            // keep 90% of the sentences for training
            int numTraining = (sentences.length * 9) / 10;
            for (int j = 0; j < numTraining; ++j) {
                String sentence = sentences[j];
                numTrainingChars += sentence.length(); // running character total
                mc.handle(new Classified<CharSequence>(sentence, classification));
            }
        }
        System.out.println("  # Training Cases=" + 9000);
        System.out.println("  # Training Chars=" + numTrainingChars);
        return mc;
    }

    /**
     * Trains the polarity classifier: every training file of every category
     * under mPolarityDir is fed to mClassifier.
     *
     * @throws IOException if a category directory cannot be listed or a
     *         training file cannot be read
     */
    void train() throws IOException {
        int numTrainingCases = 0; // number of reviews used for training
        int numTrainingChars = 0; // total characters over all training reviews
        System.out.println("\nTraining Polarity Model.");
        for (int i = 0; i < mCategories.length; ++i) {
            String category = mCategories[i];
            Classification classification = new Classification(category);
            File file = new File(mPolarityDir, category);
            File[] trainFiles = file.listFiles();
            // listFiles() returns null on I/O error or when the path is not a
            // directory; fail with a clear message instead of an NPE
            if (trainFiles == null) {
                throw new IOException("Cannot list category directory: " + file);
            }
            for (int j = 0; j < trainFiles.length; ++j) {
                File trainFile = trainFiles[j];
                if (isTrainingFile(trainFile)) {
                    ++numTrainingCases;
                    String review = Files.readFromFile(trainFile, "ISO-8859-1");
                    numTrainingChars += review.length();
                    mClassifier.handle(new Classified<CharSequence>(review, classification));
                }
            }
        }
        System.out.println("  # Training Cases=" + numTrainingCases);
        System.out.println("  # Training Chars=" + numTrainingChars);
    }

    /**
     * Evaluates the polarity classifier on the held-out (non-training) files:
     * each review is reduced to its most subjective sentences before being
     * classified, and the results are accumulated in a LingPipe evaluator
     * whose report is printed to stdout.
     *
     * @throws IOException if a category directory cannot be listed or a
     *         review file cannot be read
     */
    void evaluate() throws IOException {
        boolean storeInstances = false;
        BaseClassifierEvaluator<CharSequence> evaluator = new BaseClassifierEvaluator<CharSequence>(mClassifier, mCategories, storeInstances);
        for (int i = 0; i < mCategories.length; ++i) {
            File file = new File(mPolarityDir, mCategories[i]);
            File[] files = file.listFiles();
            // listFiles() returns null on I/O error or when the path is not a
            // directory; fail with a clear message instead of an NPE
            if (files == null) {
                throw new IOException("Cannot list category directory: " + file);
            }
            for (int j = 0; j < files.length; ++j) {
                // only evaluate the files that were not used for training
                File testFile = files[j];
                if (!isTrainingFile(testFile)) {
                    String review = Files.readFromFile(testFile, "ISO-8859-1");
                    // keep only the subjective sentences of the review
                    String subjReview = subjectiveSentences(review);
                    Classification classification = mClassifier.classify(subjReview);
                    Classified<CharSequence> classified = new Classified<CharSequence>(subjReview, classification);
                    evaluator.handle(classified);
                }
            }
        }
        System.out.println();
        System.out.println(evaluator.toString());
    }

    /*
     * Returns the sentiment for a sentence: "neutral" when the sentence is
     * objective, otherwise the polarity model's best category.
     *
     * BUG FIX: the original tested category(0) against "quote", but per
     * trainSubjectivity() "plot" is the objective category and "quote" the
     * subjective one (the same convention subjectiveSentences() relies on),
     * so objective sentences are those classified "plot".
     */
    String getSentiment(String data) {
        ConditionalClassification subjClassification = (ConditionalClassification) mSubjectivityClassifier.classify(data);
        if (subjClassification.category(0).equals("plot")) {
            // objective sentence: no opinion expressed
            return "neutral";
        }
        Classification classification = mClassifier.classify(data);
        return classification.bestCategory();
    }

    /*
     * Baseline: reads a text file line by line; for each non-empty sentence
     * classifies its overall sentiment, recognizes the entities in it and
     * annotates every recognized <E> tag with that sentiment. Each resulting
     * sentence (including untouched blank lines) is printed to stdout.
     *
     * FIX: the reader was previously leaked; it is now closed in a finally
     * block even when recognition fails.
     */
    public void entitiesSentimentBaseline(String textPath, String ngramPath) throws Exception{
        File text = new File(textPath);
        BufferedReader brIn = new BufferedReader(new FileReader(text));
        try {
            String sentence;
            while ((sentence = brIn.readLine()) != null) {
                if (!sentence.equals("")) {
                    String sentiment = getSentiment(sentence);
                    sentence = recognizeEntities(ngramPath, sentence);
                    // rewrite each plain <E> tag as <E sentiment="...">; an
                    // annotated tag no longer matches "<E>", so the loop ends
                    for (int index = sentence.indexOf("<E>"); index != -1; index = sentence.indexOf("<E>")) {
                        sentence = sentence.substring(0, index + 2) + " sentiment=\"" + sentiment + "\"" +
                                sentence.substring(index + 2);
                    }
                }
                System.out.println(sentence);
            }
        } finally {
            brIn.close();
        }
    }

    /*
     * Returns the most subjective sentences of a review, joined by newlines.
     * At most MAX_SENTS sentences are considered; sentences are emitted in
     * decreasing subjectivity score, always keeping at least MIN_SENTS and
     * stopping at the first sentence scoring below 0.5 after that.
     */
    String subjectiveSentences(String review) {
        int MIN_SENTS = 5;
        int MAX_SENTS = 25;
        // one sentence per line
        String[] sentences = review.split("\n");
        // bounded queue holding the MAX_SENTS best-scored sentences
        BoundedPriorityQueue<ScoredObject<String>> pQueue = new BoundedPriorityQueue<ScoredObject<String>>(ScoredObject.comparator(), MAX_SENTS);
        for (String sentence : sentences) {
            // score = conditional probability that the sentence is subjective
            // ("quote" category), whichever rank that category came out at
            ConditionalClassification subjClassification = (ConditionalClassification) mSubjectivityClassifier.classify(sentence);
            double subjProb = subjClassification.category(0).equals("quote")
                    ? subjClassification.conditionalProbability(0)
                    : subjClassification.conditionalProbability(1);
            pQueue.offer(new ScoredObject<String>(sentence, subjProb));
        }
        // drain the queue into the result buffer
        StringBuilder reviewBuf = new StringBuilder();
        int kept = 0;
        for (ScoredObject<String> so : pQueue) {
            if (so.score() < .5 && kept >= MIN_SENTS) {
                break;
            }
            reviewBuf.append(so.getObject()).append("\n");
            ++kept;
        }
        return reviewBuf.toString().trim();
    }

    /**
     * Queries a web search API for the hit count of {@code word} (optionally
     * NEAR {@code word2}) and returns it as a double; 0.001 is added (or
     * returned on failure) — presumably to keep later log-based computations
     * finite, TODO confirm against ComputePMI.
     * NOTE(review): the Yahoo BOSS v1 endpoint used here was discontinued —
     * requests will fail until the URL is updated.
     *
     * FIXES: the reader was previously leaked (now closed in finally), and
     * the deprecated {@code new Long(String)} is replaced by
     * {@code Long.parseLong}.
     *
     * @param word  first query term
     * @param word2 optional second term, or null for a single-term query
     * @return the reported hit count plus 0.001, or 0.001 if none was found
     */
    public static double getWebFrequency(String word, String word2) throws Exception {
        String url = "http://boss.yahooapis.com/ysearch/web/v1/" + word + (word2 != null ? "+NEAR+" + word2 : "") + "?appid=px6hSDvV34FDtdJ6HxgRUOqbdMUiuO0QI6JFaI20rPcokg681dooXAV28jS94kPPN1c-&format=xml";
        //String url = "http://api.search.live.net/xml.aspx?Appid=01DA94968CD6209D940B7B3AAFCE950E3DCB9A03&sources=web&query=" + word + ( word2 != null ? "+" + word2 : "");
        BufferedReader input = new BufferedReader(new InputStreamReader(new URL(url).openConnection().getInputStream()));
        try {
            String aux;
            while ((aux = input.readLine()) != null) {
                // Yahoo BOSS format: <web:Total>12345</web:Total>
                if (aux.indexOf("<web:Total>") != -1) {
                    aux = aux.substring(aux.indexOf("<web:Total>") + 11);
                    aux = aux.substring(0, aux.indexOf("</"));
                    return Long.parseLong(aux) + 0.001;
                }
                // Live/Bing format: ... totalhits="12345" ...
                if (aux.indexOf(" totalhits=\"") != -1) {
                    aux = aux.substring(aux.indexOf(" totalhits=\"") + 12);
                    aux = aux.substring(0, aux.indexOf("\""));
                    return Long.parseLong(aux) + 0.001;
                }
            }
        } finally {
            input.close();
        }
        return 0.001;
    }

    /*
     * Unsupervised recognition of entities. The sentence is written to a
     * temporary file and pushed through the external n-gram query pipeline
     * (extract -> sort -> run -> route -> evaluate); the annotated text,
     * with <E>...</E> wrapped around each entity, is read back and returned.
     * NOTE(review): the temp directory path is machine-specific — TODO use a
     * configurable location (e.g. java.io.tmpdir).
     *
     * FIXES: the writer/reader are now closed in finally blocks, and the
     * O(n^2) string concatenation when reading the result back was replaced
     * by a StringBuilder.
     */
    public String recognizeEntities(String ngramPath, String sentence) throws Exception {
        System.out.println("Recognizing entities");
        File auxf = new File("/home/dsantos/entityopinionsdropboxv4/tmp/aux-lex");
        auxf.mkdirs();
        auxf.deleteOnExit();
        File text = new File(auxf, "text.tmp");
        text.deleteOnExit();

        // write the input sentence to text.tmp
        PrintWriter auxp = new PrintWriter(new FileWriter(text));
        try {
            auxp.println(sentence);
        } finally {
            auxp.close();
        }

        File aux1 = new File(auxf, "aux-ngrams-extractor.tmp");
        aux1.deleteOnExit();
        File aux2 = new File(auxf, "aux-ngrams-sorter.tmp");
        aux2.deleteOnExit();
        File aux3 = new File(auxf, "aux-ngrams-runner.tmp");
        aux3.deleteOnExit();
        double tau = 0.35, delta = 1E-8;
        boolean use_scp = false;

        /* writes to aux-ngrams-extractor the queries that must be issued
           against the n-gram collection to identify the entities */
        System.out.println("Extracting queries");
        new QueryExtractor().extract(text.getAbsolutePath(), aux1.getAbsolutePath(), text.getName());
        /* writes to aux-ngrams-sorter the queries in alphabetical order to
           speed up the lookups in the n-gram collection */
        System.out.println("Sorting queries");
        new QuerySorter().sortFile(aux1.getAbsolutePath(), aux2.getAbsolutePath());
        /* writes to aux-ngrams-runner the counts of the queries issued
           against the n-gram collection */
        System.out.println("Running queries");
        new QueryRunner(ngramPath).run(aux2.getAbsolutePath(), aux3.getAbsolutePath());
        new QueryRouter().route(aux3.getAbsolutePath(), ngramPath, 10000000, 0);
        /* identifies the entities, handling the special cases of the first
           word of a sentence and of entity names containing lower-case words
           in the middle */
        System.out.println("Evaluating queries");
        new LEXEvaluator().evaluate(text.getAbsolutePath(), aux3.getAbsolutePath(), ngramPath, use_scp, delta, tau, text.getAbsolutePath());

        // read back the annotated text produced by the pipeline
        BufferedReader br = new BufferedReader(new FileReader(text));
        StringBuilder reviewBuf = new StringBuilder();
        try {
            for (String line = br.readLine(); line != null; line = br.readLine()) {
                reviewBuf.append(line);
            }
        } finally {
            br.close();
        }
        // normalize spacing inside the entity tags
        String review = reviewBuf.toString().replaceAll("<E> ", "<E>").replaceAll(" </E>", "</E>");
        aux3.delete();
        aux2.delete();
        aux1.delete();
        text.delete();
        auxf.delete();
        return review;
    }

    /*
     * Supervised recognition of entities: loads a serialized LingPipe chunker
     * model and wraps every chunk found in the sentence in <E>...</E> tags.
     * NOTE(review): `pos` compensates for the 7 characters ("<E>" + "</E>")
     * inserted per chunk; this assumes chunkSet() iterates the chunks in
     * ascending start-offset order and that chunks do not overlap — TODO
     * confirm against the LingPipe Chunking contract.
     */
    public String recognizeEntitiesSupervised(String modelPath, String sentence) throws Exception {
        File modelFile = new File(modelPath);
        Chunker chunker = (Chunker) AbstractExternalizable.readObject(modelFile);
        Chunking chunking = chunker.chunk(sentence);
        Set<Chunk> aux = chunking.chunkSet();
        int pos = 0; // total characters inserted so far
        for (Chunk c : aux) {
            sentence = sentence.substring(0, pos + c.start()) + "<E>" + chunking.charSequence().subSequence(c.start(), c.end()) + "</E>" + sentence.substring(pos + c.end());
            pos += 7;
        }
        return sentence;
    }

    /**
     * Recognizes the entities in a sentence and assigns each one a polarity
     * via the PMI computation in ComputePMI.
     * (Removed the unused locals neutralThreshold, wordpos, wordneg and debug
     * that the original declared but never read.)
     *
     * @param path     path to the n-gram collection used for entity recognition
     * @param sentence input sentence
     * @param k        kernel passed to ComputePMI
     * @param sigma    kernel width passed to computePolarity
     * @return the sentence annotated with entities and their polarity
     */
    public String entitiesAndSentiment(String path, String sentence, Kernel k, double sigma) throws Exception {
        String result = recognizeEntities(path, sentence);
        // pad with spaces — presumably so boundary matching inside
        // computePolarity sees the first/last token; confirm in ComputePMI
        result = " " + result + " ";

        ComputePMI c = new ComputePMI(cache, k);
        result = c.computePolarity(result, "EN", sigma);
        return result;
    }


    
    /*
     * Demo entry point: builds an EntityOpinion from the arguments (or a
     * default n-gram path when none are given) and runs the unsupervised
     * entity recognizer on a toy sentence. The remaining usage examples are
     * kept commented out for reference.
     */
    public static void main(String[] args) throws Exception {
        if (args.length == 0) {
            // default arguments when run without any
            args = new String[]{
            //"C:/Users/Diogo/Desktop/treino/review_polarity/txt_sentoken",
            //"C:/Users/Diogo/Desktop/treino/rotten_imdb",
            "/home/dsantos/WikipediaNgrams"
            //"C:/Users/Diogo/Desktop/ngrams_2000_reviews",
            };
        }
        //new NGramDBFromText().createNGramDB(args[2]+ File.separator + "traindata",args[2],5);
        EntityOpinion demo = new EntityOpinion(args);
        System.out.println(demo.recognizeEntities(args[0], "John and Mary run."));

        System.out.println();
        //demo.train();
        System.out.println();
        //demo.evaluate();
        System.out.println();
        System.out.println();
        System.out.println("\nTesting the extraction of entities with opinions.");
        //String sentence = "My opinion towards a coward like Eddie.";
        //String sentence1 = "My opinion on a coward like George and Eddie is very negative, although i liked Matt very much.";
        //demo.entitiesSentimentBaseline("c:/users/diogo/desktop/xpto.txt", args[2]);
        //String sentiment = demo.getSentiment(sentence);
        //System.out.println("Textual sentence : " + sentence);
        //System.out.println("Opinion mined from sentence : " + sentiment);
        //System.out.println("Supervised extraction of entities : " + demo.recognizeEntitiesSupervised(args[3],sentence));
        //System.out.println("Unsupervised extraction of entities : " + demo.recognizeEntities(args[2], sentence1));
        //System.out.println("Extraction of entities and opinions : " + demo.entitiesAndSentiment(args[2],sentence1, Kernel.Gaussian, 3));
    }
}









