package analisador;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.sql.*;
import java.util.*;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.index.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.Version;

/**
 * Computes a TF-IDF "significance" score for every token appearing in issue
 * comments (table {@code comentario}), then removes the bottom-scoring half of
 * the tokens from each comment, writing the reduced token string back to the
 * {@code TOKENSREDUZIDO} column.
 *
 * Pipeline (see {@link #main}): load comments from MySQL, build a Lucene index
 * (one document per issue), derive document frequencies and per-document term
 * frequencies from the index, average the TF-IDF per token, sort descending,
 * and strip the lower half.
 */
public class Significancia {

    /** Lucene version used for the analyzer and index configuration. */
    final static Version v = Version.LUCENE_35;
    /** Directory where the Lucene index is written. */
    final static String pathIndexDir = "./arquivosGerados/out/";
    /** Directory that receives the per-issue ".freq" report files. */
    final static String pathFilesView = "./arquivosGerados/view/";
    /** issue_id -> token strings collected from that issue's comments. */
    static Map<Integer, List<String>> mapIssue = new HashMap<Integer, List<String>>();
    /** token -> document frequency (number of issue documents containing it). */
    static Map<String, Integer> idf = new HashMap<String, Integer>();
    /** token -> average TF-IDF score over the documents that contain it. */
    static Map<String, Double> significancia = new HashMap<String, Double>();
    /** Tokens in the bottom half of the significance ranking; to be removed. */
    static List<String> termosRetirar = new ArrayList<String>();
    static Connection con;
    /** Count of comments whose "tokens" column was NULL (skipped rows). */
    static int branco = 0;

    public static void main(String[] args) throws Exception {
        System.out.println("Inciando processo de indexação!");

        conectar();
        consulta();
        criarIndice();
        criarArquivoDeTokensComFrequencia();
        criarArquivoDeTokensComFrequenciaInternaAoDocumento();
        Map<String, Double> map = ordenar();
        extrairTerms(map);
        retirarTokens();
        desconectar();

        System.out.println("Processo finalizado!!");
    }

    /**
     * Loads every comment's token string, grouped by issue, into {@link #mapIssue}.
     *
     * BUG FIX: the original caught {@code NullPointerException} around
     * {@code mapIssue.get(issue).add(tokens)} to count blank rows, but that call
     * can never throw (the list is created just above), so {@code branco} never
     * incremented and NULL token columns were silently added to the list — later
     * indexed as the literal string "null". NULL rows are now counted and skipped.
     */
    private static void consulta() throws SQLException {
        Statement stmt = con.createStatement();
        ResultSet rs = stmt.executeQuery("SELECT tokens, issue_id FROM comentario ");
        try {
            while (rs.next()) {
                int issue = rs.getInt("issue_id");
                String tokens = rs.getString("tokens");

                if (tokens == null) {
                    branco++;
                    continue;
                }

                List<String> lista = mapIssue.get(issue);
                if (lista == null) {
                    lista = new ArrayList<String>();
                    mapIssue.put(issue, lista);
                }
                lista.add(tokens);
            }
        } finally {
            rs.close();
            stmt.close();
        }
        System.out.println(branco);
    }

    /**
     * Opens the MySQL connection used by every other step.
     * NOTE(review): credentials are hard-coded; consider externalizing them.
     */
    public static void conectar() throws Exception {
        con = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/hadoop",
                "root",
                "root");
    }

    /** Closes the shared database connection. */
    public static void desconectar() throws Exception {
        con.close();
    }

    /**
     * Builds the Lucene index: one document per issue, with the issue id stored
     * unanalyzed and all of its comments' tokens concatenated into a single
     * whitespace-analyzed "tokens" field (term vectors enabled for later TF reads).
     */
    private static void criarIndice() throws CorruptIndexException, LockObtainFailedException, IOException {
        Analyzer an = new WhitespaceAnalyzer(v);
        Directory dir = FSDirectory.open(new File(pathIndexDir));

        IndexWriterConfig config = new IndexWriterConfig(v, an);
        IndexWriter indexWriter = new IndexWriter(dir, config);
        // Start from a clean index on every run.
        indexWriter.deleteAll();

        for (Map.Entry<Integer, List<String>> e : mapIssue.entrySet()) {
            Document d = new Document();
            d.add(new Field("issue", String.valueOf(e.getKey()), Store.YES, Index.NOT_ANALYZED, TermVector.YES));

            StringBuilder tokens = new StringBuilder();
            for (String t : e.getValue()) {
                tokens.append(t).append(" ");
            }
            d.add(new Field("tokens", tokens.toString(), Store.YES, Index.ANALYZED, TermVector.YES));
            indexWriter.addDocument(d);
        }
        indexWriter.close();
        dir.close();
    }

    /**
     * For each indexed issue, writes a "[score | token]" report file and
     * accumulates every token's TF-IDF scores, then stores the per-token average
     * in {@link #significancia}.
     *
     * Must run after {@link #criarArquivoDeTokensComFrequencia()} so that
     * {@link #idf} is populated.
     */
    private static void criarArquivoDeTokensComFrequenciaInternaAoDocumento() throws IOException {
        Directory dir = FSDirectory.open(new File(pathIndexDir));
        Map<String, List<Double>> sig = new HashMap<String, List<Double>>();

        System.out.println("Criando os arquivos de frequência interna ao documento ..........");
        IndexReader reader = IndexReader.open(dir);

        int totalDocs = reader.numDocs();
        int cont = 0; // documents with no term vector (empty token field)
        for (int i = 0; i < totalDocs; i++) {
            Document d = reader.document(i);
            String issue = d.get("issue");

            PrintStream arquivoFrequenciaInterna = new PrintStream(new FileOutputStream(pathFilesView + "files/" + issue + ".freq"));
            try {
                TermFreqVector vector = reader.getTermFreqVector(i, "tokens");
                if (vector != null) {
                    String[] terms = vector.getTerms();
                    int[] freqs = vector.getTermFrequencies();
                    for (int j = 0; j < vector.size(); j++) {
                        String token = terms[j];
                        Integer _idf = idf.get(token);
                        // Defensive: every term-vector token should be in idf
                        // (both come from the same index), but guard the unboxing.
                        if (_idf == null) {
                            continue;
                        }
                        // Compute the score once instead of twice as before.
                        double score = tf(freqs[j], vector.size()) * idf(_idf, totalDocs);

                        List<Double> scores = sig.get(token);
                        if (scores == null) {
                            scores = new ArrayList<Double>();
                            sig.put(token, scores);
                        }
                        scores.add(score);
                        arquivoFrequenciaInterna.println("[" + score + " | " + token + "]");
                    }
                } else {
                    cont++;
                }
            } finally {
                arquivoFrequenciaInterna.close();
            }
        }
        System.out.println(cont);
        reader.close();
        dir.close();

        // Significance of a token = mean TF-IDF over the documents containing it.
        for (Map.Entry<String, List<Double>> e : sig.entrySet()) {
            double soma = 0.0;
            for (Double score : e.getValue()) {
                soma += score;
            }
            significancia.put(e.getKey(), soma / e.getValue().size());
        }

        System.out.println("Arquivo criado com sucesso!\n");
    }

    /**
     * Walks every term in the index and records its document frequency in
     * {@link #idf}. (The report file this method once wrote was already
     * disabled; the dead commented-out code has been removed.)
     */
    private static void criarArquivoDeTokensComFrequencia() throws IOException {
        System.out.println("Criando arquivo com frequência ..........");
        Directory dir = FSDirectory.open(new File(pathIndexDir));
        IndexReader reader = IndexReader.open(dir);

        TermEnum termReader = reader.terms();
        while (termReader.next()) {
            Term t = termReader.term();
            idf.put(t.text(), reader.docFreq(t));
        }
        termReader.close();
        reader.close();
        dir.close();

        System.out.println("Arquivo criado com sucesso!\n");
    }

    /**
     * Inverse document frequency: ln(totalDocuments / docFreq).
     *
     * @param numDoc         document frequency of the term (> 0 for indexed terms)
     * @param totalDocuments total number of documents in the index
     */
    static double idf(int numDoc, int totalDocuments) {
        return Math.log((double) totalDocuments / (double) numDoc);
    }

    /**
     * Term frequency normalized by the number of distinct terms in the document.
     *
     * @param num     raw occurrence count of the term in the document
     * @param numTerm number of distinct terms in the document
     */
    static double tf(int num, int numTerm) {
        return (double) num / (double) numTerm;
    }

    /**
     * Returns {@link #significancia} as a {@link LinkedHashMap} ordered by
     * descending score. Rewritten with generics — the original sorted a raw
     * {@code Object[]} with unchecked casts.
     */
    private static Map<String, Double> ordenar() {
        List<Map.Entry<String, Double>> entries =
                new ArrayList<Map.Entry<String, Double>>(significancia.entrySet());
        Collections.sort(entries, new Comparator<Map.Entry<String, Double>>() {

            public int compare(Map.Entry<String, Double> lhs, Map.Entry<String, Double> rhs) {
                // Reversed comparison => descending by score.
                return rhs.getValue().compareTo(lhs.getValue());
            }
        });
        Map<String, Double> ordenado = new LinkedHashMap<String, Double>();
        for (Map.Entry<String, Double> entry : entries) {
            ordenado.put(entry.getKey(), entry.getValue());
        }
        return ordenado;
    }

    /**
     * Marks the bottom half of the (descending-ordered) significance map for
     * removal, filling {@link #termosRetirar}.
     *
     * BUG FIX: the original tested {@code i >= porcentagem} with {@code i}
     * already incremented, which also discarded the token AT the 50% cut
     * position — removing one more token than intended. {@code i > porcentagem}
     * keeps exactly the top {@code porcentagem} tokens.
     *
     * @param map tokens ordered by descending significance (see {@link #ordenar()})
     */
    static void extrairTerms(Map<String, Double> map) throws FileNotFoundException {
        int total = map.size();
        int porcentagem = (int) (0.5 * total); // number of top tokens to keep

        int i = 0;
        for (String token : map.keySet()) {
            i++;
            if (i > porcentagem) {
                termosRetirar.add(token);
            }
        }
    }

    /**
     * Rewrites each comment's token string without the low-significance tokens,
     * storing the result in the {@code TOKENSREDUZIDO} column.
     *
     * Improvements over the original: the UPDATE statement is prepared once and
     * reused (it was re-prepared and closed on every row), membership tests use
     * a {@link HashSet} instead of {@code List.contains} (O(1) vs O(n) per
     * token), and the JDBC resources are closed in a {@code finally} block.
     */
    private static void retirarTokens() throws SQLException {
        Set<String> retirar = new HashSet<String>(termosRetirar);

        Statement stmt = con.createStatement();
        ResultSet rs = stmt.executeQuery("SELECT id,tokens FROM comentario ");
        PreparedStatement p = con.prepareStatement("UPDATE comentario SET TOKENSREDUZIDO=? WHERE ID=?");
        try {
            while (rs.next()) {
                long id = rs.getLong("id");
                String tokens = rs.getString("tokens");
                if (tokens == null) {
                    continue;
                }
                StringBuilder novosTokens = new StringBuilder();
                StringTokenizer tokenizer = new StringTokenizer(tokens);
                while (tokenizer.hasMoreTokens()) {
                    String token = tokenizer.nextToken();
                    if (!retirar.contains(token)) {
                        novosTokens.append(token).append(" ");
                    }
                }
                p.setString(1, novosTokens.toString());
                p.setLong(2, id);
                p.execute();
            }
        } finally {
            p.close();
            rs.close();
            stmt.close();
        }
    }
}
