library("RTextTools")
library("RWeka")
library("tm")
library("gdata")
source("./Desktop/MO444/proj/word-prediction-mo444/lucas/functions-lucas.r")

# Factory producing an RWeka tokenizer that emits exactly n-word grams.
# force(n) pins the promise so each returned closure keeps its own n.
ngramTokenizerFactory <- function(n) {
  force(n)
  function(x) NGramTokenizer(x, Weka_control(min = n, max = n))
}
BigramTokenizer <- ngramTokenizerFactory(2)
TrigramTokenizer <- ngramTokenizerFactory(3)
QuadgramTokenizer <- ngramTokenizerFactory(4)
# Build the training corpus from treino2.txt.
dirPath <- "./Desktop/MO444/proj/word-prediction-mo444/data"
corpus <- Corpus(DirSource(directory = dirPath, pattern = "treino2.txt"))
# content_transformer() keeps the result a valid tm corpus; passing tolower
# directly corrupts the corpus under tm >= 0.6 and breaks TermDocumentMatrix.
corpus <- tm_map(corpus, content_transformer(tolower))
corpus <- tm_map(corpus, removeNumbers)
# NOTE(review): unlike the validation section below, punctuation is NOT
# removed here — confirm this asymmetry is intentional.

# Term-document matrices for 1- to 4-grams over the training corpus.
dictTDM <- TermDocumentMatrix(corpus, control = list(tokenize = WordTokenizer))
bigramTDM <- TermDocumentMatrix(corpus, control = list(tokenize = BigramTokenizer))
trigramTDM <- TermDocumentMatrix(corpus, control = list(tokenize = TrigramTokenizer))
quadgramTDM <- TermDocumentMatrix(corpus, control = list(tokenize = QuadgramTokenizer))

# Dense count matrices (terms x documents).
dictionary <- as.matrix(dictTDM)
bigramTable <- as.matrix(bigramTDM)
trigramTable <- as.matrix(trigramTDM)
quadgramTable <- as.matrix(quadgramTDM)

# gramSimplifier() comes from the sourced functions-lucas.r file.
bigram <- gramSimplifier(bigramTable, 2)
trigram <- gramSimplifier(trigramTable, 3)
quadgram <- gramSimplifier(quadgramTable, 4)

# UI entry points from the sourced functions file.
loadInterface()
launchApp()



# Validate the algorithm on the held-out file (validacao2.txt).
# NOTE(review): evaluator() is defined further down in this file — when
# running the script top-to-bottom, define it before executing this section.
corpus <- Corpus(DirSource(directory = dirPath, pattern = "validacao2.txt"))
# content_transformer() keeps the result a valid tm corpus (tm >= 0.6).
corpus <- tm_map(corpus, content_transformer(tolower))
corpus <- tm_map(corpus, removeNumbers)
corpus <- tm_map(corpus, removePunctuation)

# 4-gram counts of the validation text drive the accuracy measurement.
quadgramTestTable <- as.matrix(
  TermDocumentMatrix(corpus, control = list(tokenize = QuadgramTokenizer))
)
evaluator(dictionary, bigram, trigram, quadgram,
          fourgramTestTable = quadgramTestTable)

# getCovarianceMatriz() comes from the sourced functions file.
cov <- getCovarianceMatriz(dictionary, corpus, "./data/english")
# One zero-initialised correlation slot per row of cov, named by word.
history_cor <- setNames(rep(0, nrow(cov)), rownames(cov))


#' Measure prediction accuracy over a 4-gram frequency table.
#'
#' For every 4-gram in the test table, the first three words are fed to
#' guessNextWord() (defined in the sourced functions-lucas.r) and the
#' prediction counts as correct when the actual fourth word appears among
#' the top three candidates. Counts are weighted by the 4-gram's frequency.
#'
#' @param dictionary,bigram,trigram,quadgram Unused inside this body —
#'   presumably consumed as globals by guessNextWord(); TODO confirm.
#' @param fourgramTestTable Matrix whose rownames are space-separated
#'   4-grams and whose first column holds their occurrence counts.
#' @return list(correct, total, fraction) of weighted hit statistics.
evaluator <- function(dictionary, bigram, trigram, quadgram, fourgramTestTable) {
  total <- 0
  correct <- 0
  for (text in rownames(fourgramTestTable)) {
    words <- unlist(strsplit(text, " "))
    result <- guessNextWord(text = paste(words[1:3], collapse = " "))
    total <- total + fourgramTestTable[text, 1]
    # Hit when the actual 4th word is among the top three predictions
    # (equivalent to the original chained result[1]/[2]/[3] checks).
    if (words[4] %in% result[1:3]) {
      correct <- correct + fourgramTestTable[text, 1]
    }
    print(paste("Corretas:", correct, "Total:", total, "%:", correct / total))
  }
  list(correct = correct, total = total, fraction = correct / total)
}





#USELESS STUFF DOWN HERE! (deprecated early experiment, kept for reference)
# NOTE(review): depends on RTextTools::create_matrix and on the magic
# indexing [[6]][[2]] into its internal structure — fragile; do not reuse.
dirPath <- "./Desktop/Unicamp/MO444/word-prediction-mo444/"
corpus = Corpus(DirSource(directory=dirPath, pattern="treino.txt"))
# 38846 terms
bigramDTM <- create_matrix(corpus,ngramLength=2,minWordLength=3,removeNumbers=TRUE,removeStopwords=FALSE)
# [[6]][[2]] reaches into the DTM's internals for the term vector — TODO confirm slot
bigramVector <- bigramDTM[[6]][[2]]
# 74986 terms
trigramDTM <- create_matrix(corpus,ngramLength=3,minWordLength=3,removeNumbers=TRUE,removeStopwords=FALSE)
trigramVector <- trigramDTM[[6]][[2]]
# 88782 terms
fourgramDTM <- create_matrix(corpus,ngramLength=4,minWordLength=3,removeNumbers=TRUE,removeStopwords=FALSE)
fourgramVector <- fourgramDTM[[6]][[2]]
### EXAMPLE OF WHAT AN N-GRAM ALGORITHM WOULD LOOK LIKE
# NOTE(review): getNextWords() is defined below — when running top-to-bottom,
# define it before executing these example calls.
# returns 118 occurrences
bigramTest = getNextWords(2, bigramVector, c("old"))
# returns 3 occurrences
trigramTest = getNextWords(3, trigramVector, c("poor", "old"))
# returns 1 occurrence
fourgramTest =getNextWords(4, fourgramVector, c("that","poor", "old"))
#' Collect candidate next words from an n-gram list.
#'
#' Given the first (ngramLength - 1) words of an n-gram, returns the final
#' word of every n-gram in `vector` whose leading words match exactly.
#' #### THE LENGTH OF PREVIOUSWORDS MUST BE NGRAMLENGTH - 1 ####
#'
#' @param ngramLength Length of the n-grams in `vector` (kept for interface
#'   compatibility; matching is actually driven by length(previousWords)).
#' @param vector Character vector of space-separated n-grams.
#' @param previousWords Character vector of words that must prefix a
#'   candidate n-gram.
#' @return A list of candidate next words (empty list when none match).
getNextWords <- function(ngramLength, vector, previousWords) {
  candidateList <- list()
  j <- 1
  # seq_along() handles empty input safely; 1:length(vector) would yield
  # c(1, 0) and error on vector[1] extraction.
  for (i in seq_along(vector)) {
    wordList <- strsplit(vector[i], "[[:space:]]+")[[1]]
    # The n-gram qualifies when every supplied word matches its prefix.
    # isTRUE() guards against NA (n-gram shorter than previousWords),
    # which would make a bare `if` error out.
    prefix <- wordList[seq_along(previousWords)]
    if (isTRUE(all(previousWords == prefix))) {
      candidateList[[j]] <- wordList[length(wordList)]
      j <- j + 1
    }
  }
  candidateList
}
