#score a dataset using a saved list of trees which predict the probability of tags

library("tm")
library("SnowballC")

# --- Clean question bodies into a term-friendly form --------------------------
df_sample <- read.csv("my_test_small.csv", stringsAsFactors = FALSE)
v.text <- tolower(df_sample$Body)
# Strip the HTML markup tags that appear in the Body field
v.text <- gsub("<code>|</code>|<p>|</p>|<strong>|</strong>|<pre>|</pre>", " ", v.text, perl = TRUE)
# Drop hex-style tokens (0x...) and tokens beginning "00..."
# NOTE(review): the title pass uses \b000\w* here while this uses \b00\w* -- confirm which is intended
v.text <- gsub("\\b00\\w*\\b|\\b0x\\w*\\b", " ", v.text, perl = TRUE)
v.text <- gsub("\n", " ", v.text, fixed = TRUE)
# Rewrite language names whose punctuation would be destroyed below,
# so c#, f#, bare c/r and c++ survive as recognizable terms
v.text <- gsub("c#", " csharp ", v.text, fixed = TRUE)
v.text <- gsub("f#", " fsharp ", v.text, fixed = TRUE)
# was useByte= (worked only via partial argument matching); spelled out as useBytes=
v.text <- gsub("^c | c | c$", " clanguag ", v.text, perl = TRUE, useBytes = TRUE)
v.text <- gsub("^r | r | r$", " rlanguag ", v.text, perl = TRUE, useBytes = TRUE)
v.text <- gsub("c++", "cplusplus", v.text, fixed = TRUE)
# Replace all punctuation except apostrophes (kept so contractions like "don't"
# still match the stopword lists)
v.text <- gsub("(?!')[[:punct:]]", " ", v.text, perl = TRUE)

# --- Clean question titles with the same pipeline as the bodies ---------------
v.title <- tolower(df_sample$Title)
# Strip the HTML markup tags
v.title <- gsub("<code>|</code>|<p>|</p>|<strong>|</strong>|<pre>|</pre>", " ", v.title, perl = TRUE)
# Drop hex-style tokens (0x...) and tokens beginning "000..."
# NOTE(review): the body pass uses \b00\w* here instead of \b000\w* -- confirm which is intended
v.title <- gsub("\\b000\\w*\\b|\\b0x\\w*\\b", " ", v.title, perl = TRUE)
v.title <- gsub("\n", " ", v.title, fixed = TRUE)
# Rewrite language names so c#, f#, bare c/r and c++ survive punctuation removal
v.title <- gsub("c#", " csharp ", v.title, fixed = TRUE)
v.title <- gsub("f#", " fsharp ", v.title, fixed = TRUE)
# was useByte= (worked only via partial argument matching); spelled out as useBytes=
v.title <- gsub("^c | c | c$", " clanguag ", v.title, perl = TRUE, useBytes = TRUE)
v.title <- gsub("^r | r | r$", " rlanguag ", v.title, perl = TRUE, useBytes = TRUE)
v.title <- gsub("c++", "cplusplus", v.title, fixed = TRUE)
# Replace all punctuation except apostrophes
v.title <- gsub("(?!')[[:punct:]]", " ", v.title, perl = TRUE)

# Wrap the cleaned character vectors as tm corpora (one document per CSV row;
# document order therefore matches df_sample row order)
text.corpus<-Corpus(VectorSource(v.text))
title.corpus<-Corpus(VectorSource(v.title))

##Now clear out all stop words
# Custom stopword list: standard English stopwords plus domain-generic words
# ("code", "error", "app", ...) that carry no signal for tag prediction.
# Fixes vs. the original list: "ours " had a trailing space (removeWords could
# never match it) and "your" appeared twice.
mystops <- c(
  "a", "about", "above", "after", "again", "against", "all", "am", "an", "and",
  "any", "appreciate", "aren't", "are", "as", "at",
  "be", "because", "been", "before", "being", "below", "between", "both", "but",
  "by", "can't", "cannot", "couldn't", "could",
  "didn't", "did", "do", "doesn't", "does", "doing", "don't", "down", "during",
  "each", "few", "for", "from", "further",
  "hadn't", "had", "hasn't", "has", "haven't", "have", "having", "he'd",
  "he'll", "he's", "he", "help", "her", "here's", "here",
  "hers", "herself", "him", "himself", "his", "how's", "how", "i'd", "i'll",
  "i'm", "i've", "i", "if", "in", "into", "isn't",
  "is", "it's", "it", "its", "itself", "let's", "me", "more", "most",
  "mustn't", "my", "myself", "no", "nor", "not", "of",
  "off", "on", "once", "only", "or", "other", "ought", "our", "ours",
  "ourselves", "out", "over", "own", "same", "shan't",
  "she'd", "she'll", "she's", "shouldn't", "should", "she", "so", "some",
  "such", "than", "thank", "thanks", "that's", "that", "the",
  "theirs", "their", "them", "themselves", "then", "there's", "there", "these",
  "they'd", "they'll", "they're", "they've", "they",
  "this", "those", "through", "to", "too", "under", "until", "up", "very",
  "wasn't", "was", "we'd", "we'll", "we're", "we've",
  "we", "were", "weren't", "what's", "what", "when's", "when", "where's",
  "where", "which", "while", "who's", "who", "whom",
  "why's", "why", "with", "won't", "wouldn't", "would", "you'd", "you'll",
  "you're", "you've", "you", "your", "yours", "yourself",
  "application", "can", "code", "create", "data", "error", "find", "following",
  "get", "just", "know", "like", "need", "pre",
  "use", "using", "want", "way", "will", "now", "one", "problem", "something",
  "sure", "trying", "work", "right", "run", "running",
  "see", "seems", "set", "show", "similar", "simple", "since", "possible",
  "value", "method", "also", "app", "time", "works", "please"
)

# Remove the custom and standard English stopword lists, collapse the gaps
# they leave, then stem.  Order matters: contractions in the stopword lists
# (e.g. "don't") must be removed before stemming would alter them.
text.corpus<-tm_map(text.corpus, removeWords, mystops )
text.corpus<-tm_map(text.corpus, removeWords, stopwords("english"))
text.corpus<-tm_map(text.corpus, stripWhitespace)
text.stemmed<-tm_map(text.corpus, stemDocument)

# Same pipeline for the titles
title.corpus<-tm_map(title.corpus, removeWords, mystops )
title.corpus<-tm_map(title.corpus, removeWords, stopwords("english"))
title.corpus<-tm_map(title.corpus, stripWhitespace)
title.stemmed<-tm_map(title.corpus, stemDocument)

# Document-term matrices, dropping terms absent from more than 99.9% of
# documents to keep the dense conversion below tractable
dtm.text <- removeSparseTerms(DocumentTermMatrix(text.stemmed), 0.999)
dtm.title <- removeSparseTerms(DocumentTermMatrix(title.stemmed), 0.999)

# Convert the sparse DTMs to dense matrices one at a time, freeing each sparse
# version immediately (rm + gc) to limit peak memory use
dtm.text2<-as.matrix(dtm.text)
rm(dtm.text)
gc()

dtm.title2<-as.matrix(dtm.title)
rm(dtm.title)
gc()

# Prefix title terms with "T." so they do not collide with body terms when
# the two matrices are column-bound into a single predictor matrix
colnames(dtm.title2)<-paste("T.", colnames(dtm.title2), sep="")
dtm.combined<-cbind(dtm.title2, dtm.text2)
rm(dtm.text2)
rm(dtm.title2)
gc()
# Binarize: presence/absence of a term rather than its count
dtm.combined[dtm.combined>1]<-1

#read in the trees and other data
# NOTE(review): assumes dumpdata.R defines model.list, v.tag and v.regexp
# (the companions produced by the training section below) -- confirm
source("dumpdata.R")
library(rpart)  # library() errors loudly if rpart is missing, unlike require()

# One column of predicted tag probabilities per saved model
tag.probs<-matrix(0, nrow(df_sample), length(v.tag))
colnames(tag.probs)<-v.tag

# Score every document against every saved tree.
# seq_along(v.tag) replaces the hard-coded 1:500 so the loop always matches
# the number of models actually loaded (tag.probs is dimensioned by v.tag).
for (i in seq_along(v.tag))
{
  print(i)
  # A regexp hit for the tag in the raw title/body is an extra predictor
  title.grep <- as.integer(grepl(v.regexp[i], df_sample$Title))
  body.grep <- as.integer(grepl(v.regexp[i], df_sample$Body))

  # The tree may reference terms absent from this DTM; supply those as
  # all-zero columns so predict() finds every variable it expects
  vars <- names(model.list[[i]]$ordered)
  vars <- setdiff(vars, c("body.grep", "title.grep"))
  addl.vars <- setdiff(vars, colnames(dtm.combined))
  existing.vars <- intersect(vars, colnames(dtm.combined))
  df <- data.frame(dtm.combined[, existing.vars],
                   matrix(0, nrow(dtm.combined), length(addl.vars)),
                   title.grep, body.grep, resp = 0)
  names(df) <- c(existing.vars, addl.vars, "title.grep", "body.grep", "resp")

  tag.probs[, i] <- predict(model.list[[i]], newdata = df)
}

# Given probabilities p (sorted most-likely-first) that each candidate tag is
# truly present, return the number of top tags to keep that maximizes the
# expected F1 score over all 2^length(p) possible truth outcomes.
# Returns 0 ("predict nothing") when even the best expected F1 is below 0.05.
num.to.keep <- function(p)
{
  k <- length(p)
  # Every possible truth assignment for the k candidate tags (rows of 0/1)
  outcomes <- expand.grid(rep(list(c(0, 1)), k))
  outcome.mat <- as.matrix(outcomes)
  # P(outcome) = prod of p over present tags times (1 - p) over absent tags
  prob.outcome <- vapply(seq_len(nrow(outcome.mat)),
                         function(i) {
                           o <- outcome.mat[i, ]
                           prod(p[o > 0]) * prod(1 - p[o == 0])
                         },
                         numeric(1))
  # Number of true tags per outcome, hoisted out of the loop below
  # (the original recomputed apply(outcomes, 1, sum) on every iteration)
  n.true <- rowSums(outcome.mat)
  Ef <- rep(NA_real_, k)
  for (j in seq_len(k))
  {
    # Strategy "keep the top j tags"
    a.try <- c(rep(1, j), rep(0, k - j))
    tp <- outcome.mat %*% a.try
    prec <- tp / j
    rec <- tp / n.true
    f <- 2 * prec * rec / (prec + rec)
    f[is.na(f)] <- 0  # 0/0 when an outcome has no true tags or no overlap
    Ef[j] <- sum(f * prob.outcome)
  }
  if (max(Ef) < .05) 0
  else which.max(Ef)
}

# For each document: take its 5 most probable tags, ask num.to.keep how many
# maximize expected F1, and emit those as a space-separated prediction string.
top5.tags<-matrix("", nrow(dtm.combined), 5)
top5.prob<-matrix(0, nrow(dtm.combined), 5)
keepers<-rep(0, nrow(dtm.combined))
predictions<-rep("", nrow(dtm.combined))
for (i in seq_len(nrow(dtm.combined)))
{
  ind <- rev(order(tag.probs[i,]))
  top5.tags[i,] <- v.tag[ind[1:5]]
  top5.prob[i,] <- tag.probs[i, ind[1:5]]
  keepers[i] <- num.to.keep(top5.prob[i,])
  # Guard against keepers == 0: the original indexed 1:keepers[i], which is
  # c(1, 0) when keepers is 0 and silently emitted the top tag even though
  # num.to.keep said to predict nothing.
  if (keepers[i] > 0) {
    predictions[i] <- paste(top5.tags[i, seq_len(keepers[i])], collapse=" ")
  }
  cat(i," : ", predictions[i],"\n")
}

# Question-level F1 score between predicted and actual tag vectors.
# pred / actual: character vectors of tags (duplicates in pred each count,
# matching the original implementation).
# sum(pred %in% actual) is equivalent to the original per-element
# max(actual == pred[n]) but is safe when actual is empty, where the original
# warned and produced -Inf.
F1 <- function(pred, actual)
{
  tp <- sum(pred %in% actual)
  if (tp == 0) return(0)
  prec <- tp / length(pred)
  recall <- tp / length(actual)
  2 * prec * recall / (prec + recall)
}

# Per-question F1 against the true tags; computed once (the original ran the
# identical sapply twice, once for the mean and once to keep the vector f).
# vapply pins the result type to one numeric per question.
f <- vapply(seq_along(predictions),
            function(n) F1(unlist(strsplit(predictions[n], split=" ")),
                           unlist(strsplit(df_sample$Tags[n], split=" "))),
            numeric(1))
mean(f)



#########################################################################################
# Training section: fit one rpart tree per tag for the num.models most
# frequent tags in the training data.
num.records<-nrow(df_sample)   # NOTE(review): assigned but never read below -- confirm still needed
tag.freq <- table(unlist(strsplit(df_sample$Tags," ")))                    ##Get tag names and their frequency on the training data
tag.freq <- tag.freq[rev(order(tag.freq))]   # most frequent tag first
tag.names<-names(tag.freq)

model.list <- list()

num.models <- 500
v.tag<-rep("",num.models)      # tag name for model i
v.regexp<-rep("",num.models)   # escaped regexp used for the grep predictors of model i

# Fit one rpart tree per tag using the most-correlated DTM terms plus two
# regexp-hit indicator predictors.
for (i in seq_len(num.models))
{
  tag <- tag.names[i]
  cat("Model number", i, "Fitting model for: ", tag, "\n")
  # Binary response: does this question carry the tag?
  # NOTE(review): find.tag() is not defined in this file -- presumably sourced
  # elsewhere; confirm its signature
  resp <- as.integer(find.tag(df_sample$Tags, tag, TRUE))

  # Turn the literal tag into a regexp: "-" becomes the "." wildcard,
  # "#" and "+" are escaped so c#/c++ style tags match literally
  regexp.tag <- gsub("-", ".", tag, fixed=TRUE)
  regexp.tag <- gsub("#", "\\#", regexp.tag, fixed=TRUE)
  regexp.tag <- gsub("+", "\\+", regexp.tag, fixed=TRUE)
  cat("regexp is: ", regexp.tag, "\n")

  title.grep <- grepl(regexp.tag, df_sample$Title)
  body.grep <- grepl(regexp.tag, df_sample$Body)

  cat("Computing correlation matrix\n")
  # cor() returns a 1 x p matrix that already carries the DTM column names,
  # so drop to a named vector FIRST and only then filter the NAs
  # (zero-variance terms).  The original filtered first and then assigned the
  # full colnames, which misaligns (or errors) whenever any correlation is NA.
  v.corr <- cor(resp, dtm.combined)[1, ]
  v.corr <- v.corr[!is.na(v.corr)]
  # Top 20 most positively correlated terms with correlation >= 0.1;
  # head() is safe when fewer than 20 terms survive (1:20 would inject NAs)
  v.corr2 <- head(sort(v.corr, decreasing=TRUE), 20)
  v.corr2 <- v.corr2[v.corr2 >= .1]
  vars <- intersect(names(v.corr2), colnames(dtm.combined))

  df <- data.frame(resp=resp, dtm.combined[, vars], title.grep, body.grep)
  cat("Fitting rpart tree\n")
  m <- rpart(resp~., data=df)
  # Drop bulky components not needed by predict() so the dump stays small
  m$where <- NULL
  m$y <- NULL
  print(m)
  cat("\n\n")
  model.list[[i]] <- m
  v.tag[i] <- tag
  v.regexp[i] <- regexp.tag
}

# Persist the fitted trees as R source.
# NOTE(review): the scoring section above also needs v.tag and v.regexp --
# confirm those are dumped elsewhere (e.g. into dumpdata.R)
dump("model.list", file="models_1_500.R")
