#Produce the document term matrices for modeling

library("tm")
library("SnowballC")

df_sample <- read.csv("medium_train_sample.csv", stringsAsFactors = FALSE)

# Normalize a character vector of question text for tokenization:
#  * lowercase, strip common HTML tags, collapse newlines
#  * remove hex-like / numeric noise tokens (pattern differs per field, see below)
#  * rewrite language names whose punctuation would otherwise be destroyed by
#    the final punctuation strip (c# -> csharp, f# -> fsharp, c++ -> cplusplus,
#    space-delimited bare c / r -> clanguag / rlanguag)
#  * drop all punctuation except apostrophes (kept so contraction stopwords
#    like "don't" still match later)
#
# `num.pattern`: the numeric-noise regex. NOTE(review): the original script
# used \b00\w*\b for the body but \b000\w*\b for the title -- preserved here
# as a parameter, but confirm whether that divergence is intentional.
clean.text <- function(x, num.pattern) {
  x <- tolower(x)
  x <- gsub("<code>|</code>|<p>|</p>|<strong>|</strong>|<pre>|</pre>", " ", x, perl = TRUE)
  x <- gsub(paste0(num.pattern, "|\\b0x\\w*\\b"), " ", x, perl = TRUE)
  x <- gsub("\n", " ", x, fixed = TRUE)
  x <- gsub("c#", " csharp ", x, fixed = TRUE)   ## So c# and c++ will be recognized as term
  x <- gsub("f#", " fsharp ", x, fixed = TRUE)
  # Only space-delimited single letters are treated as language names.
  # (Was `useByte=T`, which silently relied on partial matching of `useBytes`.)
  x <- gsub("^c | c | c$", " clanguag ", x, perl = TRUE, useBytes = TRUE)
  x <- gsub("^r | r | r$", " rlanguag ", x, perl = TRUE, useBytes = TRUE)
  x <- gsub("c++", "cplusplus", x, fixed = TRUE)
  gsub("(?!')[[:punct:]]", " ", x, perl = TRUE)
}

v.text  <- clean.text(df_sample$Body,  "\\b00\\w*\\b")
v.title <- clean.text(df_sample$Title, "\\b000\\w*\\b")

# Wrap the cleaned character vectors as tm corpora (one document per CSV row;
# document order therefore matches df_sample's row order).
text.corpus<-Corpus(VectorSource(v.text))
title.corpus<-Corpus(VectorSource(v.title))

##Now clear out all stop words
# Custom stopword list: standard English stopwords plus Stack-Overflow-specific
# filler ("code", "error", "please", ...). Entries must be bare words with no
# whitespace -- removeWords() wraps each entry in \b word boundaries, so the
# original "ours " (trailing space) never matched a standalone "ours".
# Also de-duplicated: "your" appeared twice.
mystops<-c(
 "a","about","above","after","again","against","all","am","an","and","any","appreciate","aren't","are","as","at",
"be","because","been","before","being","below","between","both","but","by","can't","cannot","couldn't","could",
"didn't","did","do","doesn't","does","doing","don't","down","during","each","few","for","from","further",
"hadn't","had","hasn't","has","haven't","have","having","he'd","he'll","he's","he","help","her","here's","here",
"hers","herself","him","himself","his","how's","how","i'd","i'll","i'm","i've","i","if","in","into","isn't",
"is","it's","it","its","itself","let's","me","more","most","mustn't","my","myself","no","nor","not","of",
"off","on","once","only","or","other","ought","our","ours","ourselves","out","over","own","same","shan't",
"she'd","she'll","she's","shouldn't","should","she","so","some","such","than","thank","thanks","that's","that","the",
"theirs","their","them","themselves","then","there's","there","these","they'd","they'll","they're","they've","they",
"this","those","through","to","too","under","until","up","very","wasn't","was","we'd","we'll","we're","we've",
"we","were","weren't","what's","what","when's","when","where's","where","which","while","who's","who","whom",
"why's","why","with","won't","wouldn't","would","you'd","you'll","you're","you've","you","your","yours","yourself",
"application", "can", "code", "create", "data", "error","find", "following", "get", "just", "know","like", "need", "pre",
"use", "using", "want", "way", "will","now", "one", "problem", "something", "sure", "trying", "work", "right", "run", "running",
 "see", "seems", "set", "show", "similar", "simple", "since", "possible",  "value","method","also","app","time","works" , "please"
)

# Identical cleaning for both corpora: drop the custom stopword list, then the
# standard English stopwords, then collapse runs of whitespace left behind.
clean.corpus <- function(corp) {
  corp <- tm_map(corp, removeWords, mystops)
  corp <- tm_map(corp, removeWords, stopwords("english"))
  tm_map(corp, stripWhitespace)
}

text.corpus <- clean.corpus(text.corpus)
text.stemmed <- tm_map(text.corpus, stemDocument)   # Porter-style stemming via SnowballC

title.corpus <- clean.corpus(title.corpus)
title.stemmed <- tm_map(title.corpus, stemDocument)

# Build document-term matrices and drop terms missing from >99.99% of docs.
dtm.text <- removeSparseTerms(DocumentTermMatrix(text.stemmed), 0.9999)
dtm.title <- removeSparseTerms(DocumentTermMatrix(title.stemmed), 0.9999)

# Densify one matrix at a time, releasing each sparse copy before converting
# the next, to keep peak memory down.
dtm.text2 <- as.matrix(dtm.text)
rm(dtm.text)
gc()

dtm.title2 <- as.matrix(dtm.title)
rm(dtm.title)
gc()

# Prefix title terms with "T." so they stay distinct from body terms after cbind.
colnames(dtm.title2) <- paste0("T.", colnames(dtm.title2))
dtm.combined <- cbind(dtm.title2, dtm.text2)
rm(dtm.text2, dtm.title2)
gc()

# Binarize: presence/absence only, counts > 1 are clamped to 1.
dtm.combined[dtm.combined > 1] <- 1

# Locate elements of `v` containing `tag` as a space-delimited token.
# `tag` is matched literally (\Q...\E quoting), so regex metacharacters in
# tag names ("c++", "c#") are safe.
# Returns the integer indices of matching elements, or, when logical=TRUE,
# a logical vector the same length as `v`.
find.tag<-function(v, tag, logical=FALSE)
{
  # (^| )tag( |$) covers all four original alternatives:
  # start-of-string, interior, end-of-string, and whole-string matches.
  pattern <- paste0("(^| )\\Q", tag, "\\E( |$)")
  hits <- grepl(pattern, v, perl=TRUE, useBytes=TRUE)
  if (logical) return(hits)
  which(hits)
}

# rpart is a hard dependency here -- library() errors immediately if missing,
# whereas require() would only warn and return FALSE.
library(rpart)
num.records <- nrow(df_sample)
# Tag names and their frequency on the training data, most frequent first;
# models are fit for the top num.models tags below.
tag.freq <- table(unlist(strsplit(df_sample$Tags, " ")))
tag.freq <- tag.freq[rev(order(tag.freq))]
tag.names <- names(tag.freq)

model.list <- list()

# Preallocate per-model bookkeeping vectors (tag name + grep pattern used).
num.models <- 500
v.tag <- rep("", num.models)
v.regexp <- rep("", num.models)

# Fit one rpart tree per tag, predicting tag presence from the binarized DTM
# columns most correlated with the tag, plus raw title/body grep indicators.
for(i in seq_len(num.models))
{
  tag<-tag.names[i]
  cat("Model number",i,"Fitting model for: ", tag, "\n")
  # Binary response: 1 when this question carries the tag.
  resp<-as.integer(find.tag(df_sample$Tags, tag, TRUE))

  # Loose regexp for the tag: '-' widened to match any character, '#'/'+'
  # escaped so e.g. "c++" matches literally. Other metacharacters in tag
  # names are deliberately left as-is.
  regexp.tag<-gsub("-", ".", tag, fixed=TRUE)
  regexp.tag<-gsub("#", "\\#", regexp.tag, fixed=TRUE)
  regexp.tag<-gsub("+", "\\+", regexp.tag, fixed=TRUE)
  cat("regexp is: ", regexp.tag, "\n")

  title.grep <- grepl(regexp.tag, df_sample$Title)
  body.grep <- grepl(regexp.tag, df_sample$Body)

  cat("Computing correlation matrix\n")
  # cor() returns a 1 x p matrix whose colnames are the term names; drop it to
  # a NAMED vector before removing NAs so names stay aligned with values.
  # (The original assigned colnames AFTER the NA filter, which misaligns the
  # names -- or errors on length mismatch -- whenever any DTM column has zero
  # variance and yields an NA correlation.)
  v.corr <- cor(resp, dtm.combined)[1, ]
  v.corr <- v.corr[!is.na(v.corr)]
  # Top 20 terms by positive correlation, keeping only those >= 0.1.
  # head() is safe when fewer than 20 terms survive; sort(...)[1:20] would
  # pad the result with NAs.
  v.corr2 <- head(sort(v.corr, decreasing=TRUE), 20)
  v.corr2<-v.corr2[v.corr2>=.1]
  vars<-intersect(names(v.corr2), colnames(dtm.combined))

  df<-data.frame(resp=resp, dtm.combined[,vars], title.grep, body.grep)
  cat("Fitting rpart tree\n")
  m<-rpart(resp~., data=df)
  # Drop the large per-observation components so the saved model list stays small.
  m$where <- NULL
  m$y <- NULL
  print(m)
  cat("\n\n")
  model.list[[i]]<-m
  v.tag[i]<-tag
  v.regexp[i]<-regexp.tag
}

# Serialize the fitted model list as deparsed R source. NOTE(review):
# saveRDS() would be faster and more compact, but dump() preserves the
# existing models_1_500.R output format consumed downstream.
dump("model.list", file="models_1_500.R")
