num.tags <- 500                                                             ## Number of most frequent tags to use in prediction

## Read the training data (the Title, Body and Tags columns are used below)
df_sample <- read.csv("~/facebook/train_sample.csv", stringsAsFactors = FALSE)

## Tag names and their frequencies in the training data, most frequent first
tag.freq <- table(unlist(strsplit(df_sample$Tags, " ")))
tag.freq <- tag.freq[rev(order(tag.freq))]
tag.names <- names(tag.freq)

## Remove code blocks, hyperlinks, and words in parentheses from the body,
## since these seem to reduce predictiveness.
## NOTE(review): the original greedy patterns ("<code>.*</code>") delete
## everything between the FIRST opening and LAST closing tag in a post,
## wiping out the prose in between when a body has several code blocks.
## Non-greedy ".*?" fixes that; "(?s)" lets "." span newlines so multi-line
## code blocks are still removed (perl = TRUE enables both constructs).
scrub.body <- gsub("(?s)<code>.*?</code>", "", df_sample$Body, perl = TRUE)
scrub.body <- gsub("(?s)<a.*?</a>", "", scrub.body, perl = TRUE)
scrub.body <- gsub("(?s)\\(.*?\\)", "", scrub.body, perl = TRUE)
scrub.body <- tolower(scrub.body)
titles <- tolower(df_sample$Title)

## tag_map.csv has hand-tuned regexps selected for the top 300 tags
tag.map <- read.csv("~/facebook/tag_map.csv", header = TRUE, stringsAsFactors = FALSE)

## For the remaining tags (up to num.tags), derive a regexp mechanically
## from the tag name:
##  - "-" becomes "." so e.g. "visual-studio" also matches "visual studio"
##  - "#" and "+" are escaped so "c#" and "c++" match literally
## (with fixed = TRUE the replacement is inserted verbatim, so "\\#" puts a
## literal backslash before the character)
more.tags <- setdiff(tag.names[1:num.tags], tag.map$tag)
more.regexp <- gsub("-", ".", more.tags, fixed = TRUE)
more.regexp <- gsub("#", "\\#", more.regexp, fixed = TRUE)
more.regexp <- gsub("+", "\\+", more.regexp, fixed = TRUE)
tag.map <- rbind(tag.map, data.frame(tag = more.tags, regexp = more.regexp))


## Measure each tag's regexp against the training data.
## NOTE(review): "spec" here is really precision (positive predictive
## value), not specificity in the usual sense; the names are kept because
## perf_info.csv consumers (and the pruning loop below) rely on them.
sens <- rep(NA, num.tags)      # recall:    |pred & actual| / |actual|
spec <- rep(NA, num.tags)      # precision: |pred & actual| / |pred|
f1stat <- rep(NA, num.tags)

for (i in 1:num.tags)
{
  ## re1 matches the tag as a whole space-delimited token in the Tags
  ## column.  \Q...\E quotes regex metacharacters, which is a PCRE-only
  ## construct, so the grep on Tags needs perl = TRUE (the original used
  ## the default TRE engine, where \Q...\E is not supported).
  re0 <- paste0("\\Q", tag.map$tag[i], "\\E")
  re1 <- paste0("^", re0, " | ", re0, " | ", re0, "$", "|^", re0, "$")
  re2 <- tag.map$regexp[i]
  i1 <- grep(re2, titles)
  i2 <- grep(re2, scrub.body)
  ind.pred <- unique(c(i1, i2))                 # questions predicted to have this tag
  ind.actual <- grep(re1, df_sample$Tags, perl = TRUE)  # questions that truly have it
  sensitivity <- if (length(ind.actual) > 0)
    length(intersect(ind.pred, ind.actual)) / length(ind.actual) else 0
  specificity <- if (length(ind.pred) > 0)
    length(intersect(ind.pred, ind.actual)) / length(ind.pred) else 0
  sens[i] <- sensitivity
  spec[i] <- specificity
  ## Harmonic mean of precision and recall, i.e. the per-tag F1
  f1stat[i] <- if (sensitivity + specificity > 0)
    2 * sensitivity * specificity / (sensitivity + specificity) else 0
  print(c(tag.map$tag[i], tag.map$regexp[i]))
  print(c(i, sensitivity, specificity, f1stat[i]))
}

## Persist per-tag performance for later inspection.  The regexp column is
## named explicitly -- the original passed `tag.map$regexp` unnamed, so
## data.frame() auto-named the column "tag.map.regexp".
perf.info <- data.frame(tag = tag.map$tag, regexp = tag.map$regexp,
                        sens = sens, spec = spec, f1stat = f1stat)
write.csv(perf.info, "~/facebook/perf_info.csv")


## Predict tags: a tag is assigned to every question whose (lower-cased)
## title or scrubbed body matches the tag's regexp.  Tags accumulate as a
## space-separated string per question.
predictions <- rep("", length(df_sample$Title))


for (i in 1:num.tags)
{
  print(i)
  x <- tag.map$regexp[i]
  ## `titles` and `scrub.body` are already lower-cased above; the original
  ## recomputed tolower() on both vectors every iteration of the loop.
  ind <- unique(c(grep(x, titles), grep(x, scrub.body)))
  predictions[ind] <- paste(predictions[ind], tag.map$tag[i])
}

## Drop the leading space that paste() introduced before the first tag
predictions <- substring(predictions, 2)

## Now predictions must be ordered by precision ("spec") and only the most
## powerful ones retained: cap at 5 tags per question and drop any tag
## whose precision is <= 0.2.
for (i in seq_along(predictions)) {
  print(i)
  if (predictions[i] == "") next
  v_pred <- unlist(strsplit(predictions[i], " "))
  ## Tags in tag.map/perf.info are unique by construction, so match()
  ## returns the single row index for each predicted tag (the original
  ## did an O(n) == scan inside sapply for every tag).
  v_spec <- perf.info$spec[match(v_pred, perf.info$tag)]
  ord <- rev(order(v_spec))          # most precise first; computed once
  v_pred <- v_pred[ord]
  v_spec <- v_spec[ord]
  if (length(v_pred) > 5) {
    v_pred <- v_pred[1:5]
    v_spec <- v_spec[1:5]
  }
  if (max(v_spec) <= .2)
    predictions[i] <- ""
  else
    predictions[i] <- paste(v_pred[v_spec > .2], collapse = " ")
}

## Fall back to the two most common tags when nothing reliable matched
predictions[predictions == ""] <- "java c#"

## F1 score for a single observation.
##
## @param pred   character vector of predicted tags
## @param actual character vector of true tags
## @return F1 in [0, 1]; 0 when there is no overlap or either vector is empty
F1 <- function(pred, actual)
{
	## Vectorized overlap count.  The original looped over 1:length(pred)
	## taking max(actual == pred[n]), which warns and yields -Inf when
	## `actual` is empty and breaks entirely when `pred` is empty (1:0).
	tp <- sum(pred %in% actual)
	if (tp == 0 || length(actual) == 0) return(0)
	prec <- tp / length(pred)
	recall <- tp / length(actual)
	2 * prec * recall / (prec + recall)
}


## Overall score: mean per-observation F1 across the sample (the original
## indexed with sapply(1:length(...)); mapply walks the two vectors in step)
mean(mapply(function(p, a) F1(unlist(strsplit(p, split = " ")),
                              unlist(strsplit(a, split = " "))),
            predictions, df_sample$Tags, USE.NAMES = FALSE))




#500 tags train 41352 observations: 0.3360471 - up to 0.3369774 with c# regexp change 0.3375615 0.3381457 0.3404269 0.3411447 0.3425596
#0.342535 (?)  0.343811

##500 tags 10/25/2013 0.3439156



#1000 tags train 41352 observations: 0.3577539
#2500 tags train 41352 observations: 0.3876457
#5000 tags train 207610 observations 0.4035532


#1000 tags my_test sample 0.3538224
#2500 tags my_test sample 0.3772846
