scanResumes <- function(matchInfoFile = 'C:/SI/dataFiles/matchInfo.txt',
                        rootInputDir = 'c:/SI/dataFiles/resumes/',
                        rootOutputDir = 'C:/SI/dataFiles/controlFiles/',
                        rootAnalysisDir = 'C:/SI/dataFiles/analysis/') {
  # Driver for the resume-analysis pipeline: parse raw resume HTML files,
  # count product/company word matches, then summarize the results.
  #
  # Args (all default to the previously hard-coded locations, so existing
  # zero-argument calls to scanResumes() behave exactly as before):
  #   matchInfoFile   - pipe-delimited control file, one row per algoId
  #   rootInputDir    - root directory of raw resume files (one subdir per algoId)
  #   rootOutputDir   - root directory for parsed/matched CSV output
  #   rootAnalysisDir - root directory for summary-statistics reports
  #
  # Returns: invisibly NULL; called for its file-writing side effects.

  # library() errors immediately if a package is missing, unlike require(),
  # which merely warns and returns FALSE, letting the run fail later.
  library(XML)
  library(stringr)
  library(plyr)

  parseResumes(matchInfoFile, rootInputDir, rootOutputDir)
  matchWords(matchInfoFile, rootOutputDir)

  summarizeResumeResults(matchInfoFile, rootOutputDir, rootAnalysisDir)

  invisible(NULL)
}

summarizeResumeResults <- function(matchInfoFile,rootOutputDir,rootAnalysisDir){
  # Write one plain-text summary report per algoId with three sections:
  # job-description matches, skills-section matches, and year-over-year
  # adoption trends. Reads the *_matchedData_*.csv files produced by
  # matchWords(); writes rootAnalysisDir/<algoId>/<algoId>_summaryStats.csv.
  #
  # Args:
  #   matchInfoFile   - pipe-delimited control file; must supply algoId and
  #                     releaseDate columns
  #   rootOutputDir   - directory holding the matched-data CSVs
  #   rootAnalysisDir - directory that receives the reports

  matchInfo <- read.table(matchInfoFile,sep='|',quote='',header=TRUE,stringsAsFactors=FALSE)

  for(m in seq_len(nrow(matchInfo))) {

    algoId <- matchInfo$algoId[m]

    inputFile_jobs <- str_c(rootOutputDir,algoId,'/',algoId,'_matchedData_jobs.csv')
    inputFile_skills <- str_c(rootOutputDir,algoId,'/',algoId,'_matchedData_skills.csv')

    outputDir <- str_c(rootAnalysisDir,algoId)
    if(!file.exists(outputDir)){
      dir.create(outputDir)
    }
    outputFile <- str_c(outputDir,'/',algoId,'_summaryStats.csv')

    # Everything cat()/print()ed inside this block is captured into the report.
    capture.output(file=outputFile,{
      cat('---------------------------------------------------------------------\n')
      cat(str_c('RESUME ANALYSIS: JOB DESCRIPTION DATA FROM ',algoId,' SEARCHES\n'))
      cat('---------------------------------------------------------------------\n')

      jobsData <- read.csv(inputFile_jobs,stringsAsFactors=FALSE)

      N <- nrow(jobsData)
      N_r <- length(unique(jobsData$resumeId))
      sameComp <- sum(jobsData$compWords_in_compName>0)

      cat(str_c('unique resumes processed: \t',N_r,'\n'))
      cat(str_c('job descriptions processed: \t',N,'\n\n'))
      cat(str_c('occurances of product being owned by the hiring company: \t', sameComp,'\n'))

      # Drop jobs at companies whose name matched a company word (the
      # product's own maker) -- such mentions are not adoption signal.
      jobsData2 <- jobsData[jobsData$compWords_in_compName==0,]
      N <- nrow(jobsData2)
      N_r <- length(unique(jobsData2$resumeId))
      cat('eliminating those rows...\n')
      cat(str_c('total number of rows now at: ',N,'\n\n'))

      prodWordHits <- sum(jobsData2$prodWords_in_jobDesc>0)
      cat(str_c('job descriptions in which ', algoId,' came up at least once: \t',prodWordHits, '\n\n'))

      # Per-resume flag: total product-word hits in job descriptions + titles.
      # ddply names the scalar result column 'V1'.
      inJobDescFlag <- ddply(jobsData2,'resumeId',function(df){
        inJobDescFlag <- sum(df$prodWords_in_jobDesc) + sum(df$prodWords_in_jobTitle)
      })
      N_r <- sum(inJobDescFlag$V1>0)
      cat(str_c('number of resumes in which ', algoId,' comes up at least once in the job description or job title: \t',N_r, '\n\n'))


      cat('job descriptions with 0 mentions of ', algoId,', 1 mention of ', algoId, ', etc...\n')
      print(table(jobsData2$prodWords_in_jobDesc))

      jobsData2 <- within(jobsData2,{
        startDate <- as.Date(startDate)
        startDate_y <- as.numeric(format(startDate,'%Y'))
        # Treat ongoing jobs as extending far into the future.
        endDate[endDate=='Present'] <- '2050-12-31'
        endDate <- as.Date(endDate)
        # BUG FIX: originally format(startDate,'%Y'), which made endDate_y a
        # copy of startDate_y and broke both the release-date filter below
        # and the adoption-trend year ranges.
        endDate_y <- as.numeric(format(endDate,'%Y'))
      })

      jobsData3 <- jobsData2[jobsData2$prodWords_in_jobDesc>0,]
      N_r <-  length(unique(jobsData3$resumeId))
      N_c <-  length(unique(jobsData3$compName))

      cat('\n\nacross job descriptions with at least one product mention...\n')
      cat(str_c('there are ',N_r,' unique resumes\n'))
      cat(str_c('and ',N_c,' unique companies\n\n'))

      cat(str_c('Top 25 companies with mentions of ', algoId, ':\n'))
      prodHits <- as.data.frame(table(jobsData3$compName)); names(prodHits) <- c('compName','freq')
      prodHits <- prodHits[order(prodHits$freq,decreasing=TRUE),]; row.names(prodHits) <-NULL
      print(prodHits[1:25,])

      # SKILLS-COMPANY ANALYSIS
      cat('\n---------------------------------------------------------------------\n')
      cat(str_c('RESUME ANALYSIS: SKILLS DATA FROM ',algoId,' SEARCHES\n'))
      cat('---------------------------------------------------------------------\n')

      skillsData <- read.csv(inputFile_skills,stringsAsFactors=FALSE)

      N_r <- length(unique(skillsData$resumeId))
      cat(str_c('unique resumes processed: \t',N_r,'\n\n'))

      allData <- merge(jobsData2,skillsData,by=c('algoId','resumeId'))
      cat('after merging with jobs data (and removing companies that make product in quesiton)...\n')
      N_r <- length(unique(allData$resumeId))
      N_c <- length(unique(allData$compName))
      cat(str_c('unique resumes left: \t',N_r,'\n'))
      cat(str_c('unique companies left: \t',N_c,'\n\n'))

      # Remove resumes that already mention the product in a jobDesc or
      # jobTitle -- returning NULL drops the whole resume's group.
      allData2 <- ddply(allData,'resumeId',function(df){
        inJobDescFlag <- sum(df$prodWords_in_jobDesc) + sum(df$prodWords_in_jobTitle)
        if(inJobDescFlag>0) return(NULL) else df
      })

      cat('after removing all resumes that already mention the skill/prod in at least one job description...\n')
      N_r <- length(unique(allData2$resumeId))
      N_c <- length(unique(allData2$compName))
      cat(str_c('unique resumes left: \t',N_r,'\n'))
      cat(str_c('unique companies left: \t',N_c,'\n\n'))

      # Remove all jobs that ended before the product's releaseDate;
      # jobs with an unknown end date are kept.
      allData3 <- allData2[matchInfo$releaseDate[m] <= allData2$endDate_y | is.na(allData2$endDate),]

      cat('after removing all jobs that exist before product was released...\n')
      N_old <- nrow(allData2); N_new <- nrow(allData3)
      cat(str_c('total number of jobs before date filter: \t',N_old,'\n'))
      cat(str_c('total number of jobs after date filter: \t',N_new,'\n\n'))

      # Keep only resumes mentioning the product in the skills section.
      allData4 <- allData3[allData3$prodWords_in_skillsDesc>0,]

      cat('after removing all resumes that don\'t have any mention of the product in the skills section...\n')
      N_r <- length(unique(allData4$resumeId))
      N_c <- length(unique(allData4$compName))
      cat(str_c('unique resumes left: \t',N_r,'\n'))
      cat(str_c('unique companies left: \t',N_c,'\n\n'))

      # aggregate company occurances
      cat(str_c('Top 25 companies (loosely associated) with mentions of ', algoId, ' in the skills section:\n'))
      prodHits <- as.data.frame(table(allData4$compName)); names(prodHits) <- c('compName','freq')
      prodHits <- prodHits[order(prodHits$freq,decreasing=TRUE),]; row.names(prodHits) <-NULL
      print(prodHits[1:25,])

      # ADOPTION TRENDS
      cat('\n---------------------------------------------------------------------\n')
      cat(str_c('RESUME ANALYSIS: ADOPTION DATA FROM ',algoId,' SEARCHES\n'))
      cat('---------------------------------------------------------------------\n')

      cat(str_c('\nAdoption Trends for ', algoId, ':\n'))
      cat(str_c('\t the \'fullResumes\' column observes year-over-year occurances of ', algoId, ' across the full resume.\n',
                '\t the \'matchedJobs\' column observes year-over-year occurances of ', algoId,' only within jobs that had matches.\n\n'))
      # For each year, count jobs whose [startDate_y, endDate_y] span covers it.
      adoptionData <- NULL
      for(y in 1970:2012){
        thisYrData <- data.frame(year=y,
                                 fullResumes=sum(y >= jobsData2$startDate_y & y <= jobsData2$endDate_y,na.rm=TRUE),
                                 matchedJobs=sum(y >= jobsData3$startDate_y & y <= jobsData3$endDate_y,na.rm=TRUE))
        adoptionData <- rbind(adoptionData,thisYrData)
      }

      # Cumulative percentage columns, formatted as strings like '12.34%'.
      adoptionData <- within(adoptionData,{
        matchedJobs_cmlPer <- str_c(round(100*cumsum(matchedJobs)/sum(matchedJobs),2),'%')
        fullResumes_cmlPer <- str_c(round(100*cumsum(fullResumes)/sum(fullResumes),2),'%')
      })
      print(adoptionData)

   })

    cat(str_c('\nProcess complete. Summary data output to: ',outputDir,'\n\n'))
  }

}


matchWords <- function(matchInfoFile,rootOutputDir){
  # For each algoId, count occurrences of the product words and company words
  # inside every parsed job record (jobDesc, jobTitle, compName) and every
  # parsed skills record, then write the counts to
  # <algoId>_matchedData_jobs.csv and <algoId>_matchedData_skills.csv.
  #
  # Args:
  #   matchInfoFile - pipe-delimited control file; must supply algoId,
  #                   prodWordCommand and compWordCommand columns
  #   rootOutputDir - directory holding the parsed-resume CSVs and receiving
  #                   the matched-data CSVs

  # Total number of (regex) matches of all `words` within `text`.
  countMatches <- function(text,words){
    total <- 0
    for(w in words){
      n <- nrow(str_match_all(text,w)[[1]])
      if(!is.null(n)){
        total <- total + n
      }
    }
    total
  }

  matchInfo <- read.table(matchInfoFile,sep='|',quote='',header=TRUE,stringsAsFactors=FALSE)

  for(m in seq_len(nrow(matchInfo))) {

    algoId <- matchInfo$algoId[m]
    resumeDataLoc_jobs <- str_c(rootOutputDir,algoId,'/',algoId,'_parsedResumeData_jobs.csv')
    resumeDataLoc_skills <- str_c(rootOutputDir,algoId,'/',algoId,'_parsedResumeData_skills.csv')
    outputFile_jobs <- str_c(rootOutputDir,algoId,'/',algoId,'_matchedData_jobs.csv')
    outputFile_skills<- str_c(rootOutputDir,algoId,'/',algoId,'_matchedData_skills.csv')

    cat(str_c('\n',algoId,' | reading processed resume-job files from: ',resumeDataLoc_jobs,'\n'))
    cat(str_c(algoId,' | looking for matches... \n\n'))

    resumeData_jobs <- read.csv(resumeDataLoc_jobs,stringsAsFactors=FALSE)

    # NOTE(review): eval(parse(...)) executes arbitrary code from the control
    # file -- acceptable only because matchInfo is a trusted local file.
    prodWords <- tolower(eval(parse(text=matchInfo$prodWordCommand[m])))
    compWords <- tolower(eval(parse(text=matchInfo$compWordCommand[m])))

    # Accumulate rows in a preallocated list instead of rbind-ing each
    # iteration (avoids O(n^2) copying).
    rows <- vector('list',nrow(resumeData_jobs))
    progressBar <- txtProgressBar(max=nrow(resumeData_jobs),style=3)
    for(r in seq_len(nrow(resumeData_jobs))){
      this <- resumeData_jobs[r,]

      prodWords_in_jobDesc <- countMatches(this$jobDesc,prodWords)
      # BUG FIX: the original job-title loop added its matches onto
      # prodWords_in_jobDesc, leaving prodWords_in_jobTitle permanently 0.
      prodWords_in_jobTitle <- countMatches(this$jobTitle,prodWords)
      compWords_in_compName <- countMatches(this$compName,compWords)

      rows[[r]] <- data.frame(this[,c('algoId','resumeId','compName','jobTitle','startDate','endDate')],
                              prodWords_in_jobDesc,prodWords_in_jobTitle,compWords_in_compName)

      setTxtProgressBar(progressBar,r)
    }
    output <- do.call(rbind,rows)

    write.csv(output,file=outputFile_jobs,row.names=FALSE)

    cat(str_c('\n\n',algoId,' | job match process complete. data sent to: ',outputFile_jobs,'\n\n'))
    # BUG FIX: removed a leftover browser() debugging call that halted every
    # interactive run here.
    # BUG FIX: the message below referenced resumeDataLoc_jobs although the
    # skills file is being read.
    cat(str_c('\n',algoId,' | reading processed resume-skill files from: ',resumeDataLoc_skills,'\n'))
    cat(str_c(algoId,' | looking for matches... \n\n'))

    resumeData_skills <- read.csv(resumeDataLoc_skills,stringsAsFactors=FALSE)

    rows <- vector('list',nrow(resumeData_skills))
    progressBar <- txtProgressBar(max=nrow(resumeData_skills),style=3)
    for(r in seq_len(nrow(resumeData_skills))){
      this <- resumeData_skills[r,]
      prodWords_in_skillsDesc <- countMatches(this$skills,prodWords)
      rows[[r]] <- data.frame(this[,c('algoId','resumeId')],prodWords_in_skillsDesc)
      setTxtProgressBar(progressBar,r)
    }
    output <- do.call(rbind,rows)

    write.csv(output,file=outputFile_skills,row.names=FALSE)
    cat(str_c('\n\n',algoId,' | skills match process complete. data sent to: ',outputFile_skills,'\n\n'))

  }
}

parseResumes <- function(matchInfoFile, rootInputDir, rootOutputDir){
  # For each algoId, parse every raw resume HTML file in
  # rootInputDir/<algoId>/ and extract per-job records (company, title,
  # location, dates, description) plus the skills section, writing them to
  # <algoId>_parsedResumeData_jobs.csv and <algoId>_parsedResumeData_skills.csv.
  #
  # Args:
  #   matchInfoFile - pipe-delimited control file, one row per algoId
  #   rootInputDir  - root of raw resume files (one subdir per algoId)
  #   rootOutputDir - directory receiving the parsed CSVs

  matchInfo <- read.table(matchInfoFile,sep='|',quote='',header=TRUE,stringsAsFactors=FALSE)

  for(p in seq_len(nrow(matchInfo))) {

    algoId <- matchInfo$algoId[p]

    resumeInputDir <- str_c(rootInputDir,algoId,'/')
    outputFile_jobs <- str_c(rootOutputDir,algoId,'/',algoId,'_parsedResumeData_jobs.csv')
    outputFile_skills <- str_c(rootOutputDir,algoId,'/',algoId,'_parsedResumeData_skills.csv')

    # list of files in directory
    fileList <- dir(resumeInputDir)

    cat(str_c('\n',algoId,' | reading raw resume files from: ',resumeInputDir,'\n'))
    cat(str_c(algoId,' | extracting relevant data items... \n\n'))

    progressBar <- txtProgressBar(max=length(fileList),style=3)
    allJobs <- NULL
    allSkills <- NULL
    for(f in seq_along(fileList)){

      # read the whole file into one string
      fullFileName <- str_c(resumeInputDir,fileList[f])
      fileInfo <- readChar(fullFileName, file.info(fullFileName)$size)

      # Resume id = file name minus its .txt extension.
      # BUG FIX: anchored regex -- the original pattern '.txt' treated '.'
      # as a wildcard and matched anywhere in the name. Also moved this
      # assignment out of the job-chunk loop: it was previously only set
      # inside that loop, so resumes with no job chunks reused the previous
      # file's id (or errored on the first file).
      resumeId <- str_replace(fileList[f],'\\.txt$','')

      thisResume <- NULL

      # one "data_display" div per job entry
      htmlObj <- htmlParse(fileInfo)
      jobChunks <- getNodeSet(htmlObj,'//div[@class=\"data_display\"]')
      for(jobChunk in jobChunks){
        htmlChunk <- htmlParse(toString.XMLNode(jobChunk))

        compName <- unlist(xpathApply(htmlChunk,"//div[@class=\"work_company\"]/span[@class=\"bold\"]",xmlValue))
        if(length(compName)>0){
          compName <- str_replace_all(compName,'-',' ') # replace dashes with space
          compName <- str_replace_all(compName,'[[:punct:]]','') # remove other punctuation
          # tolower can fail on invalid multibyte text; skip the chunk then.
          # BUG FIX: the original sentinel check `compName == -1` errors in
          # R >= 4.2 when the xpath yields more than one node.
          compName <- tryCatch(tolower(compName),error=function(e) NULL)
          if(is.null(compName)) next
        } else next

        jobTitle <- unlist(xpathApply(htmlChunk,"//div/p[@class=\"work_title title\"]",xmlValue))
        if(length(jobTitle)>0){
          jobTitle <- str_replace_all(jobTitle,'-',' ') # replace dashes with space
          jobTitle <- str_replace_all(jobTitle,'[[:punct:]]',' ') # remove other punctuation
          jobTitle <- str_replace_all(jobTitle,'\\d+','') # remove numbers
          jobTitle <- tryCatch(tolower(jobTitle),error=function(e) NULL)
          if(is.null(jobTitle)) next
        } else jobTitle <- NA

        # company location
        compLoc <- unlist(xpathApply(htmlChunk,"//div/div[@class=\"inline-block\"]/span",xmlValue))
        if(length(compLoc)==0) compLoc <- NA

        # parse job descriptions and clean up
        jobDesc <- unlist(xpathApply(htmlChunk,"//div/p[@class=\"work_description\"]",xmlValue))
        if(length(jobDesc)>0){
          jobDesc <- str_replace_all(jobDesc,'-',' ')
          jobDesc <- str_replace_all(jobDesc,'[[:punct:]]',' ') # remove punctuation
          jobDesc <- str_replace_all(jobDesc,'\\d+','') # remove numbers
          jobDesc <- tolower(jobDesc)
        } else next

        # parse "<Month Year> to <Month Year|Present>" into start/end dates
        dateRange <- unlist(xpathApply(htmlChunk,"//div/p[@class=\"work_dates\"]",xmlValue))
        if(length(dateRange)>0){
          breakRange <- unlist(str_split(dateRange,' to '))
          # day-of-month fixed to 1 so as.Date can parse '%B %Y %d'
          breakRange <- ifelse(breakRange=='Present','Present',as.character(as.Date(str_c(breakRange,' 1'),'%B %Y %d')))
          startDate <- breakRange[1];  endDate <- breakRange[2]
        } else {startDate <- NA; endDate <- NA}

      thisResume <- rbind(thisResume,data.frame(algoId,resumeId,compName,compLoc,jobTitle,startDate,endDate,jobDesc,stringsAsFactors=FALSE))

      }

      skills <- unlist(xpathApply(htmlObj,'//div[@class=\"section-item skills-content\"]',xmlValue))
      if(length(skills)>0){
        skills <- str_replace_all(skills,'-',' ')
        skills <- str_replace_all(skills,'[[:punct:]]',' ') # remove punctuation
        skills <- str_replace_all(skills,'\\d+','') # remove numbers
        # BUG FIX: a tolower failure here used to `next` out of the file loop,
        # silently discarding the resume's already-parsed job rows; record the
        # skills as NA instead, matching the empty-skills branch below.
        skills <- tryCatch(tolower(skills),error=function(e) NA_character_)
      } else skills <- NA

      thisSkills <- data.frame(algoId,resumeId,skills,stringsAsFactors=FALSE)

      allSkills <- rbind(allSkills,thisSkills)
      allJobs <- rbind(allJobs,thisResume)

      # update progress bar
      setTxtProgressBar(progressBar,f)
    }

    # output data.frame to file
    write.csv(allJobs,file=outputFile_jobs,row.names=FALSE)
    write.csv(allSkills,file=outputFile_skills,row.names=FALSE)

    cat(str_c('\n\n',algoId,' | extraction process complete. data sent to: ',outputFile_jobs,' and ',outputFile_skills,'\n\n'))
  }

}