#Version 1.0 By Wei Chen
import sys,os.path, time
from nltk import pos_tag, word_tokenize
from BeautifulSoup import BeautifulSoup
from BeautifulSoup import BeautifulStoneSoup
import re
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer

#This program reads an HTML resume file and parses out contact, experience, position, skill and education information.
class resume_parser():
    """Parse contact, experience, position, skill and education
    information out of a single HTML resume file.

    Token counts are accumulated in class-level dictionaries, so the
    totals aggregate across every resume parsed during one run.
    """

    # Class-level counters, shared by all instances.
    stem_verb = {}   # verb stem  -> occurrence count
    lemma_verb = {}  # verb lemma -> occurrence count
    stem_n = {}      # noun stem  -> occurrence count
    lemma_n = {}     # noun lemma -> occurrence count
    v = {}
    n = {}           # every lower-cased token -> occurrence count
    positions = {}   # position-title token -> occurrence count
    skills = {}      # skill phrase -> occurrence count

    def __init__(self, file):
        """Read the HTML resume at *file* and build the soup tree."""
        # 'with' guarantees the handle is closed even if read() fails
        # (the original never closed it).
        with open(file, 'r') as fr:
            html = fr.read()
        self.soup = BeautifulSoup(html)

    def stem(self, w):
        """Return the fixed point of repeatedly Porter-stemming *w*.

        Bug fix: the original loop always re-stemmed the initial word
        instead of the current candidate, so it never iterated more
        than one step toward the fixed point.
        """
        stemmer = PorterStemmer()  # build once, not on every pass
        stem = w
        while True:
            reduced = stemmer.stem_word(stem)
            if reduced == stem:
                return stem
            stem = reduced

    def lemma(self, w):
        """Return the fixed point of repeatedly lemmatizing *w*.

        Same fixed-point bug fix as stem(): lemmatize the current
        candidate, not the original word.
        """
        lemmatizer = WordNetLemmatizer()
        lemma = w
        while True:
            reduced = lemmatizer.lemmatize(lemma)
            if reduced == lemma:
                return lemma
            lemma = reduced

    def parse_exp(self):
        """Count verb/noun stems and lemmas found in the experience
        section, plus a global count of every token seen."""
        body = self.soup.find("body")
        # Resume sections are <div> elements whose id names the section.
        contact = body.find('div', attrs={'id': re.compile("contact")})
        summary = body.find('div', attrs={'id': re.compile("summary")})
        exp = body.find('div', attrs={'id': re.compile("experience")})
        positions = exp.findAll('div', attrs={'id': re.compile("position")})
        exp_divs = exp.findAll('div', attrs={'id': re.compile("experience")})
        verb_tags = ('VBN', 'VBD', 'VB', 'VBP', 'VBZ')
        noun_tags = ('NN', 'NNS', 'NNP', 'NNPS')
        for div in exp_divs:
            text = div.contents[0].strip()
            for sentence in text.split('.'):
                sentence = sentence.strip()
                if not sentence:
                    continue
                # Prepend "I " so the tagger sees a subject and tags the
                # leading word of a bullet point as a verb ("I managed...").
                tokens, tags = zip(*pos_tag(word_tokenize('I ' + sentence)))
                for token, tag in zip(tokens, tags):
                    word = token.lower()
                    stem = self.stem(word)
                    lemma = self.lemma(word)
                    if tag in verb_tags:
                        resume_parser.stem_verb[stem] = resume_parser.stem_verb.get(stem, 0) + 1
                        resume_parser.lemma_verb[lemma] = resume_parser.lemma_verb.get(lemma, 0) + 1
                    elif tag in noun_tags:
                        resume_parser.stem_n[stem] = resume_parser.stem_n.get(stem, 0) + 1
                        resume_parser.lemma_n[lemma] = resume_parser.lemma_n.get(lemma, 0) + 1
                    # Every token (any POS) also feeds the global count.
                    resume_parser.n[word] = resume_parser.n.get(word, 0) + 1
        # Skills are newline/tab separated text inside the skill div.
        skill = body.find('div', attrs={'id': re.compile("skill")})
        skill_content = ''.join(skill.findAll(text=True)).strip()
        skill_lst = []
        for line in skill_content.split('\n'):
            for item in line.split('\t'):
                item = item.strip()
                if item:
                    skill_lst.append(item)
        education = body.find('div', attrs={'id': re.compile("education")})

    def parse_posi(self):
        """Count every token of every position title in the resume."""
        body = self.soup.find("body")
        posi_divs = body.findAll('div', attrs={'id': re.compile("position")})
        for div in posi_divs:
            for token in word_tokenize(div.contents[0].strip()):
                key = token.lower()
                resume_parser.positions[key] = resume_parser.positions.get(key, 0) + 1

    def parse_skill(self):
        """Count tab/comma separated skill phrases from the skill div."""
        body = self.soup.find("body")
        s = '\t'.join(body.find('div', attrs={'id': re.compile("skill")}).findAll(text=True))
        # Normalize separators: str.replace replaces ALL commas in one
        # pass, so the original while-loop around it was redundant.
        s = s.replace(',', '\t')
        # Collapsing tab runs does need a loop: one pass halves a run.
        while s.find('\t\t') != -1:
            s = s.replace('\t\t', '\t')
        print(s)
        for phrase in s.split('\t'):
            key = phrase.lower()
            resume_parser.skills[key] = resume_parser.skills.get(key, 0) + 1

    def write(self):
        """Dump the accumulated skill counts to skills.txt (tab separated)."""
        # 'with' closes and flushes the file; the original leaked the handle.
        with open('skills.txt', 'w') as fw:
            fw.write('skill\tcount\n')
            for skill, count in resume_parser.skills.items():
                fw.write(skill + '\t' + str(count) + '\n')

    def __del__(self):
        """Nothing to release explicitly; file handles close in __init__/write."""

    def clear_screen(self, numlines=100):
        """Clear the console; *numlines* is only a fall-back blank-line count."""
        import os
        if os.name == "posix":
            # Unix/Linux/MacOS/BSD/etc
            os.system('clear')
        elif os.name in ("nt", "dos", "ce"):
            # DOS/Windows
            os.system('CLS')
        else:
            # Unknown platform: push the old output off-screen.
            print('\n' * numlines)
     
if __name__ == '__main__':
    # Python 2 encoding hack: setdefaultencoding is deleted by site.py
    # after startup, so reload(sys) must restore it first.
    reload(sys)
    sys.setdefaultencoding("utf-8")
    test_files = ['Alan_Wei.html', 'Alex_Wei.html', 'Anthony_JonM.html',
                  'Augustine_JonM.html', 'Balachandar_Max.html',
                  'William_Max.html']
    parser = None
    for path in test_files:  # renamed: 'file' shadowed the builtin
        parser = resume_parser(path)
        parser.parse_skill()
    # Skill counts are class-level, so writing through the last parser
    # flushes the aggregate. Guard against an empty file list, which
    # made the original raise NameError on 'parser'.
    if parser is not None:
        parser.write()