'''

Created on Nov 19, 2012


@author: wilson

'''


from urllib import urlopen
from urlparse import urljoin
from bs4 import BeautifulSoup
from datetime import datetime
import re
import os
import platform
import socket

class VOACrawler:
    
    def __init__(self):
        socket.setdefaulttimeout(30)  # set default timeout for connection
        self.baseUrl = 'http://www.51voa.com/'
        self.textFileSuffix = '.txt'
        self.maxConnectionRetryTimes = 3
        platformInfo = platform.platform()
        # print platformInfo
        if platformInfo.find('Windows') > -1:
            self.baseTargetDir = 'f:\\voa'
        else:
            self.baseTargetDir = '/Users/wilson/Documents/voa'
            
    def getUrlContent(self, url):
        retryTimes = 0
        urlContent=''
        while True:
            try:
                if(retryTimes <= self.maxConnectionRetryTimes):
                    urlContent=urlopen(url).read()
                break
            except IOError:
                retryTimes = retryTimes + 1
                errMsg = 'connection failed. retry {0} times'.format(retryTimes)
                print errMsg
        return urlContent
    
    def decomposeTags(self, tags):
        # print len(tags)
        for tag in tags:
            try:
                if tag.name == 'div':
                    # print tag
                    str = tag.get_text()
                    if str != None and len(str) > 200:  # do not remove a div with text content
                        continue
            
                tag.decompose()
            except AttributeError:
                pass
            
    def getFormattedContent(self, content):
        # content = unicode(content)
        # return content.replace("<br>", "\n").replace("</br>", "\n").replace("(\n){3,}", "")
        rawText = content.get_text()  # get text however without formatting
        return rawText.encode('ascii', 'ignore')  # remove unicode char
    
    def getFormattedTitle(self, link):
        rawDate = link.next_sibling
        start = rawDate.find('(') + 1
        end = rawDate.find(')')
        date = rawDate[start:end]
        d = datetime.strptime(date, "%y-%m-%d")  
        title = d.strftime("%Y%m%d") + '_' + link.string
        title = title.encode('ascii', 'ignore')  # remove unicode char
        title = title.replace('/', ' ').replace('\\', '').replace('?', ' ') \
        .replace('<', ' ').replace('>', ' ').replace('*', ' ').replace(':', ' ') \
        .replace('|', ' ').replace('"', ' ').replace('\r', ' ').replace('\n', ' ').replace('\t', ' ')
        title = title.lstrip().rstrip()  # trim the left and right space of the string
        return title
    
    def getClassifiedDir(self, navigationLink):
        name = re.match(r'(\w| )*', navigationLink.string).group(0)
        fullPath = self.baseTargetDir + os.sep + name
        if not os.path.exists(fullPath):
            os.makedirs(fullPath)  # recursively make the intermediate path, similar to (mkdir -p) cmd in linux
        return fullPath    
    
    def makeTextFile(self, dir, title, content):
        fileName = dir + os.sep + title + self.textFileSuffix
        with open(fileName, 'w') as f:
            f.write(content)
        print title
        
    def parseArticle(self, articleFullPath):
        articleSoup = BeautifulSoup(self.getUrlContent(articleFullPath))
        content = articleSoup.find('div', id='content')
        # self.decomposeTags(content.find_all('span'))
        self.decomposeTags(content.find_all('div'))
        self.decomposeTags(content.find_all('a'))
        self.decomposeTags(content.find_all('table'))
        content = self.getFormattedContent(content)
        # print content
        return content
        
    def execute(self): 
        homePage = self.getUrlContent(self.baseUrl)
        homeSoup = BeautifulSoup(homePage)
        navigationLinks = homeSoup.find('div', id='leftNav').find_all('ul')[1].find_all('a')
        for navigationLink in navigationLinks:
            dir = self.getClassifiedDir(navigationLink)
            fullPath = urljoin(self.baseUrl, navigationLink['href'])
            specialEnglishSoup = BeautifulSoup(self.getUrlContent(fullPath))
            pageLinks = specialEnglishSoup.select('span#blist ul > a')
            for pageLink in pageLinks:
                fullPageLink = urljoin(self.baseUrl, pageLink['href'])
                pageSoup = BeautifulSoup(self.getUrlContent(fullPageLink))
                articleLinks = pageSoup.select('span#blist li a')
                for link in articleLinks:
                    title = self.getFormattedTitle(link)
                    articleFullPath = urljoin(self.baseUrl, link['href'])
                    content = self.parseArticle(articleFullPath)
                    self.makeTextFile(dir, title, content)
                    

    
        print '-------------------------------------------------------------------'


if __name__ == '__main__':
    # Run a full crawl when executed as a script.
    voaCrawler = VOACrawler()
    voaCrawler.execute()
    # Manual test fixtures: single-article parses used during development.
    # voaCrawler.parseArticle('http://www.51voa.com/VOA_Special_English/Is-NASAs-James-Webb-Space-Telescope-a-Time-Machine-41898.html')
    # voaCrawler.parseArticle('http://www.51voa.com/VOA_Special_English/election-obama-romney-immigration-economy-race-campaign-47418.html')
    # voaCrawler.parseArticle('http://www.51voa.com/VOA_Special_English/Development_Report_27210.html')
    # voaCrawler.parseArticle('http://www.51voa.com/VOA_Special_English/Development_Report_27126.html')
    # soup=BeautifulSoup('<div id=articleBody><p>sdfasdfasdf</p><p>56756756756</p></div>')
    # print soup.find('div').get_text()
else:
    # NOTE(review): this prints on every import of the module — noisy side
    # effect; consider removing the else branch entirely.
    print 'not __main__'


