import urllib2
import lxn

def getGoose(url, referer=None):
    """Fetch *url* and return the article object extracted by Goose.

    referer is accepted for interface compatibility but is currently
    unused (the proxy/opener setup it belonged to is disabled).
    """
    from goose import Goose

    extractor = Goose()
    return extractor.extract(url=url)

"""
def getHtml(url, referer=None):
    op = lxn.getUrlOpener("http://localhost:8087/", referer)
    html = op.open(url).read()
    return html
"""

############# Archive Main Page ##############
def reMainPage(stri):
    """Return every '<b><a href="...">...</b>' fragment found in *stri*.

    Used on the archive main page, where each month link is wrapped in
    such a bold anchor.
    """
    import re
    bold_anchor = re.compile(r"""<b><a\s+href="[^"]*">[\w\W]+?</b>""")
    return bold_anchor.findall(stri)

def extractUrl(stri):
    """Return the first 'archives.cfm?d=...' link target found in *stri*.

    Raises IndexError when *stri* contains no such link (unchanged from
    the original behavior).
    """
    import re
    # BUG FIX: the '.' is now escaped so it matches a literal dot; the
    # previous pattern ("archives.cfm") would also match e.g. 'archivesXcfm'.
    pat = re.compile(r'archives\.cfm\?d=[^"]+')
    return pat.findall(stri)[0]

def reMainPageExtractUrl(stri):
    """Return the href targets of all '<b><a href="...">' anchors in *stri*."""
    import re
    href_in_bold_anchor = re.compile(r'<b><a\s+href="([^"]*)">')
    return href_in_bold_anchor.findall(stri)

### ACM TechNews
def ACMTechNews():
    # NOTE(review): dead code -- this function is shadowed by the
    # ACMTechNews class defined later in this module, so it can never be
    # reached by name.  It also uses Python 2 print statements and calls
    # analyse* helpers that are not defined anywhere in this file.
    monthPages = analyseAchievesMainPage()[0]
    print monthPages
    # Everything below this bare 'return' is unreachable.
    return
    issuePages = [analyseAchieveMonthPage(i) for i in monthPages[0:1]]
    test =  analyseIssuePage(issuePages[0][0])
    for t in test:
        print t
    return issuePages, None

### On second thought, it seems wiser to build a class architecture for this.

# Proxy used for every HTTP fetch in this module -- presumably the local
# GoAgent endpoint (the disabled 127.0.0.1:8087 URLs above suggest so);
# confirm against the lxn module.
GLOBAL_PROXY = lxn._LOCAL_GOAGENT_

class ACMTechNews(object):
    """Crawler entry point: walks the ACM TechNews archive main page and
    fans out over the month pages it links to.

    Page content is cached through lxn.UrlContentCache, configured from
    an ini-style file (section "content_cache", keys "db_path" and
    "rs_path").
    """

    CACHE_CFG = "acm_technews.cfg"
    # [sic] attribute name kept (typo for "archive") for compatibility.
    ARCHIEVE_MAIN_PAGE = "http://technews.acm.org/archives.cfm"

    def __init__(self, cfg_file=CACHE_CFG, url=None):
        """Open the content cache described by *cfg_file*.

        url defaults to the archive main page.
        """
        import ConfigParser
        config_parser = ConfigParser.ConfigParser()
        # BUG FIX: the cfg_file argument was previously ignored -- the
        # class-level default (ACMTechNews.CACHE_CFG) was always read.
        config_parser.read(cfg_file)
        db_path = config_parser.get("content_cache", "db_path")
        rs_path = config_parser.get("content_cache", "rs_path")
        self.hc = lxn.UrlContentCache(db_path, rs_path)

        self.url = ACMTechNews.ARCHIEVE_MAIN_PAGE if url is None else url

    def __del__(self):
        # Best-effort cache cleanup when the instance is garbage-collected.
        self.hc.cleanup()

    def getHtml(self, url, proxy=None, referer=None):
        # NOTE(review): proxy/referer are accepted but ignored -- every
        # fetch goes through GLOBAL_PROXY.
        return self.hc.getHtml(url, GLOBAL_PROXY)

    def getMonths(self):
        """Return (and lazily cache) the list of absolute month-page URLs."""
        if not hasattr(self, "months"):
            self.months = self.getMonthPages()[0]
        return self.months

    ### Main Page => Month Pages : [Month Page]
    def getMonthPages(self):
        """Parse the archive main page; return (absolute_urls, relative_urls)."""
        html = self.getHtml(self.url, GLOBAL_PROXY)[0]
        issueUrls = [extractUrl(i) for i in reMainPage(html)]
        base = self.url[0:self.url.rindex("/") + 1]
        actualUrls = [base + i for i in issueUrls]
        return actualUrls, issueUrls

    def start(self):
        """Fetch every month page concurrently; return [(MonthPage, issue_urls)]."""
        months = self.getMonths()
        # multiprocessing.dummy gives a *thread* pool -- appropriate here
        # because the work is I/O bound (and lambdas are not picklable).
        from multiprocessing.dummy import Pool
        pool = Pool(100)
        return pool.map(lambda e: MonthPage.calMonthPage(e, self.hc.getHtml), months)

### 2 Month Page => Issues : [Issue]
class MonthPage(object):
    """One month page of the archive; yields the issue-page URLs it links to."""

    TEST_MONTH_PAGE = "http://technews.acm.org/archives.cfm?d=2014-04-apr"

    def __init__(self, month_url, getHtml=lxn.Crawler._getHtml):
        self.url = month_url
        self.getHtml = getHtml   # callable: url -> (html, ...)
        self.issues = None       # lazily filled by getIssues()
        self.getName()

    def getName(self):
        """Return (and lazily cache) this month's name as 'YYYYMM'.

        '...?d=2014-04-apr' -> '2014-04' -> '201404'.
        """
        if not hasattr(self, "name"):
            a = self.url.rindex("?d=") + 3
            b = self.url.rindex("-")
            self.name = self.url[a:b]
            # Keep digits only.  str.join is used instead of filter() so
            # the result is a str on both Python 2 and Python 3 (on
            # Python 3, filter() would return an iterator).
            self.name = "".join(c for c in self.name if c in "0123456789")
        return self.name

    def getIssues(self):
        """Return (and lazily cache) the absolute issue URLs listed on this page."""
        if self.issues is None:
            html = self.getHtml(self.url)[0]
            relative = reMainPageExtractUrl(html)
            base = self.url[0:self.url.rindex("/") + 1]
            self.issues = [base + i for i in relative]
        return self.issues

    @staticmethod
    def calMonthPage(month_page_url, getHtml):
        """Convenience helper: build a MonthPage and return (page, its issue urls)."""
        obj = MonthPage(month_page_url, getHtml)
        return (obj, obj.getIssues())

import lxl_en

### 3 Issue Page => We get items and summary, and detail Url
class IssuePage(object):
    """One issue page; yields the 'View Full Article' target URLs."""

    TEST_ISSUE_PAGE = "http://technews.acm.org/archives.cfm?fo=2014-04-apr/apr-21-2014.html"

    def __init__(self, issue_url, getHtml=lxn.Crawler._getHtml):
        self.url = issue_url
        self.getHtml = getHtml  # callable: url -> (html, ...)
        self.items = None       # lazily filled by getItems()
        self.getName()

    def getName(self):
        """Return (and lazily cache) the issue name as 'YYYYMMDD'.

        'apr-21-2014.html' -> ['apr', '21', '2014'] -> '20140421'.
        """
        if not hasattr(self, "name"):
            self.name = self.url[self.url.rindex("/") + 1:]
            self.name = self.name[:self.name.rindex(".")]
            info = self.name.split("-")
            # lxl_en.getMonth presumably maps a month abbreviation to a
            # 0-based index (the +1 suggests so) -- confirm in lxl_en.
            info = [info[2], "%02d" % (lxl_en.getMonth(info[0]) + 1), info[1]]
            self.name = "".join(info)
        return self.name

    def getItems(self):
        """Return (and lazily cache) the article URLs of this issue."""
        if self.items is None:
            html = self.getHtml(self.url)[0]
            self.items = self.analyseIssuePage(html)
        return self.items

    @staticmethod
    def analyseIssuePage(html):
        """Extract the href of every 'View Full Article' anchor in *html*."""
        import re
        reViewFullArticle = """<a\s*href="([^"]+)"\s*target="_blank"\s*>([\w\W]*?)</a>"""
        pat = re.compile(reViewFullArticle)
        res = pat.findall(html)
        return [i[0] for i in res if i[1].strip() == "View Full Article"]
    
if __name__ == "__main__":
    # Smoke-test driver: crawl the archive main page and all month pages.
    # Requires the acm_technews.cfg cache configuration on disk and
    # network access through GLOBAL_PROXY.
    aa = ACMTechNews()
    #mm = MonthPage(MonthPage.TEST_MONTH_PAGE)
    #ii = IssuePage(IssuePage.TEST_ISSUE_PAGE)
    fff = aa.start()

