#!/usr/bin/python
#coding: utf-8

import sys
sys.path.append("..")

import time
import lxml.html
import random
from Config.Config import BaseUrl, TimeSleep, USER_AGENTS
from Downloader.UrlDownloader import DownloaderPageHtml
from PageAnalysis import SoftPageAnalysis
from Logging.UrlLogging import log_Main
from SendEmail.SendEmail import SendMain

class SoftAnalysis(object):
    u"""Discover every software listing page URL from the site's pager, then crawl them."""

    def __init__(self):
        # Holds crawl results (not populated within this class as written).
        self.results = set()
        # All listing-page URLs to crawl; page 1 is the base URL itself.
        self.pageurl = set([BaseUrl, ])

    def AllPage(self):
        u"""Determine the total page count and build the URL for every listing page."""
        # Fetch the first listing page with a randomly chosen User-Agent,
        # retrying up to 3 times (the Downloader(3) argument).
        html = DownloaderPageHtml(BaseUrl, random.choice(USER_AGENTS)).Downloader(3)
        tree = lxml.html.fromstring(html).cssselect("div.discuss_fangye > ul > li > a")
        # The second-to-last <a> of the pager holds the last page's number and
        # link — presumably the very last <a> is a "next page" control; verify
        # against the live markup if the pager layout changes.
        self.page = tree[-2].text_content()
        self.href = tree[-2].get("href")
        log_Main(u"总页数：'%s'" % self.page, "info")
        log_Main(u"最后一页的链接 '%s'" % self.href, "info")
        # Build the URL for each page 2..N by swapping the page number embedded
        # at the front of the last "_"-separated path segment. The split of the
        # invariant href is hoisted out of the loop.
        parts = self.href.split("/")
        segment = parts[-1].split("_")
        for n in range(2, int(self.page) + 1):
            segment[0] = str(n)
            parts[-1] = "_".join(segment)
            self.pageurl.add("/".join(parts))
        log_Main(u"要爬取的所有页面获取成功, 准备爬取", "info")

    def Crawler(self):
        u"""Hand every collected page URL to the multi-process page crawler."""
        s = SoftPageAnalysis(self.pageurl)
        s.mp_crawler()

def UrlManagerMain():
    u"""Entry point for step two: collect all apk page links, then send an email notice."""
    start = time.time()
    log_Main(u"第二步开始的当前时间:%s" % time.ctime(start), "info")
    analyzer = SoftAnalysis()
    analyzer.AllPage()
    analyzer.Crawler()
    end = time.time()
    log_Main(u"爬取结束的当前时间:%s" % time.ctime(end), "info")
    log_Main(u"第二步中爬取所有应用的链接耗时:%f" % (end - start), "info")
    # Notify by email once the whole crawl has finished.
    SendMain("""所有apk的网页链接爬取完成！！！！""")

# Run step two directly when this module is executed as a script.
if __name__ == "__main__":
    UrlManagerMain()