#!/usr/bin/python
# coding: utf-8

"""
由于代码中使用了递归，所以没有想好这个多进程和多线程在递归的基础上应该怎么加，所以这里并没有加，只是个单线程的爬虫
"""

import sys
sys.path.append("..")

import time
import lxml.html
import random
from MySql_InterFace.AppMySql import ApplicationMySql
from MySql_InterFace.GetProxy import GetProxies
from Logging.AppLogging import log_Main
from Downloader.NewAppDownloader import SoftInfoDownloader
from Config.Config import USER_AGENTS, TimeSleep, CONTENT, MAX_NUM
from SendEmail.SendEmail import SendMain

class Application(object):
    """Single-threaded crawler: downloads each app page listed in MySQL,
    extracts the summary, detail and permission fields with lxml CSS
    selectors, and writes the results back through ApplicationMySql.

    On a parse failure, AppAnalysis retries the page (recursively) via a
    proxy and then via the local IP before giving up on that URL.
    """

    def __init__(self):
        # MySQL gateway: source URL list plus the two destination tables.
        self.app = ApplicationMySql()
        self.urls = self.app.GetUrl()
        self.app.CreateSoftSurvey()
        self.app.CreateSoftInfo()
        # URLs already crawled in a previous run; AppUrl skips these.
        self.app_urls = self.app.SelectUrl()
        # print self.app_urls
        # Proxy pool and the shared page downloader.
        self.proxies = GetProxies().getIPUrl()
        self.soft = SoftInfoDownloader()

    def AppUrl(self):
        """Main crawl loop: download and parse every URL not yet processed.

        Sets self.url / self.name / self.html as loop state that
        AppAnalysis reads; sleeps TimeSleep between pages to throttle.
        """
        log_Main(u"对爬取到的每一个网页进行下载", "info")
        print u"链接总个数为：", len(self.urls)
        for num, url in enumerate(self.urls):
            print u"正在爬取第%d个链接" % (num + 1)
            self.url = url[0]
            x = (self.url, ) # wrap in a tuple: rows fetched from the DB come back as tuples
            if x in self.app_urls:
                print u"当前网页已经爬取过，不需要再进行爬取"
                continue
            self.name = url[1]
            # Download the page with a random User-Agent.
            # Positional args must come before keyword args, otherwise
            # SyntaxError: non-keyword arg after keyword arg.
            self.html = self.soft.download(self.url, USER_AGENTS[random.randint(0, len(USER_AGENTS) - 1)], None, 3, data=None)
            time.sleep(TimeSleep)
            # Parse the page.
            # NOTE(review): called before the html-is-None check below; if the
            # download failed, fromstring(None) raises inside AppAnalysis and
            # drops into its retry path — confirm this ordering is intended.
            self.AppAnalysis()

            if self.html is None:
                print u"链接:", self.url, u"没有用到"
                # Deletion deferred: dead URLs are removed only on a later
                # run, after every URL has been attempted once.
                # self.app.DeletePageUrl(self.url)

    def AppAnalysis(self):
        """Parse self.html: extract the comment count, summary info, basic
        app details and the permission list, then insert one row into each
        of the two result tables.

        On any exception (selector miss, removed app page, IP ban) it
        refetches through a random proxy and recurses; if that fails it
        drops the proxy, retries once with the local IP, and finally gives
        up by setting self.html = None.
        """
        try:
            tree = lxml.html.fromstring(self.html)
            print self.url
            tree1 = tree.cssselect("div.app-msg > div.app-detail > div.msg")[0]
            self.comment = tree1.cssselect("span")[0].text_content()
            print self.comment

            # Join the <span> texts into one string, separated by Chinese
            # commas and terminated by a Chinese full stop.
            tree2 = tree.cssselect("div.detail-app-other-info > ul > li > span")
            self.info = u""
            for x in range(0, len(tree2)):
                self.info += tree2[x].text_content()
                if x == len(tree2) - 1:
                    self.info += u'。'
                else:
                    self.info += u"，"
            print self.info

            # Persist the survey row.
            self.app.InsertSoftSurvery((self.url, self.name, self.comment, self.info))

            tree3 = tree.cssselect("div.detail-app-intro > div.main-right > div.other-info > div")[0]
            # Collect the app's basic-information paragraphs (first <p> skipped).
            self.Info = []
            for r in tree3.cssselect("p")[1:]:
                self.Info.append(r.text_content())
            print self.Info

            text = tree3.cssselect("a#checkPermissions")[0].text_content()
            # print text

            # Same join pattern as above, for the permission list items.
            self.jurisdiction = u""
            tree4 = tree3.cssselect("div > ul.permissions-list > li")
            for x in range(len(tree4)):
                self.jurisdiction += tree4[x].text_content()
                if x == len(tree4) - 1:
                    self.jurisdiction += u'。'
                else :
                    self.jurisdiction += u'，'
            print self.jurisdiction
            # InsertSoftInfo expects exactly six Info fields; an IndexError
            # here is caught below like any other parse failure.
            self.app.InsertSoftInfo((self.url, self.name, self.Info[0], self.Info[1], self.Info[2], self.Info[3], self.Info[4], self.Info[5], self.jurisdiction))
            log_Main(u"本次存储成功！！！！", "info")
        except Exception as e:
            log_Main(e, "error")
            print e
            # "list index out of range" here means a selector matched nothing.
            # That cannot happen on a normal page, so either the app page was
            # removed or our IP got banned while crawling.
            print u"IP被禁，循环爬取，直到IP可以使用"

            if len(self.proxies) <= 0:
                print u"代理ip已经被全部删除，重新获取代理ip"
                self.get_proxies()

            proxy = self.proxies[random.randint(0, len(self.proxies) - 1)]
            print proxy
            # NOTE(review): `proxy` is selected and printed but never passed to
            # download() — this retry still goes out over the local IP. Confirm
            # whether download's third argument should receive `proxy`.
            self.html = self.soft.download(self.url, USER_AGENTS[random.randint(0, len(USER_AGENTS) - 1)], None, 3)
            try:
                tree = lxml.html.fromstring(self.html)
                tr = tree.cssselect("div.app-msg > div.app-detail > div.msg")[0]

                # The probe above succeeded, so the refetched page is
                # parseable: recurse to run the full extraction again.
                self.AppAnalysis()
            except Exception as e:
                # The current proxy is considered unusable: drop it from the pool.
                self.proxies.remove(proxy)
                print e
                print u"代理不能使用，使用本机ip再进行一次爬取"
                try:
                    time.sleep(TimeSleep)
                    self.html = self.soft.download(self.url, USER_AGENTS[random.randint(0, len(USER_AGENTS) - 1)], None, 3)
                    tree = lxml.html.fromstring(self.html)
                    tr = tree.cssselect("div.app-msg > div.app-detail > div.msg")[0]
                    self.AppAnalysis()
                except Exception as e:
                    # Still failing: this page cannot yield the wanted data.
                    print e
                    self.html = None
                    return

    def get_proxies(self):
        """Rebuild the proxy pool: re-run the proxy crawler, reload the
        proxy list from MySQL into self.proxies, and notify by email.
        """
        from ProxyManager.ProxyManager import ProxyAnalysis
        p = ProxyAnalysis()
        p.ProxyMain()
        # Do not close the connection here: closing makes the queries that
        # follow raise InterfaceError (0, '').
        # p.Close()
        # Fetch the refreshed proxy list.
        print u"再次获取代理ip"
        self.proxies = GetProxies().getIPUrl()
        print u"重新获取后代理ip的数量为：", len(self.proxies)
        from SendEmail.SendEmail import SendMain
        SendMain(CONTENT)

if __name__ == "__main__":
    t1 = time.time()
    print u"当前时间为：", time.ctime(t1)
    app = Application()
    app.AppUrl()
    t2 = time.time()
    print u"爬取结束的时间为：", time.ctime(t1)
    print u"花费时间为：%s" % (t2 - t1)
    log_Main("花费时间为:%s" % (t2 - t1), "info")
    SendMain("每个网页的内容爬取完成！！！！")
    print u"邮件发送成功！！！！"