#!/usr/bin/python
# coding: utf-8

import sys
sys.path.append("..")

import time
import lxml.html
import random
import threading
from MySql_InterFace.AppMySql import ApplicationMySql
from MySql_InterFace.GetProxy import GetProxies
from Logging.AppLogging import log_Main
from Downloader.NewAppDownloader import SoftInfoDownloader
from Config.Config import USER_AGENTS, TimeSleep, CONTENT, MAX_NUM, MAX_THREADING
from SendEmail.SendEmail import SendMain

class Application(object):
    def __init__(self):
        self.app = ApplicationMySql()
        self.urls = list(self.app.GetUrl())
        self.app.CreateSoftSurvey()
        self.app.CreateSoftInfo()
        self.app.CreateClassification()
        self.app.CreateSoftClassification()
        self.app_urls = self.app.SelectUrl()
        # print self.app_urls
        self.proxies = GetProxies().getIPUrl()
        self.soft = SoftInfoDownloader()

    def AppUrl(self):
        # log_Main(u"对爬取到的每一个网页进行下载", "info")
        print u"链接总个数为：", len(self.urls)
        num = 1
        threads = []
        threadLock = threading.Lock()
        flag = False  # 用来判断是否每一个链接都爬取完毕
        while len(threads) < MAX_THREADING:
            for thread in threads:
                if not thread.is_alive():
                    threads.remove(thread)
            # 剔除出去已经用完的线程以后再继续开启新线程
            while len(threads) < MAX_THREADING:
                if len(self.urls) == 0:
                    print u"所有链接都已经爬取完毕，停止多线程"
                    flag = True
                    break
                print u"正在爬取第%d个链接" % (num)
                num += 1
                self.url, self.name = self.urls.pop()
                x = (self.url,)
                if x in self.app_urls:
                    print u"当前网页已经爬取过，不需要再进行爬取"
                    continue
                time.sleep(TimeSleep)
                t = threading.Thread(target=self.tdCrawl, name="threading", args=(threadLock, self.url))
                # 关于守护线程的设置问题
                # http://blog.csdn.net/yueguanghaidao/article/details/40088431
                # http://www.cnblogs.com/fnng/p/3670789.html
                # t.setDaemon(True) # 设置该线程为守护线程
                t.start()
                t.join()
                threads.append(t)
                time.sleep(TimeSleep * 2)

            # 如果线程数量达到最大允许的线程数，则循环等待，直到有线程用完释放再继续运行
            while len(threads) == MAX_THREADING:
                # time.sleep(TimeSleep) # 等待下，让线程跑会
                for thread in threads:
                    if not thread.is_alive():
                        threads.remove(thread)
            if flag:
                break

    def tdCrawl(self, lock, URL):
        lock.acquire() # 创建线程锁
        # 下载网页
        # 要将args放在kwargs之前，不然会报SyntaxError: non-keyword arg after keyword arg
        self.html = self.soft.download(URL, USER_AGENTS[random.randint(0, len(USER_AGENTS) - 1)], None, 3, data=None)
        # 对网页进行解析
        self.AppAnalysis(URL)

        if self.html is None:
            print u"链接:", URL, u"没有用到"
            # 先不执行删除操作，所有的url都爬取完成以后再次运行的时候再删除
            # self.app.DeletePageUrl(URL)
        lock.release() # 释放线程锁

    def AppAnalysis(self, URL):
        try:
            tree = lxml.html.fromstring(self.html)
            print URL
            tree1 = tree.cssselect("div.app-msg > div.app-detail > div.msg")[0]
            self.comment = tree1.cssselect("span")[0].text_content()
            print self.comment

            tree2 = tree.cssselect("div.detail-app-other-info > ul > li > span")
            self.infos = u""
            for x in range(0, len(tree2)):
                self.infos += tree2[x].text_content()
                if x == len(tree2) - 1:
                    self.infos += u'。'
                else:
                    self.infos += u"，"
            print self.infos

            tree3 = tree.cssselect("div.detail-app-intro > div.main-right > div.other-info > div")[0]
            # 获取软件的基本信息
            self.Info = []
            for r in tree3.cssselect("p")[1:]:
                self.Info.append(r.text_content())
            print self.Info

            text = tree3.cssselect("a#checkPermissions")[0].text_content()
            # print text

            self.jurisdiction = u""
            tree4 = tree3.cssselect("div > ul.permissions-list > li")
            for x in range(len(tree4)):
                self.jurisdiction += tree4[x].text_content()
                if x == len(tree4) - 1:
                    self.jurisdiction += u'。'
                else :
                    self.jurisdiction += u'，'
            print self.jurisdiction

            # 存储到数据库中
            # self.app.InsertSoftSurvery((URL, self.name, self.comment, self.infos))
            # self.app.InsertSoftInfo((URL, self.name, self.Info[0], self.Info[1], self.Info[2], self.Info[3], self.Info[4], self.Info[5], self.jurisdiction))
            self.app.Insert((URL, self.name, self.comment, self.infos), (URL, self.name, self.Info[0], self.Info[1], self.Info[2], self.Info[3], self.Info[4], self.Info[5], self.jurisdiction))
            # log_Main(u"本次存储成功！！！！", "info")
        except Exception as e:
            # log_Main(e, "error")
            print e
            # 如果发送list index out of range，则证明list访问的时候越界，但是针对于每一个正常的网页来说根本不可能发生这种情况，
            # 所以 此时只可能是当前网页的应用已经删除 或者 爬取网页的时候IP被禁
            # print u"IP被禁，循环爬取，直到IP可以使用"

            if len(self.proxies) <= 0:
                print u"代理ip已经被全部删除，重新获取代理ip"
                self.get_proxies()

            proxy = self.proxies[random.randint(0, len(self.proxies) - 1)]
            # print proxy
            self.html = self.soft.download(URL, USER_AGENTS[random.randint(0, len(USER_AGENTS) - 1)], proxy, 3)
            try:
                tree = lxml.html.fromstring(self.html)
                tr = tree.cssselect("div.app-msg > div.app-detail > div.msg")[0]

                # 如果上面执行都正确，则证明重新获取的网页可以进行爬取
                self.AppAnalysis()
            except Exception as e:
                # 当前代理ip不能使用
                # ----------------------------下面把删除代理给禁止了，要是用的时候直接加上即可
                # if proxy in self.proxies:
                #     self.proxies.remove(proxy)
                # print e
                # print u"代理不能使用，使用本机ip再进行一次爬取"
                try:
                    time.sleep(TimeSleep)
                    self.html = self.soft.download(URL, USER_AGENTS[random.randint(0, len(USER_AGENTS) - 1)], None, 3)
                    tree = lxml.html.fromstring(self.html)
                    tr = tree.cssselect("div.app-msg > div.app-detail > div.msg")[0]
                    self.AppAnalysis(URL)
                except Exception as e:
                    # 抛出异常，证明当前网页不能获取到想要的信息
                    print e
                    self.html = None
                    return

    def get_proxies(self):
        from ProxyManager.ProxyManager import ProxyAnalysis
        p = ProxyAnalysis()
        p.ProxyMain()
        # 不能关闭，否则在下面运行的时候会报InterfaceError (0, '')
        # p.Close()
        # 再次获取代理ip
        print u"再次获取代理ip"
        self.proxies = GetProxies().getIPUrl()
        print u"重新获取后代理ip的数量为：", len(self.proxies)
        from SendEmail.SendEmail import SendMain
        SendMain(CONTENT)

def AppManagerMain():
    t1 = time.time()
    print u"当前时间为：", time.ctime(t1)
    app = Application()
    app.AppUrl()
    t2 = time.time()
    print u"爬取结束的时间为：", time.ctime(t1)
    print u"花费时间为：%s" % (t2 - t1)
    log_Main(u"第三步花费时间为:%f" % (t2 - t1), "info")
    SendMain("每个网页的内容爬取完成！！！！")
    print u"不管发生成功与否，邮件已经发送完成！！！！"

# Script entry point: run the full crawl when executed directly.
if __name__ == "__main__":
    AppManagerMain()