import re
import requests
import threading
from model.Book import Book
from util.RandomIP import RandomIP

# Mobile-browser User-Agent sent with every request (see __spider) so the
# site serves its mobile pages, which the regexes below are written against.
HEADER = {
    "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Mobile Safari/537.36"}

'''
Crawl with the local IP first; once it gets blocked, fall back to a proxy IP
and start a timer that switches back to the local IP after 5 minutes.
'''


class IreadSpider:
    """Scraper for www.ireadweek.com book listings.

    Strategy: crawl with the local IP first; once a request fails (assumed
    blocked), switch to proxy IPs and start a timer that restores the local
    IP after the cooldown below.
    """

    # Cooldown (seconds) before retrying the local IP after switching to a
    # proxy. The original code passed 5 (seconds) while every comment said
    # "5 minutes" — fixed to 300 to match the documented intent.
    PROXY_COOLDOWN_SECONDS = 300

    def __init__(self, head, mySqlOperate, esOperate):
        """head: request headers dict; mySqlOperate/esOperate: storage helpers."""
        self.url = "http://www.ireadweek.com"
        self.indexURL = "http://www.ireadweek.com/sdfesfwsf.php?g=portal&m=index&a=index&p="
        self.head = head
        self.randIP = RandomIP()
        # Start on the local IP; flipped to True when a local request fails.
        self.is_proxy = False
        self.mySqlOperate = mySqlOperate
        self.esOperate = esOperate

    # Collect every article URL on a listing page, plus the next page number.
    def __get_all(self, html):
        """Return (article_urls, [next_page_number]).

        article_urls are absolute URLs built from self.url. When the page has
        no <dd> block or no "next page" link (e.g. last page or a blocked
        response), empty results are returned instead of raising IndexError
        as the original did.
        """
        dd_blocks = re.findall(r'<dd>(.*)</dd>', html)
        if not dd_blocks:
            # Malformed or blocked page: nothing to crawl, no next page.
            return [], ['']
        article_paths = re.findall(r'<a href="([^<]*?id=\d+)">', dd_blocks[0])
        next_pages = re.findall(r'<a href="[^<]*?p=(\d+)">下一页', html)
        resultURL = [self.url + path for path in article_paths]
        resultNextPage = [next_pages[0] if next_pages else '']
        return resultURL, resultNextPage

    @staticmethod
    def __first_match(pattern, text):
        """Return the first capture of pattern in text, or '' when absent."""
        found = re.findall(pattern, text)
        return found[0] if found else ''

    # Parse one article page into name/author/type/score/brief/download links.
    def __get_book(self, url):
        """Fetch an article page and return a populated Book instance."""
        response = self.__spider(url).content.decode('utf8')
        pic = self.__first_match(r'<img src="([^<]*?)">', response)
        # Download the cover image bytes only when a cover URL was found.
        picContent = self.__spider(self.url + pic).content if pic != '' else ''
        name = self.__first_match(r'<p>书名：([^<]*?)</p>', response)
        auth = self.__first_match(r'<p>作者：([^<]*?)</p>', response)
        score = self.__first_match(r'<p>豆瓣评分：([^<]*?)</p>', response)
        brief = self.__first_match(r'<p>简介：</p><p>(.*?)</p>', response)
        # Dots escaped so the hostnames match literally.
        baidudownload = self.__first_match(
            r'<a href="(http[s]?://pan\.baidu\.com/[^<]*?)"', response)
        tenxundownload = self.__first_match(
            r'<a href="(http[s]?://share\.weiyun\.com/[^<]*?)"', response)
        # NOTE(review): this pattern is identical to tenxundownload's in the
        # original source — it looks like a copy-paste slip. Kept as-is to
        # preserve behavior; confirm which host "chenyu" should point at.
        chenyudownload = self.__first_match(
            r'<a href="(http[s]?://share\.weiyun\.com/[^<]*?)"', response)
        return Book(name, auth, score, brief, baidudownload, tenxundownload, chenyudownload, picContent)

    # Timer callback: re-enable the local IP after the cooldown.
    def __handler(self):
        self.is_proxy = False

    # GET a URL, via proxy when the local IP is currently considered blocked.
    def __spider(self, url):
        """Return the requests response for url.

        On a local-IP failure, switch to proxy mode and schedule a timer that
        restores local-IP crawling after PROXY_COOLDOWN_SECONDS.
        """
        if self.is_proxy:
            return self.randIP.proxy_spider(url)
        try:
            return requests.get(url, headers=self.head, timeout=2)
        except requests.RequestException:
            # Local IP looks blocked: use proxies and schedule recovery.
            self.is_proxy = True
            threading.Timer(self.PROXY_COOLDOWN_SECONDS, self.__handler).start()
            return self.randIP.proxy_spider(url)

    # Public entry point.
    def run(self, page):
        """Crawl listing page `page`, persist every book found on it to
        MySQL and Elasticsearch, and return the next page number (str,
        '' when there is no next page)."""
        html = self.__spider(self.indexURL + str(page)).content.decode('utf8')
        allURL, allNextPage = self.__get_all(html)
        allSql = []
        for article_url in allURL:
            book = self.__get_book(article_url)
            es_doc = {'name': book.name, 'auth': book.auth, 'brief': book.brief}
            print(es_doc)
            self.esOperate.writeEs('book', es_doc)
            allSql.append((book.name, book.auth, book.score,
                           book.baidudownload, book.tenxundownload,
                           book.chenyudownload, book.brief, book.pic))
        sql = "insert into book (name, auth, score, baidudownload, tenxundownload, chenyudownload, brief, pic) values " \
              "(%s, %s, %s, %s, %s, %s, %s, %s) "
        self.mySqlOperate.save_mysql(sql, allSql)
        return allNextPage[0]  # next page number for the caller's loop

# Ad-hoc test scaffolding, kept commented out; running this module directly
# does nothing.
if __name__ == '__main__':
    # reponse = requests.get("http://www.ireadweek.com/sdfesfwsf.php?m=article&a=index&id=14204")
    # text = reponse.content.decode('utf8')
    # pattern = r'<p><p>(.*?)</p>'
    # tmp = re.findall(pattern, text)
    # brief = tmp[0] if len(tmp) != 0 else ''
    # print(brief)
    # ireader = IreadSpider(HEADER, '', '')
    # ireader.func("http://www.ireadweek.com/sdfesfwsf.php?m=article&a=index&id=14204")
    pass
