from multiprocessing import Pool

import requests
import time
from requests.exceptions import RequestException
import re
from pyquery import PyQuery as pq
import pymysql


# Fetch a page's HTML with a browser-like User-Agent.
# Returns the response body text, or None on any failure.
def get_page(url, timeout=10):
    """Fetch *url* and return its HTML text, or None on error.

    Args:
        url: The page URL to request.
        timeout: Seconds to wait for the server before giving up.
                 Without a timeout a stalled server would block the
                 worker process forever.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0'
    }
    try:
        response = requests.get(url, headers=headers, timeout=timeout)
        if response.status_code == requests.codes.OK:
            return response.text
        # Non-200 response: treat as a failed fetch.
        return None
    except RequestException as e:
        # Best-effort scraper: report and signal failure instead of raising.
        print(e)
        return None


# Parse one result page's HTML; print and persist every poem found.
# Returns the list of poem dicts.
def parse_page(html):
    """Extract poems (title, dynasty, author, content) from *html*.

    Each poem is printed and written to MySQL via save_to_mysql().
    Returns the list of poem dicts so callers can also reuse the data.

    NOTE(review): the regexes match the site's markup as rendered by a
    browser (original note said Chrome's source view) and are fragile to
    layout changes.
    """
    titles = re.findall(r'<div class="cont">.*?<b>(.*?)</b>', html, re.DOTALL)
    dynasties = re.findall(r'<p class="source"><a.*?>(.*?)</a>', html, re.DOTALL)
    authors = re.findall(r'<p class="source"><a.*?>.*?</a>.*?<a.*?>(.*?)</a></p>', html, re.DOTALL)
    contents_tags = re.findall(r'<div class="contson".*?>(.*?)</div>', html, re.DOTALL)

    # Strip inline markup (<br />, <p>, </p>) from each poem body,
    # preserving the original cleanup order (strip happens after the
    # <br /> removal, before the <p> removal).
    contents = []
    for raw in contents_tags:
        text = re.sub(r'<br />', '', raw).strip()
        text = re.sub(r'</?p>', '', text)
        contents.append(text)

    # zip() truncates to the shortest list, so a page where one pattern
    # misses an entry silently drops the unmatched tail.
    poem_list = [
        {'title': title, 'dynasty': dynasty, 'author': author, 'content': content}
        for title, dynasty, author, content in zip(titles, dynasties, authors, contents)
    ]

    for poem in poem_list:
        print(poem)
        save_to_mysql(poem)
    return poem_list


# Determine how many result pages exist in total.
# Returns the page count, or 0 when it cannot be determined.
def get_total_page_num(url):
    """Fetch *url* and read the total page count from the pager widget."""
    html = get_page(url)
    if html is None:
        # Request failed entirely; report "no pages" instead of
        # crashing inside pq(None).
        return 0
    doc = pq(html)
    page_num = doc('#FromPage .pagesright #sumPage').text()
    # text() may be empty or non-numeric if the page layout changed.
    if page_num.isdigit():
        return int(page_num)
    return 0


# Persist one poem dict into the MySQL `poems` table.
def save_to_mysql(poem):
    """Insert *poem* (keys: title, dynasty, author, content) into MySQL.

    Opens a fresh connection per call; insert errors are rolled back and
    reported rather than raised, so one bad row does not stop the crawl.
    """
    # Keyword arguments: the positional connect() signature used here
    # originally was removed in PyMySQL 1.0.
    db = pymysql.connect(host='localhost', user='root',
                         password='root', database='test')
    try:
        cursor = db.cursor()
        # Parameterized query: the driver escapes quotes and special
        # characters. The old string-formatted INSERT was SQL-injection
        # prone and broke on any poem containing a double quote.
        sql = 'insert into poems(title, dynasty, author, content) values(%s, %s, %s, %s)'
        cursor.execute(sql, (poem['title'], poem['dynasty'],
                             poem['author'], poem['content']))
        db.commit()
    except pymysql.MySQLError:
        print('数据库插入失败。。。')
        db.rollback()
    finally:
        # Always release the connection, even on unexpected errors.
        db.close()


def main(ttt):
    """Crawl every result page of gushiwen.org and parse/store its poems.

    Args:
        ttt: Offset handed in by Pool.map; currently unused — every
             worker crawls all pages. NOTE(review): presumably intended
             to partition pages among workers; confirm before using it.
    """
    url = 'https://www.gushiwen.org/default_1.aspx'
    total_page_num = get_total_page_num(url)
    print(total_page_num)
    if not total_page_num:
        print('解析页码出错')
        return
    for page in range(1, total_page_num + 1):
        url = 'https://www.gushiwen.org/default_{}.aspx'.format(page)
        # Retry up to 10 times — fetches can fail transiently.
        # (Original code reused `i` here, shadowing the page counter.)
        html = None
        for attempt in range(10):
            html = get_page(url)
            if html is not None:
                break
            print('request 第%d请求出错' % attempt)
        if html is None:
            # All retries failed: skip this page instead of passing
            # None into parse_page (which would crash in re.findall).
            continue
        parse_page(html)


if __name__ == '__main__':
    # Offsets handed to each worker process (currently unused by main()).
    group = [i * 20 for i in range(0, 5)]
    # Fan out over a multiprocessing pool (processes, not threads);
    # map() blocks until every worker finishes.
    pool = Pool()
    # time.perf_counter() replaces time.clock(), which was removed in
    # Python 3.8.
    start = time.perf_counter()
    pool.map(main, group)
    end = time.perf_counter()
    print('一共运行了{}秒'.format(end - start))
    pool.close()
    pool.join()
