import random

import requests
import re
from requests.exceptions import RequestException
from pyquery import PyQuery as pq

# Request a listing ("text") page.
# Takes the URL of one page and returns that page's HTML.
import pymysql


def get_text_response(url):
    """Fetch a listing page and return its HTML text, or None on failure.

    url: the page URL to request.
    Returns the response body as a string when the server answers 200;
    returns None on a non-200 status or any request error (the error is
    printed, not raised).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0'
    }
    # NOTE(review): these hard-coded free proxies go stale very quickly;
    # consider loading a fresh proxy list from configuration instead.
    proxy_pool = [
        {'https': 'HTTPS://120.83.105.36:9999'},
        {'https': 'HTTPS://223.241.78.86:8010'},
        {'https': 'HTTPS://114.99.13.62:9999'}
    ]
    # Pick one proxy at random for this request.
    proxy = random.choice(proxy_pool)
    try:
        # timeout keeps the crawler from hanging forever on a dead proxy;
        # a timeout raises RequestException and is handled below.
        response = requests.get(url, headers=headers, proxies=proxy, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException as e:
        print(e)
        return None


# Fetch the page of a single post.
# Takes one post's link and returns that post's HTML.
def get_comment_response(url):
    """Fetch one joke's detail (comment) page; thin delegate to get_text_response."""
    return get_text_response(url)

# Parse a listing page.
# html: the listing-page HTML. Extracts username, level, text, top comment
# (good_comment), agree count agree_num (int), and comment count
# text_comment_num (int); the per-post detail page supplies time, funny,
# and publish_in.
def parse_text(html):
    """Parse one listing page and persist every post found on it.

    For each post this extracts username, level, text, top ("god") comment,
    agree count and comment count from the listing HTML, then follows the
    post's own link to pull time / funny-count / published-in from the
    detail page.  Each assembled record is printed and saved to MySQL.

    html: HTML of one listing page.
    Returns the list of record dicts (previously built but never returned).
    """
    usernames = re.findall(r'<div class="author clearfix">.*?<h2>\n(.*?)\n</h2>', html, re.DOTALL)
    levels = re.findall(r'<div class="articleGender .*?">(\d*)</div>', html, re.S)
    # The literal newlines between tags matter for these patterns.
    text_tags = re.findall(r'<div class="content">\n<span>(.*?)</span>', html, re.S)
    text_list = [re.sub('<br/>', '\n', t).strip() for t in text_tags]
    good_comment_list = [
        c.strip()
        for c in re.findall(r'<div class="main-text">(.*?)<div class="likenum">', html, re.S)
    ]
    agree_nums = re.findall(r'<div class="main-text">.*?<div class="likenum">\n<img.*?>(.*?)</div>', html, re.S)
    # Non-numeric captures (missing counts) are normalized to 0.
    agree_num_list = [int(n) if n.strip().isdigit() else 0 for n in agree_nums]
    comment_nums = re.findall(r'<span class="stats-comments">\n<span class="dash"> · </span>\n<a.*?<i class="number">(\d*)</i>', html, re.S)
    text_comment_num_list = [int(n) if n.isdigit() else 0 for n in comment_nums]
    # Follow each post's link and parse its detail page for the remaining fields.
    time_list = []
    funny_list = []
    publish_in_list = []
    basic_url = 'https://www.qiushibaike.com'
    doc = pq(html)
    a_tags = doc('#content-left .article.block.untagged.mb15 .contentHerf').items()
    for a in a_tags:
        comment_url = basic_url + a.attr('href')
        concrete_comment = parse_comment(get_comment_response(comment_url))
        time_list.append(concrete_comment['time'])
        funny_list.append(concrete_comment['funny'])
        publish_in_list.append(concrete_comment['publish_in'])
    # Zip the parallel field lists into one record per post.  zip() also
    # truncates to the shortest list if any pattern missed an entry.
    comment_list = []
    for (username, level, text, good_comment, agree_num,
         text_comment_num, time, funny, publish_in) in zip(
            usernames, levels, text_list, good_comment_list, agree_num_list,
            text_comment_num_list, time_list, funny_list, publish_in_list):
        content = {
            'username': username,
            'level': int(level),
            'text': text,
            'good_comment': good_comment,
            'agree_num': agree_num,
            'text_comment_num': text_comment_num,
            'time': time,
            'funny': funny,
            'publish_in': publish_in
        }
        print(content)
        comment_list.append(content)
        # Persist each record as it is assembled.
        save_to_mysql(content)
    return comment_list



# Parse a single post's detail page.
# html: the detail-page HTML. Returns time, funny count (funny), and the
# column it was published in (publish_in).
def parse_comment(html):
    """Parse one post's detail page for time / funny count / source column.

    html: HTML of a single post's detail page.
    Returns a dict with keys 'time' (stripped str), 'funny' (int; 0 when the
    capture is not purely digits, so it always fits the numeric SQL slot),
    and 'publish_in' (str).
    Raises IndexError if the page does not contain the expected markup.
    """
    start_time = re.findall(r'<span class="stats-time">(.*?)</span>', html, re.S)[0]
    time = start_time.strip()
    funny = re.findall(r'<i class="number">(\d*)</i>', html, re.S)[0]
    # Normalize to int; an empty/non-digit capture becomes 0 instead of
    # leaking a str into the otherwise-int field.
    funny = int(funny) if funny.isdigit() else 0
    # [4:] drops a fixed 4-character prefix inside the source-column link
    # (presumably a label before the column name — TODO confirm on a live page).
    publish_in = re.findall(r'<a href="/text/" class="source-column">(.*?)</a>', html, re.S)[0][4:]
    comment = {
        'time': time,
        'funny': funny,
        'publish_in': publish_in
    }
    return comment


# TODO get the total number of pages
# TODO get the total number of pages
def get_total_page(html):
    """Return the total page count read from the '.page-numbers' elements.

    The combined text's last whitespace-separated token is taken as the
    highest page number; falls back to 1 when it is not a plain number.
    """
    numbers_text = pq(html)('.page-numbers').text()
    # Everything after the last space (the whole string if there is none).
    last_token = numbers_text[numbers_text.rfind(" ") + 1:]
    return int(last_token) if last_token.isdigit() else 1

# Database persistence.
# data: one record passed in as a dict.
def save_to_mysql(data):
    """Insert one scraped record into the `qiushibaike` table.

    data: dict with keys username, level, text, good_comment, agree_num,
          text_comment_num, time, funny, publish_in.
    Commits on success; on any MySQL error prints a message and rolls back.
    The connection is always closed.
    """
    # Keyword arguments: positional connect() args were removed in PyMySQL 1.0.
    db = pymysql.connect(host='localhost', user='root', password='root', database='test')
    # Parameterized query: the driver quotes/escapes every value, which
    # prevents SQL injection and breakage on quotes in the scraped text.
    sql = ('insert into qiushibaike(username, level, text, good_comment, agree_num,'
           ' text_comment_num, time, funny, publish_in)'
           ' values(%s, %s, %s, %s, %s, %s, %s, %s, %s)')
    params = (data['username'], data['level'], data['text'], data['good_comment'],
              data['agree_num'], data['text_comment_num'], data['time'],
              data['funny'], data['publish_in'])
    try:
        with db.cursor() as cursor:
            cursor.execute(sql, params)
        db.commit()
    except pymysql.MySQLError as e:
        print('数据库插入失败。。。', e)
        db.rollback()
    finally:
        db.close()


def main():
    """Crawl every listing page of the text section and parse/store each post."""
    url = 'https://www.qiushibaike.com/text'
    html = get_text_response(url)
    # Check the first response BEFORE using it: the original called
    # get_total_page(html) first, which crashed when the request failed.
    if html is None:
        print('请求出错')
        return
    page_num = get_total_page(html)
    print(page_num)
    for i in range(1, page_num + 1):
        page_url = 'https://www.qiushibaike.com/text/page/{}/'.format(i)
        page_html = get_text_response(page_url)
        # Skip pages whose request failed instead of passing None to the parser.
        if page_html is not None:
            parse_text(page_html)


if __name__ == '__main__':
    main()

