# -*- coding: utf-8 -*-
# Created: 2021/7/16 16:54
from bs4 import BeautifulSoup
import requests
import time, math, random, json
import pymysql
import re
from hhy import FileUtil,DateUtil,ToolUtil,HttpUtil
__author__ = 'LuckyHhy'


# Site root; relative next-page hrefs found by get_data are joined onto this.
DOWNLOAD_URL = 'https://www.qiushibaike.com'

# Fetch a page and return its raw body.
def get_content(url, timeout=10):
    """Download *url* with a randomized User-Agent and return the raw
    response bytes.

    :param url: absolute URL of the page to fetch.
    :param timeout: seconds to wait before aborting (new optional
        parameter; the original call had no timeout and could hang
        forever on a stalled connection).
    :returns: response body as ``bytes``.
    :raises requests.HTTPError: if the server answers with a 4xx/5xx
        status — failing here is clearer than handing an error page to
        the HTML parser downstream.
    """
    headers = {"User-Agent": HttpUtil.AgentRandom()}
    r = requests.get(url, headers=headers, timeout=timeout)
    r.raise_for_status()
    return r.content


# Persist scraped text snippets.
def save_txt(*args):
    """Append every string in *args* to ``qiubai.txt`` (UTF-8).

    The file is opened once per call; the original re-opened it once
    per argument, which is wasteful for multi-argument calls.

    :param args: strings to append, in order.
    """
    with open('qiubai.txt', 'a', encoding='utf-8') as f:
        for chunk in args:
            f.write(chunk)

# Parse one listing page and persist its articles.
def get_data(html, page):
    """Extract every article from one listing page, append each to
    ``qiubai.txt`` via :func:`save_txt`, and return the absolute URL of
    the next page, or ``''`` when there is no next page.

    BUG FIX: the next-page lookup and its ``return`` statements used to
    sit INSIDE the article loop, so the function returned after saving
    only the FIRST article of each page. The lookup now runs once,
    after all articles have been processed.

    :param html: raw HTML of the listing page (bytes or str).
    :param page: 1-based page number, used only in the saved text.
    :returns: absolute next-page URL, or ``''``.
    """
    output = """第{}页 作者：{} 性别：{} 年龄：{} 点赞：{} 评论：{}\n{}\n------------\n"""
    soup = BeautifulSoup(html, 'html.parser')
    con = soup.find(class_='old-style-col1')
    con_list = con.find_all('div', class_="article")
    for i in con_list:
        author = i.find('h2').string  # author name
        content = i.find('div', class_='content').find('span').get_text()  # article text
        stats = i.find('div', class_='stats')
        vote = stats.find('span', class_='stats-vote').find('i', class_='number').string
        comment = stats.find('span', class_='stats-comments').find('i', class_='number').string
        author_info = i.find('div', class_='articleGender')  # age/gender badge
        if author_info is not None:  # non-anonymous user
            class_list = author_info['class']
            if "womenIcon" in class_list:
                gender = '女'
            elif "manIcon" in class_list:
                gender = '男'
            else:
                gender = ''
            age = author_info.string  # age is the badge's text
        else:  # anonymous user: no badge at all
            gender = ''
            age = ''

        # Append this article to the text file.
        save_txt(output.format(page, author, gender, age, vote, comment, content))

    # After saving every article, look for the "next page" link once.
    pagee = soup.find('span', attrs={'class': 'next'})
    if pagee:
        link = pagee.find_parent('a')  # the span sits inside the <a> that carries the href
        if link and link.get('href'):
            return DOWNLOAD_URL + link['href']
    return ''


def main():
    """Crawl the text section page by page until no next-page link remains."""
    # The URL must carry the /page/N/ segment, otherwise the site omits
    # the next-page link and the crawl stops after one page.
    next_url = "https://www.qiushibaike.com/text/page/1/"
    page_no = 1
    while next_url:
        print("正在获取第 {} 页数据".format(page_no))
        next_url = get_data(get_content(next_url), page_no)
        print("第 {} 页数据爬取完成".format(page_no))
        page_no += 1
    print("数据爬取完成-ok")



# Script entry point: run the crawler only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
