import os
import re

import requests
from bs4 import BeautifulSoup

# Request headers shared by every fetch in this script.
# NOTE(review): spoofs a desktop Chrome UA — presumably so the site serves the
# normal HTML instead of blocking the default `python-requests` agent; confirm.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/90.0.4430.212 Safari/537.36'
}


def get_comment_pages(url):
    """
    Fetch a post page and extract its comment page count.

    :param url: URL of the post (e.g. https://bbs.hupu.com/43011413.html)
    :return: the page count as a string of digits (callers convert with int())
    :raises AttributeError: if the page contains no ``pageCount:N,`` marker
    """
    r = requests.get(url=url, headers=headers, timeout=10)
    r.close()
    pattern = re.compile(r'pageCount:(?P<page_count>\d+),')
    # Bug fix: the original called match.groups('page_count')[0] — groups()
    # takes a *default value* for non-participating groups, not a group name,
    # and only worked by accident because the named group happened to be first.
    # group('page_count') is the correct way to read the named capture.
    return pattern.search(r.text).group('page_count')


def _page_url(base_url, page):
    """Return the URL of comment page ``page`` (0-based); page 0 is the base URL.

    For page N > 0 the site uses ``<serial>-<N+1>.html``, so we splice the
    page suffix in just before the final extension dot.
    """
    if page == 0:
        return base_url
    dot = base_url.rfind('.')
    return '%s-%d.%s' % (base_url[:dot], page + 1, base_url[dot + 1:])


def get_comments(serial_no):
    """
    Fetch every comment of a post, across all of its comment pages.

    :param serial_no: post serial number (string of digits)
    :return: list of comment text strings
    """
    url = 'https://bbs.hupu.com/%s.html' % serial_no
    page_size = int(get_comment_pages(url))
    comments = []
    for page in range(page_size):
        url_ = _page_url(url, page)
        try:
            r = requests.get(url=url_, headers=headers, timeout=10)
            r.close()
            bs = BeautifulSoup(r.text, 'html.parser')
            for body in bs.find_all('tbody'):
                p = body.find_all('p')
                # find_all always returns a (possibly empty) ResultSet, never
                # None, so the original "p is not None" guard was dead code.
                # The last <p> of each <tbody> carries the comment text.
                if p and p[-1].text.strip():
                    comments.append(p[-1].text)
            print('第%d/%d页爬取成功' % (page + 1, page_size))
        except Exception as e:
            # Best-effort scraping: report the failed page and keep going.
            print('爬取异常：', e)
    return comments


def get_child_urls(father_url):
    """
    Collect (serial_no, title) pairs for every post linked on a listing page.

    :param father_url: URL of the board/listing page
    :return: list of (serial_no, title) tuples
    """
    r = requests.get(url=father_url, headers=headers, timeout=10)
    r.close()
    bs = BeautifulSoup(r.text, 'html.parser')
    anchors = bs.find_all('a', class_='p-title')
    # href looks like '/<serial>.html'; drop the leading slash so the first
    # element can be fed straight into get_comments().
    posts = [(a.get('href')[1:], a.text) for a in anchors]
    print(posts)
    # Bug fix: the original built and printed the list but never returned it,
    # despite the docstring promising a collection of post serials.
    return posts


if __name__ == '__main__':
    # Scrape one post and dump its comments, one per line, to 文件/<serial>.txt.
    serial_no = '43011413'
    values = get_comments(serial_no)
    # Robustness fix: open() raises FileNotFoundError when the output directory
    # does not exist; create it up front (no-op if already present).
    os.makedirs('文件', exist_ok=True)
    with open('文件/%s.txt' % serial_no, 'w', encoding='utf-8') as f:
        f.writelines(v + '\n' for v in values)
