# -*- coding:utf-8 -*-

import requests
import bs4
import re

# Target topic: first page of the Douban group thread to scrape.
url = "https://www.douban.com/group/topic/157797390/"
# Desktop Chrome UA so the site serves the normal HTML page instead of blocking the bot.
headers = {
    "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36"
}

def download_page(url):
    '''
    Download the topic page plus every paginated page linked from it.

    :param url: URL of the first page of the topic.
    :return: list of BeautifulSoup objects, one per downloaded page
             (the first page plus each unique pagination link).
    '''
    # timeout so a stalled connection cannot hang the script forever
    res = requests.get(url, headers=headers, timeout=10)
    bs4_obj = bs4.BeautifulSoup(res.text, "lxml")
    bs4_page_obj_list = [bs4_obj]
    # Download all paginated pages first; emails are extracted afterwards.
    url_set = set()
    paginator_ele = bs4_obj.find("div", attrs={"class": "paginator"})
    if paginator_ele:
        for a_ele in paginator_ele.find_all("a"):
            href = a_ele.attrs.get("href")
            # skip anchors without an href — requests.get(None) would crash
            if href:
                url_set.add(href)

        # NOTE: renamed from `url` to avoid shadowing the function parameter
        for page_url in url_set:
            print(f"下载分页{page_url}")
            page_obj = requests.get(page_url, headers=headers, timeout=10)
            bs4_page_obj = bs4.BeautifulSoup(page_obj.text, "lxml")
            bs4_page_obj_list.append(bs4_page_obj)

    return bs4_page_obj_list

def fetch_emails(page_obj_list):
    '''
    Extract email addresses (with their post time) from comment blocks.

    :param page_obj_list: list of BeautifulSoup page objects as returned
                          by download_page().
    :return: list of [email, pubtime_text] pairs, in document order.
    '''
    # Compile once outside the loops; raw string, and the dot is escaped —
    # the original pattern "\w+@\w+.\w+" matched any character before the TLD.
    email_pattern = re.compile(r"\w+@\w+\.\w+", flags=re.A)
    mail_list = []
    for bs4_obj in page_obj_list:
        comment_eles = bs4_obj.find_all("div", attrs={"class": "reply-doc"})
        for ele in comment_eles:
            comment_ele = ele.find("p", attrs={"class": "reply-content"})
            # guard: a reply-doc div without a reply-content paragraph
            # would make .text raise AttributeError
            if comment_ele is None:
                continue
            email_addr = email_pattern.search(comment_ele.text)
            if email_addr:
                pub_time = ele.find("span", attrs={"class": "pubtime"})
                mail_list.append([email_addr.group(), pub_time.text])

    print(mail_list)
    # Returning the result (original returned None) is backward-compatible
    # and lets callers use the data instead of only reading stdout.
    return mail_list

# Entry point: download every page of the topic, then extract and print emails.
# Guarded so importing this module does not trigger network requests.
if __name__ == "__main__":
    page_obj_list = download_page(url)
    fetch_emails(page_obj_list)