# -*- coding: utf-8 -*-
# created by Src - 一路好时光

# PS E:\PycharmProjects\pythonProject\day4-用Python处理文件> pip install requests
# Collecting requests
#   Downloading requests-2.32.3-py3-none-any.whl.metadata (4.6 kB)

#【无偿分享】史上最全Python学习大礼包
# https://www.douban.com/group/topic/157797390

import requests #下载网页
import bs4 #beautifulSoup，解析网页
import re

# Browser-like request headers: Douban answers HTTP 418 to bare requests,
# so we mimic a desktop Chrome session.
headers = {
    'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    # NOTE: do NOT send 'Accept-Encoding: gzip, deflate, br' here —
    # with it the response body comes back garbled (see original comment).
    'Accept-Language':'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
    # Fixed typo: was 'Cache-Cotrol', which servers silently ignore.
    'Cache-Control':'no-cache',
    'Connection':'Keep-Alive',
    'Host':'www.douban.com',
    'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'
}

def download_page(url):
    '''
    Download a Douban topic page plus every paginated page linked from it.

    :param url: URL of the first page of the topic.
    :return: list of BeautifulSoup objects, one per downloaded page.
             Empty list when the HTTP request does not return 200.
             (The original returned None on failure and on pages without
             a paginator, which crashed the caller.)
    '''
    # headers is required: without browser-like headers Douban replies 418.
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        # Nothing to parse — return an empty (iterable) result instead of None.
        return []

    # https://beautifulsoup.readthedocs.io/zh-cn/v4.4.0/  (library docs)
    bs4_obj = bs4.BeautifulSoup(response.text, 'html.parser')
    # The first page is always part of the result, paginated or not.
    bs4_page_obj_list = [bs4_obj]

    # The paginator div holds <a> links to the remaining pages; a short
    # topic has no paginator at all, in which case only page 1 is returned.
    paginator_lists = bs4_obj.find('div', attrs={'class': 'paginator'})
    if paginator_lists:
        # A set de-duplicates the page links (prev/next repeat page URLs).
        url_set = set()
        for a_element in paginator_lists.find_all('a'):
            href = a_element.attrs.get("href")
            print(href)
            if href:  # skip anchors without an href attribute
                url_set.add(href)

        # Fetch every additional page and parse it.
        for page_url in url_set:
            print(f'下载分页url:{page_url}')
            page_obj = requests.get(page_url, headers=headers)
            bs4_page_obj = bs4.BeautifulSoup(page_obj.text, 'html.parser')
            bs4_page_obj_list.append(bs4_page_obj)

    return bs4_page_obj_list



def fetch_emals(page_obj_list):
    '''
    Extract email addresses (with publish time) from the reply blocks
    of the parsed topic pages.

    :param page_obj_list: iterable of BeautifulSoup page objects.
    :return: list of [email, pubtime] pairs, in page order.
    '''
    mail_list = []
    for bs4_obj in page_obj_list:
        # Each reply lives in <div class="reply-doc">; the comment text is
        # in <p class="reply-content"> and the timestamp in <span class="pubtime">.
        reply_content = bs4_obj.find_all('div', attrs={'class': 'reply-doc'})

        for reply in reply_content:
            comment_content = reply.find('p', attrs={'class': 'reply-content'})
            if comment_content is None:
                # Malformed/deleted reply without a content paragraph.
                continue
            # re.A restricts \w to ASCII so surrounding Chinese text is not
            # pulled into the match. The dot is escaped (was unescaped in the
            # original, so e.g. "a@b_c" wrongly matched as an address).
            email = re.search(r"\w+@\w+\.\w+", comment_content.text, flags=re.A)
            if email:
                pubtime = reply.find('span', attrs={'class': 'pubtime'})
                # Tolerate a missing timestamp instead of raising AttributeError.
                mail_list.append([email.group(), pubtime.text if pubtime else ''])
        # Progress indicator: running total of addresses found so far.
        print(len(mail_list))
    return mail_list

# Crawl the topic, then collect and print every email address found in replies.
all_bs4_page_list = download_page("https://www.douban.com/group/topic/157797390")

# Guard against a failed download: download_page may yield no pages
# (the original crashed here with TypeError when it returned None).
if all_bs4_page_list:
    mail_list = fetch_emals(all_bs4_page_list)
    print(mail_list)
    for mail in mail_list:
        print(mail)
else:
    print('下载失败')
