import requests
import re
import time
def get_one_page(url):
    """Fetch one weibo.cn page and return its HTML text.

    Parameters:
        url: full URL of the mobile-weibo comment page to request.

    Returns:
        The response body as a string on HTTP 200, otherwise None
        (including when the server answers with any non-200 status).
    """
    headers = {
    # NOTE(review): this 'User-agent' value is an API doc URL, not a real
    # browser UA string — presumably intentional, but confirm the site
    # accepts it.
    'User-agent' : 'https://open.weibo.com/wiki/2/users/show',
    'Host' : 'weibo.cn',
    'Accept' : 'application/json, text/plain, */*',
    'Accept-Language' : 'zh-CN,zh;q=0.9',
    'Accept-Encoding' : 'gzip, deflate, br',
    # NOTE(review): hardcoded session cookie — it expires; replace with a
    # fresh login cookie when requests start failing.
    'Cookie' : 'ALF=1593595517; SUB=_2A25z0LkuDeRhGeBM7lMW8yrIzD6IHXVROsdmrDV6PUNbktANLXHkkW1NRPcab54t60DhPcTmYhtX5H4VuY2UtGHd; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWsjd-CWD3fCwLMEdJOBlq75JpX5KzhUgL.FoqESK2Ne0BXS0z2dJLoIX8AwbH8SFHF1F-ReFH81FHFSCHFSEH8SC-4eF-ReEH8SbHWSbHFSFH81F-RxCHFebH8SCHWSCHFxFH8SC-4eF-4S7tt; SUHB=0ftpT3GBCYEqlM; SSOLoginState=1591003518; _T_WM=52039254819; MLOGIN=1; M_WEIBOCN_PARAMS=luicode%3D10000011%26lfid%3D100103type%253D1%2526t%253D10%2526q%253D%2523%25E5%258F%25B2%25E4%25B8%258A%25E6%259C%2580%25E6%2583%25A8%25E6%25AF%2595%25E4%25B8%259A%25E7%2585%25A7%2523; WEIBOCN_FROM=1110106030',
    'DNT' : '1',
    'Connection' : 'keep-alive'
     }
    # Suppress the InsecureRequestWarning triggered by verify=False below.
    requests.packages.urllib3.disable_warnings()
    # timeout added: without it a stalled connection blocks the crawl forever.
    response = requests.get(url, headers=headers, verify=False, timeout=10)
    if response.status_code == 200:  # 200 means the fetch succeeded
        return response.text
    return None
def parse_one_page(html):
    """Extract every <span class="ctt">...</span> snippet from *html* and
    append each match as its own line to test_2.txt.

    Parameters:
        html: page source returned by get_one_page, or None when the
            fetch failed (in which case nothing is written).
    """
    # Guard: get_one_page returns None on failure; the original code
    # crashed with TypeError inside re.findall in that case.
    if not html:
        return
    # Raw string for the regex; re.S lets .*? match across newlines.
    pattern = re.compile(r'<span class="ctt">.*?</span>', re.S)
    items = pattern.findall(html)
    with open('test_2.txt', 'a', encoding='utf-8') as fp:
        # Write each snippet on its own line — the original wrote
        # str(items), i.e. the Python list repr with brackets and quotes.
        for item in items:
            fp.write(item + '\n')

# Crawl comment pages 1..45 of one weibo post, parsing each into test_2.txt.
# Fix: the original used range(45), requesting page=0..44 while printing
# "page 1..45" — page numbers on weibo.cn are 1-indexed, so the labels and
# the requested pages disagreed by one.
for page in range(1, 46):
    url = "https://weibo.cn/comment/J4B7koT4b?uid=2610186355&rl=1&page=" + str(page)
    html = get_one_page(url)
    print(html)
    print('正在爬取第 %d 页评论' % page)
    # Skip parsing when the request failed (get_one_page returned None).
    if html:
        parse_one_page(html)
    time.sleep(3)  # throttle: be polite and avoid rate-limiting
