'''
    content: crawl Sina Weibo comments
    author: kktao
'''
import requests
import re
import time
from lxml import etree
from multiprocessing.dummy import Pool
import os

# Base output directory: <cwd>/weibo_crawl — results are written to its data/ subfolder
cur_path = os.path.join(os.getcwd(),"weibo_crawl")


def get_one_page(url):
    """Fetch one weibo.cn page and return the HTTP response.

    Args:
        url: the weibo.cn comment-page URL to request.

    Returns:
        The requests.Response when the server answers 200, otherwise None.
    """
    # Request headers: a browser-like User-agent plus the user's Cookie are
    # required — weibo.cn rejects anonymous/headless requests.
    headers = {
        'User-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
        'Host' : 'weibo.cn',
        'Accept' : 'application/json, text/plain, */*',
        'Accept-Language' : 'zh-CN,zh;q=0.9',
        'Accept-Encoding' : 'gzip, deflate, br',
        'Cookie' : '填写您的cookie',
        'DNT': '1',
        'Connection' : 'keep-alive',
    }
    # timeout added: requests has no default timeout, so a stalled connection
    # would otherwise block the crawl loop forever.
    response = requests.get(url, headers=headers, timeout=10)
    if response.status_code == 200:  # 200 means the fetch succeeded
        return response
    return None
def parse_one_page(html_response):
    """Extract raw <span class="ctt"> comment snippets via regex and append
    them (as one stringified list) to data/result.txt under cur_path.

    Args:
        html_response: requests.Response whose .text holds the page HTML.
    """
    pattern = re.compile(r'<span class="ctt">.*?</span>', re.S)
    items = re.findall(pattern, html_response.text)
    result = str(items)
    # Build the path portably (the original hard-coded a Windows '\\' separator)
    # and make sure the output directory exists before opening the file.
    out_dir = os.path.join(cur_path, 'data')
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, 'result.txt'), 'a', encoding='utf-8') as fp:
        fp.write(result)


def parse_one_page_byxpath(html_response):
    """Extract comment texts via XPath and append them, newline-joined,
    to data/result_xpath_kx.txt under cur_path.

    Args:
        html_response: requests.Response whose .content holds the raw HTML bytes.
    """
    html = etree.HTML(html_response.content)
    items = html.xpath('//*[@class="c"]/span[@class="ctt"]/text()')
    sep = '\n'  # renamed: the original shadowed the builtin `str`
    # Build the path portably (the original hard-coded a Windows '\\' separator)
    # and make sure the output directory exists before opening the file.
    out_dir = os.path.join(cur_path, 'data')
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, 'result_xpath_kx.txt'), 'a', encoding='utf-8') as fp:
        # write() — writelines() on a single string wrote the same bytes but
        # implied an iterable of lines.
        fp.write(sep.join(items))

# Crawl the comments of one trending weibo post: "假如有一天通知你开学了"
def my_crawl():
    """Sequentially fetch 976 comment pages of a fixed post, parsing each
    with the XPath extractor and sleeping 2s between requests."""
    for i in range(976):
        url = "https://weibo.cn/comment/IyyhQlWKG?uid=2835724503&rl=1&page="+str(i)
        html_response = get_one_page(url)
        print('正在爬取第 %d 页评论' % (i+1))
        # get_one_page returns None on a non-200 answer; skip instead of
        # crashing the whole crawl with an AttributeError in the parser.
        if html_response is None:
            continue
        parse_one_page_byxpath(html_response)
        time.sleep(2)  # throttle to avoid rate-limiting / bans

# Worker for the multithreaded test: crawls a single comment page.
def my_multi_crawl(index):
    """Fetch and parse one comment page of a fixed post.

    Args:
        index: zero-based page number to request.
    """
    url = "https://weibo.cn/comment/F18Fc4MAO?uid=1822299354&rl=1&page="+str(index)
    html_response = get_one_page(url)
    print('正在爬取第 %d 页评论' % (index+1))
    # get_one_page returns None on a non-200 answer; skip instead of
    # crashing the worker with an AttributeError in the parser.
    if html_response is None:
        return
    parse_one_page_byxpath(html_response)
    time.sleep(2)  # throttle to avoid rate-limiting / bans

if __name__ == "__main__":
    # --- Multithreaded variant (kept for reference, disabled) ---
    # # Create a pool of 4 worker threads
    # pool = Pool(4)
    # # Page numbers to crawl: [100, 101, ..., 200]
    # orign_num = [x for x in range(100,201)]
    # # Map each page number to the worker across the pool
    # pool.map(my_multi_crawl,orign_num)

    # Single-threaded crawl
    my_crawl()
  