import codecs
import csv
import re
import urllib
# Explicit submodule import: bare `import urllib` does not guarantee
# that urllib.request (used below) is loaded.
import urllib.request
from multiprocessing.dummy import Pool

import requests


def pageDown(URL):
    """Fetch *URL* and return the response body decoded as UTF-8.

    Sends a desktop-browser User-Agent so the server does not reject the
    request as a bot.  Raises urllib.error.URLError/HTTPError on failure.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.72 Safari/537.36 Edg/89.0.774.45'}
    request = urllib.request.Request(URL, headers=headers)
    # Context manager closes the HTTP response (the original leaked the socket).
    with urllib.request.urlopen(request) as response:
        return response.read().decode("utf-8")
# Global accumulator: one dict per scraped post, filled and written to
# tieba.csv by contentDown().
result_list = []

def contentDown(url):
    """Scrape every page of the Tieba thread at *url* and write the posts
    (author name, content, timestamp) to tieba.csv as UTF-8 with BOM.

    Appends each post's dict to the module-level ``result_list``.
    """
    html = pageDown(url)
    # The <span class="red"> elements hold the thread counters; the second
    # one is the total page count. Fall back to 1 page if the layout differs.
    pn = re.findall('<span class="red">(.*?)</span', str(html))
    page_count = int(pn[1]) if len(pn) > 1 else 1
    page_urls = [url + '?pn=' + str(i + 1) for i in range(page_count)]

    # Fetch all pages concurrently with a small thread pool, and make sure
    # the pool is shut down (the original never closed it).
    pool = Pool(5)
    try:
        # pool.map returns the list of page sources directly; the original
        # wrapped it in another list and then regex-matched str() of that
        # outer list instead of each page's HTML.
        sources = pool.map(pageDown, page_urls)
    finally:
        pool.close()
        pool.join()

    for source in sources:
        # Each match is one post ("floor") block of the page.
        every_floor = re.findall(
            'l_post l_post_bright j_l_post clearfix  "(.*?)p_props_tail props_appraise_wrap',
            str(source), re.S)
        for each in every_floor:
            result = {}
            result['name'] = re.findall('<a.*?class="p_author_name.*?.*?>(.*?)</a>', each, re.S)
            result['content'] = re.findall('<div id="post_content.*?>            (.*?)</div>', each, re.S)
            result['time'] = re.findall('<span class="tail-info.*?>(20.*?)<', each, re.S)
            result_list.append(result)

    # Write the CSV exactly once, after scraping.  The original did this
    # inside the per-post loop, re-appending the BOM, the header row and the
    # whole accumulated result_list for every single post.
    with open('tieba.csv', 'wb') as fileopen:
        # BOM first so Excel detects the file as UTF-8 (防止csv文件乱码).
        fileopen.write(codecs.BOM_UTF8)
    with open('tieba.csv', 'a', encoding='UTF-8', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['name', 'content', 'time'])
        writer.writeheader()
        writer.writerows(result_list)

# Script entry point: scrape this sample thread.
# NOTE(review): runs on import too — consider an `if __name__ == "__main__":` guard.
contentDown("https://tieba.baidu.com/p/7255446567")

