# coding=utf-8
import requests
import random
from multiprocessing.dummy import Pool

from bs4 import BeautifulSoup
from lxml import etree
# Module-level holder for the movie title. getcommit() declares `global name`
# but the only assignment (via getname()) is commented out, so this is
# effectively unused at runtime — kept for backward compatibility.
name = ''


def getip():
    """Return one proxy IP picked at random from a hard-coded pool.

    Returns:
        str: a bare IP address such as '110.189.152.86'.

    NOTE(review): these entries have no scheme or port; requests' `proxies`
    mapping normally expects e.g. 'http://host:port' — confirm the pool
    format actually routes traffic through the proxy.
    """
    ips = ['110.189.152.86', '121.237.88.63', '39.100.135.101', '183.47.237.251', '117.68.195.214', '103.39.214.69', '47.100.18.197',
           '140.255.139.45', '113.254.178.224', '121.237.88.26', '124.205.153.20', '183.47.237.251', '175.10.222.143']
    # random.choice is the idiomatic replacement for randint(0, len-1) indexing.
    return random.choice(ips)


def getname(page):
    """Extract the page title text from the first '#content > h1' node.

    Args:
        page: raw HTML source of a Douban page.

    Returns:
        str: the first text node of the h1 element.

    Raises:
        IndexError: if the xpath matches nothing on *page*.
    """
    doc = etree.HTML(page)
    titles = doc.xpath('//*[@id="content"]/h1/text()')
    return titles[0]


def getComment(page):
    """Collect the short-review texts from one Douban comments page.

    Args:
        page: raw HTML source of a comments page.

    Returns:
        list[str]: one entry per <span class="short"> element, each with a
        trailing newline appended.
    """
    soup = BeautifulSoup(page, 'html.parser')
    return [span.getText() + '\n' for span in soup.findAll('span', 'short')]


def getcommit(urls):
    """Fetch one comments page and merge its reviews into the shared list.

    Designed as a thread-pool worker: main() maps it over a list of
    {'url': <page url>} dicts.

    Args:
        urls: dict with a single key 'url' holding the page URL to fetch.

    Side effects:
        Performs a network GET (through a random proxy) and appends the
        parsed comment list to the module-level `comments` accumulator.
    """
    global comments
    headers = {
        'Cookie': 'll="108302"; bid=WqKG-SfmrD4; __utmz=30149280.1634890643.1.1.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utmz=223695111.1634890643.1.1.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; _vwo_uuid_v2=DB6E4D90D8ADEF3594337320955BC1708|47904ef61c53da1f6dfa517ba1ec8e29; __gads=ID=e12aadec16010153-22c48301bdcc0033:T=1634890644:RT=1634890644:S=ALNI_MYr0qxyNy0kkht1R1JkjJmgwq-1fg; __yadk_uid=xkv7Wz4LPxLeLlpnRkHeivCWx1Y0IUD5; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1635081775%2C%22https%3A%2F%2Fwww.douban.com%2F%22%5D; _pk_ses.100001.4cf6=*; ap_v=0,6.0; __utma=30149280.1685798448.1634890643.1634890643.1635081776.2; __utmc=30149280; __utmc=223695111; __utmb=223695111.0.10.1635081776; __utma=223695111.1350203826.1634890643.1634890643.1635081776.2; __utmt=1; __utmb=30149280.1.10.1635081776; dbcl2="247379818:kEoMwvtm2IY"; ck=mNdR; push_noty_num=0; push_doumail_num=0; ct=y; _pk_id.100001.4cf6=57667ad25ce0cdfe.1634890643.2.1635082726.1634890715.',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36 Edg/95.0.1020.30'
    }
    # NOTE(review): getip() returns a bare IP without scheme/port; requests
    # may not honor it as a proxy — verify the pool format.
    proxy = {'http': getip()}
    page_url = urls['url']
    # Fix: without a timeout a dead proxy blocks this worker thread forever.
    page_text = requests.get(url=page_url, headers=headers, proxies=proxy, timeout=10).text
    one_page = getComment(page_text)
    # Skip pages whose comment list was already collected (duplicate fetch).
    if one_page not in comments:
        comments.append(one_page)


# Shared accumulators. `all` shadows the builtin of the same name and is only
# referenced by commented-out code — kept for backward compatibility.
# `comments` is reset by main() and filled by the getcommit() workers.
all = []
comments = []
def main(URL):
    """Scrape the first 10 pages (20 reviews each) of a Douban comments list.

    Args:
        URL: base comments URL ending just before the query string, e.g.
            'https://movie.douban.com/subject/xxx/comments?'; 'start={}' and
            the remaining query parameters are appended here.

    Returns:
        list: the module-level `comments` accumulator — one list of review
        strings per successfully fetched, non-duplicate page.
    """
    global comments
    comments = []
    base = URL + 'start={}'
    # 10 pages, paginated 20 reviews at a time (start=0, 20, ..., 180).
    urls = [{'url': base.format(20 * i) + '&limit=20&status=P&sort=new_score'}
            for i in range(10)]

    # multiprocessing.dummy.Pool is a thread pool (the work is I/O-bound).
    # Fix: the context manager guarantees cleanup even if a worker raises;
    # the original leaked the pool on any exception from pool.map().
    with Pool(40) as pool:
        pool.map(getcommit, urls)
    print('爬取完成')
    return comments
