
## Fetch comments
import requests
from bs4 import BeautifulSoup
import re
import json
import time
import random
import pandas as pd
import numpy as np

from fake_useragent import UserAgent

iniProxies = [
]
def get_proxies():
    """Scrape one random free proxy from kuaidaili.com and return it.

    Returns a requests-style proxies dict, e.g. {'https': 'https://ip:port'}.
    Side effect: appends the scraped proxy to the module-level iniProxies
    pool, which subsequent calls reuse to route the listing request itself.
    """
    headers = {'User-Agent': str(UserAgent().random)}
    # Re-seed so calls within the same ~13 s window pick the same listing page.
    np.random.seed(int(time.time()/13))
    page = np.random.choice(range(1, 10))
    time.sleep(0.2)
    listing_url = f'https://www.kuaidaili.com/free/inha/{page}/'
    if iniProxies:
        # Route through a previously scraped proxy chosen at random.
        chosen = iniProxies[np.random.choice(range(len(iniProxies)))]
        r = requests.get(listing_url, headers=headers, proxies=chosen)
    else:
        # Bootstrap proxy used until the pool has at least one entry.
        r = requests.get(listing_url, headers=headers, proxies={'http': 'http://114.231.45.155:8888'})
    soup = BeautifulSoup(r.text, 'lxml')
    rows = soup.find('table', class_='table-striped').find('tbody').find_all('tr')
    # Pick one random row; column 0 is the IP, column 1 the port.
    cells = rows[np.random.choice(range(len(rows)))].find_all('td')
    proxy = {'https': 'https://' + cells[0].text + ':' + cells[1].text}
    iniProxies.append(proxy)
    # Return a copy so the pooled entry and the returned dict stay independent,
    # matching the original behavior of building the dict twice.
    return dict(proxy)

def getDiscussion(url):
    """Fetch one Tieba forum listing page and extract thread summaries.

    Returns a list of dicts with keys 'respone' (reply count — key spelling
    preserved because downstream consumers read it), 'title', and 'date'
    (last-reply date with all whitespace removed).
    """
    headers = {'User-Agent': str(UserAgent().random)}
    time.sleep(0.2)
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.text, 'lxml')
    threads = []
    for item in soup.find_all('div', class_='t_con'):
        date_tag = item.find('span', class_='threadlist_reply_date')
        if date_tag is None:
            # Skip entries (e.g. pinned notices) that have no reply date.
            continue
        threads.append({
            'respone': item.find('span', class_='threadlist_rep_num').text,
            'title': item.find('a', class_='j_th_tit').text,
            'date': re.sub(r'\s+', '', date_tag.text),
        })
    return threads

def getAllDisscussion():
    """Scrape the first 1000 threads (20 pages of 50) of the 天道 Tieba forum.

    Returns the concatenated list of thread dicts produced by getDiscussion.
    """
    DisDicts = []
    for i in range(0, 1000, 50):
        # BUG FIX: the original string lacked the f prefix, so every request
        # carried a literal "pn={i}" and re-fetched the same page 20 times.
        url = f'https://tieba.baidu.com/f?kw=%E5%A4%A9%E9%81%93&ie=utf-8&pn={i}'
        DisDicts.extend(getDiscussion(url))
        print(f'第{i}个讨论抓取成功')
    return DisDicts

if __name__ == "__main__":
    import json
    # print(getComment(urlMovieReview))
    json.dump(getAllDisscussion(), open('src/data/BaiduComments.json', 'w', encoding='utf-8'), ensure_ascii=False)
