# Douban subject URLs for the movie and the book scraped below.
urlMovie = 'https://movie.douban.com/subject/2347485/'


urlBook = 'https://book.douban.com/subject/1322455/'

## Fetch comments
import requests
from bs4 import BeautifulSoup
import re
import json
import time
import pandas as pd
import numpy as np
from fake_useragent import UserAgent
# Seed numpy's RNG from the clock (value changes every ~13 s); used for
# random page/proxy/row picks throughout this module.
np.random.seed(int(time.time()/13))
# Seed pool of known HTTP proxies, extended at runtime by get_proxies().
# Bug fix: the original put both proxies under a duplicate 'http' key in a
# single dict, so the first one was silently discarded — they must be
# separate entries for get_proxies() to pick between them.
iniProxies = [
    {'http': 'http://113.121.43.220:9999'},
    {'http': 'http://222.74.73.202:42055'},
]
def get_proxies():
    """Fetch one fresh HTTP proxy from kuaidaili's free listing.

    Picks a random listing page (1-9), parses a random row of the proxy
    table, appends the new proxy to the module-level ``iniProxies`` pool,
    and returns it as a ``requests``-style proxies dict.

    Returns:
        A dict like ``{'http': 'http://IP:PORT'}``.  On any network or
        parse failure, a random proxy from the existing pool is returned
        instead of raising.
    """
    headers = {'User-Agent': str(UserAgent().random)}
    # Re-seed so repeated calls still drift over time (matches module style).
    np.random.seed(int(time.time() / 13))
    page = np.random.choice(range(1, 10))
    time.sleep(0.2)  # be polite to the free-proxy site
    listing_url = f'https://www.kuaidaili.com/free/inha/{page}/'
    try:
        # The request is inside the try now: previously a connection error
        # here crashed the caller instead of falling back to the pool.
        if iniProxies:
            via = iniProxies[np.random.choice(range(len(iniProxies)))]
            r = requests.get(listing_url, headers=headers, proxies=via)
        else:
            r = requests.get(listing_url, headers=headers)
        soup = BeautifulSoup(r.text, 'lxml')
        trs = soup.find('table', class_='table-striped').find('tbody').find_all('tr')
        tr = trs[np.random.choice(range(len(trs)))]
        tds = tr.find_all('td')
        ip = tds[0].text
        port = tds[1].text
        # Build the dict once; original duplicated it for append and return.
        new_proxy = {'http': 'http://' + ip + ':' + port}
        iniProxies.append(new_proxy)
        return new_proxy
    except Exception:
        # Narrowed from a bare except (which also swallowed KeyboardInterrupt);
        # best-effort fallback to a proxy we already know about.
        return iniProxies[np.random.choice(range(len(iniProxies)))]
    

def getBookRate(url = urlBook):
    """Scrape the star-rating percentage spans from a Douban subject page.

    Args:
        url: a Douban subject URL (defaults to the module-level urlBook).

    Returns:
        A dict mapping each 'rating_per' span's 0-based position in page
        order to its percentage text, e.g. {0: '55.3%', 1: '30.1%', ...}.
    """
    headers = {'User-Agent': str(UserAgent().random)}
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.text, 'lxml')
    ratingpercent = soup.find_all('span', class_='rating_per')
    # enumerate() replaces the range(len(...)) anti-pattern of the original.
    return {i: span.text for i, span in enumerate(ratingpercent)}
def getComment(url):
    """Scrape one page of a Douban discussion table ('olt' table).

    Args:
        url: discussion listing URL.

    Returns:
        A list of dicts with keys 'title', 'response', 'date' — one per
        table row, header row excluded.
    """
    headers = {'User-Agent': str(UserAgent().random)}
    resp = requests.get(url, headers=headers)
    page = BeautifulSoup(resp.text, 'lxml')
    rows = page.find('table', class_='olt').find_all('tr')

    results = []
    # rows[0] is the table header — skip it.
    for row in rows[1:]:
        cells = row.find_all('td')
        # Collapse all whitespace/newlines out of the title with a regex.
        title = re.sub(r'\s+', '', cells[0].find('a').text)
        results.append({
            'title': title,
            'response': cells[2].text,
            'date': cells[3].text,
        })
    return results
def getCommentPage(url,cookies=False):
    """Scrape one page of Douban short comments ('comment-item' divs).

    Args:
        url: comments listing URL.
        cookies: when True, send a full browser-style header set including
            a logged-in session cookie (lets the crawl read pages past the
            limit Douban imposes on anonymous visitors).

    Returns:
        A list of dicts with keys 'votes', 'rating', 'date', 'comment';
        an empty list when the response status is not 200.
    """
    headers= {'User-Agent':str(UserAgent().random)}
    if cookies:
        headers={
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,ja;q=0.7",
    "cache-control": "max-age=0",
    "sec-ch-ua": "\"Google Chrome\";v=\"113\", \"Chromium\";v=\"113\", \"Not-A.Brand\";v=\"24\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"macOS\"",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "none",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "cookie": "ll=\"118172\"; bid=0jxdHyoUiJs; _pk_id.100001.4cf6=e59e5aee2c45568f.1691850722.; _vwo_uuid_v2=D6B7A1508C554B4EFEEF7CCAAF73B0550|fa8bd4e82e5ea87d3841c5d93b749c47; __utmz=30149280.1691912055.2.2.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); __utmz=223695111.1691912055.2.2.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); viewed=\"24748615\"; dbcl2=\"273233329:warc8dn0ViY\"; push_noty_num=0; push_doumail_num=0; __utmv=30149280.27323; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1692080064%2C%22https%3A%2F%2Fwww.google.com.hk%2F%22%5D; __utma=223695111.442711095.1691850722.1691912055.1692080065.3; ck=Ak09; ap_v=0,6.0; __utma=30149280.1384434250.1691850722.1692080065.1692155237.4; __utmc=30149280",
    # Bug fix: the browser-style header set previously dropped the
    # User-Agent entirely, so requests sent its default
    # "python-requests/x.y" UA alongside browser sec-* headers — an easy
    # bot signature. Keep a random browser UA here too.
    "user-agent": str(UserAgent().random)
  }
    r = requests.get(url, headers=headers)
    if r.status_code != 200:
        # Non-200 (rate limit / login wall): report it and return nothing
        # so the caller's empty-page handling kicks in.
        print(r.status_code)
        return []
    soup = BeautifulSoup(r.text, 'lxml')
    commentList = soup.find_all('div', class_='comment-item')
    commentDicts = []
    for comm in commentList:
        votes= comm.find('span', class_='vote-count').text
        if comm.find('span', class_='rating'):
            # Rating is encoded in a CSS class, e.g. 'allstar50' — the
            # second-to-last character is the star count ('5').
            # NOTE(review): rating is a str here but int 0 below; kept as-is
            # for compatibility with existing consumers of the JSON dump.
            rating = comm.find('span', class_='rating').attrs['class'][0][-2]
        else:
            rating = 0
        date = comm.find('span', class_='comment-time').text
        coms = comm.find('span', class_='short').text
        commentDicts.append({
            'votes': votes,
            'rating': rating,
            'date': date,
            'comment': coms
        })
    return commentDicts

def getReview(url):
    """Scrape one page of long-form reviews, following each review's link
    to collect its full body text.

    Args:
        url: a Douban reviews listing URL.

    Returns:
        A list of dicts with keys 'date', 'rating', 'title', 'link',
        'review'.  Every request goes through a proxy from get_proxies().
    """
    listing = requests.get(url,
                           headers={'User-Agent': str(UserAgent().random)},
                           proxies=get_proxies())
    listing_soup = BeautifulSoup(listing.text, 'lxml')
    items = listing_soup.find_all('div', class_='review-item')
    print(len(items))
    reviews = []
    for item in items:
        head = item.find('header', class_='main-hd')
        date = head.find('span', class_='main-meta').text
        rating_span = head.find('span', class_='main-title-rating')
        # Star count is the second-to-last char of the first CSS class
        # (e.g. 'allstar50' -> '5'); 0 when the reviewer gave no rating.
        rating = rating_span.attrs['class'][0][-2] if rating_span else 0
        anchor = item.find('div', class_='main-bd').find('h2').find('a')
        link = anchor.attrs['href']
        title = anchor.text
        # Fetch the review's own page to get the full content.
        detail = requests.get(link,
                              headers={'User-Agent': str(UserAgent().random)},
                              proxies=get_proxies())
        detail_soup = BeautifulSoup(detail.text, 'lxml')
        content = detail_soup.find('div', class_='review-content')
        body = ''.join(p.text for p in content.find_all('p')) if content else ''
        reviews.append({
            'date': date,
            'rating': rating,
            'title': title,
            'link': link,
            'review': body
        })
    return reviews

# Sample listing URLs used for ad-hoc testing in __main__.
urlBookReview = 'https://book.douban.com/subject/1322455/reviews?sort=time&start=20&limit=100'
# NOTE(review): despite the name, this points at the /discussion/ listing
# (consumed by getComment), not the /reviews listing.
urlMovieReview = 'https://movie.douban.com/subject/2347485/discussion/?start=20&sort_by=time'

def getAllBookreview():
    """Walk the book's review listing 20 entries at a time (up to 1000),
    stopping at the first empty page; pauses 1 s between pages."""
    collected = []
    for start in range(0, 1000, 20):
        page_url = 'https://book.douban.com/subject/1322455/reviews?sort=time&start={}&limit=20'.format(start)
        batch = getReview(page_url)
        if not batch:
            break
        collected += batch
        time.sleep(1)
        print('已经爬取{}条评论'.format(len(collected)))
    return collected


def getAllMovieComments():
    """Walk the movie's review listing 20 entries at a time (up to 1000),
    stopping at the first empty page; pauses 1 s between pages."""
    collected = []
    for start in range(0, 1000, 20):
        page_url = 'https://movie.douban.com/subject/2347485/reviews?start={}&limit=20&status=P&sort=time'.format(start)
        batch = getReview(page_url)
        if not batch:
            break
        collected += batch
        time.sleep(1)
        print('已经爬取{}条评论'.format(len(collected)))
    return collected


def getAllMovieReview():
    """Walk the movie's discussion listing 20 entries at a time (up to
    1000) via getComment, stopping at the first empty page; 1 s pauses."""
    collected = []
    for start in range(0, 1000, 20):
        page_url = 'https://movie.douban.com/subject/2347485/discussion/?start={}&sort_by=time'.format(start)
        batch = getComment(page_url)
        if not batch:
            break
        collected += batch
        time.sleep(1)
        print('已经爬取{}条评论'.format(len(collected)))
    return collected

def getMovieShortComments():
    """Crawl the movie's short comments 20 per page (up to ~28k), using a
    logged-in cookie; retries an empty page once before giving up.
    Sleeps a random 1.0–2.9 s between pages."""
    collected = []
    for start in range(0, 27970, 20):
        page_url = 'https://movie.douban.com/subject/2347485/comments?start={}&limit=20&sort=new_score&status=P'.format(start)
        batch = getCommentPage(page_url, cookies=True)
        if not batch:
            print('已经爬取{}长评论'.format(len(collected)))
            # One retry before concluding the listing is exhausted.
            batch = getCommentPage(page_url, cookies=True)
            if not batch:
                break
        collected.extend(batch)
        # Randomized pause to look less like a bot.
        time.sleep(np.random.choice(np.arange(1, 3, 0.1)))
        print('已经爬取{}条评论'.format(len(collected)))
    return collected
# print(getBookRate(url=urlMovie))
# time.sleep(1)
if __name__ == '__main__':
    # Crawl every short comment and dump the list as UTF-8 JSON.
    # Bug fix: the file handle passed inline to json.dump was never
    # closed; the context manager guarantees flush + close. The redundant
    # local `import json` is dropped (json is imported at module top).
    comments = getMovieShortComments()
    with open('src/data/MoviewComments.json', 'w', encoding='utf-8') as f:
        json.dump(comments, f, ensure_ascii=False)
