import requests
import csv
from lxml import etree
from time import sleep

# Request headers: a desktop Chrome User-Agent plus the minimal Douban
# cookies needed for the comment pages to render.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 "
                  "Safari/537.36",
    "Cookie": "ll={}; bid=mLzc3onoOpg; ap_v=0,6.0".format(108290)
}
# reviewer nicknames, one entry per scraped comment
person_name = []
# star ratings (digit string, or "" when the comment is unrated)
starts = []
# comment bodies
texts = []
# number of comments collected so far (row count for the CSV dump)
count = 0
# pagination offset in the URL query string; each page advances it by 20
start = 0

url = "https://movie.douban.com/subject/26213252/comments?start={}&limit=20&sort=new_score&status=P"


# 爬取资源
def spider(start):
    """Fetch the comment page at offset *start* and hand it to the parser.

    Returns True when the page yielded comments (keep paging), False when
    paging should stop (HTTP error or an empty page).
    """
    url_format = url.format(start)
    print("爬取中......")
    # A timeout guards against hanging forever on a stalled connection;
    # the original call had none and could block indefinitely.
    html = requests.get(url_format, headers=headers, timeout=10)
    # Douban answers non-200 (e.g. 403) once it rate-limits the client;
    # stop gracefully instead of parsing an error page.
    if html.status_code != 200:
        return False
    return bool(select(html))


# 解析数据
def select(html):
    """Parse one comment page out of *html* (a requests Response).

    Appends the nickname, star rating and body text of every comment found
    to the module-level lists and bumps ``count``.

    Returns True when at least one comment was parsed, False when the page
    holds none — the caller's signal to stop paging.
    """
    global count
    # Parse the HTML document into an lxml Element tree.
    selector = etree.HTML(html.text)

    comments = selector.xpath("//div[@class='comment']")
    if not comments:
        return False
    for comment in comments:
        # reviewer nickname
        name = str(comment.xpath("./h3/span[@class='comment-info']/a/text()")[0])
        # The rating is encoded in a CSS class like "allstar40 rating";
        # strip the wrapping so only the leading digit remains.
        start = str(comment.xpath("./h3/span[@class='comment-info']/span[2]/@class")[0]) \
            .replace("allstar", "") \
            .replace("0 rating", "")
        # Some comments have no text node under ./p/span — guard against
        # an empty result instead of raising IndexError.
        text_nodes = comment.xpath("./p/span/text()")
        text = str(text_nodes[0]) if text_nodes else ""
        # Unrated comments carry the timestamp span in position 2 instead
        # of a rating class; record those as an empty rating.
        if "comment-time" in start:
            start = ""
        count += 1

        person_name.append(name)
        starts.append(start)
        texts.append(text)
    # BUG FIX: the original implicitly returned None (falsy) here, which
    # made spider() report failure after the very first page and stopped
    # the crawl loop immediately.
    return True


# 将解析出来的数据写入本地持久化
def to_csv():
    """Persist the collected comments to 短评.csv.

    Writes a header row followed by one row per comment.  An explicit
    UTF-8 encoding (with BOM, so Excel auto-detects it) is required:
    without it, platforms whose default codec cannot represent the
    scraped Chinese text raise UnicodeEncodeError on write.
    """
    with open("短评.csv", "w", newline="", encoding="utf-8-sig") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["昵称", "星级", "评论"])
        # The three lists grow in lockstep (one append each per comment),
        # so zip pairs them row-for-row — same output as indexing by count.
        writer.writerows(zip(person_name, starts, texts))

# Crawl page after page until one yields no comments, pausing between
# requests to stay polite, then persist everything to CSV.
while spider(start):
    sleep(2)
    start += 20
to_csv()
