import requests
import csv
from lxml import etree
from time import sleep
# Accumulators for scraped data: filled in by select(), written out by to_csv().
nick_names = []
stars = []
texts = []
# Page URL template: `start` is the pagination offset (20 comments per page).
url1 = "https://movie.douban.com/subject/26213252/comments?start={}&limit=20&sort=new_score&status=P"
# Running total of comments collected so far.
cnt = 0
def select(html):
  """Parse one page of comments from a fetched response.

  Appends nickname / star-rating / text for each comment to the
  module-level nick_names, stars and texts lists and bumps cnt.

  Args:
    html: a response object exposing a .text attribute with the page HTML.
  Returns:
    False when the page contains no comments (end of pagination),
    True otherwise.
  """
  # Turn the raw HTML into an object that supports XPath queries.
  selector = etree.HTML(html.text)
  # Each div.comment element holds exactly one user review.
  comments = selector.xpath('//div[@class="comment"]')
  if not comments:
    print("爬取完毕")
    return False
  global cnt
  for comment in comments:
    # Relative XPath (./) restricts the search to the current comment node.
    # Every [0] access is guarded: some comments lack a rating or visible
    # text, and indexing an empty XPath result would raise IndexError.
    name_parts = comment.xpath("./h3/span[@class='comment-info']/a/text()")
    nick_name = str(name_parts[0]) if name_parts else ""
    # The second info span's class encodes the rating (e.g. "allstar50 rating");
    # strip the wrappers so only the star digit remains. When the user gave no
    # rating, that span is the timestamp ("comment-time") instead — blank it.
    star_parts = comment.xpath("./h3/span[@class='comment-info']/span[2]/@class")
    star = str(star_parts[0]) if star_parts else ""
    star = star.replace("allstar", "").replace("0 rating", "")
    if "comment-time" in star:
      star = ""
    text_parts = comment.xpath("./p/span/text()")
    text = str(text_parts[0]) if text_parts else ""
    cnt += 1
    nick_names.append(nick_name)
    stars.append(star)
    texts.append(text)
  return True

def spider(start):
  """Fetch one page of comments at pagination offset `start` and parse it.

  Returns:
    False when the crawl should stop (no comments left, or the server
    answered with a non-200 status such as a rate-limit page), True otherwise.
  """
  url = url1.format(start)
  print("正在爬取" + url)
  headers = {
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
    # NOTE(review): hard-coded session cookie — account-specific and will
    # expire; refresh it before reusing this script.
    "cookie":'bid=tfLselx2A08; _pk_id.100001.4cf6=913d7ea0f1e65fcd.1739682561.; __utmc=30149280; __utmc=223695111; ap_v=0,6.0; _pk_ses.100001.4cf6=1; dbcl2="287207026:39T40hWtWUY"; ck=sCwO; __utma=30149280.1122396539.1739682561.1740190233.1740192757.3; __utmb=30149280.0.10.1740192757; __utmz=30149280.1740192757.3.2.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utma=223695111.1861454081.1739682561.1740190233.1740192757.3; __utmb=223695111.0.10.1740192757; __utmz=223695111.1740192757.3.2.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; push_noty_num=0; push_doumail_num=0'
  }
  # timeout so a stalled connection cannot hang the crawl forever
  html = requests.get(url, headers=headers, timeout=10)
  if html.status_code != 200:
    # a 403/302 here usually means rate limiting or a login wall —
    # stop instead of parsing the error page as if it were content
    return False
  return select(html) is not False

def to_csv(path="d:/项目/python/爬虫/网页/豆瓣短评.csv", rows=None):
  """Write the scraped comments out as a CSV file.

  Args:
    path: destination file; defaults to the original hard-coded location.
    rows: iterable of (nickname, stars, text) tuples; defaults to the
          module-level lists populated by select().
  """
  if rows is None:
    # zip stops at the shortest list, so mismatched lengths cannot raise
    rows = zip(nick_names, stars, texts)
  # utf-8-sig writes a BOM so Excel detects the encoding of the Chinese text
  with open(path, "w", newline="", encoding='utf-8-sig') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(["昵称","星数","短评"])
    for row in rows:
      try:
        writer.writerow(row)
      # narrow catch: only encoding/CSV problems skip a row; anything else
      # (KeyboardInterrupt, disk full OSError, ...) propagates
      except (csv.Error, UnicodeError):
        print("编码错误，忽略该数据")

# Entry-point guard so importing this module does not trigger a crawl.
if __name__ == "__main__":
  # Crawl page by page (20 comments each) until a page comes back empty,
  # sleeping 1s between requests to stay polite to the server.
  start = 0
  while True:
    if not spider(start):
      break
    sleep(1)
    start += 20
  to_csv()
