# -*- coding: utf-8 -*-
# @Author:Thl
# @Time: 2019/4/24

import requests  # 网络请求库
from lxml import etree  # 分析HTML代码的库
import threading  # 线程库
import csv

url = "https://movie.douban.com/subject/26363254/comments?status=P"  # base comments-page URL; __main__ builds its own paginated URLs, so this default matters only if get_info is called directly

def get_info(url):
    """Fetch one page of Douban short comments and append them to the CSV.

    Parameters
    ----------
    url : str
        URL of one comments page (e.g. ...comments?start=20&...).

    Side effects: appends (time, username, content) rows to 战狼2短评.csv.
    Network errors propagate out of this function (i.e. into the spawning
    thread); a blocked/empty response simply writes nothing.
    """
    # Bounded wait so one stalled request cannot hang its thread forever.
    response = requests.get(url, timeout=10)
    tree = etree.HTML(response.text)
    if tree is None:
        # etree.HTML returns None for an empty document (e.g. rate-limited
        # response with no body) — nothing to parse, nothing to write.
        return

    rows = []
    for item in tree.xpath('//*[@class="comment-item"]'):
        try:
            # xpath returns a list; [0] raises IndexError when the field
            # is absent (e.g. an ad or deleted comment) — skip that item
            # instead of silently swallowing every possible error.
            username = item.xpath('.//span[@class="comment-info"]/a/text()')[0]
            posted_at = item.xpath('.//span[@class="comment-time "]/text()')[0].strip()
            short_text = item.xpath('.//span[@class="short"]/text()')[0]
        except IndexError:
            continue
        rows.append((posted_at, username, short_text))

    # NOTE(review): many threads append to this file concurrently without a
    # lock. Writing all rows in a single writerows() call reduces — but does
    # not eliminate — the chance of interleaved output; confirm whether a
    # shared threading.Lock should guard this section.
    with open("战狼2短评.csv", "a", encoding="utf8", newline="") as f:
        csv.writer(f, dialect="excel").writerows(rows)


if __name__ == "__main__":
    # Write the CSV header once, before any worker thread starts appending.
    with open("战狼2短评.csv", "w", encoding="utf8", newline="") as f:
        csv.writer(f, dialect="excel").writerow(["时间", "用户名", "评论内容"])

    threads = []
    # 100 pages x 20 comments per page = up to 2000 short comments.
    for page in range(100):
        # Use a local name rather than rebinding the module-level `url`.
        page_url = (
            "https://movie.douban.com/subject/26363254/comments"
            "?limit=20&sort=new_score&status=P&start=" + str(page * 20)
        )
        worker = threading.Thread(target=get_info, args=(page_url,))
        worker.start()
        threads.append(worker)

    # Plain loop for the join side effect — a list comprehension here would
    # build a throwaway list of Nones.
    for worker in threads:
        worker.join()
