# Scrape short reviews of the movie "Hi, Mom" (你好，李焕英) from Douban
import csv
from xml import etree

import requests
from fake_useragent import UserAgent
import chardet
from lxml import etree
import re
import time

#从代理IP池，随机获取一个IP，环境问题，没有调好
# def get_proxy():
#     try:
#         PROXY_POOL_URL = 'http://localhost:5555/random'
#         response = requests.get(PROXY_POOL_URL)
#         if response.status_code == 200:
#             return response.text
#     except ConnectionError:
#         return None

def get_url(page, headers):
    """Fetch one page of Douban short comments and return its HTML text.

    Args:
        page: zero-based page index; each Douban page holds 20 comments,
            so the `start` query parameter is page * 20.
        headers: dict of HTTP request headers (caller supplies a random
            User-Agent to reduce the chance of being blocked).

    Returns:
        The response body as text.
    """
    url = ('https://movie.douban.com/subject/34841067/comments?start='
           + str(page * 20) + '&limit=20&status=P&sort=new_score')
    print("开始爬取-------------------", url)
    # timeout keeps a stalled connection from hanging the crawler forever;
    # the original call had no timeout at all.
    response = requests.get(url=url, headers=headers, timeout=10)
    return response.text


def parse_html(html):
    """Extract comment rows from one page of Douban comment HTML.

    Args:
        html: the HTML text of a comment-list page.

    Returns:
        A list of rows, each ``[reviewer, score, date, votes, comment]``.
    """
    tree = etree.HTML(html)
    # Reviewer names
    reviewers = tree.xpath("//div[@class='comment-item ']//span[@class='comment-info']/a/text()")
    # Rating label (e.g. 力荐/推荐) taken from the title attribute
    scores = tree.xpath("//div[@class='comment-item ']//span[@class='comment-info']/span[2]/@title")
    # Comment dates: strip the surrounding whitespace/newlines, then drop
    # the empty strings that stripping produces.
    dates = tree.xpath("//div[@class='comment-item ']//span[@class='comment-time ']/text()")
    dates = [re.sub(r'\s+', '', d) for d in dates]  # raw string: '\s' is invalid escape otherwise
    dates = [d for d in dates if d]
    # Up-vote ("useful") counts
    votes = tree.xpath("//div[@class='comment-item ']//span[@class='votes vote-count']/text()")
    # Comment bodies
    comments = tree.xpath("//p[@class=' comment-content']/span/text()")
    # zip truncates to the shortest list, so a comment missing one field
    # (e.g. an unrated review) can no longer raise IndexError the way the
    # original index-based loop did.
    return [list(row) for row in zip(reviewers, scores, dates, votes, comments)]


def save_to_csv(result, filename='chall.csv'):
    """Append one batch of comment rows to the CSV file.

    The original implementation opened the file with mode ``'w'`` while
    being called once per page, so every page overwrote the previous one
    and only the last page survived.  Append mode accumulates all pages;
    the header row is written only when the file is still empty.

    utf-8-sig is used so Excel opens the file without mojibake: it emits a
    BOM at the start of the file (CPython suppresses the BOM when appending
    to a non-empty file), which Excel needs to detect UTF-8.

    Args:
        result: iterable of rows, each ``[reviewer, score, date, votes, comment]``.
        filename: output path; defaults to the original hard-coded 'chall.csv'.
    """
    with open(filename, 'a', newline='', encoding='utf-8-sig') as csvfile:
        writer = csv.writer(csvfile)
        if csvfile.tell() == 0:  # fresh/empty file -> write the header once
            writer.writerow(['评论者', '评分等级', '评论日期', '点赞数', '评论内容'])
        writer.writerows(result)


def main():
    """Crawl 21 pages of short comments and persist each page to CSV."""
    headers = {
        'user-agent': UserAgent().random,
    }
    total_pages = 21
    for page_index in range(total_pages):
        page_html = get_url(page_index, headers)
        rows = parse_html(page_html)
        save_to_csv(rows)
        time.sleep(5)  # pause between requests so the server isn't hammered
    print("数据爬取成功")


if __name__ == '__main__':
    main()
