# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
import time
import re
import csv


requests.packages.urllib3.disable_warnings()  # Suppress HTTPS certificate-verification warnings


"""
豆瓣-电影-短评
账号:18241255868
密码:gkx55868
一次位移20.最多480 共计500条
"""

class DouBan_Get():
    """Scrape Douban movie comments (short comments and long reviews).

    Parsed rows are appended to local CSV files.  All HTTP traffic goes
    through ``HttpUtils.do_request`` — NOTE(review): that name is neither
    defined nor imported in this file; confirm it is provided elsewhere,
    otherwise every method raises ``NameError`` at call time.
    The session relies on a hard-coded logged-in Cookie header and stops
    working once that cookie expires.
    """

    def __init__(self):
        # CookieJar instance to hold cookies.  NOTE(review): nothing in
        # this file reads it — authentication comes from the hard-coded
        # Cookie header below; kept for interface compatibility.
        self.cookie = cookiejar.CookieJar()
        # Headers captured from a logged-in browser session (XHR request).
        self.headers = {
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Cookie': 'll="118125"; bid=OpSVvH3iLgI; __utma=30149280.1652513340.1601614084.1601614084.1601614084.1; __utmc=30149280; __utmz=30149280.1601614084.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmt=1; ap_v=0,6.0; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1601614098%2C%22https%3A%2F%2Fwww.douban.com%2Fsearch%3Fq%3D%25E4%25B8%2589%25E5%258D%2581%25E8%2580%258C%25E5%25B7%25B2%22%5D; _pk_ses.100001.4cf6=*; __utma=223695111.722767349.1601614098.1601614098.1601614098.1; __utmz=223695111.1601614098.1.1.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/search; __utmc=223695111; __utmb=223695111.0.10.1601614098; __gads=ID=de2d6c2bde2addab:T=1601614097:S=ALNI_Mb51d0d7rWp1kmU-n3ukP3C4hwbhg; __yadk_uid=dccvh6KEmQHk31Bp2isNw7gq4zqVvA9Z; _vwo_uuid_v2=D1023E8DFE5B36FED89F694189689BBE1|34ca9d8d31b345c4714cb15bf6982b98; __utmb=30149280.3.10.1601614084; dbcl2="211869934:OYnxnwGsYtk"; ck=Wc2p; _pk_id.100001.4cf6=f251ed4056ae110d.1601614098.1.1601614498.1601614098.; push_noty_num=0; push_doumail_num=0',
            'DNT': '1',
            'Host': 'movie.douban.com',
            'Referer': 'https://movie.douban.com/subject/30252495/comments?start=0&limit=20&sort=new_score&status=P',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36',
            'X-Requested-With': 'XMLHttpRequest'
        }

    def get_short_comments(self, current):
        """Fetch one page (20 rows) of short comments starting at offset
        *current* and append them to 豆瓣评论_差评.csv."""
        postData = {
            'percent_type': '1',
            'start': current,
            'limit': '20',
            'status': 'P',
            'sort': 'new_score',
            'comments_only': '1'
        }
        # Bug fix: the original assigned two different subject URLs in a
        # row, so the first one (subject 26794435) was dead code — only
        # the effective URL is kept.
        url = "https://movie.douban.com/subject/26608230/comments"
        html = HttpUtils.do_request("GET", url, self.headers, postData)
        time.sleep(1)  # throttle: one request per second
        html_content = json.loads(html.text)['html']
        root = etree.HTML(html_content)
        fileheader = ["有用数量", "评价", "用户名", "用户连接", "评论时间", "评论内容"]
        # Open the CSV once per page rather than once per row (the
        # original reopened the file inside the loop).
        with open("豆瓣评论_差评.csv", "a", encoding="utf-8-sig", newline="") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fileheader)
            for r in root.xpath('//div[@class="comment"]'):
                dict_data = dict()
                dict_data['有用数量'] = r.xpath('.//span[@class="comment-vote"]/span/text()')[0]
                dict_data['评价'] = r.xpath('.//span[@class="comment-info"]/span[2]/@title')[0]
                dict_data['用户名'] = r.xpath('.//span[@class="comment-info"]/a/text()')[0]
                dict_data['用户连接'] = r.xpath('.//span[@class="comment-info"]/a/@href')[0]
                dict_data['评论时间'] = r.xpath('.//span[@class="comment-time "]/@title')[0]
                dict_data['评论内容'] = r.xpath('.//span[@class="short"]/text()')[0]
                print(str(dict_data))
                writer.writerow(dict_data)

    def get_comment_id(self, current):
        """Fetch one page of long-review summaries starting at offset
        *current*, pull each review's full text via its id, and append
        the rows to 豆瓣评论_长平.csv."""
        postData = {
            'sort': 'hotest',
            'start': current
        }
        url = "https://movie.douban.com/subject/26608230/reviews"
        html = HttpUtils.do_request("GET", url, self.headers, postData)
        time.sleep(1)  # throttle: one request per second
        html_content = json.loads(html.text)['html']
        root = etree.HTML(html_content)
        fileheader = ["用户名", "评价", "评论时间", "标题", "有用", "没用", "回应", "评论内容"]
        # Open the CSV once per page rather than once per row.
        with open("豆瓣评论_长平.csv", "a", encoding="utf-8-sig", newline="") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fileheader)
            for r in root.xpath('//div[@class="main review-item"]'):
                dict_data = dict()
                # Renamed from `id` — don't shadow the builtin.
                review_id = r.xpath('./@id')[0]
                dict_data['用户名'] = r.xpath('.//header/a[2]/text()')[0]
                dict_data['评价'] = "".join(r.xpath('.//header/span[contains(@class,"main-title-rating")]/@title'))
                dict_data['评论时间'] = r.xpath('.//header/span[@class="main-meta"]/text()')[0]
                dict_data['标题'] = r.xpath('.//div/h2/a/text()')[0]
                dict_data['评论内容'] = self.get_long_comments(review_id)
                dict_data['有用'] = "".join(r.xpath('.//div[@class="action"]/a[1]//text()')).replace("\n", "").strip()
                dict_data['没用'] = "".join(r.xpath('.//div[@class="action"]/a[2]//text()')).replace("\n", "").strip()
                dict_data['回应'] = "".join(r.xpath('.//div[@class="action"]/a[3]//text()')).replace("\n", "") \
                    .replace("回应", "").strip()
                print(str(dict_data))
                writer.writerow(dict_data)

    def get_long_comments(self, id):
        """Fetch the full text of the long review identified by *id*.

        Returns the concatenated text content of the review HTML.
        (Parameter name ``id`` kept for interface compatibility even
        though it shadows the builtin.)
        """
        url = f"https://movie.douban.com/j/review/{id}/full"
        html = HttpUtils.do_request("GET", url, self.headers, "")
        time.sleep(1)  # throttle: one request per second
        html_content = json.loads(html.text)['html']
        root = etree.HTML(html_content)
        txt = "".join(root.xpath('//text()'))
        return txt


if __name__ == '__main__':
    douban = DouBan_Get()

    # Short comments: Douban serves them 20 per page, up to ~500 total.
    for offset in range(0, 500, 20):
        print(f"=========={offset}=============")
        douban.get_short_comments(offset)

    # Long reviews (disabled):
    # for offset in range(0, 1960, 20):
    #     print(f"=========={offset}=============")
    #     douban.get_comment_id(offset)