# -*- coding:utf-8 -*-

import requests
from lxml import etree
import json
import time
import csv
import os
import pandas as pd


# Suppress urllib3's InsecureRequestWarning: every request in this script
# is made with verify=False, which would otherwise emit a warning per call.
requests.packages.urllib3.disable_warnings()


"""
豆瓣
"""

class DouBan_Get():
    """Douban (豆瓣) scraper.

    Pipeline:
      1. ``get_short_comments(movie_id, 0)`` — walk a movie's short-comment
         pages; for each new commenter, pull their watched-movie list and
         persist users with >= 50 movies plus their movies.
      2. ``clean_data()`` — keep movies watched by 2-3 saved users.
      3. ``get_comments(0, movie_id, name)`` — save up to 5 short comments
         per cleaned movie.
    """

    # Seconds before a request is abandoned. The original code had no
    # timeout, so a stalled connection would hang the crawl forever.
    TIMEOUT = 15

    def __init__(self):
        self.headers = {
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'DNT': '1',
            'Host': 'movie.douban.com',
            # NOTE(review): a logged-in 'Cookie' header may be required for
            # some endpoints; supply your own — never commit real session
            # cookies (the original file leaked a live session here).
            'Referer': 'https://movie.douban.com/subject/30252495/comments?start=0&limit=20&sort=new_score&status=P',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36',
            'X-Requested-With': 'XMLHttpRequest'
        }
        self.user_list = []      # usernames already persisted to self.userfile
        self.movie_list = []     # movie names harvested for the current user
        self.movie_id_list = []  # movie ids, parallel to movie_list
        self.seen_users = set()  # every username inspected, incl. unqualified
        self.moviefile = "电影名称.csv"
        self.userfile = "用户.csv"

    # 短评: walk a movie's comment pages and harvest qualified commenters
    def get_short_comments(self, id, current):
        """Walk the short-comment pages of movie *id* starting at offset
        *current* (20 comments per page, offsets up to 500).

        Each commenter not inspected before gets their watched-movie list
        fetched via ``get_user_movie``; users yielding >= 50 movies are
        saved together with their movies.
        """
        # Iterate pages instead of recursing once per page as before —
        # same coverage, no growing call stack.
        while current <= 500:
            params = {
                'percent_type': '1',
                'start': current,
                'limit': '20',
                'status': 'P',
                'sort': 'new_score',
                'comments_only': '1'
            }
            url = f"https://movie.douban.com/subject/{id}/comments"
            resp = requests.get(url, headers=self.headers, params=params,
                                verify=False, timeout=self.TIMEOUT)
            time.sleep(1)
            root = etree.HTML(json.loads(resp.text)['html'])
            for node in root.xpath('//div[@class="comment"]'):
                username = node.xpath('.//span[@class="comment-info"]/a/text()')[0]
                userid = node.xpath('.//span[@class="comment-info"]/a/@href')[0].split('/')[-2]
                # Skip users already inspected. The original only tracked
                # *saved* users, so unqualified ones were re-fetched every
                # time they commented again.
                if username in self.seen_users:
                    continue
                self.seen_users.add(username)
                self.get_user_movie(userid, 0)
                if len(self.movie_list) >= 50:
                    self.add_user(userid, username)
                    self._save_movies()
                # Always clear: the original cleared only on the >= 50
                # branch, so a partial harvest leaked into the next
                # qualified user's saved movie list.
                self.movie_list.clear()
                self.movie_id_list.clear()
            current += 20

    def _save_movies(self):
        """Append the currently harvested (id, name) movie pairs to
        ``self.moviefile``, writing the header row on first creation."""
        # One file handle for the whole batch instead of reopening per row.
        need_header = not os.path.exists(self.moviefile)
        with open(self.moviefile, "a", encoding="utf-8-sig", newline="") as f:
            writer = csv.writer(f)
            if need_header:
                writer.writerow(['电影ID', '电影名称'])
            for movie_id, movie_name in zip(self.movie_id_list, self.movie_list):
                writer.writerow([movie_id, movie_name])
                print(f"==========保存电影名称{movie_name}==========")

    def get_user_movie(self, userid, current_user):
        """Accumulate movie names/ids from *userid*'s watched list into
        ``self.movie_list`` / ``self.movie_id_list``.

        Only users with 50-150 watched movies are harvested; list pages
        hold 15 entries each, starting at offset *current_user*.
        """
        # Iterative pagination (the original recursed per page).
        while True:
            time.sleep(1)
            url = (f"https://movie.douban.com/people/{userid}/collect"
                   f"?start={current_user}&sort=time&rating=all&filter=all&mode=grid")
            resp = requests.get(url, headers=self.headers, verify=False,
                                timeout=self.TIMEOUT)
            root = etree.HTML(resp.text)
            num_nodes = root.xpath('//span[@class="subject-num"]/text()')
            if not num_nodes:
                # Layout missing (e.g. private/blocked profile); the
                # original crashed with IndexError here.
                print(f"========该用户{userid}不符合条件=====数量0======")
                return
            lookNum = int(num_nodes[0].strip().split('/')[-1])
            if not (50 <= lookNum <= 150):
                print(f"========该用户{userid}不符合条件=====数量{lookNum}======")
                return
            for item in root.xpath('//div[@class="grid-view"]/div'):
                movie_name = "".join(item.xpath('.//li[@class="title"]/a/em//text()'))
                movie_id = item.xpath('.//li[@class="title"]/a/@href')[0].split('/')[-2]
                self.movie_list.append(movie_name)
                self.movie_id_list.append(movie_id)
                print(f"========{movie_name}===========")
            total = root.xpath('//span[@class="thispage"]/@data-total-page')
            page = root.xpath('//span[@class="thispage"]/text()')
            # Single-page lists have no "thispage" span at all; the
            # original raised IndexError in that case.
            if not total or not page or int(total[0]) == int(page[0]):
                return
            current_user += 15

    # 短评: persist up to 5 short comments of one movie
    def get_comments(self, current, id, moviename):
        """Fetch one short-comment page of movie *id* (offset *current*)
        and append at most 5 comments to 评论内容.csv."""
        params = {
            'percent_type': '1',
            'start': current,
            'limit': '20',
            'status': 'P',
            'sort': 'new_score',
            'comments_only': '1'
        }
        url = f"https://movie.douban.com/subject/{id}/comments"
        resp = requests.get(url, headers=self.headers, params=params,
                            verify=False, timeout=self.TIMEOUT)
        time.sleep(1)
        root = etree.HTML(json.loads(resp.text)['html'])
        # One file handle for the page instead of reopening per comment.
        with open("评论内容.csv", "a+", encoding="utf-8-sig", newline="") as f:
            writer = csv.writer(f)
            for node in root.xpath('//div[@class="comment"]')[:5]:
                username = node.xpath('.//span[@class="comment-info"]/a/text()')[0]
                comment_time = node.xpath('.//span[@class="comment-time "]/@title')[0]
                # A comment may have no visible text body; don't crash.
                short = node.xpath('.//span[@class="short"]/text()')
                content = short[0] if short else ''
                writer.writerow([moviename, username, comment_time, content])
                print("==========保存评论内容==========")

    def add_user(self, userid, username):
        """Persist a user to ``self.userfile`` if not already saved.

        :param userid: Douban user id
        :param username: display name
        :return: 0 if newly added, 1 if already present
        """
        if username in self.user_list:
            return 1
        self.user_list.append(username)
        with open(self.userfile, "a+", encoding="utf-8-sig", newline="") as f:
            csv.writer(f).writerow([userid, username])
        print(f"==========保存用户{username}==========")
        return 0

    def clean_data(self):
        """Keep movies appearing 2-3 times in ``电影名称.csv`` (i.e. watched
        by 2-3 saved users), de-duplicate, and write 清洗后数据.csv with
        columns [电影名称, 次数, 电影ID]."""
        print("=======载入数据=========")
        df = pd.read_csv('./电影名称.csv', encoding="utf_8_sig")
        print("=======清洗数据=========")
        counts = df['电影名称'].value_counts()
        wanted = counts[(counts > 1) & (counts < 4)]
        # Series -> two-column frame with explicit names. This works on
        # pandas 1.x and 2.x; the original
        # rename(columns={'index': ...}) breaks on pandas >= 2.0 where
        # value_counts() names its column 'count'.
        id_less_50 = wanted.rename_axis('电影名称').reset_index(name='次数')
        inner_df = pd.merge(id_less_50, df, on=['电影名称'])
        inner_df.drop_duplicates(['电影名称'], keep='last', inplace=True)
        # reset_index returns a new frame; the original discarded it.
        inner_df = inner_df.reset_index(drop=True)
        inner_df.to_csv('./清洗后数据.csv', encoding="utf_8_sig")
        print("=======清洗数据完成=========")


if __name__ == '__main__':
    douban = DouBan_Get()

    # Stage 1 (run once beforehand to build 电影名称.csv / 用户.csv), e.g.:
    #   for movie_id in ('24733428', '2609258', '30458949'):
    #       douban.get_short_comments(movie_id, 0)

    # Stage 2: keep movies watched by 2-3 saved users.
    douban.clean_data()

    # Stage 3: fetch short comments for every cleaned movie.
    df_movie = pd.read_csv('./清洗后数据.csv', encoding="utf_8_sig")
    for _, row in df_movie.iterrows():
        # Access by column name: positional row[i] on iterrows rows is
        # deprecated in pandas and silently breaks if the CSV's column
        # order ever changes.
        print(f"获取评论{row['电影名称']}")
        douban.get_comments(0, row['电影ID'], row['电影名称'])
