# coding: utf-8
import csv
import os

import requests
from selenium import webdriver
import math
import time
import re

chrome_options = webdriver.ChromeOptions()
# Run Chrome headless (no visible window).
chrome_options.add_argument('--headless')
# Without this flag, element location sometimes fails in headless mode.
chrome_options.add_argument('--disable-gpu')

# Launch the browser shared by every scraping function below
# (module-level side effect; legacy Selenium 3 keyword arguments).
browser = webdriver.Chrome(executable_path=r'/home/chan/IdeaProjects/study_daily/spider/chromedriver',
                           chrome_options=chrome_options)

p_counts = 3  # max pages of short comments to download per movie
r_counts = 3  # max pages of long reviews to download per movie

# Output locations: short comments, long reviews, cover images.
save_p_dir = '/media/chan/软件/LTLDataset/豆瓣影评/短评/'
save_reviews_dir = '/media/chan/软件/LTLDataset/豆瓣影评/reviews/'
jpeg_dir = '/media/chan/软件/LTLDataset/豆瓣影评/jpeg/'

# Create output directories consistently. makedirs(exist_ok=True) creates
# missing parents and is a no-op if the directory already exists — the
# original mixed makedirs/mkdir behind racy existence checks.
os.makedirs(save_p_dir, exist_ok=True)
os.makedirs(save_reviews_dir, exist_ok=True)
os.makedirs(jpeg_dir, exist_ok=True)


def validateTitle(title):
    """Sanitize *title* for use as a file name.

    Replaces path separators, Windows-forbidden characters, dots,
    spaces and common CJK/ASCII commas with underscores.
    """
    forbidden = r"[\/\\\:\*\?\"\<\>\|.， 。,]"
    return re.sub(forbidden, "_", title)


def down_pic(img_url, img_name):
    """
    Download a movie cover image into ``jpeg_dir``.

    :param img_url: image URL (may still contain HTML-escaped '&amp;')
    :param img_name: base file name; saved as ``<img_name>.jpeg``
    :return: None
    """
    img_url = img_url.replace('&amp;', '&')
    try:
        pic = requests.get(img_url, timeout=10)
    except requests.exceptions.RequestException:
        # The original caught only ConnectionError, so a Timeout (despite
        # timeout=10 above) crashed the caller — and after a *caught* failure
        # it still fell through to use the undefined `pic` (NameError).
        print('【错误】当前图片无法下载')
        return

    save_dir = os.path.join(jpeg_dir, img_name + '.jpeg')
    # Context manager guarantees the file handle is closed even on a write error.
    with open(save_dir, 'wb') as fp:
        fp.write(pic.content)


def p(each, movie_title, introduction_title, brief_introduction):
    """
    Download short comments ("短评") for one movie into a CSV file.

    Also reads the total short-comment and review counts from the page the
    browser is currently on, and clamps the module-level page limits
    ``p_counts`` / ``r_counts`` to what the site actually offers.

    :param each: movie detail-page URL (query string, if any, is stripped)
    :param movie_title: sanitized movie title, used as the CSV file name
    :param introduction_title: heading of the synopsis section
    :param brief_introduction: synopsis text
    :return: None
    """
    # Short-comment pages live under /comments?status=P
    with open(save_p_dir + movie_title + '.csv', 'w', encoding='utf-8-sig', newline='') as csf:
        writer = csv.writer(csf)
        writer.writerow([movie_title, introduction_title, brief_introduction])
        heads = ['评论员', '有用', '短评']
        writer.writerow(heads)  # column header: reviewer / useful votes / comment
        # Link text holding the totals, e.g. "全部 1234 条"; assumes the browser
        # is still on the movie detail page — TODO confirm against the caller.
        p_count_web = browser.find_element_by_xpath('//*[@id="comments-section"]/div[1]/h2/span/a').text
        r_count_web = browser.find_element_by_xpath('//*[@id="reviews-wrapper"]/header/h2/span/a').text
        num = re.compile(r'\d+')  # extract the first run of digits
        p_count_web = math.ceil(int(num.findall(p_count_web)[0]) / 20)  # short-comment pages available (20 per page)
        r_count_web = math.ceil(int(num.findall(r_count_web)[0]) / 20)  # review pages available (20 per page)
        global p_counts
        global r_counts
        # Clamp the configured limits to what exists.
        # NOTE(review): this mutates module-level state, so later movies inherit
        # the smallest count seen so far — confirm that is intended.
        if p_count_web < p_counts:
            p_counts = p_count_web
        if r_count_web < r_counts:
            r_counts = r_count_web
        for i in range(p_counts):  # one iteration per page of short comments
            p_url = each.split('?')[0] + 'comments?start=' + str(i * 20) + '&limit=20&status=P&sort=new_score'
            if i == 0:
                # First page uses the plain URL without the start/limit params.
                p_url = each.split('?')[0] + 'comments?status=P&sort=new_score'
            browser.get(p_url)  # short-comment listing page
            time.sleep(5)
            # The container text flattens to 3 lines per comment — presumably
            # [votes line, "<reviewer> 看过 …", comment text] — TODO confirm;
            # the trailing line is dropped.
            p_txt_s = browser.find_element_by_xpath('//*[@id="comments"]').text.split('\n')[:-1]
            for j in range(len(p_txt_s) // 3):
                # Reviewer name precedes "看过"; '*' and replacement chars stripped.
                reviewer = p_txt_s[j * 3 + 1].split('看过')[0].replace('*', '').replace('�', '')
                remark = p_txt_s[j * 3 + 2].replace('*', '').replace('�', '')
                w_txt = [reviewer, p_txt_s[j * 3].replace('有用', ''), remark]
                writer.writerow(w_txt)


def reviews(each, movie_title):
    """
    Download full-length reviews ("影评") for one movie into a CSV file.

    :param each: movie detail-page URL (query string, if any, is stripped)
    :param movie_title: sanitized movie title, used as the CSV file name
    :return: None
    """
    # Review pages live under /reviews, 20 reviews per page.
    with open(save_reviews_dir + movie_title + '.csv', 'w', encoding='utf-8-sig', newline='') as csf:
        writer = csv.writer(csf)
        heads = ['评论员', '评论时间', '主题', '有用', 'bad', '影评']
        writer.writerow(heads)  # column header
        for i in range(r_counts):  # pages of reviews, 20 per page
            r_url = each.split('?')[0] + 'reviews?start=' + str(i * 20)
            print("影评：", r_url)
            browser.get(r_url)  # review listing page
            time.sleep(10)
            page_source = browser.page_source
            # Each collapsed review has an expand link with id "toggle-<id>-copy".
            pat = r'toggle-(.*?)-copy'
            open_ids = re.findall(pat, page_source, flags=re.S)
            error_open_count = 0
            for open_id in open_ids:  # simulate a click to expand each review
                try:
                    browser.find_element_by_id('toggle-' + open_id + '-copy').click()
                    time.sleep(8)
                except Exception:
                    # Was a bare `except:` — that also swallowed
                    # KeyboardInterrupt/SystemExit; best-effort behavior kept.
                    error_open_count += 1
                    print(error_open_count, 'toggle-' + open_id + '-copy', '不可展开', )
            for j in range(1, len(open_ids) + 1 - error_open_count):  # reviews that expanded
                remarks = browser.find_element_by_xpath(
                    '//*[@id="content"]/div/div[1]/div[1]/div[' + str(j) + ']').text.split('\n')
                reviewer = remarks[0].split()[0]  # reviewer name
                remark_time = remarks[0].split()[1]  # review timestamp
                theam = remarks[1]  # review title
                remark = '\n'.join(remarks[2:-5])  # review body
                goodorbad = remarks[-4]
                # The votes line contains both "有用" and "没用"; if remarks[-4]
                # is missing either marker it is not that line, so fall back.
                if '有用' not in goodorbad or '没用' not in goodorbad:
                    goodorbad = remarks[-3]
                good = goodorbad.split()[1]  # "useful" vote count
                bad = goodorbad.split()[3]  # "useless" vote count
                w_txt = [reviewer, remark_time, theam, good, bad, remark]
                writer.writerow(w_txt)


def get_page_urls(main_url):
    """
    Open the explore landing page, load more results five times, then
    collect every movie detail URL from the page source and hand the
    list to ``get_movie``.

    :param main_url: Douban explore page URL
    :return: None
    """
    browser.get(main_url)  # landing page
    time.sleep(15)
    # Each click on the "load more" link appends another batch of movies.
    for _ in range(5):
        browser.find_element_by_xpath('//*[@id="content"]/div/div[1]/div/div[4]/a').click()
        time.sleep(5)
    html = browser.page_source
    detail_pat = r'https://movie.douban.com/subject/.*?"'
    movie_links = re.findall(detail_pat, html)  # trailing '"' kept; stripped later
    print(len(movie_links))
    get_movie(movie_links)


def get_movie(movie_url, skip_first=20):
    """
    Visit each movie detail page and download its cover, synopsis and reviews.

    :param movie_url: iterable of movie detail URLs as captured by the regex
                      (may contain '&amp;' and a trailing '"')
    :param skip_first: number of leading URLs to skip, for resuming an
                       interrupted run. The default of 20 preserves the
                       original hard-coded resume point.
    :return: None
    """
    for i, each in enumerate(movie_url):
        if i < skip_first:  # resume support: skip already-downloaded movies
            continue
        print(i, end=' ')
        each = each.replace('&amp;', '&').replace('"', '')
        browser.get(each)  # movie detail page
        time.sleep(5)

        movie_title = browser.find_element_by_xpath('//*[@id="content"]/h1/span[1]').text
        movie_title = validateTitle(movie_title)  # sanitize for use as a file name
        try:
            img_url = browser.find_element_by_xpath('//*[@id="mainpic"]/a/img').get_attribute("src")
            down_pic(img_url, movie_title)  # download the cover image
            print("有封面", end=' ')
        except Exception:  # was a bare except: don't swallow KeyboardInterrupt
            print("无封面", end=' ')

        try:
            # Synopsis section heading
            introduction_title = browser.find_element_by_xpath('//*[@id="content"]/div[2]/div[1]/div[3]/h2').text
            # Synopsis text
            brief_introduction = browser.find_element_by_xpath('//*[@id="link-report"]/span').text
        except Exception:
            introduction_title = '无简介'
            brief_introduction = '无简介'

        print('开始下载：', movie_title)
        # p(each, movie_title, introduction_title, brief_introduction)
        reviews(each, movie_title)


if __name__ == "__main__":
    # Douban "explore" listing sorted by rating; tag is URL-encoded "热门".
    start_url = ('https://movie.douban.com/explore#!type=movie&tag=%E7%83%AD%E9%97%A8'
                 '&sort=rank&page_limit=20&page_start=0')
    get_page_urls(start_url)
