import time
from time import sleep
import os
import shutil
import csv
import random

import requests
from bs4 import BeautifulSoup
from selenium import webdriver

import colorama
from colorama import Fore, Back

colorama.init()
time_step = 0.1


def insert_error(dic, key, *val):
    """Record error reasons for *key*: append each of *val* to dic[key],
    creating an empty list first when the key is new."""
    dic.setdefault(key, []).extend(val)


def get_time() -> str:
    """Return the current local time as a 'YYYYMMDD-HHMMSS' timestamp string."""
    now = time.localtime()
    return time.strftime("%Y%m%d-%H%M%S", now)


def get_item_list(name, base_dir=None):
    """Read a list of ids from a text file, one id per line.

    Args:
        name: file name inside the data directory (e.g. "Movie_id.txt").
        base_dir: directory containing the file; defaults to the module-level
            ``data_pth`` set in ``__main__`` (kept for backward compatibility).

    Returns:
        list[str]: each line with trailing whitespace/newline stripped.
    """
    if base_dir is None:
        base_dir = data_pth  # global, assigned in the __main__ block
    # BUG FIX: the original opened the file without a context manager,
    # leaking the handle if readlines() raised.
    with open(os.path.join(base_dir, name), mode="r", encoding="utf-8") as fp:
        return [line.rstrip() for line in fp]


def header_x():
    """Build an HTTP header dict carrying a randomly picked User-Agent."""
    agents = (
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0",
    )
    return {"User-Agent": random.choice(agents)}


def driver_init(pth=None, file_pth=None):
    """Start a Chrome webdriver and interactively log in to douban.com.

    Args:
        pth: optional path to the chromedriver executable; when None, selenium
            looks it up on PATH.
        file_pth: optional credentials file with username on line 1 and
            password on line 2; if it cannot be read, prompt interactively.

    Returns:
        The logged-in webdriver instance (blocks on input() until the user
        confirms the login / captcha step).
    """
    chr_cfg = webdriver.ChromeOptions()
    # chr_cfg.add_argument('--headless')
    # chr_cfg.add_argument('--disable-gpu')
    # chr_cfg.add_argument('log-level=3')  # INFO=0 WARNING=1 LOG_ERROR=2 LOG_FATAL=3, default 0
    chr_cfg.add_experimental_option("excludeSwitches", ["enable-logging"])
    if pth:
        driver = webdriver.Chrome(executable_path=pth, options=chr_cfg)
    else:
        driver = webdriver.Chrome(options=chr_cfg)
    driver.implicitly_wait(5)

    # Log in: prefer the credentials file, fall back to prompts.
    print("start to login")
    try:
        # BUG FIX: the original used a bare except and never closed the file.
        # TypeError covers file_pth=None; ValueError covers a malformed file.
        with open(file_pth, "r") as fp:
            username, passwd = [line.strip() for line in fp.readlines()]
    except (OSError, TypeError, ValueError):
        username = input("username: ")
        passwd = input("password: ")  # BUG FIX: prompt typo was "passward"
    driver.get("https://www.douban.com/")
    # NOTE(review): find_element_by_* was removed in Selenium 4; this code
    # requires Selenium 3.x — confirm the pinned selenium version.
    iframe = driver.find_element_by_tag_name("iframe")
    driver.switch_to.frame(iframe)
    driver.find_element_by_class_name("account-tab-account").click()
    driver.find_element_by_id("username").send_keys(username)
    driver.find_element_by_id("password").send_keys(passwd)
    driver.find_element_by_class_name("btn-account").click()
    input("check over?")
    return driver


def get_page(url, headers=None, driver=None):
    """Fetch *url* and parse it with BeautifulSoup (lxml parser).

    When *driver* is given, the page is loaded through selenium and the
    status code is reported as 0 (selenium exposes no HTTP status);
    otherwise a plain requests.get with *headers* is used.

    Returns:
        (status_code, soup) tuple.
    """
    if driver is not None:
        driver.get(url)
        return 0, BeautifulSoup(driver.page_source, features="lxml")
    response = requests.get(url, headers=headers)
    return response.status_code, BeautifulSoup(response.text, features="lxml")


def print_error(page_id, fail_list, type, error_path, error_info, status_code):
    """If *page_id* accumulated failures, print them and dump the page source.

    Args:
        page_id: the douban subject id just scraped.
        fail_list: dict mapping page_id -> list of failure reasons.
        type: item category ("movie" / "book"), used in the dump file name.
            (NOTE(review): shadows the builtin; kept for caller compatibility.)
        error_path: directory receiving the dumped page.
        error_info: BeautifulSoup object whose prettified HTML is written out.
        status_code: HTTP status of the failed fetch, shown in the console.
    """
    # IDIOM FIX: membership test on the dict directly, not .keys().
    if page_id not in fail_list:
        return
    print(Fore.RED + "ERROR,", f"status code: {status_code}")
    print(Fore.BLACK + Back.WHITE, end="")
    for reason in fail_list[page_id]:
        print(reason)
    print(Fore.RESET + Back.RESET)
    dump_path = os.path.join(error_path, type + "-" + page_id + ".txt")
    with open(dump_path, "w", encoding="utf-8") as f_err:
        f_err.write(error_info.prettify())


def search_movie(movie_list, output_path, error_path, driver=None):
    """Scrape douban movie pages into output_path/movie.csv.

    Columns: id, title, basic info, plot summary, cast & crew.
    A timestamped copy of the CSV is saved when the run completes.

    Args:
        movie_list: iterable of douban movie subject ids (strings).
        output_path: directory for movie.csv and its timestamped copy.
        error_path: directory where failing pages are dumped by print_error.
        driver: optional selenium driver; when None, requests is used.

    Returns:
        dict mapping page_id -> list of failure reasons.
    """
    url = "https://movie.douban.com/subject/"
    web_head = header_x()
    fail_list = dict()
    csv_path = os.path.join(output_path, "movie.csv")
    # BUG FIX: context manager so the CSV is closed even if a request raises.
    with open(csv_path, mode="w", encoding="utf-8") as fp:  # , newline=''
        writer = csv.writer(fp)
        writer.writerow(["id", "名称", "基本信息", "剧情简介", "演职员表"])
        print("movie.csv open!")

        for k, page_id in enumerate(movie_list):
            status_code, soup = get_page(url + page_id, headers=web_head, driver=driver)
            print(f"No.{k}, id:{page_id}, use_driver: {driver is not None}")

            # Title: strip the douban suffix from <title>.
            name = soup.find("title")
            if name:
                name = name.get_text().replace(" (豆瓣)", "")
            else:
                insert_error(fail_list, page_id, "name not find")

            # Basic info block.
            info = soup.find("div", id="info")
            if info is None:
                insert_error(fail_list, page_id, "info not find")
            else:
                info = info.get_text().replace(" ", "")

            # Plot summary: prefer the full (hidden) text over the teaser.
            intro = soup.find("div", id="link-report-intra", class_="indent")
            if intro is None:
                insert_error(fail_list, page_id, "intro not find")
            else:
                t = intro.find("span", class_="all hidden")
                if t is None:
                    t = intro.find("span", class_="", property="v:summary")
                intro = None if t is None else t.get_text().replace(" ", "")

            # Cast & crew live on a separate page.
            _, soup2 = get_page(
                url + page_id + "/celebrities", headers=web_head, driver=driver
            )
            celebrities_list = soup2.find_all("div", class_="info")
            # BUG FIX: find_all returns an empty ResultSet, never None, so the
            # original "is None" check could never report a missing cast list.
            if not celebrities_list:
                insert_error(fail_list, page_id, "celebrities not find")
            celebrities = "".join(c.get_text() for c in celebrities_list).replace(" ", "")

            print_error(page_id, fail_list, "movie", error_path, soup, status_code)
            writer.writerow([page_id, name, info, intro, celebrities])

            sleep(time_step)

    shutil.copy(csv_path, os.path.join(output_path, "movie-" + get_time() + ".csv"))
    return fail_list


def get_intro(scoop, name):
    """Extract the intro section titled *name* (e.g. "内容简介") from *scoop*.

    Locates the heading text, climbs to its <h2>-level ancestor, then walks
    forward through siblings to the intro container. Returns the cleaned
    text (spaces removed), or None when the heading is absent.
    """
    intro_title = scoop.find(text=name)
    if intro_title:
        intro_title = intro_title.parent.parent  # climb to the <h2>-level heading
        intro = intro_title.next_sibling
        # Skip non-tag siblings: for a NavigableString, .find is str.find and
        # returns -1; for a Tag it returns a Tag or None (never -1), so the
        # loop stops at the first Tag sibling — the intro container.
        # NOTE(review): if the document has no Tag sibling, next_sibling can
        # become None and this raises AttributeError — confirm intended.
        # TODO: is there a cleaner way to do this?
        while intro.find("div") == -1:
            intro = intro.next_sibling
        # Prefer the full (hidden) text over the truncated teaser.
        hidden = intro.find("span", class_="all hidden")
        if hidden:
            intro = hidden.get_text().replace(" ", "")
        else:
            intro = intro.find("div", class_="intro").get_text().replace(" ", "")
        return intro
    return None


def search_book(book_list, output_path, error_path, driver=None):
    """Scrape douban book pages into output_path/book.csv.

    Columns: id, title, basic info, content summary, author bio.
    A timestamped copy of the CSV is saved when the run completes.

    Args:
        book_list: iterable of douban book subject ids (strings).
        output_path: directory for book.csv and its timestamped copy.
        error_path: directory where failing pages are dumped by print_error.
        driver: optional selenium driver; when None, requests is used and a
            short sleep is inserted between requests.

    Returns:
        dict mapping page_id -> list of failure reasons.
    """
    url = "https://book.douban.com/subject/"
    web_head = header_x()
    fail_list = dict()
    csv_path = os.path.join(output_path, "book.csv")
    # NOTE(review): append mode supports resuming an interrupted run (see the
    # book_list[820:] slice in __main__) but re-writes the header row each
    # run — confirm this duplication is acceptable downstream.
    # BUG FIX: context manager so the CSV is closed even if a request raises.
    with open(csv_path, mode="a", encoding="utf-8") as fp:  # , newline=''
        writer = csv.writer(fp)
        writer.writerow(["id", "名称", "基本信息", "内容简介", "作者简介"])
        print("\nbook.csv open!")

        for k, page_id in enumerate(book_list):
            status_code, soup = get_page(url + page_id, headers=web_head, driver=driver)
            print(f"No.{k}, id:{page_id}, use_driver: {driver is not None}")

            # Title: strip the douban suffix from <title>.
            name = soup.find("title")
            if name:
                name = name.get_text().replace(" (豆瓣)", "")
            else:
                insert_error(fail_list, page_id, "name not find")

            # Basic info block.
            info = soup.find("div", id="info")
            if info is None:
                insert_error(fail_list, page_id, "info not find")
            else:
                info = info.get_text().replace(" ", "")

            # Content summary and author bio both live under related_info.
            related_info = soup.find("div", class_="related_info")
            if related_info:
                intro = get_intro(related_info, "内容简介")
                if intro is None:
                    insert_error(fail_list, page_id, "intro not find")

                author_intro = get_intro(related_info, "作者简介")
                if author_intro is None:
                    insert_error(fail_list, page_id, "author intro not find")
            else:
                intro = author_intro = None
                insert_error(fail_list, page_id, "intro not find", "author intro not find")

            print_error(page_id, fail_list, "book", error_path, soup, status_code)
            writer.writerow([page_id, name, info, intro, author_intro])
            # Throttle only the requests path; selenium page loads are slow enough.
            if driver is None:
                sleep(time_step)

    shutil.copy(csv_path, os.path.join(output_path, "book-" + get_time() + ".csv"))
    return fail_list


if __name__ == "__main__":
    # Resolve data/output/error directories relative to this script.
    pth = os.path.split(os.path.realpath(__file__))[0]
    data_pth = os.path.join(pth, "data")
    out_pth = os.path.join(pth, "output")
    err_pth = os.path.join(pth, "error", "error-" + get_time())
    account_file = os.path.join(data_pth, "account.txt")
    os.makedirs(out_pth, exist_ok=True)
    os.makedirs(err_pth, exist_ok=True)

    # chrome_pth = r'C:\Program Files\Google\Chrome\Application\chromedriver.exe'
    driver = driver_init(None, account_file)

    movie_list = get_item_list("Movie_id.txt")
    book_list = get_item_list("Book_id.txt")
    # movie_fail = search_movie(movie_list[0:5], out_pth,driver)
    # book_fail = search_book(book_list[0:5], out_pth,driver)
    # movie_fail = search_movie(movie_list, out_pth, err_pth, driver)
    # NOTE(review): [820:] looks like resuming an earlier partial run — confirm.
    book_fail = search_book(book_list[820:], out_pth, err_pth, driver)

    driver.close()

    # BUG FIX: context manager so log.txt is flushed/closed even if the
    # reporting loop raises (original handle leaked on error).
    with open(os.path.join(pth, "log.txt"), "w", encoding="utf-8") as fp_log:
        # print("\n\nmovie fail_list")
        # for k, v in movie_fail.items():
        #     print(f"page_id: {k}, fail reason: {v}")
        #     fp_log.write(k + "\n")
        # fp_log.write("\n")

        print("\n\nbook fail_list")
        for k, v in book_fail.items():
            print(f"page_id: {k}, fail reason: {v}")
            fp_log.write(k + "\n")