import os
import time
import urllib.request

import re
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

from database_tool.storage_resources import DatabaseConnector


class ResourceLoader:
    """Crawler for sixmh7.com: scrapes comic metadata, chapter lists and
    images with a headless Chrome driver, persists them through
    DatabaseConnector, and downloads cover / page images via urllib.
    """

    def __init__(self):
        # Selenium driver; created lazily by the crawl methods.
        self.driver = None
        # Browser-like UA so the image CDN does not reject plain urllib requests.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36"}
        # Base directory for downloaded resources.
        self.path = "../resource/"
        self.original_url = "http://www.sixmh7.com"
        self.url = r"http://www.sixmh7.com/rank/1-1.html"
        # Headless Chrome so the crawler can run without a display.
        # NOTE(review): `chrome_options=` is the Selenium 3 keyword; Selenium 4
        # renamed it to `options=` — confirm the pinned selenium version.
        self.chrome_options = Options()
        self.chrome_options.add_argument('--headless')
        self.chrome_options.add_argument('--disable-gpu')

    def close(self):
        """Close the current driver window (ends the chromedriver session when
        it is the only open window)."""
        self.driver.close()

    # Scrape the comic catalogue from the ranking pages.
    def get_comics_info(self, first_page=1, last_page=2):
        """Visit ranking pages ``first_page``..``last_page`` (inclusive) and
        scrape every comic linked from them.

        The defaults reproduce the original hard-coded ``range(1, 3)``.
        A fresh driver is started per page and is now always released, even
        when a scrape step raises (the original leaked it on error).
        """
        for page in range(first_page, last_page + 1):
            self.url = r"http://www.sixmh7.com/rank/1-{}.html".format(page)
            self.driver = webdriver.Chrome(chrome_options=self.chrome_options)
            try:
                self.driver.get(self.url)
                element = self.driver.find_element_by_xpath('//div[@class="cy_list_mh"]')
                elements = element.find_elements_by_tag_name("ul")
                page_id = []
                for item in elements:
                    # Each <ul> entry's cover anchor carries the detail-page URL.
                    comics_url = item.find_element_by_xpath('.//li//a[@class="pic"]').get_attribute("href")
                    page_id.append(comics_url)
                    print(comics_url)
                    time.sleep(1)  # throttle so we do not hammer the site
                for i in page_id:
                    self.comics_info(i)
                    time.sleep(1)
            finally:
                # Always release the per-page driver (fixes a WebDriver leak).
                self.close()

    def comics_info(self, comics_url):
        """Scrape one comic's detail page, store its metadata in the database,
        and download its cover image."""
        self.driver.get(comics_url)
        # Comic title
        comics_name = self.driver.find_element_by_xpath(
            '//div[@class="cy_intro_l"]//div[@class="cy_title"]//h1').text
        comics_name = comics_name.strip()
        spans_list = self.driver.find_elements_by_xpath(
            '//div[@class="cy_intro_l"]//div[@class="cy_xinxi"]//span')
        # Author — drop the "作者：" label prefix.
        author = spans_list[0].text.replace('作者：', "").strip()
        # Category — drop the "类别：" label prefix.
        category = spans_list[2].text.replace('类别：', "").strip()
        # Tags — drop the "标签：" label prefix.
        comics_label = spans_list[3].text.replace('标签：', "").strip()
        # Synopsis paragraph (was named `reduce`, which shadowed the builtin).
        summary = self.driver.find_element_by_xpath(
            '//div[@class="cy_intro_l"]//div[@class="cy_xinxi cy_desc"]//p').text
        # Cover image URL
        cover_url = self.driver.find_element_by_xpath(
            '//div[@class="cy_intro_l"]//div[@class="cy_info_cover"]//img').get_attribute("src")
        # Local cover path; the unix timestamp keeps names unique per second.
        cover_path = "../resource/cover/" + str(int(time.time())) + ".jpg"
        print(comics_url, comics_name, author, category, comics_label, summary, cover_url, cover_path)
        time.sleep(1)
        dc = DatabaseConnector()
        dc.insert_comics_info(comics_name, comics_label, category, author, summary, cover_path, comics_url, cover_url)
        time.sleep(1)
        self.download_img(cover_url, cover_path)

    # Download a single cover image.
    def download_img(self, cover_url, cover_path):
        """Download ``cover_url`` to ``cover_path``.

        Best-effort: failures are logged and swallowed so one bad cover does
        not abort the crawl. The response is read *before* the file is opened
        so a failed request never leaves an empty .jpg behind (the original
        opened the file first, and used a bare ``except:`` that silently ate
        everything including KeyboardInterrupt).
        """
        time.sleep(1)
        try:
            req = urllib.request.Request(cover_url, headers=self.headers)
            with urllib.request.urlopen(req, timeout=10) as resp:
                data = resp.read()
            with open(cover_path, "wb") as f:
                f.write(data)
            print("图片{}下载成功".format(cover_path))
            time.sleep(1)
        except Exception as e:
            print("图片{}下载失败: {}".format(cover_path, e))
            time.sleep(1)

    # Crawl the chapter list of each stored comic.
    def crawl_comic_chapters(self):
        """For the first 7 comics in the database, scrape every chapter's name
        and URL, store them, and download the chapter's first page images."""
        dc = DatabaseConnector()  # open the database connection
        comic_id_urls = dc.get_comics_id_url()[0:7]
        self.driver = webdriver.Chrome(chrome_options=self.chrome_options)
        try:
            for comic_id_url in comic_id_urls:
                comic_id, comic_url = comic_id_url
                self.driver.get(comic_url)
                li_elements = self.driver.find_elements_by_xpath(
                    '//div[@class="cy_plist"]//ul[@id="mh-chapter-list-ol-0"]//li')
                print(len(li_elements))
                for chapters_num, element in enumerate(li_elements):
                    # Chapter URL
                    chapters_url = element.find_element_by_xpath(".//a").get_attribute("href")
                    chapters_url = chapters_url.strip()
                    # Chapter name
                    chapters_name = element.find_element_by_xpath(".//a//p").text
                    chapters_name = chapters_name.strip()
                    # Folder per comic/chapter; path segment 3 of the detail
                    # URL (http://host/<id>/) is the comic's site id.
                    folder_name = comic_url.split("/")[3].strip()
                    folder_path = "../resource/" + folder_name + "/" + str(chapters_num)
                    if not os.path.exists(folder_path):
                        os.makedirs(folder_path)
                        print("创建{}文件夹成功".format(folder_path))
                    dc.insert_comics_chapter(comic_id, chapters_name, chapters_url, folder_path)
                    print("章节信息:", chapters_url, chapters_name)
                    self.download_chapters_img(chapters_url, folder_path)  # download chapter pages
                    time.sleep(1)
        finally:
            # Always release the driver (fixes a WebDriver leak on error).
            self.close()

    # Download the page images of one chapter.
    def download_chapters_img(self, chapters_url, folder_path):
        """Download (at most) the first five page images of a chapter into
        ``folder_path``, as ``0.jpg`` .. ``4.jpg``.

        The page embeds its image list inside an ``eval(function(...)`` blob;
        the captured group is a ``|``-separated token list whose first ten
        tokens are not image hashes. Only 32-character tokens are treated as
        image hashes; the first non-hash token (or the sixth image) stops the
        download, mirroring the original early-return behaviour.
        """
        self.driver1 = webdriver.Chrome(chrome_options=self.chrome_options)
        try:
            self.driver1.get(chapters_url)
            time.sleep(1)
            page = self.driver1.page_source  # rendered HTML source
            imag = re.compile(r'eval\(function\(.*?,\'(?P<name>.*?)\'\.split', re.S)
            img_url = re.search(imag, page).group("name")
            img_url_list = img_url.split("|")[10:]  # skip the non-hash prefix tokens
            for i, url in enumerate(img_url_list):
                # Stop after five images, or at the first token that is not a
                # 32-char hash (same cut-off rule as the original code).
                if i >= 5 or len(url) != 32:
                    time.sleep(1)
                    return
                try:
                    img_url = 'https://p3.dcarimg.com/tos-cn-i-8gu37r9deh/{}~noop.jpg'.format(url)
                    reps = urllib.request.Request(img_url, headers=self.headers)
                    img_path = folder_path + "/" + str(i) + '.jpg'  # page image name
                    with urllib.request.urlopen(reps, timeout=10) as rep:
                        with open(img_path, 'wb') as f:
                            f.write(rep.read())
                            print("{}下载成功".format(img_path))
                        time.sleep(1)
                except Exception as e:
                    # Was silently swallowed; log so broken pages are visible.
                    print("{}下载失败: {}".format(img_path, e))
                    time.sleep(2)
                    continue
        finally:
            # Always release the chapter driver (the original leaked it when
            # re.search(...) returned None and raised AttributeError).
            self.driver1.close()


if __name__ == '__main__':
    loader = ResourceLoader()
    # Step 1 (run once to seed the database with comic metadata):
    # loader.get_comics_info()
    loader.crawl_comic_chapters()
