#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import requests
from bs4 import BeautifulSoup
from crawler import config
from selenium import webdriver
from crawler import Common as c
from logs.Logger import Logger
import file_md5.FilePrint as file_print
from crawler.db.ImageInfoDBOperator import DBOperator, ImageInfo
import time
import datetime
import os
import common.const as const

# Logger initialization: module-level logger writing to <log_path>/crawler.log
# at INFO level; used by all methods of JiumanhuaCrawler below.
log_path = config.generalConfig.log_path
log = Logger(log_path + "/crawler.log", level='info')

class JiumanhuaCrawler:
    """Comic crawler for jiumanhua.com.

    Workflow: crawlerLinks() collects chapter page URLs from an index page,
    crawlerImageLinks() extracts the lazy-loaded image URLs of each chapter,
    and crawlImages() downloads them to disk as <base_dir>/<title>/NNN/MMM.jpg.
    """

    # Default timeout (seconds) for every HTTP request, so a stalled
    # connection cannot hang the crawler forever.
    DEFAULT_TIMEOUT = 30

    def __init__(self):
        # Desktop-Firefox User-Agent so the site serves regular pages.
        # (The commented-out Selenium/headless-Chrome setup from an earlier
        # iteration was removed; plain requests is sufficient here.)
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
        # Absolute chapter-page URLs, filled by crawlerLinks().
        self.download_links = []
        # One list of image URLs per chapter, filled by crawlerImageLinks().
        self.download_images_group = []

    def mkdir(self, path):
        """Create directory `path` (including parents) if it does not exist.

        :param path: directory path; surrounding spaces and a trailing
                     backslash are stripped first.
        :return: True if the directory was created, False if it already existed.
        """
        path = path.strip().rstrip("\\")
        if os.path.exists(path):
            log.logger.info(path + ' 目录已存在')
            return False
        # exist_ok=True avoids the check-then-create race if another
        # process creates the directory between the test above and here.
        os.makedirs(path, exist_ok=True)
        log.logger.info(path + ' 创建成功')
        return True

    def crawlerLinks(self, url, timeout=DEFAULT_TIMEOUT):
        """Fetch the comic index page at `url` and collect chapter links.

        Appends absolute URLs (prefixed with https://www.jiumanhua.com)
        to self.download_links.
        """
        resp = requests.get(url, headers=self.headers, timeout=timeout)
        resp.encoding = 'utf-8'
        soup = BeautifulSoup(resp.text, 'lxml')
        containers = soup.find_all('div', {'class': 'list'})
        if not containers:
            # Page layout changed or fetch failed silently; don't crash.
            log.logger.info('no div.list found on ' + url)
            return
        for anchor in containers[0].select("a"):
            href = anchor.get('href')
            if href:
                self.download_links.append("https://www.jiumanhua.com" + href)

    def crawlerImageLinks(self, timeout=DEFAULT_TIMEOUT):
        """Visit every collected chapter link and gather its image URLs.

        The real image URL sits in the `data-original` attribute because the
        site lazy-loads images; pages without a read-article block are skipped.
        Appends one list of URLs per chapter to self.download_images_group.
        """
        for link in self.download_links:
            resp = requests.get(link, headers=self.headers, timeout=timeout)
            resp.encoding = 'utf-8'
            print(link)
            soup = BeautifulSoup(resp.text, 'lxml')
            articles = soup.find_all('article', {'class': 'read-article'})
            if not articles:
                continue
            imgs = articles[0].find_all("img", {'class': 'show-menu lazy'})
            self.download_images_group.append([img.get("data-original") for img in imgs])
        print(self.download_images_group)

    def crawlImages(self, title, base_dir="D:/data/Street/", start=1, timeout=DEFAULT_TIMEOUT):
        """Download all collected image groups to disk.

        Files are written as <base_dir><title>/NNN/MMM.jpg with zero-padded
        chapter and image counters.

        :param title: comic title, used as the directory name.
        :param base_dir: download root (default keeps the historical path).
        :param start: first chapter number to download; lets a run that was
                      interrupted resume without re-fetching earlier chapters.
        """
        i = 1
        for group in self.download_images_group:
            index = 1
            if i < start:
                # Resume support: skip chapters already downloaded.
                i = i + 1
                continue
            for img in group:
                # data-original may be missing (None) or a non-http
                # placeholder; skip those instead of crashing.
                if not (img and img.startswith('http')):
                    continue
                res = requests.get(img, headers=self.headers, timeout=timeout)
                tmp_dir = base_dir + title + "/" + str(i).zfill(3) + "/"
                log.logger.debug(tmp_dir)
                self.mkdir(tmp_dir)
                download_file_name = tmp_dir + str(index).zfill(3) + ".jpg"
                with open(download_file_name, 'wb') as f:
                    f.write(res.content)
                index = index + 1
            print("第" + str(i) + "集 over")
            i = i + 1

if __name__ == '__main__':
    # Script entry point: crawl one comic end-to-end.
    crawler = JiumanhuaCrawler()
    # NOTE(review): the index URL is on 5983.org, yet crawlerLinks prefixes
    # the collected hrefs with https://www.jiumanhua.com — confirm the two
    # domains mirror the same site, otherwise the chapter links will 404.
    crawler.crawlerLinks('https://www.5983.org/comics/1266')
    # Extract per-chapter image URLs, then download under the given title.
    crawler.crawlerImageLinks()
    crawler.crawlImages("啪啪啪调教所")
