#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import requests
from bs4 import BeautifulSoup
from crawler import config
from selenium import webdriver
from crawler import Common as c
from logs.Logger import Logger
import file_md5.FilePrint as file_print
from crawler.db.ImageInfoDBOperator import DBOperator, ImageInfo
import time
import datetime
import os
import common.const as const
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service as ChromeService

# Output directory for crawled Toutiao content (from project config).
path = config.generalConfig.toutiao_output_path
# Default request headers (Firefox UA) for plain HTTP downloads.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
# Location of the Chrome browser binary
chrome_location = r'D:\\Software\\Chrome\\chrome-win64_131\\chrome.exe'
# Chrome user-data (profile) directory
user_data_dir = r'C:\Users\Administrator\AppData\Local\Google\Chrome\User Data\Default'
# Logger initialisation
log = Logger("D:/logs/crawler.log", level='info')

# Crawl page types
# Regular Toutiao page
const.TYPE1 = 1
# "iXXX"-style pages whose title does not show "今日头条" (Toutiao)
const.TYPE2 = 2
# Pages where the <img> element's src attribute holds the image URL directly
const.TYPE3 = 3
# Toutiao question (Q&A) pages
const.TYPE4 = 4


class WexinCrawler:
    """Selenium-based crawler for WeChat/Toutiao article pages.

    Loads a page in headless-capable Chrome, collects the lazy-loaded image
    URLs (``data-src`` attributes), downloads them to disk and records each
    image in the DB, deleting files whose fingerprint is already known.
    """

    def __init__(self):
        user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36'
        self.chrome_options = Options()
        self.chrome_options.add_argument('-binary={0}'.format(chrome_location))
        self.chrome_options.add_argument('user-agent={0}'.format(user_agent))
        # Headless mode intentionally disabled (kept for reference):
        # self.chrome_options.add_argument('--headless')
        self.chrome_options.add_argument('--no-sandbox')
        self.chrome_options.add_argument('--disable-dev-shm-usage')
        self.chrome_options.add_argument('window-size=10,10')  # keep the visible window tiny
        self.chrome_options.add_argument('--disable-gpu')  # without this, element location sometimes fails
        # Skip image rendering: we only need the <img> tags, not the pixels.
        self.chrome_options.add_argument('blink-settings=imagesEnabled=false')
        # Reusing the profile requires closing any running Chrome first:
        # self.chrome_options.add_argument(f'--user-data-dir={user_data_dir}')
        self.service = ChromeService(executable_path=r"D:\Software\Chrome\chromedriver-win64_131/chromedriver.exe")

        # Initialise the Chrome WebDriver from the service and options objects.
        self.driver = webdriver.Chrome(service=self.service, options=self.chrome_options)

        # Instance-level accumulator returned by crawl_images (kept as an
        # attribute for backward compatibility).
        self.download_links = []

    def _page_soup(self, url):
        """Load *url* and return the parsed page as a BeautifulSoup object.

        Anti-crawler workaround: sleep briefly after the first load, then
        re-request whatever URL the browser was redirected to.
        """
        self.driver.get(url)
        time.sleep(2)
        self.driver.get(self.driver.current_url)
        return BeautifulSoup(self.driver.page_source, 'lxml')

    def crawl_images(self, url):
        """Collect every lazy-loaded image URL (``data-src``) found on *url*.

        Returns ``self.download_links`` (links accumulate across calls on the
        same instance, matching the original behaviour).
        """
        soup = self._page_soup(url)
        for image in soup.find_all('img'):
            link = image.get('data-src')
            # BUG FIX: the original also tested type(link) != '''NoneType''',
            # which compares a type object to a string and is always True;
            # the only meaningful check is "is not None".
            if link is not None:
                self.download_links.append(link)

        return self.download_links

    def img_download(self, links, title, index, parent_url, prefix_str):
        """Download each http(s) URL in *links* into D:/data/Street/<title>/.

        Files are named ``<prefix_str><index>_<NNN>.jpg``. Each download is
        fingerprinted; if the fingerprint already exists in the DB the file is
        deleted and skipped, otherwise an ImageInfo row is inserted.
        """
        i = 1
        db_session = DBOperator()
        for item in links:
            if not item.startswith('http'):
                continue
            # Timeout added so a single stalled download cannot hang the
            # whole crawl indefinitely.
            res = requests.get(item, timeout=30)
            log.logger.info(item)
            tmp_dir = "D:/data/Street/" + title + "/"
            log.logger.debug(tmp_dir)
            self.mkdir(tmp_dir)
            download_file_name = tmp_dir + prefix_str + str(index) + "_" + str(i).zfill(3) + ".jpg"
            with open(download_file_name, 'wb') as f:
                f.write(res.content)

            # Record the download and check whether this image is a duplicate.
            img_info = ImageInfo()
            img_info.url = item
            img_info.image_print = file_print.get_img_print(download_file_name)
            img_info.crawled_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            img_info.parent_url = parent_url
            # Strip the query string so the same article with different
            # tracking parameters is recognised as already crawled.
            q = parent_url.find('?')
            img_info.abbr_parent_url = parent_url[:q] if q > 0 else parent_url
            if img_info.image_print is not None:
                result = db_session.check_image_info(img_info)
                if len(result) > 0:
                    log.logger.warn(
                        img_info.parent_url + " with image_print: " + str(img_info.image_print) + "have duplicated!")
                    log.logger.warn("File Name is " + download_file_name)
                    # Duplicate confirmed: remove the file just written.
                    os.remove(download_file_name)
                    i = i + 1
                    continue
            db_session.add_image_info(img_info)
            i = i + 1

    def close(self):
        """Shut down the browser and its driver process.

        BUG FIX: uses quit() instead of close(); close() only closes the
        window and leaked one chromedriver process per crawled URL.
        """
        self.driver.quit()

    def mkdir(self, path):
        """Create directory *path* (including parents) if it does not exist.

        Returns True when the directory was created, False when it already
        existed. (Local 'import os' removed: os is imported at file top.)
        """
        # Strip surrounding whitespace and any trailing backslash.
        path = path.strip().rstrip("\\")

        if os.path.exists(path):
            # Directory already present — nothing to do.
            log.logger.info(path + ' 目录已存在')
            return False
        os.makedirs(path)
        log.logger.info(path + ' 创建成功')
        return True

    def check_404_page(self, url):
        """Return True when *url* resolved to the site's 404 placeholder.

        Heuristic: a page with no <img> at all, or whose first image src
        contains "404.jpeg", is treated as a 404 page.
        """
        soup = self._page_soup(url)
        img = soup.find('img')
        if img is None:
            return True
        src_str = img.get('src')
        if src_str is None:
            return False
        return src_str.find("404.jpeg") != -1


def get_prefix():
    """Build a 'YYYYmmddHHMMSS_' timestamp prefix for downloaded file names."""
    stamp = datetime.datetime.now()
    return stamp.strftime('%Y%m%d%H%M%S') + '_'


if __name__ == '__main__':
    # Crawl every configured 'weixin' URL, skipping ones already in the DB,
    # and report failures grouped by cause at the end.
    prefix_str = get_prefix()
    urls = c.read_urls('weixin')
    step = 0
    err_urls = []       # pages that resolved to the site's 404 placeholder
    retry_urls = []     # pages that yielded no images but were not 404
    conn_err_urls = []  # pages whose downloads hit a connection error
    for url in urls:
        db_session = DBOperator()
        # Compare on the URL without its query string so tracking parameters
        # do not defeat the "already crawled" check.
        q = url.find('?')
        parent_urls = db_session.check_abbr_parent_url(url[:q] if q > 0 else url)
        if len(parent_urls) > 0:
            log.logger.warn(url + "..... has crawled!")
            continue
        log.logger.info(url)
        crawler = WexinCrawler()
        # BUG FIX: close the browser in a finally block — the original leaked
        # a chromedriver instance whenever img_download raised anything other
        # than a ConnectionError.
        try:
            res = crawler.crawl_images(url)
            if len(res) == 0:
                if crawler.check_404_page(url):
                    err_urls.append(url)
                else:
                    retry_urls.append(url)
            try:
                crawler.img_download(res, "技术", step, url, prefix_str)
            except requests.exceptions.ConnectionError:
                conn_err_urls.append(url)
        finally:
            crawler.close()
        step = step + 1
    # Emit the three failure reports with the original log formatting.
    for label, bucket in (('error_urls', err_urls),
                          ('retry_urls', retry_urls),
                          ('conn_err_urls', conn_err_urls)):
        log.logger.info(label + '-------------------------------------------------------------------- start')
        for failed_url in bucket:
            log.logger.info(failed_url)
        log.logger.info(label + '-------------------------------------------------------------------- end')
    log.logger.info('finish!')

