#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import requests
from bs4 import BeautifulSoup
from crawler import config
from selenium import webdriver
from crawler import Common as c
from logs.Logger import Logger
import file_md5.FilePrint as file_print
from crawler.db.ImageInfoDBOperator import DBOperator, ImageInfo
import time
import datetime
import os
import common.const as const
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service as ChromeService
from urllib.parse import quote, urlencode

# Output directory for crawled Toutiao images (from project config).
path = config.generalConfig.toutiao_output_path
# Default HTTP headers for plain requests downloads.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
# Chrome browser binary location (kept for reference, unused).
# chrome_location = r'D:\Software\Chrome\chrome-win64_131\chrome-win64\chrome.exe'
# Chrome user-data (profile) directory reused by the WebDriver session.
user_data_dir = r'C:\Users\Administrator\AppData\Local\Google\Chrome\User Data\Default'

# Logger initialisation (project Logger wrapper writing to a file).
log = Logger("D:/workspace/logs/crawler.log", level='info')

# Crawl page-type constants:
# Regular Toutiao page.
const.TYPE1 = 1
# "i..."-style pages whose title does not show "今日头条".
const.TYPE2 = 2
# Pages where <img> elements inside <article> carry the image URL directly in src.
const.TYPE3 = 3
# Toutiao question (Q&A) pages.
const.TYPE4 = 4
# Pages where <img> elements inside <article> carry the image URL directly in src.
const.TYPE5 = 5


class ToutiaoCrawler:
    """Crawl image links from Toutiao (今日头条) pages with a headless Chrome
    WebDriver, and download them to disk with DB-backed de-duplication.

    Usage: construct, call ``crawl_images(url, type)`` to collect links,
    ``img_download(...)`` to fetch them, then ``close()`` to release Chrome.
    """

    def __init__(self):
        user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36'
        self.chrome_options = Options()
        self.chrome_options.add_argument('user-agent={0}'.format(user_agent))
        # Path to the Chrome executable.
        self.chrome_options.binary_location = r'D:\Software\Chrome\chrome-win64_131\chrome.exe'
        # Headless mode: no visible browser window.
        self.chrome_options.add_argument('--headless')
        self.chrome_options.add_argument('--no-sandbox')
        self.chrome_options.add_argument('--disable-dev-shm-usage')
        self.chrome_options.add_argument('window-size=10,10')
        # Without --disable-gpu, element location can occasionally fail.
        self.chrome_options.add_argument('--disable-gpu')
        # Skip image loading to speed up page rendering (we only need URLs).
        self.chrome_options.add_argument('blink-settings=imagesEnabled=false')
        # Reuse the local Chrome profile; any running Chrome must be closed first.
        self.chrome_options.add_argument(f'--user-data-dir={user_data_dir}')
        self.service = ChromeService(executable_path=r"D:\Software\Chrome\chromedriver-win64_131\chromedriver.exe")
        # Initialise the Chrome WebDriver from the service and options objects.
        self.driver = webdriver.Chrome(service=self.service, options=self.chrome_options)

        # Accumulates every image URL found across crawl_* calls.
        self.download_links = []

    def crawl_images(self, url, type):
        """Dispatch to the crawler matching the page *type* (const.TYPE1..5).

        NOTE: the parameter name ``type`` shadows the builtin; kept for
        backward compatibility with keyword callers.

        :return: the accumulated list of image URLs.
        """
        if type == const.TYPE1:
            res = self.crawl_type1_images(url)
        elif type == const.TYPE3:
            res = self.crawl_type3_images(url)
        elif type == const.TYPE4:
            res = self.crawl_type4_images(url)
        elif type == const.TYPE5:
            res = self.crawl_type5_images(url)
        else:
            # Handle the "i..."-style pages.
            res = self.crawl_type2_images(url)
        return res

    def _load_page(self, url, delay):
        """Load *url*, wait *delay* seconds to defeat anti-scraping redirects,
        reload the resolved URL, and return a BeautifulSoup of the page."""
        self.driver.get(url)
        time.sleep(delay)
        self.driver.get(self.driver.current_url)
        return BeautifulSoup(self.driver.page_source, 'lxml')

    def crawl_type1_images(self, url):
        """Collect protocol-relative image links from a regular Toutiao page."""
        # The long delay gives the anti-scraping redirect time to settle.
        soup = self._load_page(url, 30)
        img_elements = soup.find_all('img', {'class': 'weitoutiao-img'})
        for image in img_elements:
            link = image.get('src')
            # BUGFIX: the original compared type(link) to the string 'NoneType',
            # which is always true, so None links crashed "https:" + link.
            if link is not None:
                self.download_links.append("https:" + link)

        return self.download_links

    def crawl_type2_images(self, url):
        """Collect image links from "i..."-style article pages (data-src attrs)."""
        soup = self._load_page(url, 2).find(
            'article',
            {'class': 'syl-article-base syl-page-article tt-article-content syl-device-pc'})
        div_elements = soup.find_all('div', {'class': 'pgc-img'})
        for div in div_elements:
            img = div.find('img')
            # BUGFIX: check for a missing data-src before calling .replace().
            link = img.get('data-src')
            if link is not None:
                self.download_links.append(link.replace('amp;', ''))

        return self.download_links

    def crawl_type3_images(self, url):
        """Collect absolute image links found in <article> src attributes."""
        soup = self._load_page(url, 2)
        articles = soup.find_all(
            'article',
            {'class': 'syl-article-base tt-article-content syl-page-article syl-device-pc'})
        for article in articles:
            for img in article.find_all('img'):
                link = img.get('src')
                # BUGFIX: real None check instead of the always-true
                # type(link) != 'NoneType' comparison.
                if link is not None and link.startswith('http'):
                    self.download_links.append(link)

        return self.download_links

    def crawl_type4_images(self, url):
        """Collect every <img> src from a Toutiao question page."""
        soup = self._load_page(url, 2)
        for img in soup.find_all('img'):
            link = img.get('src')
            if link is not None:
                self.download_links.append(link)
        return self.download_links

    def crawl_type5_images(self, url):
        """Collect <img> links from the first <article>; protocol-relative
        links get an "https:" prefix."""
        soup = self._load_page(url, 2)
        article = soup.find('article')
        for img in article.find_all('img'):
            link = img.get('src')
            # BUGFIX: skip missing src instead of concatenating "https:" + None.
            if link is None:
                continue
            if link.startswith('http'):
                self.download_links.append(link)
            else:
                self.download_links.append("https:" + link)

        return self.download_links

    def img_download(self, links, title, index, parent_url, prefix_str):
        """Download each http(s) URL in *links* to D:/data/Street/<title>/,
        fingerprint the file and drop duplicates already recorded in the DB.

        :param links: iterable of image URLs (non-http entries are skipped)
        :param title: sub-directory name under the download root
        :param index: sequence number of the parent page, used in file names
        :param parent_url: page URL the images were crawled from
        :param prefix_str: timestamp prefix for the generated file names
        """
        i = 1
        db_session = DBOperator()
        for item in links:
            # Only absolute http(s) URLs are downloadable.
            if item.startswith('http'):
                res = requests.get(item)
            else:
                continue
            log.logger.info(item)
            tmp_dir = "D:/data/Street/" + title + "/"
            log.logger.debug(tmp_dir)
            self.mkdir(tmp_dir)
            download_file_name = tmp_dir + prefix_str + str(index) + "_" + str(i).zfill(3) + ".jpg"
            with open(download_file_name, 'wb') as f:
                f.write(res.content)

            # Fingerprint the downloaded file to detect duplicates.
            img_info = ImageInfo()
            img_info.url = item
            img_info.image_print = file_print.get_img_print(download_file_name)
            img_info.crawled_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            img_info.parent_url = parent_url
            # Store the parent URL stripped of its query string for comparison.
            if parent_url.find('?') > 0:
                img_info.abbr_parent_url = parent_url[:parent_url.find('?')]
            else:
                img_info.abbr_parent_url = parent_url
            if img_info.image_print is not None:
                result = db_session.check_image_info(img_info)
                if len(result) > 0:
                    log.logger.warn(
                        img_info.parent_url + " with image_print: " + str(img_info.image_print) + "have duplicated!")
                    log.logger.warn("File Name is " + download_file_name)
                    # Duplicate confirmed: remove the file just written.
                    os.remove(download_file_name)
                    i = i + 1
                    continue
            db_session.add_image_info(img_info)
            i = i + 1

    def close(self):
        """Close the current window and shut down the WebDriver session."""
        self.driver.close()
        self.driver.quit()

    def mkdir(self, path):
        """Create directory *path* (including parents) if it does not exist.

        :return: True if the directory was created, False if it already existed.
        """
        # Strip surrounding whitespace and a trailing backslash.
        path = path.strip().rstrip("\\")

        # os is already imported at module level; no local import needed.
        if not os.path.exists(path):
            os.makedirs(path)
            log.logger.info(path + ' 创建成功')
            return True
        else:
            log.logger.info(path + ' 目录已存在')
            return False

    def check_404_page(self, url):
        """Reload *url* and heuristically decide whether it is a 404 page.

        :return: True when a 404 marker (error image or "back home" button)
                 is present, False otherwise.
        """
        soup = self._load_page(url, 2)
        # NOTE(review): the presence of <error-tips> is treated as NOT-404
        # here — confirm this short-circuit is intentional.
        errors = soup.find('error-tips')
        if errors is not None:
            return False

        img = soup.find('error-img')
        if img is not None:
            log.logger.info("404")
            return True
        button = soup.find('button', {'class': 'back-home-btn'})
        if button is not None:
            log.logger.info("404")
            return True
        return False

def get_prefix():
    """Return a timestamp file-name prefix such as '20240101123045_'."""
    now = datetime.datetime.now()
    return now.strftime('%Y%m%d%H%M%S_')


if __name__ == '__main__':
    prefix_str = get_prefix()
    urls = c.read_urls("toutiao")
    step = 0
    err_urls = []       # pages confirmed as 404
    retry_urls = []     # pages that yielded no images but are not 404
    conn_err_urls = []  # pages whose image download hit a connection error
    for url in urls:
        db_session = DBOperator()
        # Skip URLs already recorded in the DB (compare without query string).
        if url.find('?') > 0:
            parent_urls = db_session.check_abbr_parent_url(url[:url.find('?')])
        else:
            parent_urls = db_session.check_abbr_parent_url(url)
        if len(parent_urls) > 0:
            log.logger.warn(url + "..... has crawled!")
            continue
        log.logger.info(url)
        crawler = ToutiaoCrawler()

        # BUGFIX: wrap the crawl in try/finally so the Chrome WebDriver is
        # always released, even when crawling or downloading raises.
        try:
            res = crawler.crawl_images(url, const.TYPE1)
            if len(res) == 0:
                if crawler.check_404_page(url):
                    err_urls.append(url)
                else:
                    retry_urls.append(url)
            try:
                crawler.img_download(res, "美腿2", step, url, prefix_str)
            except requests.exceptions.ConnectionError:
                conn_err_urls.append(url)
        finally:
            crawler.close()
        step = step + 1

    log.logger.info('error_urls-------------------------------------------------------------------- start')
    for err_url in err_urls:
        log.logger.info(err_url)
    log.logger.info('error_urls-------------------------------------------------------------------- end')
    log.logger.info('retry_urls-------------------------------------------------------------------- start')
    for retry_url in retry_urls:
        log.logger.info(retry_url)
    log.logger.info('retry_urls-------------------------------------------------------------------- end')
    log.logger.info('conn_err_urls-------------------------------------------------------------------- start')
    for conn_err_url in conn_err_urls:
        log.logger.info(conn_err_url)
    log.logger.info('conn_err_urls-------------------------------------------------------------------- end')
    log.logger.info('finish!')

