import time

from selenium import webdriver
from selenium.webdriver.chrome.service import Service

from crawler import config
from crawler.downloader.ImageDownloader import ImageDownloader
from date_manage.date_manage import TimeHelper

step = 1
length = 3316
path = config.generalConfig.toutiao_output_path
# 谷歌浏览器位置
chrome_location = r'C:\Program Files\Google\Chrome\Application\chrome.exe'


class kuhuaceCrawler:
    """Crawl kuhuace.com flipbook pages with a Selenium-driven Chrome browser.

    The site redirects each page URL to the actual image location; this class
    resolves those redirects (``get_url``) and downloads the resulting images
    (``imageDownload``).
    """

    def __init__(self,
                 driver_path='D:/Dev/006_Python/chromedriver-win64.V117/chromedriver.exe',
                 binary_location=chrome_location):
        """Configure and launch the Chrome WebDriver.

        Args:
            driver_path: Path to the chromedriver executable. Defaults to the
                original hard-coded location for backward compatibility.
            binary_location: Path to the Chrome browser binary.
        """
        user_agent = ('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36')
        self.chrome_options = webdriver.ChromeOptions()
        self.chrome_options.binary_location = binary_location
        self.chrome_options.add_argument('user-agent={0}'.format(user_agent))
        # Headless mode (disabled on purpose — re-enable if no display is needed):
        # self.chrome_options.add_argument('--headless')
        self.chrome_options.add_argument('--no-sandbox')
        self.chrome_options.add_argument('--disable-dev-shm-usage')
        self.chrome_options.add_argument('window-size=10,10')  # keep the window tiny
        self.chrome_options.add_argument('--disable-gpu')  # avoids element-location glitches
        # Skip image rendering: only the redirected URL is needed, not the page.
        self.chrome_options.add_argument('blink-settings=imagesEnabled=false')
        # self.chrome_options.add_argument(f'--user-data-dir={user_data_dir}')  # user profile; requires closing running Chrome
        # Selenium 4.10+ removed the positional executable_path argument;
        # the driver path must be passed through a Service object instead.
        self.driver = webdriver.Chrome(service=Service(driver_path),
                                       options=self.chrome_options)

    def imageDownload(self, urls):
        """Download every image URL in *urls*, naming the files 1, 2, 3, ...

        Args:
            urls: Iterable of direct image URLs to fetch.
        """
        downloader = ImageDownloader()
        time_helper = TimeHelper()
        # Hoisted out of the loop so all files land in the same dated folder
        # even if the run crosses midnight.
        date_str = time_helper.getCurrentTimeStr('%Y%m%d')
        for index, url in enumerate(urls, start=1):
            downloader.image_downloader(url, str(index), path, date_str, '.png')

    def get_url(self, url):
        """Load *url* in the browser and return the URL it redirects to.

        Args:
            url: The flipbook page URL to open.

        Returns:
            The browser's final (post-redirect) URL as a string.
        """
        self.driver.get(url)
        # Anti-scraping workaround kept for reference: sleep briefly before
        # reading current_url, then refresh the page if needed.
        # time.sleep(0.5)
        current_url = self.driver.current_url
        print(current_url)
        return current_url


if __name__ == '__main__':
    url_prefix = 'https://app.kuhuace.com/flipbook/page?id=9bee1d38-1849-11ef-9a10-0242ac120002&page='
    url_suffix = '&token=f813bb5e852e78ef0c4e50f8dbe9d86b'
    crawler = kuhuaceCrawler()
    # Resolve the redirect target for every page number. range(1, length)
    # covers pages 1 .. length-1, matching the original behavior.
    # NOTE(review): the dead `i = 1` initializer and the `i = i + 1` inside
    # the for-loop (overwritten by the loop itself) were removed.
    urls = [crawler.get_url(url_prefix + str(page) + url_suffix)
            for page in range(1, length)]
    print(urls)
    crawler.imageDownload(urls)
    print('Done!')
