import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service as ChromeService
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from urllib.parse import quote, urlencode
import time
import datetime
import os
import aiohttp
import asyncio
import logging
from logging.handlers import RotatingFileHandler
from crawler import config, Common as c
from logs.Logger import Logger
import file_md5.FilePrint as file_print
from crawler.db.ImageInfoDBOperator import DBOperator, ImageInfo

# Configuration
# Output directory for crawled images, taken from the project config module.
# NOTE(review): `path` and `headers` are not referenced anywhere in this file —
# presumably used by other modules or leftover; confirm before removing.
path = config.generalConfig.toutiao_output_path
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
# Chrome profile directory reused by the headless browser (keeps login/session state).
user_data_dir = r'C:\Users\Administrator\AppData\Local\Google\Chrome\User Data\Default'

# Logging setup
log = Logger("D:/workspace/logs/crawler.log", level='info')


class ToutiaoCrawler:
    """Render Toutiao pages in headless Chrome and download the images found.

    Page rendering goes through Selenium (JavaScript-heavy pages); the actual
    image downloads are done concurrently with aiohttp.
    """

    def __init__(self):
        # Headless Chrome reusing the logged-in user profile so pages that
        # require a session render correctly.
        self.chrome_options = Options()
        self.chrome_options.add_argument('--headless')
        self.chrome_options.add_argument('--no-sandbox')
        self.chrome_options.add_argument('--disable-dev-shm-usage')
        self.chrome_options.add_argument('window-size=10,10')
        self.chrome_options.add_argument('--disable-gpu')
        self.chrome_options.add_argument(f'--user-data-dir={user_data_dir}')
        self.service = ChromeService(executable_path=r"D:\Software\Chrome\chromedriver-win64_131\chromedriver.exe")
        self.driver = webdriver.Chrome(service=self.service, options=self.chrome_options)
        self.download_links = []

    async def fetch_page(self, url):
        """Load *url* in the browser and return the rendered page source.

        NOTE: the Selenium calls are blocking; this coroutine does not yield
        to the event loop. It is async only so callers can drive it uniformly
        via asyncio.run().

        :param url: page URL to load.
        :return: HTML source of the rendered page (str).
        """
        self.driver.get(url)
        # Wait up to 30s for the document body to exist before scraping.
        WebDriverWait(self.driver, 30).until(
            EC.presence_of_element_located((By.TAG_NAME, 'body')))
        return self.driver.page_source

    def parse_images(self, html, img_class=None):
        """Return all <img> tags in *html*, optionally filtered by CSS class.

        :param html: HTML document as a string.
        :param img_class: exact class attribute to match, or None for all imgs.
        :return: list of bs4 Tag objects.
        """
        soup = BeautifulSoup(html, 'lxml')
        return soup.find_all('img', {'class': img_class}) if img_class else soup.find_all('img')

    async def download_image(self, session, url, file_path):
        """Fetch *url* and write its bytes to *file_path*.

        Failures are logged, never raised, so one bad link does not abort the
        gather() in download_images().
        """
        try:
            async with session.get(url) as response:
                if response.status == 200:
                    with open(file_path, 'wb') as f:
                        f.write(await response.read())
                    log.logger.info(f"Downloaded {url} to {file_path}")
                else:
                    # BUG FIX: non-200 responses were previously dropped silently.
                    log.logger.warning(f"Skipped {url}: HTTP {response.status}")
        except Exception as e:
            log.logger.error(f"Failed to download {url}: {e}")

    async def download_images(self, links, title, index, parent_url, prefix_str):
        """Concurrently download every http(s) link into D:/data/Street/<title>/.

        :param links: candidate image URLs; non-http entries are skipped.
        :param title: subdirectory name for this batch.
        :param index: batch index, embedded in each file name.
        :param parent_url: page the links came from (currently unused, kept
            for interface compatibility).
        :param prefix_str: timestamp prefix for the file names.
        """
        tmp_dir = os.path.join("D:/data/Street/", title)
        os.makedirs(tmp_dir, exist_ok=True)

        async with aiohttp.ClientSession() as session:
            tasks = [
                self.download_image(
                    session, item,
                    os.path.join(tmp_dir, f"{prefix_str}{index}_{str(i).zfill(3)}.jpg"))
                for i, item in enumerate(links, 1)
                if item.startswith('http')
            ]
            await asyncio.gather(*tasks)

    def close(self):
        """Shut down the browser session; safe to call even if already closed.

        BUG FIX: the original called driver.close() followed by driver.quit().
        quit() already closes every window and ends the session; the prior
        close() could raise if the window was already gone, skipping quit()
        and leaking the chromedriver process.
        """
        try:
            self.driver.quit()
        except Exception as e:
            log.logger.error(f"Error while quitting driver: {e}")


# Main execution
if __name__ == '__main__':
    # Timestamp prefix shared by every file downloaded in this run.
    prefix_str = datetime.datetime.now().strftime('%Y%m%d%H%M%S_')
    urls = c.read_urls("toutiao")
    crawler = ToutiaoCrawler()

    try:
        for step, url in enumerate(urls):
            try:
                html = asyncio.run(crawler.fetch_page(url))
                images = crawler.parse_images(html, img_class='weitoutiao-img')
                # Toutiao serves protocol-relative src values ("//host/..."),
                # so prepend the scheme before downloading.
                links = ["https:" + img.get('src') for img in images if img.get('src')]
                asyncio.run(crawler.download_images(links, "美腿2", step, url, prefix_str))
            except Exception as e:
                # Log and continue with the next URL; one bad page must not
                # abort the whole run.
                log.logger.error(f"Error processing {url}: {e}")
    finally:
        # BUG FIX: close the driver once after ALL URLs are processed. The
        # original called crawler.close() in the per-URL finally, destroying
        # the WebDriver after the first iteration and making every subsequent
        # URL fail.
        crawler.close()
