import re 
import hashlib
import time
from datetime import datetime

# Third-party libraries
from selenium.webdriver import Chrome
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from ..browser import switch_to_us, send_request

# Project-internal libraries
from ..logger import logger

from ..db_conn_pool import conn_pool
from ..config import config
from ..models.task import Task
from .spider_base import SpiderBase



class ProductLinkSpider(SpiderBase):
    """Spider that scrapes product links from an Amazon best-seller list page
    and enqueues one detail-page task ('SellerLinkSpider') per product.
    """

    # CSS selector for product anchors in the best-seller grid layout.
    _PRODUCT_LINK_SELECTOR = 'div.zg-grid-general-faceout>span>div>a'

    def find_product_links(self, driver):
        """Collect the product links currently present on the page.

        Args:
            driver: Selenium driver positioned on a best-seller list page.

        Returns:
            list[str]: hrefs that start with 'https://www.amazon.com/'
            (filters out None hrefs and relative/off-site links).
        """
        product_elements = driver.find_elements(By.CSS_SELECTOR, self._PRODUCT_LINK_SELECTOR)
        hrefs = (element.get_attribute('href') for element in product_elements)
        return [link for link in hrefs
                if link and link.startswith('https://www.amazon.com/')]

    def scroll_to_bottom(self, driver: Chrome, max_retries=10, ready_to_return: callable = None):
        """Scroll the page to the bottom until its height stops growing,
        then optionally wait for a readiness predicate.

        Args:
            driver: the browser to scroll.
            max_retries: maximum number of scroll-and-grow iterations.
            ready_to_return: optional predicate taking the driver and
                returning truthy when the page is considered fully loaded;
                polled every 2s for up to 600s. NOTE(review): a predicate
                that never becomes truthy raises TimeoutException here.

        Returns:
            True once scrolling (and the optional wait) completed.
        """
        scroll_start = time.time()
        last_height = driver.execute_script("return document.body.scrollHeight")
        retry_count = 0

        while retry_count < max_retries:
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(3)  # give lazy-loaded content time to render
            new_height = driver.execute_script("return document.body.scrollHeight")
            if new_height == last_height:
                # Height stopped growing: nothing more is being loaded.
                break
            last_height = new_height
            retry_count += 1

        # Bug fix: the original called until(None) when ready_to_return was
        # omitted, raising TypeError. Only wait when a predicate is supplied.
        if ready_to_return is not None:
            WebDriverWait(driver, 600, poll_frequency=2).until(ready_to_return)
        scroll_time = time.time() - scroll_start
        # Routine success message: info level (was warning, inconsistent
        # with the other success-path logs in this class).
        logger.info(f"页面滚动加载完成，耗时: {scroll_time:.2f}秒, 滚动次数: {retry_count}")
        return True

    @send_request
    @switch_to_us
    def crawl(self, url: str, driver: Chrome):
        """Crawl one best-seller list page: scroll until all 50 products are
        rendered, extract their links, and enqueue detail-page tasks.

        Args:
            url: the best-seller list page URL (used for logging; the
                decorators are presumed to handle navigation — TODO confirm).
            driver: browser instance injected by the decorators.
        """

        def count_link(driver: Chrome) -> bool:
            """Readiness predicate: true once exactly 50 product links render.

            NOTE(review): a category with fewer than 50 items would never
            satisfy this and the wait would time out after 600s — confirm
            all targeted pages always carry 50 entries.
            """
            links = self.find_product_links(driver)
            cnt = len(links)
            logger.info(f"页面上的商品元素数量: {cnt}")
            return cnt == 50

        self.scroll_to_bottom(driver, ready_to_return=count_link)
        current_page_links = self.find_product_links(driver)
        if not current_page_links:
            logger.warning(f"{url} 没有找到商品链接")
        else:
            logger.info(f"{url} 抓取到 {len(current_page_links)} 个商品链接")
        self.create_tasks(current_page_links)

    def create_tasks(self, product_links):
        """Create one 'SellerLinkSpider' task per product link.

        Uses the ASIN (the 10-char alphanumeric id after '/dp/') as task id
        when present, otherwise falls back to an MD5 hash of the full URL.
        All inserts are committed in a single transaction; any failure rolls
        the whole batch back.

        Args:
            product_links: iterable of product-detail URLs.
        """
        conn = conn_pool.get_connection()
        # Bug fix: pre-initialize so the except-handler's log line cannot
        # raise NameError if a failure occurs before the first assignment.
        task_id = None
        try:
            for link in product_links:
                # Extract the product id (10 alphanumerics following /dp/).
                match = re.search(r'/dp/([A-Z0-9]{10})', link)
                if match:
                    task_id = match.group(1)
                else:
                    task_id = hashlib.md5(link.encode('utf-8')).hexdigest()
                    logger.warning(f"无法从链接提取商品ID，使用哈希值作为task_id: {link}")

                logger.info(f"任务 {task_id} 已创建, 商品链接: {link}")
                Task.add_task(
                    conn,
                    task_id,
                    'SellerLinkSpider',
                    config.get_crawl_config().get('max_exec_times', 3),
                    datetime.now(),
                    link
                )
                logger.info(f"任务 {task_id} 已成功添加到数据库")
            # Commit once for the whole batch so a single failure rolls back
            # everything instead of leaving a partial insert.
            conn.commit()
        except Exception as e:
            logger.error(f'添加任务 {task_id} 时数据库出错: {e}', exc_info=True)
            if conn:
                conn.rollback()
        finally:
            if conn:
                conn.close()


if __name__ == '__main__':
    # Ad-hoc manual run against one sample best-seller category page.
    target_url = "https://www.amazon.com/Best-Sellers-Beauty-Personal-Care-Personal-Makeup-Mirrors/zgbs/beauty/3785121/ref=zg_bs_nav_beauty_3_3736391"
    spider = ProductLinkSpider()
    spider.crawl(target_url)
    spider.close()