import random
import time

# curr_dir = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(os.path.join(curr_dir, "."))
# sys.path.append(os.path.join(curr_dir, ".."))
# sys.path.append(os.path.join(curr_dir, "../datas"))
# sys.path.append(os.path.join(curr_dir, "../docs"))
# sys.path.append(os.path.join(curr_dir, "../scraping_code"))
# sys.path.append(os.path.join(curr_dir, "../edgedriver_win64"))

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options  # Chrome browser
# from selenium.webdriver.edge.options import Options   # Edge browser
from bs4 import BeautifulSoup


class TaobaoProductScraper:
    """Scrape Taobao product data (title, carousel images, description
    images) through a Selenium-driven Chrome browser.

    The constructor opens a real browser window and then blocks for a
    manual login (see ``set_login``), because Taobao gates search and
    detail pages behind a logged-in session.
    """

    def __init__(self):
        # Chrome options: present as a normal desktop browser and hide
        # the common automation fingerprints anti-bot checks look for.
        chrome_options = Options()
        # chrome_options.add_argument('--headless')  # headless mode, optional
        chrome_options.add_argument(r'--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36 Edg/140.0.0.0')
        chrome_options.add_argument(r'--accept=application/json')
        chrome_options.add_argument(r'--referer=https://www.taobao.com')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')
        # BUG FIX: the original called add_experimental_option twice with
        # the same "excludeSwitches" key; experimental options are stored
        # in a dict, so the second call overwrote the first and
        # "enable-logging" was silently dropped. Pass both switches in a
        # single list instead.
        chrome_options.add_experimental_option(
            "excludeSwitches", ["enable-logging", "enable-automation"])
        # chrome_options.add_experimental_option("useAutomationExtension", False)
        self.driver = webdriver.Chrome(options=chrome_options)

        # Stealth: make navigator.webdriver report undefined on every new
        # document, before any page script can read it. (The original
        # injected this script twice; once is sufficient.)
        script = '''
            Object.defineProperty(navigator, 'webdriver', {
                get: () => undefined
            })
            '''
        try:
            self.driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {"source": script})
        except Exception:
            # Best effort: some driver versions may not support this CDP
            # command; the scraper still works, just less stealthily.
            pass

        self.wait = WebDriverWait(self.driver, 30)

        self.base_url = "https://www.taobao.com"
        self.driver.get(self.base_url)

        self.set_login()  # TODO: blocks for a manual login

    def search_products(self, keyword):
        """Search Taobao for *keyword* and return the first product URL.

        Returns ``None`` (after saving a debug screenshot) when no
        product link is found or any error occurs.
        """
        try:
            self.search_url = f"https://s.taobao.com/search?page=1&q={keyword}&tab=all"
            self.driver.get(self.search_url)

            # Small random pause to look less like a bot.
            time.sleep(random.randint(1, 3))

            # Wait for a product anchor rather than just the container:
            # the container can exist while items are still loading.
            try:
                self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "#content_items_wrapper > div a")))
            except Exception:
                print("等待超时：商品节点未加载，可能被拦截或需要验证")

            product_elements = self.driver.find_elements(By.CSS_SELECTOR, "#content_items_wrapper > div")

            if product_elements:
                first_product_url = product_elements[0].find_element(By.XPATH, ".//a").get_attribute("href")
                return self.check_url(first_product_url)

            # No product link found: keep a screenshot for debugging.
            screenshot_path = f"taobao_debug_{int(time.time())}.png"
            self.driver.save_screenshot(screenshot_path)
            return None

        except Exception as e:
            print(f"搜索商品时出错: {str(e)}")
            screenshot_path = f"error_debug_{int(time.time())}.png"
            self.driver.save_screenshot(screenshot_path)
            print(f"异常时页面截图已保存: {screenshot_path}")
            return None

    def get_product_details(self, product_url):
        """Open *product_url* and return a dict with keys ``title``,
        ``carousel_images`` and ``description_images``; ``None`` on error.
        """
        try:
            time.sleep(random.randint(1, 3))
            self.driver.get(product_url)

            product_info = {}
            # Title: try the current PC detail-page selector first, then
            # fall back to the legacy selector.
            try:
                title_element = self.driver.find_element(By.CSS_SELECTOR, "#tbpcDetail_SkuPanelBody > div.block1--PlRzQppo > div > div > div.MainTitle--PiA4nmJz.f-els-2")
                product_info['title'] = title_element.text.strip()
            except Exception:
                try:
                    title_element = self.driver.find_element(By.CSS_SELECTOR, ".tb-main-title")
                    product_info['title'] = title_element.text.strip()
                except Exception:
                    product_info['title'] = "未找到商品名称"

            # Carousel images and description/detail images.
            product_info['carousel_images'] = self.extract_carousel_images()
            product_info['description_images'] = self.extract_description()

            return product_info
        except Exception as e:
            print(f"获取商品详情时出错: {str(e)}")
            return None

    def extract_carousel_images(self):
        """Return the deduplicated src URLs of the carousel thumbnails."""
        carousel_images = []
        try:
            self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "#left-content-area > div > div > div.thumbnailsWrap--kljvw1fs")))

            thumbnails = self.driver.find_elements(By.CSS_SELECTOR, "#left-content-area > div > div > div.thumbnailsWrap--kljvw1fs > div.thumbnails--v976to2t > div.thumbnail--TxeB1sWz")
            for thumbnail in thumbnails:
                try:
                    for img in thumbnail.find_elements(By.CSS_SELECTOR, "div > img"):
                        src = img.get_attribute('src')
                        if src and src not in carousel_images:
                            carousel_images.append(src)
                except Exception:
                    # Skip thumbnails whose DOM node went stale mid-loop.
                    continue
        except Exception as e:
            print(f"提取轮播图时出错: {str(e)}")
        return carousel_images

    def extract_description(self):
        """Scroll the detail page to the bottom (triggering lazy loading)
        and return the description images' src URLs."""
        description_images = []
        try:
            # Human-like scrolling instead of one jump to the bottom.
            self.slow_scroll_to_bottom(step=500)
            self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "#container")))
            # Broad selector on purpose: the hash-suffixed detail classes
            # change between page versions, so grab every image under the
            # description container.
            for img in self.driver.find_elements(By.CSS_SELECTOR, "#container div img"):
                src = img.get_attribute('src')
                if src and src not in description_images:
                    description_images.append(self.check_url(src))
        except Exception as e:
            print(f"提取商品详情时出错: {str(e)}")
        # Drop the trailing price/after-sales explainer images.
        # NOTE(review): the original comment said "last image" but the
        # code drops the last TWO; behavior kept as-is — confirm intent.
        if description_images:
            description_images = description_images[:-2]
        return description_images

    def slow_scroll_to_bottom(self, step=300):
        """Scroll to the bottom of the page in small randomized steps.

        :param step: base number of pixels per scroll; each step is
            jittered by +/-50 px and followed by a random 0.3-1.5 s
            pause to mimic a human reader.
        """
        last_height = self.driver.execute_script("return document.body.scrollHeight")
        current_position = 0
        while current_position < last_height:
            current_position += random.randint(step - 50, step + 50)
            self.driver.execute_script(f"window.scrollTo(0, {current_position});")
            time.sleep(random.uniform(0.3, 1.5))
            # NOTE: last_height is deliberately not refreshed here; the
            # page may keep growing while it lazy-loads, and the final
            # jump below covers any remainder.
        # Make sure we really reach the bottom.
        self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

    def set_login(self):
        """Block for two minutes so the operator can log in manually.

        TODO: persist and reload cookies instead of discarding them —
        ``get_cookies()`` is called but its result is currently unused.
        """
        try:
            time.sleep(120)
            self.driver.get_cookies()
        except Exception as e:
            print(f"加载Cookie文件失败: {e}")

    @staticmethod
    def check_url(str_url):
        """Prefix scheme-less (protocol-relative) URLs with ``https:``.

        Fixes the original substring test (``"http" in str_url``), which
        failed to prefix a protocol-relative URL whose query string
        happens to contain the text "http".
        """
        if str_url.startswith(("http://", "https://")):
            return str_url
        return f"https:{str_url}"

    def close(self):
        """Quit the browser and release the WebDriver process."""
        self.driver.quit()

