from bs4 import BeautifulSoup
import requests
import warnings
import urllib3
import time
import json
import random
from bs4 import Tag
from selenium import webdriver
import time
from selenium.webdriver.edge.options import Options
import re

# Silence InsecureRequestWarning noise from unverified-HTTPS requests
# (requests with verify=False would otherwise warn on every call).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


class item:
    """A single marketplace listing: description, derived label, price, picture URL."""

    def __init__(self, description="", price="", picture=""):
        self.description = description
        # Short label derived from the description text.
        self.label = self._extract_label(description)
        self.price = price
        self.picture = picture

    def _extract_label(self, text):
        """Extract a short label from *text*.

        A "word" character is an ASCII letter, a digit, or a CJK ideograph
        (U+4E00..U+9FFF); anything else counts as a separator.

        * Empty text -> "".
        * Text starting with a separator -> keep everything up to (but not
          including) the second separator; whole text if there is no second.
        * Otherwise -> keep everything up to the first separator; whole text
          if there is none.
        """
        if not text:
            return ""
        separator = re.compile(r'[^a-zA-Z0-9\u4e00-\u9fff]')
        positions = [m.start() for m in separator.finditer(text)]
        if not positions:
            # No separator anywhere: the whole text is the label.
            return text
        if positions[0] == 0:
            # Leading separator: cut at the second separator, if any.
            return text[:positions[1]] if len(positions) > 1 else text
        return text[:positions[0]]
        



def get_item_of_xianyu():
    """Scrape the Goofish (Xianyu) front page with Selenium Edge.

    Loads the page, scrolls a few times so lazily-loaded images are
    fetched, then parses the rendered HTML with BeautifulSoup.

    Returns:
        list[item]: one item per product card found on the page; missing
        fields fall back to placeholder strings ("标题未找到" etc.).
    """
    url = "https://www.goofish.com/"

    # Configure the Edge browser: fixed viewport, pushed off-screen and
    # minimized so it effectively runs in the background.
    edge_options = Options()
    edge_options.add_argument("--window-size=1920,1080")
    edge_options.add_argument("--window-position=-32000,-32000")
    edge_options.add_argument("--start-minimized")

    driver = webdriver.Edge(options=edge_options)
    try:
        driver.get(url)

        # Wait for the initial page load.
        time.sleep(2)

        # Simulate scrolling so lazy-loaded content/images appear.
        print("开始模拟滚动加载内容...")
        scroll_pause_time = 1   # seconds to wait after each scroll step
        screen_height = 500     # pixels scrolled per step
        scroll_count = 5        # number of scroll steps

        for i in range(scroll_count):
            driver.execute_script(f"window.scrollTo(0, {screen_height * (i+1)});")
            print(f"已滚动 {i+1}/{scroll_count} 次...")
            time.sleep(scroll_pause_time)

        print("滚动加载完成，开始解析页面...")
        html = driver.page_source
    finally:
        # BUGFIX: always close the browser, even if loading/scrolling raises.
        # Previously an exception before quit() leaked the Edge process.
        driver.quit()

    soup = BeautifulSoup(html, "lxml")
    items = []  # collected item objects

    a_tags = soup.find_all('a', class_='feeds-item-wrap--rGdH_KoF')

    for a_tag in a_tags:
        # Product title.
        title_span = a_tag.find('span', class_='main-title--sMrtWSJa')
        label = title_span.get_text(strip=True) if title_span else "标题未找到"

        # Price.
        price_span = a_tag.find('span', class_='number--NKh1vXWM')
        price = price_span.get_text(strip=True) if price_span else "价格未找到"

        # Picture URL; protocol-relative URLs ("//...") get "https:" prepended.
        img_tag = a_tag.find('img', class_='feeds-image--TDRC4fV1')
        if img_tag:
            img_url = img_tag.get('src', '')
            if img_url.startswith('//'):
                img_url = 'https:' + img_url
        else:
            img_url = "图片未找到"

        items.append(item(description=label, price=price, picture=img_url))

    # Debug output: print every collected picture URL.
    for it in items:
        print(it.picture)
    return items





#get_url("https://www.clssn.com/") + get_url("https://www.jkb.com.cn/") + get_url("https://www.jkb.com.cn/")

# items = get_item_of_xianyu()
# for it in items:
#     print(it.picture)
#     print("--------------------------")
