from excelhelper_xue import excelhelper_xue

import requests
from bs4 import BeautifulSoup
import time
import random
import re
from b2 import parse_category_cards
# Browser-like request headers sent with every scraper request so the site
# serves normal desktop HTML rather than a bot-challenge page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Language': 'en-US,en;q=0.9',
    'Accept-Encoding': 'gzip, deflate, br',
    'Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1'
}
def extract_sales_count(element):
    """Return the numeric sales count parsed from *element*'s text.

    Returns None when *element* is falsy (missing tag or empty match).
    """
    if not element:
        return None
    return clean_sales_text(element.get_text(strip=True))

def clean_sales_text(text):
    """Parse a sales blurb like "7K+ bought in past month" into an int.

    Supports K/M/B suffixes; returns None when no number is found.
    """
    # BUG FIX: the regex accepts a 'B' unit but the multiplier table did
    # not include it, so "7B+" silently parsed as 7.
    multipliers = {'K': 1000, 'M': 1000000, 'B': 1000000000}
    # Tightened from [\d\.]+ so a stray "." can no longer reach float()
    # and raise ValueError.
    match = re.search(r'(\d+(?:\.\d+)?)([KMB]?)\+?', text)
    if match:
        number, unit = match.groups()
        return int(float(number) * multipliers.get(unit, 1))
    return None

def clean_number(element):
    """Return the integer formed by the digits in *element*'s text.

    Returns None for a falsy element or when the text contains no digits.
    """
    if not element:
        return None
    digits = re.sub(r'[^\d]', '', element.get_text(strip=True))
    # BUG FIX: int('') raised ValueError when the text had no digits.
    return int(digits) if digits else None

def clean_rating(element):
    """Return the first "X.Y" rating in *element*'s text as a float.

    Returns None for a falsy element or when no decimal rating is present.
    """
    if not element:
        return None
    text = element.get_text(strip=True)
    # BUG FIX: .group(1) raised AttributeError when the pattern did not match.
    match = re.search(r'(\d+\.\d+)', text)
    return float(match.group(1)) if match else None
# Column list for the category upsert, including the 'opdate' timestamp.
# NOTE(review): this assignment appears unused — it is re-assigned near the
# bottom of this file (without 'opdate') before insert_sql is built; confirm
# and consider removing.
INSERT_FIELDS_simple = [
        'category', 'parent_level', 'level',   'url', 'image_url', 'alt_text', 'currentpage','opdate'
    ]
class AmazonScraper_direction:
    """Scraper for Amazon department pages.

    Fetches pages through a shared ``requests.Session``, parses
    search-result product listings, and walks the department category grid
    one level deep, persisting category rows via ``excelhelper_xue``.

    NOTE(review): depends on module-level names defined elsewhere in this
    file: ``headers``, ``insert_sql``, ``parse_category_cards`` and
    ``excelhelper_xue``.
    """

    def __init__(self):
        # One Session so cookies and keep-alive connections are reused.
        self.session = requests.Session()

    def get_page(self, url):
        """GET *url* with the shared browser-like headers.

        Returns the HTML text on HTTP 200; returns None for any other
        status or on a network error (which is printed and swallowed).
        """
        try:
            response = self.session.get(
                url,
                headers=headers,
                timeout=30
            )
        except Exception as e:
            print(f"Error fetching page: {e}")
            return None
        return response.text if response.status_code == 200 else None

    def parse_category_page(self, html):
        """Parse a search-results page into a list of product dicts.

        Returns [] for falsy *html*; per-product parse errors are printed
        and the product is skipped.
        """
        if not html:
            return []

        soup = BeautifulSoup(html, 'html.parser')
        products = []

        # Each organic search result carries this data-component-type.
        product_elements = soup.find_all('div', {'data-component-type': 's-search-result'})

        for element in product_elements:
            try:
                product = {
                    'title': self._extract_title(element),
                    'price': self._extract_price(element),
                    'rating': self._extract_rating(element),
                    'reviews': self._extract_reviews(element),
                    'image_url': self._extract_image(element),
                    'product_url': self._extract_url(element)
                }
                products.append(product)
            except Exception as e:
                print(f"Error parsing product: {e}")
                continue

        return products

    def _extract_title(self, element):
        # The product title lives in the result card's <h2>.
        title_element = element.find('h2', {'class': 'a-size-mini'})
        return title_element.text.strip() if title_element else None

    def _extract_price(self, element):
        price_element = element.find('span', {'class': 'a-price'})
        return price_element.text.strip() if price_element else None

    # BUG FIX: the four extractors below were referenced by
    # parse_category_page but never defined, so every product raised
    # AttributeError and the method always returned [].
    # NOTE(review): selectors are best-effort — verify against live markup.
    def _extract_rating(self, element):
        # e.g. "4.5 out of 5 stars" lives in the icon's alt span.
        rating_element = element.find('span', {'class': 'a-icon-alt'})
        return rating_element.text.strip() if rating_element else None

    def _extract_reviews(self, element):
        reviews_element = element.find('span', {'class': 'a-size-base'})
        return reviews_element.text.strip() if reviews_element else None

    def _extract_image(self, element):
        image_element = element.find('img', {'class': 's-image'})
        return image_element.get('src') if image_element else None

    def _extract_url(self, element):
        link_element = element.find('a', {'class': 'a-link-normal'})
        return link_element.get('href') if link_element else None

    def scrape_category(self, category_url, max_pages=10):
        """Scrape up to *max_pages* result pages starting at *category_url*.

        Stops early when a page fails to load or yields no products.
        """
        all_products = []
        for page in range(1, max_pages + 1):
            # Page 1 is the bare category URL; later pages add &page=N.
            url = category_url if page == 1 else f"{category_url}&page={page}"

            # Random delay so the request cadence looks less bot-like.
            time.sleep(random.uniform(2, 5))

            html = self.get_page(url)
            if not html:
                break

            products = self.parse_category_page(html)
            if not products:
                break

            all_products.extend(products)

        return all_products

    def parse_grid_category_1(self, html):
        """Parse the department page's category grid into level-1 dicts.

        Each dict carries id/category/url/image_url/alt_text plus fixed
        parent_level='0' and level='1' markers.
        """
        soup = BeautifulSoup(html, 'html.parser')

        results = []
        for element in soup.find_all('div', class_='_Y29ud_bxcGridColumn_J5gfU'):
            item_data = {
                'id': element.get('id', ''),
                'category': '',
                'alt_text': '',
                'url': '',
                'image_url': '',
                'parent_level': '0',
                'level': '1',
            }

            link = element.find('a')
            if link:
                item_data['url'] = link.get('href', '')
                item_data['category'] = link.get('aria-label', '')

                img = link.find('img')
                if img:
                    item_data['image_url'] = img.get('src', '')
                    item_data['alt_text'] = img.get('alt', '')

            results.append(item_data)
        return results

    def parse_grid_category_2(self, html):
        """Walk level-1 grid categories and their level-2 sub-category
        cards, then persist all rows through excelhelper_xue.
        """
        blacklist_level1 = ['Furniture', 'bed', 'vacuums', 'premium']
        search_items = []
        for item in self.parse_grid_category_1(html):
            if item['category'] == '':
                continue
            # Everything from 'Living room' onward is out of scope.
            if 'Living room' in item['category']:
                break
            if any(term.lower() in item['category'].lower() for term in blacklist_level1):
                continue
            search_items.append(item)
            print("ID:", item['id'])
            print("Category:", item['category'])
            print("URL:", item['url'])
            print("Image URL:", item['image_url'])
            print("Alt Text:", item['alt_text'])
            print("-" * 50)

        all_results = []
        all_results.extend(search_items)
        for cate_item in search_items:
            # id '0' is a placeholder row — skip it.
            if cate_item['id'] == '0':
                continue
            url = 'https://www.amazon.com' + cate_item['url']
            # BUG FIX: fetch through self, not the module-level `amz` instance.
            sub_html = self.get_page(url)

            cate_item['currentpage'] = 1
            results = parse_category_cards(sub_html)
            for result in results:
                result['parent_level'] = cate_item['id']
                result['level'] = '2'
                # Default a missing/blank page marker to page 1.
                if result.get('currentpage', '') == '':
                    result['currentpage'] = 1
                print(f"Category: {result.get('name')}")
                print(f"URL: {result.get('url')}")
            # BUG FIX: accumulate every category's rows — the original kept
            # only the final iteration's `results` and raised NameError when
            # search_items was empty.
            all_results.extend(results)

        dbhelper_xue_instance = excelhelper_xue()
        dbhelper_xue_instance.execute(insert_sql, all_results)

# Script entry point: build one scraper instance and fetch the root
# department page (node=1055398), whose category grid is parsed below.
amz=AmazonScraper_direction()
html = amz.get_page('https://www.amazon.com/b?node=1055398')

# usage example

# Print the parsed level-1 category results.
# Collect (and echo) the level-1 categories that survive the blacklist.
search_items = []
blacklist_level1 = ['Furniture', 'bed', 'vacuums', 'premium']
parsed_items = amz.parse_grid_category_1(html)
for item in parsed_items:
    category_name = item['category']
    if category_name == '':
        continue
    # Stop entirely once the 'Living room' section is reached.
    if 'Living room' in category_name:
        break
    # Skip any category whose name contains a blacklisted term.
    if any(term.lower() in category_name.lower() for term in blacklist_level1):
        continue
    search_items.append(item)
    print("ID:", item['id'])
    print("Category:", item['category'])
    print("URL:", item['url'])
    print("Image URL:", item['image_url'])
    print("Alt Text:", item['alt_text'])
    print("-" * 50)
    
# Main crawl: for each surviving level-1 category, fetch its page, parse the
# level-2 category cards, then paginate through each card's product listing.
for index,cate_item in enumerate(search_items):
    # id '0' is a placeholder row — skip it.
    if cate_item['id']=='0':
        continue
    url='https://www.amazon.com'    +   cate_item['url']
    html=amz.get_page(url)
    category_1=cate_item['category']

    cate_item['currentpage']=1
    results=parse_category_cards(html)
    for result in results:
        # Tag each level-2 card with its parent id and depth.
        result['parent_level']=cate_item['id']
        result['level']='2'
        result['currentpage']=1  if result['currentpage']=='' else result['currentpage']
        print(f"Category: {result.get('name')}")
        category_2=result.get('name')
        print(f"URL: {result.get('url')}")
        response_2=amz.get_page(result.get('url'))
        page_count = 1
        # Paginate until the "next" link disappears or the page looks too
        # thin to be a real results page.
        while True:
            soup = BeautifulSoup(response_2, 'html.parser')
            next_page = soup.select_one('a.s-pagination-next')
            print(page_count,next_page)
            product_divs = soup.select('div.a-section.a-spacing-base')
            products=[]
            for item in product_divs:
                # NOTE(review): the lookups below search `soup` (the whole
                # page) rather than `item`, so every iteration re-reads the
                # page's FIRST matching link/count/price — confirm intended.
                product_link_tag = soup.find('a', class_='a-link-normal s-no-hover s-underline-text s-underline-link-text s-link-style a-text-normal')
                product_link ='https://www.amazon.com/' + product_link_tag['href'] if product_link_tag else None
                asin = product_link.split('/dp/')[1].split('/')[0] if product_link and '/dp/' in product_link else None
                # NOTE(review): raises AttributeError when the review-count
                # span is missing from the page.
                number_text = soup.find('span', class_='a-size-base s-underline-text').text.replace(',','')
                # Truncate very long links (presumably to fit a DB column —
                # TODO confirm the 750 limit).
                if product_link and len(product_link) > 750:
                    product_link = product_link[:750]

                print("Product Link:", product_link)
                # NOTE(review): crashes when either price span is absent.
                main_price = soup.find('span', class_='a-price-whole').text + soup.find('span', class_='a-price-fraction').text
                main_price = main_price.replace('$','').strip()
                sales_span = soup.find('span', class_='a-size-base a-color-secondary')

                sales_text = sales_span.text if sales_span else "0"
                sales_text=sales_text.replace(',','').strip()
                # Expand "7K+" / "1.2M+" style counts to plain numbers.
                match = re.search(r'(\d+\.?\d*)([KMB])\+', sales_text)
                number=0
                if match:
                    number = float(match.group(1))
                    unit = match.group(2)
                    if unit == 'K':
                        number *= 1000
                    elif unit == 'M':
                        number *= 1000000
                    elif unit == 'B':
                        number *= 1000000000

                # NOTE(review): sales_info is computed but never stored in
                # product_info — only `number` is used below.
                sales_info = extract_sales_count(item.select_one('div.a-row.a-size-base span.a-color-base:contains("bought")'))
                delivery_span = item.find('span', {'aria-label': lambda x: x and 'FREE delivery' in x and 'shipped by Amazon' in x})
                delivery_text = delivery_span.text if delivery_span else "文本未找到"
                # Normalise any matching delivery blurb to a fixed label.
                part1 = 'FREE delivery'
                part2 = 'shipped by Amazon'
                if part1.lower() in delivery_text.lower() and part2.lower() in delivery_text.lower():
                    delivery_text = 'FREE delivery shipped by Amazon' 

                product_info ={
                    'product_id':asin,
                    'title': item.select_one('h2.a-size-base-plus span').get_text(strip=True),
                    'link': product_link,
                    # NOTE(review): duplicate 'price' key — this first value
                    # is silently overwritten by the 'price' entry below.
                    'price': item.select_one('span.a-price .a-offscreen').get_text(strip=True).replace('$','').strip() if item.select_one('span.a-price') else None,
                    'rating': item.select_one('i.a-icon-star-small').get_text(strip=True).split()[0] if item.select_one('i.a-icon-star-small') else None,
                    'reviews_count': number_text,
                    'delivery_info': delivery_text,
                    'price': main_price,
                    'sales_info': number,
                    'currency': 'US',  # currency unit
                    'seller': '',
                    'maincategory': category_1,  # parent category name
                    'subcategory': category_2,
                    'in_stock': True,
                    'brand': ''  # brand field (not yet extracted)
                }
                print(f"已解析产品：{product_info['title']}")
                products.append(product_info)
                if len(products) > 100:
                    # TODO: write `products` to the database
                    # TODO: write `result` to the database
                    # NOTE(review): the buffer is cleared WITHOUT persisting,
                    # so these products are currently lost.
                    products=[]
            # Stop when the current page has too few products.
            # NOTE(review): the original comment said "fewer than 30", but
            # the code checks 10 — confirm which threshold is intended.
            if len(product_divs) < 10:
                break
            if not next_page:
                break
            next_page_url = 'https://www.amazon.com' + next_page['href']
            response_2 = amz.get_page(next_page_url)
            page_count += 1
            print("***"*30)
        print(f"Image URL: {result.get('image_url')}")
        print("---"*30)

# Columns bound as parameters for the category upsert. 'opdate' is not
# bound — it is set via now() in the ON DUPLICATE KEY UPDATE clause.
# NOTE(review): this re-assignment shadows the earlier module-level
# INSERT_FIELDS_simple (which also listed 'opdate').
INSERT_FIELDS_simple = [
        'category', 'parent_level', 'level',  'url', 'image_url', 'alt_text', 'currentpage'
    ]     
# Named bind-parameter placeholders, e.g. " :category ".
named_params = [f' :{field} ' for field in INSERT_FIELDS_simple]

# Upsert statement consumed by excelhelper_xue().execute(insert_sql, rows).
insert_sql = f"""
        INSERT INTO app_category_xue 
        ({', '.join(INSERT_FIELDS_simple)})
        VALUES ({', '.join(named_params)})
        ON DUPLICATE KEY UPDATE 
            category=VALUES(category),
            parent_level=VALUES(parent_level),
            level=VALUES(level),
            url=VALUES(url),
            image_url=VALUES(image_url),
            alt_text=VALUES(alt_text),
            currentpage=VALUES(currentpage),
            opdate=now() 
    """
  
 
