"""工具函数模块"""

import re
import json
import csv
from typing import List, Dict, Any, Optional
from urllib.parse import urljoin, urlparse
from bs4 import BeautifulSoup
from crawler.models import ProductInfo, SearchResult


def clean_text(text: str) -> str:
    """Normalize free text: trim and collapse every whitespace run to one space.

    Args:
        text: Raw scraped text; may be None or empty.

    Returns:
        The normalized string, or "" for falsy input.
    """
    if not text:
        return ""

    # \s+ already matches \r, \n and \t, so a single pass both collapses
    # runs and removes control whitespace (the old second sub was dead code).
    return re.sub(r'\s+', ' ', text.strip())


def extract_price(price_text: str) -> str:
    """Pull the first numeric price (digits, thousands commas, optional
    decimals) out of free-form price text.

    Falls back to the raw input when no number is present, and returns ""
    for falsy input.
    """
    if not price_text:
        return ""

    match = re.search(r'[\d,]+\.?\d*', price_text)
    return match.group() if match else price_text


def extract_sales(sales_text: str) -> str:
    """Extract the first sales figure (digits, optionally with thousands
    separators) from free-form sales text.

    Falls back to the raw input when no digits are present, and returns ""
    for falsy input.
    """
    if not sales_text:
        return ""

    match = re.search(r'(\d+(?:,\d+)*)', sales_text)
    return match.group(1) if match else sales_text


def is_guangzhou_location(location: str) -> bool:
    """Return True when the ship-from location text refers to Guangzhou."""
    if not location:
        return False

    # Normalize whitespace (clean_text inlined: one \s+ collapse covers it)
    # and lowercase before substring matching.
    normalized = re.sub(r'\s+', ' ', location.strip()).lower()
    keywords = ('广州', '广东广州', '广东省广州', '穗')
    return any(keyword in normalized for keyword in keywords)


def validate_url(url: str, base_url: str = "https://www.1688.com") -> str:
    """Normalize *url* into an absolute URL resolved against *base_url*.

    Handles scheme-relative (``//host/path``), root-relative (``/path``) and
    plain relative (``path``) URLs; URLs that already carry a scheme pass
    through unchanged.

    Args:
        url: Possibly-relative URL scraped from a page.
        base_url: Base used to resolve relative URLs.

    Returns:
        An absolute URL, or "" for falsy input.
    """
    if not url:
        return ""

    # Any URL without a scheme needs resolving; urljoin covers the
    # scheme-relative, root-relative AND plain-relative cases (the old code
    # silently returned plain-relative paths unmodified).
    if not urlparse(url).scheme:
        url = urljoin(base_url, url)

    return url


def parse_product_element(element, base_url: str = "https://www.1688.com") -> Optional[ProductInfo]:
    """Parse one search-result element (a BeautifulSoup Tag) into a ProductInfo.

    Args:
        element: The Tag wrapping a single product card.
        base_url: Base used to absolutize relative links.

    Returns:
        A ProductInfo, or None when the element lacks a title/URL or parsing
        raises.
    """
    try:
        # Title link. BUG FIX: the old fallback was find('.title a'), but
        # Tag.find treats that CSS selector as a literal tag NAME and can
        # never match — select_one interprets it as a selector.
        title_elem = element.find('a', class_='title') or element.select_one('.title a')
        title = clean_text(title_elem.get_text()) if title_elem else ""

        product_url = ""
        if title_elem and title_elem.get('href'):
            product_url = validate_url(title_elem.get('href'), base_url)

        # A product without a title or link is unusable — bail out early
        # before doing the remaining field extraction work.
        if not title or not product_url:
            return None

        def _select_text(*selectors: str) -> str:
            """Cleaned text of the first selector that matches, else ""."""
            for selector in selectors:
                node = element.select_one(selector)
                if node:
                    return clean_text(node.get_text())
            return ""

        # Same BUG FIX throughout: CSS selectors must go through select_one,
        # not find.
        price = extract_price(_select_text('.price', '[class*="price"]'))
        sales = extract_sales(_select_text('.sales', '[class*="sales"]'))
        location = _select_text('.location', '[class*="location"]')
        supplier = _select_text('.supplier', '[class*="supplier"]')
        min_order = _select_text('.min-order', '[class*="min"]')

        # Product image; resolve against the same base as the other links
        # (the old code dropped base_url here).
        img_elem = element.find('img')
        image_url = validate_url(img_elem.get('src', ''), base_url) if img_elem else ""

        return ProductInfo(
            title=title,
            price=price,
            min_order=min_order,
            sales=sales,
            location=location,
            supplier=supplier,
            product_url=product_url,
            image_url=image_url,
        )

    except Exception as e:
        # Best-effort parsing: log and skip this element rather than abort
        # the whole page.
        print(f"解析商品元素时出错: {e}")
        return None


def save_to_json(data: SearchResult, filename: str) -> None:
    """Serialize a SearchResult to a pretty-printed UTF-8 JSON file.

    Args:
        data: Result object exposing a ``.dict()`` method.
        filename: Destination path; overwritten if it exists.

    Errors are reported to stdout rather than raised (best-effort export).
    """
    try:
        with open(filename, 'w', encoding='utf-8') as f:
            # ensure_ascii=False keeps Chinese text readable in the file.
            json.dump(data.dict(), f, ensure_ascii=False, indent=2)
        # BUG FIX: the success message did not interpolate the filename.
        print(f"数据已保存到 {filename}")
    except Exception as e:
        print(f"保存JSON文件时出错: {e}")


def save_to_csv(products: List[ProductInfo], filename: str) -> None:
    """Write a list of products to a CSV file (UTF-8 with BOM, for Excel).

    Column names come from the first product's ``.dict()`` keys.

    Args:
        products: Products to export; nothing is written when empty.
        filename: Destination path; overwritten if it exists.

    Errors are reported to stdout rather than raised (best-effort export).
    """
    # Check BEFORE opening: the old code opened the file first and left a
    # useless empty file behind when there were no products.
    if not products:
        return

    try:
        with open(filename, 'w', newline='', encoding='utf-8-sig') as f:
            fieldnames = list(products[0].dict().keys())
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(product.dict() for product in products)
        # BUG FIX: the success message did not interpolate the filename.
        print(f"数据已保存到 {filename}")
    except Exception as e:
        print(f"保存CSV文件时出错: {e}")


def filter_guangzhou_products(products: List[ProductInfo]) -> List[ProductInfo]:
    """Keep only the products whose ship-from location matches Guangzhou."""
    return list(filter(lambda product: is_guangzhou_location(product.location), products))


def sort_by_sales(products: List[ProductInfo], reverse: bool = True) -> List[ProductInfo]:
    """Return *products* sorted by numeric sales volume.

    Args:
        products: Products whose ``sales`` field is free text ("1,200",
            "已售30件", ...).
        reverse: True (default) sorts highest sales first.

    Products whose sales text contains no digits sort as 0.
    """
    def get_sales_number(product: ProductInfo) -> int:
        # Strip thousands separators, then take the first run of digits.
        # `or ""` keeps a None sales field from raising (the old code hid
        # that case behind a bare `except:`, which also swallowed real bugs).
        sales_text = (product.sales or "").replace(',', '')
        match = re.search(r'\d+', sales_text)
        return int(match.group()) if match else 0

    return sorted(products, key=get_sales_number, reverse=reverse)


def print_product_summary(products: List[ProductInfo]) -> None:
    """Print a short console summary of up to the first 10 products."""
    if not products:
        print("没有找到商品")
        return

    separator = "-" * 80
    print(f"\n找到 {len(products)} 个商品:")
    print(separator)

    # Cap console output at the first 10 entries to keep it readable.
    for index, product in enumerate(products[:10], start=1):
        print(f"{index}. {product.title[:50]}...")
        print(f"   价格: {product.price} | 销量: {product.sales} | 发货地: {product.location}")
        print(f"   链接: {product.product_url}")
        print(separator)