import requests
import urllib3
from lxml import html
import json
import csv
from urllib.parse import urlparse

# Suppress InsecureRequestWarning: fetch_page deliberately uses verify=False.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


class ProductSpider:
    """Scrape a paginated product listing and save the results as JSON or CSV."""

    def __init__(self, base_url, max_products=None, extract_image=True, extract_description=True, extract_price=True,
                 save_format='json', logger=None):
        """
        :param base_url: listing URL; pages are fetched as ``{base_url}?page=N``
        :param max_products: stop after collecting this many products (None = unlimited)
        :param extract_image: whether to extract the product image URL
        :param extract_description: whether to extract the product description
        :param extract_price: whether to extract the product price
        :param save_format: output format, ``'json'`` or ``'csv'``
        :param logger: optional callable that receives each log message
        """
        self.base_url = base_url
        self.page = 1                                # next page number to fetch
        self.all_products = []                       # accumulated product dicts
        self.max_products = max_products
        self.extract_image = extract_image
        self.extract_description = extract_description
        self.extract_price = extract_price
        self.save_format = save_format
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        }
        self.logger = logger

    def log(self, message):
        """Forward *message* to the optional logger callback and always echo to stdout."""
        if self.logger:
            self.logger(message)
        print(message)

    def get_filename_from_url(self):
        """Derive an output file stem from the last path segment of ``base_url``."""
        path = urlparse(self.base_url).path
        filename = path.strip('/').split('/')[-1]
        # Fall back to a generic stem for root URLs, which would otherwise
        # yield an empty name and produce files like ".json".
        return filename or "products"

    def fetch_page(self, page):
        """Fetch listing page *page* and return its parsed lxml tree, or None on HTTP failure."""
        url = f"{self.base_url}?page={page}"
        # timeout keeps the crawl from hanging indefinitely on a stalled server
        response = requests.get(url, headers=self.headers, verify=False, timeout=30)
        if response.status_code == 200:
            return html.fromstring(response.content)
        else:
            self.log(f"获取数据失败。状态码: {response.status_code}")
            return None

    def parse_boxes(self, tree):
        """Return the product <li> boxes of the listing page (tries two known class lists)."""
        boxes = tree.xpath("//ul[@class='S4WbK_ uQ5Uah c2Zj9x H1ux6p']//li[*]")
        if len(boxes) == 0:
            # Some pages render the list without the trailing "H1ux6p" class.
            boxes = tree.xpath("//ul[@class='S4WbK_ uQ5Uah c2Zj9x']//li[*]")
        return boxes

    def extract_product_data(self, box):
        """Extract the enabled fields (image/description/price) from one product box."""
        product = {}
        if self.extract_image:
            image = box.xpath(".//descendant::img[1]/@src")
            # Strip the blur suffix and request the larger 208x208 rendition.
            product["image"] = (image[0] if image else "未找到图片").replace(',blur_2', '').replace('w_147,h_147',
                                                                                                        'w_208,h_208')

        if self.extract_description:
            description = box.xpath(
                ".//h3[@class='syLHluk oK3k4Zk---typography-11-runningText oK3k4Zk---priority-7-primary syHtuvM FzO_a9']/text()")
            product["description"] = description[0] if description else "未找到描述"

        if self.extract_price:
            price = box.xpath(".//span[@class='cfpn1d']/text()")
            product["price"] = price[0] if price else "未找到价格"

        return product

    def save_products_to_json(self, filename):
        """Write all collected products to *filename* as pretty-printed UTF-8 JSON."""
        with open(filename, 'w', encoding='utf-8') as json_file:
            json.dump(self.all_products, json_file, ensure_ascii=False, indent=4)
        self.log(f"所有商品数据已保存到 {filename}")

    def save_products_to_csv(self, filename):
        """Write all collected products to *filename* as CSV with a header row."""
        if not self.all_products:
            # Avoid IndexError on all_products[0] when nothing was scraped.
            self.log("没有商品数据可保存")
            return
        keys = self.all_products[0].keys()
        with open(filename, 'w', newline='', encoding='utf-8') as csv_file:
            dict_writer = csv.DictWriter(csv_file, fieldnames=keys)
            dict_writer.writeheader()
            dict_writer.writerows(self.all_products)
        self.log(f"所有商品数据已保存到 {filename}")

    def save_products(self, filename):
        """Save the collected products using the configured format, appending the extension."""
        if self.save_format == 'json':
            self.save_products_to_json(f"{filename}.json")
        elif self.save_format == 'csv':
            self.save_products_to_csv(f"{filename}.csv")
        else:
            self.log(f"不支持的保存格式: {self.save_format}")

    def run(self):
        """Crawl pages until exhausted (or max_products reached), then save the results."""
        while True:
            tree = self.fetch_page(self.page)
            if tree is None:
                break

            boxes = self.parse_boxes(tree)
            self.log(f"第 {self.page} 页找到 {len(boxes)} 个商品")
            if not boxes:
                self.log("未找到更多商品，停止。")
                break

            for box in boxes:
                if self.max_products and len(self.all_products) >= self.max_products:
                    self.log(f"已达到商品数量限制 {self.max_products}，停止。")
                    filename = self.get_filename_from_url()
                    self.save_products(filename)
                    return

                product = self.extract_product_data(box)
                self.all_products.append(product)

            self.page += 1

        filename = self.get_filename_from_url()
        self.save_products(filename)


if __name__ == "__main__":
    # Crawl the listing and persist the results; adjust the limits and the
    # output format ('json' or 'csv') as needed.
    spider = ProductSpider(
        "https://www.blueoco.com/health-beauty/shower-caps",
        max_products=100,
        extract_image=True,
        extract_description=True,
        extract_price=True,
        save_format='json',
    )
    spider.run()
