#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright: (c)  : @Time 2025/3/28 21  @Author  : hjl
# @Site    : 
# @File    : main.py
# @Project: alibaba_spider
# @Software: PyCharm
# @Desc    :
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)

__metaclass__ = type

import re
from datetime import datetime
import requests
from lxml import etree
import os
import time
import vault as c
from log import DIYLog
import xlwt
import random
from concurrent.futures import ThreadPoolExecutor, as_completed

# Shared accumulator for scraped rows; filled by the parser classes
# (Xapth_select.get_data) and consumed by save_excel/main.
data_list = []

# Project-local logger (see log.py).
# NOTE(review): log.error is called both as error(msg, exc) and error(msg)
# below — confirm DIYLog accepts both argument shapes.
log = DIYLog()
# Pool of desktop-browser User-Agent strings; one is picked at random per run
# so successive runs don't all present the same UA.
ua_list = [
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; InfoPath.3)",
    # BUG FIX: a missing comma after the next entry previously concatenated it
    # with the following string into one invalid User-Agent value.
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
]

# Default request headers.
# BUG FIX: referer / sec-fetch-mode / sec-fetch-site previously carried a
# stray leading "z" ("zhttps://...", "zcors", "zsame-origin"), which are not
# valid values for those headers; the prefixes are removed.
headers = {
    "origin": "https://ilogistics.aliexpress.com",
    "referer": "https://ilogistics.aliexpress.com/recommendation_engine_public.htm",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
    "accept": "application/json, text/javascript, */*; q=0.01",
    "accept-encoding": "gzip, deflate, br",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
    "User-Agent": random.choice(ua_list),
}


class Spider(object):
    """
    Base crawler: holds the target URL/headers/proxies and fetches raw HTML.

    :param url: default URL used when ``get_html`` is called without one
    :param headers: HTTP headers sent with every request
    :param proxy_pools: optional ``proxies`` mapping forwarded to requests
    :param timeout: per-request timeout in seconds (new, defaulted, so
        existing 2/3-argument calls keep working)
    """

    def __init__(self, url, headers, proxy_pools=None, timeout=10):
        self.url = url
        self.headers = headers
        self.proxy_pools = proxy_pools
        # BUG FIX: requests.get without a timeout can block a worker thread
        # forever on a stalled connection.
        self.timeout = timeout
        self.min_delay = 1  # minimum delay between requests (seconds)
        self.max_delay = 3  # maximum delay between requests (seconds)

    def random_delay(self):
        """Sleep a random min_delay..max_delay interval to throttle requests."""
        time.sleep(random.uniform(self.min_delay, self.max_delay))

    def __str__(self):
        return "爬虫基类"

    def get_html(self, url=None):
        """Fetch *url* (default ``self.url``); return response text, or None on error."""
        try:
            self.random_delay()  # throttle before every request
            if not url:
                url = self.url
            response = requests.get(
                url,
                headers=self.headers,
                proxies=self.proxy_pools,
                timeout=self.timeout,
            )
            return response.text
        except Exception as e:
            # Broad catch is deliberate: any network failure is logged and
            # reported to the caller as None rather than crashing the thread.
            log.error("请求错误", e)
            return None


class Xapth_select(Spider):
    """
    XPath-based parser: extracts shop/product fields from a listing page and
    appends one row per product card to the module-level ``data_list``.
    """

    def __init__(self, url, headers, proxy_pools, signals=None):
        super(Xapth_select, self).__init__(url, headers, proxy_pools)
        self.signals = signals  # optional signal/callback object (e.g. for a GUI)

    def get_data(self):
        """Fetch the page and parse every product card into ``data_list``."""
        html = self.get_html()
        if not html:
            return
        parser = etree.HTMLParser(encoding="utf-8")
        selector = etree.HTML(html, parser=parser)
        url_infos = selector.xpath('//*[@id="root"]/div[2]/div[3]/div[2]/div/div')
        if len(url_infos) > 0:
            # Category list is page-level, not per-card — hoisted out of the
            # loop (the original re-ran this identical XPath for every card).
            product_category = selector.xpath(
                '//*[@id="root"]/div[2]/div[2]/div/div/div[3]/div/div[2]/div//div/a/div/span/text()')
            item_count = len(data_list)  # continue numbering from data already collected
            for url_info in url_infos:
                item_count += 1
                try:
                    shop_company = url_info.xpath('./div[2]/div[1]/a[4]/text()')[0].strip()
                    shop_url = url_info.xpath('./div[2]/div[1]/a[4]/@href')[0]
                    if not shop_url.startswith("https"):
                        shop_url = "https:" + shop_url  # protocol-relative link -> absolute
                    product_link = url_info.xpath('./div[1]/div[1]/div/a/@href')[0]
                    product_title = url_info.xpath('./div[2]/div[1]/h2/a/span/text()')[0].strip()
                    # Some cards have no visible price block; use a placeholder.
                    # BUG FIX: the original used a bare ``except:``, which also
                    # swallows KeyboardInterrupt/SystemExit.
                    try:
                        product_price = url_info.xpath('./div[2]/a[2]/div/div/text()')[0]
                    except IndexError:
                        product_price = "暂无"
                    minimum_order_quantity = url_info.xpath('./div[2]/div[1]/a[3]/div/div/text()')[0].strip()
                    # Shop level = number of diamond icons rendered for the shop.
                    shop_level = str(len(url_info.xpath('./div[2]/div[1]/div/div[1]/div/a/svg'))) + "钻石"

                    data = [shop_company, shop_url, product_link,
                            product_title, product_category, product_price,
                            minimum_order_quantity, shop_level]
                    # Append to the shared result buffer.
                    data_list.append(data)
                except Exception as e:
                    # A malformed card is skipped; the rest of the page is
                    # still processed.
                    log.error(f"解析第{item_count}个商品时出错: {e}")
                    continue


class Re_select(Spider):
    """
    Parser variant based on regular expressions (not implemented yet).
    """

    def __init__(self, url, headers, proxy_pools):
        # All state handling is delegated to the Spider base class.
        super(Re_select, self).__init__(url, headers, proxy_pools)

    def get_data(self):
        # Placeholder: regex extraction logic still to be written.
        return None


class Beautifulsoup_select(Spider):
    """
    Parser variant based on BeautifulSoup (not implemented yet).
    """

    def __init__(self, url, headers, proxy_pools):
        # All state handling is delegated to the Spider base class.
        super(Beautifulsoup_select, self).__init__(url, headers, proxy_pools)

    def get_data(self):
        # Placeholder: BeautifulSoup extraction logic still to be written.
        return None


def save_excel(data, base_filename="spider_data"):
    """
    Save scraped rows to a timestamped .xls file under ./data using xlwt.

    :param data: list of rows; each row is a list of cell values matching the
                 header order below
    :param base_filename: prefix for the generated file name,
                 default "spider_data"
    """
    # Ensure the output folder exists.
    data_folder = os.path.join(os.getcwd(), "data")
    if not os.path.exists(data_folder):
        os.makedirs(data_folder)

    # Timestamped file name avoids clobbering earlier exports.
    current_time = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    filename = f"{base_filename}_{current_time}.xls"
    file_path = os.path.join(data_folder, filename)

    header = ["店铺公司", "店铺链接", "产品链接", "产品标题", "产品类目", "产品价格", "最低起订量", "店铺等级"]
    workbook = xlwt.Workbook(encoding='utf-8')
    worksheet = workbook.add_sheet('商品信息')

    # Header row: bold + centered cells, widened columns, fixed row height.
    # BUG FIX: the original wrote every header cell twice (write + write_merge)
    # and reset the row-0 height once per column; one styled write is enough.
    header_style = xlwt.easyxf('font: bold on; align: horizontal center')
    worksheet.row(0).height_mismatch = True  # allow an explicit row height
    worksheet.row(0).height = 20 * 40
    for col, title in enumerate(header):
        worksheet.col(col).width = 256 * 40  # set column width
        worksheet.write(0, col, title, header_style)

    # BUG FIX: the original iterated the global ``data_list`` (also shadowing
    # the ``list`` builtin) and ignored the ``data`` argument entirely.
    for row_idx, row in enumerate(data, start=1):
        for col_idx, value in enumerate(row):
            worksheet.write(row_idx, col_idx, value)

    try:
        workbook.save(file_path)
        print(f"数据已成功保存到 {file_path}。")
    except Exception as e:
        print(f"保存文件时出错: {e}")


class Spider_Factory(object):
    """
    Simple factory: maps a spider-type keyword to a parser class and runs it.
    """

    def __init__(self, spider_type, url, headers, proxy_pools=None):
        self.spider_type = spider_type
        self.url = url
        self.headers = headers
        self.proxy_pools = proxy_pools

    def start_spider(self):
        """Instantiate the parser matching ``spider_type`` and run its get_data()."""
        # Keyword -> parser class dispatch table (built lazily so the class
        # definition does not depend on the parser classes existing yet).
        spider_classes = {
            "xpath": Xapth_select,
            "re": Re_select,
            "bs": Beautifulsoup_select,
        }
        spider_cls = spider_classes.get(self.spider_type)
        if spider_cls is None:
            log.error("请输入正确的爬虫类型")
            return
        spider_cls(self.url, self.headers, self.proxy_pools).get_data()


def main():
    """
    Crawl every configured listing page concurrently.

    Reads all settings from the ``vault`` config module (imported as ``c``),
    fans one URL per result page out over a small thread pool, and returns
    the shared ``data_list`` of scraped rows.
    """
    global data_list
    data_list = []  # start each run with an empty result buffer
    header_url = c.header_url
    page = c.page  # number of result pages to crawl (from config)
    spider_type = c.spider_type
    proxy_pools = c.proxy_pools
    product_keywords = c.product_keywords
    url_base = header_url + "&keywords={0}&originKeywords=Computer&searchScene=countrySiteCN&tab=all&&page={1}&spm=a2700.search_countrysitecn.pagination.0"
    # One URL per result page, pages numbered from 1.
    urls = [url_base.format(product_keywords, str(page_no)) for page_no in range(1, page + 1)]

    # A small pool keeps concurrency low, reducing the chance of triggering
    # anti-bot throttling.
    with ThreadPoolExecutor(max_workers=3) as pool:
        pending = []
        for page_url in urls:
            log.info(f"【spider_type】: {spider_type} 【url】： {page_url}")
            factory = Spider_Factory(spider_type, page_url, headers, proxy_pools)
            pending.append(pool.submit(factory.start_spider))

        # Surface (but don't re-raise) any per-page failure.
        for done in as_completed(pending):
            try:
                done.result()
            except Exception as exc:
                log.error(f"爬取过程中出错: {exc}")

    return data_list  # rows collected by the parser threads

    # save_excel(all_info_list)


if __name__ == "__main__":
    # Run the crawler only when executed as a script, not on import.
    main()
