import threading

import requests
from bs4 import BeautifulSoup
import pandas as pd
import os
import json
import http.client

# Required third-party libraries: requests, bs4, pandas, openpyxl, lxml
# Install via pip: pip install requests beautifulsoup4 pandas openpyxl lxml

# Basic settings
BASE_URL = 'https://eucs31v2.ksearchnet.com/cs/v2/search'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Content-Type': 'text/plain'
}
# Paging cursor (records per page is 100); adjust as needed
offset = 0
# Known category totals used as stop limits in scrape_data():
# Mobile Phone Parts 11000, Other Device Parts 1448
categoryPath = ["Mobile Phone Parts", "Other Device Parts"]
# Other category totals, for reference:
# Tablet Parts 884
# Battery 1476
# Tools & Equipment 2462
# Games Console Parts 471

# Accumulators shared across functions. NOTE(review): these are plain
# globals with no locking — the threaded path in main() avoids them by
# using handle_data(), which keeps its own local list.
products_list = []
all_data = []
total_size = 0


# Download one product's thumbnail and full-size gallery images.
def download_image(record):
    """Download a product's small (listing) image plus all full-size
    gallery images into ./result/images/.

    record: dict from the Klevu search API; must contain 'image', 'sku'
    and 'url' keys. Files already present on disk are skipped. All
    errors are logged and swallowed so one bad product cannot stop a
    scrape run.
    """
    try:
        headers_img = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
        }
        image = record['image'].replace('needtochange/', '')
        sku = record['sku']
        # Keep the source extension so the existence check matches the
        # file we actually write. (Original bug: the check used the
        # extension-aware name but the write hard-coded "_small.jpg",
        # so .webp thumbnails were re-downloaded on every run and saved
        # with the wrong extension.)
        small_img_name = sku + '_small' + ('.webp' if '.webp' in image else '.jpg')
        if os.path.exists(f'./result/images/{small_img_name}'):
            return
        img_data_small = requests.get(image, headers=headers_img, timeout=30).content
        with open(f'./result/images/{small_img_name}', 'wb') as handler:
            handler.write(img_data_small)

        url = record['url']
        conn = http.client.HTTPSConnection("www.replacebase.co.uk")
        try:
            headers_detail = {
                'User-Agent': 'Apifox/1.0.0 (https://apifox.com)',
                'Accept': '*/*',
                'Host': 'www.replacebase.co.uk',
                'Connection': 'keep-alive'
            }
            conn.request("GET", url.replace('https://www.replacebase.co.uk', ''), '', headers_detail)
            res = conn.getresponse()
            data = res.read()

            if res.status != 200:
                return

            # The gallery image URLs live in a JSON blob inside a
            # <script> tag keyed by the gallery placeholder selector
            # (Magento gallery initializer).
            soup = BeautifulSoup(data.decode("utf-8"), 'html.parser')
            for script in soup.find_all('script'):
                if "data-gallery-role=gallery-placeholder" not in str(script):
                    continue
                try:
                    data_body = json.loads(script.text.strip())["[data-gallery-role=gallery-placeholder]"]
                    if "mage/gallery/gallery" in data_body:
                        images = data_body["mage/gallery/gallery"]["data"]
                        for index, img in enumerate(images):
                            image_url = img["full"]
                            ext = '.webp' if '.webp' in image_url else '.jpg'
                            img_name = f'{sku}_big_{index}{ext}'
                            # Skip only this image when cached; the
                            # original `return` here aborted the whole
                            # remaining gallery.
                            if os.path.exists(f'./result/images/{img_name}'):
                                continue
                            img_data = requests.get(image_url, headers=headers_img, timeout=30).content
                            with open(f'./result/images/{img_name}', 'wb') as handler:
                                handler.write(img_data)
                except json.JSONDecodeError as e:
                    print(f"Error decoding JSON: {e}")
                except Exception as e:
                    s = record['sku']
                    print(f"handle big image a error occurred,sku:{s},e: {e}")
        finally:
            # Original leaked the connection; close it on every path.
            conn.close()
    except Exception as e:
        s = record['sku']
        print(f"download_image a error occurred,sku:{s},e: {e}")


def handle_records_data(records):
    """Append records to the global list, download their images, and
    flush the accumulated rows to a numbered Excel file every 2000
    records.

    Note: the flush fires only when the running total lands exactly on
    a multiple of 2000 (pages arrive in batches of 100, so this holds
    in practice).
    """
    for record in records:
        products_list.append(record)
        download_image(record)
    # Write the accumulated data to an Excel file once per 2000 records.
    global total_size
    total_size += len(records)
    print(f'total_size:{total_size}')
    if total_size % 2000 == 0:
        # // keeps the index an int; the original `/` produced floats
        # and filenames like "products_1.0.xlsx".
        idx = total_size // 2000
        print(f'idx:{idx}')
        df = pd.DataFrame(products_list)
        df.to_excel(f'products_{idx}.xlsx', index=False)
        products_list.clear()


def handle_records_data2(records):
    """Image-only variant of handle_records_data: fetch pictures for
    each record without touching the global accumulators."""
    for item in records:
        download_image(item)


# Fetch product records for every configured category, page by page.
def scrape_data():
    """Walk each category in categoryPath in 100-record pages, feeding
    each page to handle_records_data().

    Uses the module-level `offset` as the paging cursor and resets it
    between categories. A category ends when its known total count is
    reached, when the API returns an empty page, or on an HTTP error.
    """
    global offset
    for category in categoryPath:
        # Keep POSTing until this category is exhausted.
        while True:
            start = str(offset)
            # Hard-coded per-category totals (see module comments).
            if category == 'Mobile Phone Parts' and offset >= 11000:
                offset = 0  # reset the cursor for the next category
                print(" ***** set offset=0")
                break
            if category == 'Other Device Parts' and offset >= 1448:
                offset = 0
                print(" ***** set offset=0")
                break

            # Klevu CATNAV search payload, sent as text/plain JSON.
            payload = {
                "context": {
                    "apiKeys": ["klevu-167059655826815803"]
                },
                "recordQueries": [{
                    "id": "productList",
                    "typeOfRequest": "CATNAV",
                    "settings": {
                        "query": {
                            "term": "*",
                            "categoryPath": category
                        },
                        "typeOfRecords": ["KLEVU_PRODUCT"],
                        "offset": start,
                        "limit": "100",
                        "searchPrefs": ["searchCompoundsAsAndQuery"],
                        "priceFieldSuffix": "GBP-undefined"
                    }
                }]
            }
            response = requests.post(BASE_URL, headers=headers, data=json.dumps(payload))
            print(f"handle_records_data Fetched data for offset {offset}, categoryPath: {category}")
            print("*" * 40)

            if response.status_code == 200:
                response_data = response.json()
                records = response_data['queryResults'][0]['records']
                # Guard against an empty page, consistent with
                # handle_data(). The original relied only on the
                # hard-coded caps and would loop forever on a category
                # whose real count is smaller (or not listed above).
                if len(records) == 0:
                    offset = 0
                    print(" ***** set offset=0")
                    break
                handle_records_data(records)
                # Advance to the next page.
                offset += 100
            else:
                # Request failed; log and give up on this category.
                print(f"Failed to fetch data: {response.status_code}")
                break


def handle_data(idx, startIndex, endIndex, category):
    """Thread worker: page through offsets [startIndex, endIndex) for
    one category, download every record's images, and dump all fetched
    rows to products_<idx>.xlsx when done."""
    print(f"===>>开启线程 for startIndex {startIndex}, categoryPath: {category}")
    collected = []
    while True:
        if startIndex >= endIndex:
            print(f" ***** break while --> startIndex={startIndex}")
            break
        # Same Klevu CATNAV payload as scrape_data(), with an explicit
        # start cursor instead of the module-level offset.
        body = {
            "context": {
                "apiKeys": ["klevu-167059655826815803"]
            },
            "recordQueries": [{
                "id": "productList",
                "typeOfRequest": "CATNAV",
                "settings": {
                    "query": {
                        "term": "*",
                        "categoryPath": category
                    },
                    "typeOfRecords": ["KLEVU_PRODUCT"],
                    "offset": str(startIndex),
                    "limit": "100",
                    "searchPrefs": ["searchCompoundsAsAndQuery"],
                    "priceFieldSuffix": "GBP-undefined"
                }
            }]
        }
        resp = requests.post(BASE_URL, headers=headers, data=json.dumps(body))
        print(f"get response for startIndex {startIndex}, categoryPath: {category}")

        # Guard clause: bail out on any non-200 response.
        if resp.status_code != 200:
            print(f"Failed to fetch data: {resp.status_code}")
            break
        page = resp.json()['queryResults'][0]['records']
        if len(page) == 0:
            print("records = []")
            break
        handle_records_data2(page)
        collected.extend(page)
        # Advance to the next 100-record page.
        startIndex += 100
    if len(collected) > 0:
        pd.DataFrame(collected).to_excel(f'products_{idx}.xlsx', index=False)
    print(f"===>>结束线程 for startIndex {startIndex}, categoryPath: {category}")


# Entry point: prepare the output directory, spawn workers, wait.
def main():
    """Spawn one handle_data() worker thread per configured task, wait
    for all of them, then flush any leftover globally-collected rows
    (filled only by the non-threaded scrape_data() path)."""
    # Directory for downloaded images.
    os.makedirs('./result/images', exist_ok=True)

    # Each task: (file index, start offset, end offset, category path).
    # Previously-run ranges are kept commented for reference; re-enable
    # entries as needed. (Replaces the original copy-pasted commented-out
    # thread-creation blocks.)
    tasks = [
        # ('3', 4000, 6000, 'Mobile Phone Parts'),
        # ('4', 6000, 8000, 'Mobile Phone Parts'),
        # ('5', 8000, 10000, 'Mobile Phone Parts'),
        ('6', 10000, 11000, 'Mobile Phone Parts'),
        # ('7', 0, 1448, 'Other Device Parts'),
    ]

    threads = []
    for task in tasks:
        thread = threading.Thread(target=handle_data, args=task)
        threads.append(thread)
        thread.start()

    # Wait for all worker threads to finish.
    for td in threads:
        td.join()

    print("**线程全部执行完毕**")

    if len(products_list) > 0:
        print("products_list 有数据？")
        df = pd.DataFrame(products_list)
        df.to_excel('products_last.xlsx', index=False)


# Run the main function
if __name__ == '__main__':
    main()
