import requests
from bs4 import BeautifulSoup
import pandas as pd
import os
import json
import http.client

# Make sure the required libraries are installed: requests, bs4, pandas, openpyxl, lxml
# You can install them with pip: pip install requests beautifulsoup4 pandas openpyxl lxml

# Basic settings for the Klevu search API endpoint.
BASE_URL = 'https://eucs31v2.ksearchnet.com/cs/v2/search'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Content-Type': 'text/plain'
}
# Paging parameter; adjust according to the actual data volume.
offset = 0
# Known record totals per category: 11000 (Mobile Phone Parts), 1448 (Other Device Parts)
categoryPath = ["Mobile Phone Parts", "Other Device Parts"]

# Lists used to accumulate scraped product data.
products_list = []
all_data = []

# 抓取商品数据的函数
# Function that scrapes product data, one page (100 records) at a time.
def scrape_data():
    """Fetch product records from the Klevu search API for each category.

    Pages through results 100 records at a time per category in
    ``categoryPath``, stopping at the known record total for that category
    or on the first failed request. Uses the module-level ``offset`` as the
    paging cursor and prints each page's records.
    """
    global offset
    # Known record totals per category (determined manually beforehand);
    # paging stops once the offset reaches this bound. Unknown categories
    # page until the server returns a non-200 status.
    max_offsets = {
        'Mobile Phone Parts': 11000,
        'Other Device Parts': 1448,
    }
    for category in categoryPath:
        # BUGFIX: always restart paging at 0 for each category. Previously
        # a failed request broke out of the loop WITHOUT resetting the
        # global offset, so the next category started at a stale offset.
        offset = 0
        while offset < max_offsets.get(category, float('inf')):
            # Request body per the Klevu v2 search API (CATNAV request).
            payload = {
                "context": {
                    "apiKeys": ["klevu-167059655826815803"]
                },
                "recordQueries": [{
                    "id": "productList",
                    "typeOfRequest": "CATNAV",
                    "settings": {
                        "query": {
                            "term": "*",
                            "categoryPath": category
                        },
                        "typeOfRecords": ["KLEVU_PRODUCT"],
                        "offset": str(offset),
                        "limit": "100",
                        "searchPrefs": ["searchCompoundsAsAndQuery"],
                        "priceFieldSuffix": "GBP-undefined"
                    }
                }]
            }
            # The endpoint expects a text/plain body (see ``headers``), so the
            # payload is serialized manually rather than via ``json=``.
            # A timeout prevents the script from hanging on a stalled server.
            response = requests.post(
                BASE_URL, headers=headers, data=json.dumps(payload), timeout=30
            )
            print(f"handle_records_data Fetched data for offset {offset}, categoryPath: {category}")
            print("*" * 40)

            if response.status_code != 200:
                # Request failed: report the status and move to the next category.
                print(f"Failed to fetch data: {response.status_code}")
                break
            # Parse the JSON response and print this page's records.
            response_data = response.json()
            pr = response_data['queryResults'][0]
            print(pr['records'])
            # Advance to the next page.
            offset += 100
        else:
            # Loop exhausted normally (category total reached).
            print(" ***** set offset=0")
        # Leave the cursor reset for the next category regardless of how
        # the inner loop ended.
        offset = 0


# 主函数
# Main entry point.
def main():
    """Entry point: checks whether a previously downloaded sample image
    exists; the scraping step itself is currently disabled."""
    sample_image = './result/images/RBSKU22068_small.jpg'
    if os.path.exists(sample_image):
        print('exists')
    # Scraping intentionally disabled for now.
    # scrape_data()


# Run the main function when executed as a script.
if __name__ == '__main__':
    main()
