import requests
from bs4 import BeautifulSoup
import pandas as pd
import os
import json
import http.client

# Required third-party libraries: requests, bs4, pandas, openpyxl, lxml
# Install with pip: pip install requests beautifulsoup4 pandas openpyxl lxml

# Basic settings: Klevu cloud search endpoint used for category navigation.
BASE_URL = 'https://eucs31v2.ksearchnet.com/cs/v2/search'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Content-Type': 'text/plain'
}
# Paging cursor for the search API; adjusted as pages are fetched.
offset = 0
# 11000 1448  <- observed end-of-data offsets for the two categories below
categoryPath = ["Mobile Phone Parts", "Other Device Parts"]

# Accumulator for scraped product records, flushed to Excel in batches.
products_list = []
all_data = []  # NOTE(review): appears unused in this file — confirm before removing
total_size = 0


# Download the listing thumbnail and full-size gallery images for one product.
def download_image(record):
    """Download the thumbnail and all full-size gallery images for *record*.

    Args:
        record: Mapping with at least the keys ``image`` (thumbnail URL; any
            ``needtochange/`` path segment is stripped), ``sku``, and ``url``
            (product page on www.replacebase.co.uk).

    Images are written under ``./result/images/`` as ``<sku>_small.<ext>``
    and ``<sku>_big_<i>.<ext>``; files already on disk are not re-fetched.
    Assumes ``./result/images/`` already exists — TODO confirm it is created
    elsewhere before this runs.
    """
    headers_img = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
    }
    image = record['image'].replace('needtochange/', '')
    sku = record['sku']
    # Keep the source extension in the saved filename. BUG FIX: the original
    # checked for '<sku>_small.webp' but always wrote '<sku>_small.jpg', so
    # .webp thumbnails were re-downloaded on every run.
    extension = '.webp' if '.webp' in image else '.jpg'
    small_img_name = f'{sku}_small{extension}'
    if os.path.exists(f'./result/images/{small_img_name}'):
        # Thumbnail already saved; assume the gallery images were fetched too
        # (same early-return shortcut as the original).
        return
    img_data_small = requests.get(image, headers=headers_img, timeout=30).content
    with open(f'./result/images/{small_img_name}', 'wb') as handler:
        handler.write(img_data_small)

    url = record['url']
    conn = http.client.HTTPSConnection("www.replacebase.co.uk")
    payload = ''
    headers_detail = {
        'User-Agent': 'Apifox/1.0.0 (https://apifox.com)',
        'Accept': '*/*',
        'Host': 'www.replacebase.co.uk',
        'Connection': 'keep-alive'
    }
    try:
        conn.request("GET", url.replace('https://www.replacebase.co.uk', ''), payload, headers_detail)
        res = conn.getresponse()
        data = res.read()

        if res.getcode() == 200:
            soup = BeautifulSoup(data.decode("utf-8"), 'html.parser')
            # The Magento product page embeds its gallery configuration as
            # JSON in a <script> tag keyed by the gallery placeholder selector.
            for script in soup.find_all('script'):
                if "data-gallery-role=gallery-placeholder" not in str(script):
                    continue
                try:
                    data_body = json.loads(script.text.strip())["[data-gallery-role=gallery-placeholder]"]
                except json.JSONDecodeError as e:
                    print(f"Error decoding JSON: {e}")
                    continue
                if "mage/gallery/gallery" not in data_body:
                    continue
                images = data_body["mage/gallery/gallery"]["data"]
                for index, img in enumerate(images):
                    image_url = img["full"]
                    big_extension = '.webp' if '.webp' in image_url else '.jpg'
                    img_name = f'{sku}_big_{index}{big_extension}'
                    # BUG FIX: skip just this image if it exists; the original
                    # `return`ed, abandoning the rest of the gallery.
                    if os.path.exists(f'./result/images/{img_name}'):
                        continue
                    img_data = requests.get(image_url, headers=headers_img, timeout=30).content
                    with open(f'./result/images/{img_name}', 'wb') as handler:
                        handler.write(img_data)
    finally:
        # BUG FIX: the connection was never closed, leaking a socket per call.
        conn.close()


def handle_records_data(records):
    """Count a page of scraped records and flush the accumulator to Excel.

    Args:
        records: List of product-record dicts from one API page.

    Side effects: advances the module-level ``total_size`` counter and, every
    2000 records, writes ``products_<idx>.xlsx`` from ``products_list`` and
    clears it.

    NOTE(review): appending to ``products_list`` (and downloading images) is
    commented out below, so the flushed workbooks will be empty — confirm
    whether this run is intentionally count-only.
    """
    # for record in records:
    #     products_list.append(record)
    #     download_image(record)
    global total_size
    total_size += len(records)
    print(f'total_size:{total_size}')
    if total_size % 2000 == 0:
        # BUG FIX: use integer division — ``/`` yields a float in Python 3,
        # producing filenames like 'products_1.0.xlsx' instead of 'products_1.xlsx'.
        idx = total_size // 2000
        print(f'idx:{idx}')
        df = pd.DataFrame(products_list)
        df.to_excel(f'products_{idx}.xlsx', index=False)
        products_list.clear()


# Function that pages through the product data for every configured category.
def scrape_data():
    """Page through the Klevu CATNAV search API for each category in ``categoryPath``.

    Fetches 100 records per POST request, feeding each page to
    ``handle_records_data``. Paging stops at a per-category hard limit
    (end-of-data offsets observed in prior runs) or on a non-200 response.

    Side effects: mutates the module-level ``offset`` cursor.
    """
    global offset
    # Known end-of-data offsets per category (see the '11000 1448' note at the
    # top of the file); paging stops and the cursor resets at this bound.
    limits = {'Mobile Phone Parts': 11000, 'Other Device Parts': 1448}
    for category in categoryPath:
        # Keep POSTing until the category's limit is reached or a request fails.
        while True:
            if offset >= limits.get(category, float('inf')):
                # No more data for this category; reset the cursor for the next one.
                offset = 0
                print(" ***** set offset=0")
                break

            # Request body per the Klevu v2 search API.
            payload = {
                "context": {
                    "apiKeys": ["klevu-167059655826815803"]
                },
                "recordQueries": [{
                    "id": "productList",
                    "typeOfRequest": "CATNAV",
                    "settings": {
                        "query": {
                            "term": "*",
                            "categoryPath": category
                        },
                        "typeOfRecords": ["KLEVU_PRODUCT"],
                        "offset": str(offset),
                        "limit": "100",
                        "searchPrefs": ["searchCompoundsAsAndQuery"],
                        "priceFieldSuffix": "GBP-undefined"
                    }
                }]
            }
            # Send the POST request. The API expects a text/plain body (set in
            # ``headers``), so the payload is serialized manually rather than
            # via requests' ``json=``. FIX: added a timeout so a stalled
            # connection cannot hang the scrape indefinitely.
            response = requests.post(BASE_URL, headers=headers, data=json.dumps(payload), timeout=30)
            print(f"handle_records_data Fetched data for offset {offset}, categoryPath: {category}")
            print("*" * 40)

            if response.status_code != 200:
                # Request failed: report and abandon this category.
                print(f"Failed to fetch data: {response.status_code}")
                break

            response_data = response.json()
            productlist = response_data['queryResults']
            if not productlist:
                # FIX: guard against an empty result set instead of raising
                # IndexError on productlist[0].
                offset = 0
                print(" ***** set offset=0")
                break
            handle_records_data(productlist[0]['records'])
            # Advance to the next page.
            offset += 100


# Main entry point.
def main():
    """Entry point: run the full scrape over all configured categories."""
    # Start scraping the data.
    scrape_data()


# Run the main function when executed as a script.
if __name__ == '__main__':
    main()
