import os
import openpyxl
import requests
from bs4 import BeautifulSoup
import json
import re
from utils import parse_cookie_string


# Persist one page of product data (API response shape) into an Excel workbook.
def saveData(file_path, data):
    """Append the products contained in *data* to the workbook at *file_path*.

    Parameters
    ----------
    file_path : str
        Path of the .xlsx workbook. Created with a header row on first use,
        re-opened and appended to on later calls.
    data : dict
        API payload; the product list is read from data['data']['offerList'].
    """
    if os.path.exists(file_path):
        # Re-open the existing workbook and append to its active sheet.
        workbook = openpyxl.load_workbook(file_path)
        sheet = workbook.active
    else:
        # First run: create the workbook and write the header row.
        workbook = openpyxl.Workbook()
        sheet = workbook.active
        columns = ["商品简介", "商品回购率", "商品名称", '综合服务评分', '商品价格', "收藏量", "近30日销售总量", '销售总量', '图片地址']
        sheet.append(columns)

    # Iterate the offers directly instead of indexing by range(len(...)).
    for offer in data['data']['offerList']:
        info = offer['information']        # bulk of the product details
        trade = offer['tradeQuantity']     # trading statistics
        sheet.append([
            info['brief'],                 # short description
            info['rePurchaseRate'],        # repurchase rate
            info['simpleSubject'],         # product name
            info['svrRpsRate'],            # overall service score
            offer['tradePrice']['offerPrice']['priceInfo']['price'],  # price
            trade['bookedCount'],          # favourites count
            trade['quantitySumMonth'],     # sales over the last 30 days
            trade['saleQuantity'],         # total sales
            offer['image']['imgUrl'],      # product image URL
        ])
    # BUG FIX: previously saved to the hard-coded name "商品信息.xlsx",
    # silently ignoring the *file_path* argument passed by every caller.
    workbook.save(file_path)


def parseFirstPage(page_url, cookies, headers, file_path):
    """Fetch a search-result page, save its first 20 products, and return
    the identifiers needed to query the async endpoint for the rest.

    Parameters
    ----------
    page_url : str
        URL of the 1688 search-result page.
    cookies : dict
        Cookie jar forwarded to ``requests.get``.
    headers : dict
        HTTP headers forwarded to ``requests.get``.
    file_path : str
        Workbook path handed through to ``saveData``.

    Returns
    -------
    tuple[str | None, str]
        ``(request_id, session_id)``; ``request_id`` may be ``None`` and
        ``session_id`` is ``''`` when they cannot be extracted.
    """
    res = requests.get(page_url, cookies=cookies, headers=headers)
    # RequestID is delivered in the response headers (None when absent).
    request_id = res.headers.get('RequestID')
    # BUG FIX: session_id was only assigned inside the branch below, so the
    # final return raised UnboundLocalError whenever the <script> tag holding
    # window.data.pageMessage was not found. Default it up front.
    session_id = ''
    # Parse the HTML and locate the script that embeds the page data.
    soup = BeautifulSoup(res.content, 'html.parser')
    page_message_script = soup.find('script', string=lambda x: x and 'window.data.pageMessage' in x)
    if page_message_script:
        script_content = page_message_script.text
        # Extract the hex sessionId value.
        match = re.search(r'"sessionId":\s*"([a-fA-F0-9]+)"', script_content)
        if match:
            session_id = match.group(1)
        # Extract the JSON argument of successDataCheck(...), which carries the
        # first 20 offers. (Dots are now escaped; the original pattern's bare
        # dots matched any character.)
        pattern_data = r'window\.data\.offerresultData = successDataCheck\((.*?)\);'
        match = re.search(pattern_data, script_content)
        if match:
            # Decode the embedded JSON and append the offers to the workbook.
            data = json.loads(match.group(1))
            saveData(file_path, data)
        else:
            print("未找到括号内的内容")

    return request_id, session_id


# Handle the remaining 40 products of a page via the async JSON endpoint.
def parseNextPage(page_url, cookies, headers, file_path):
    """Fetch one async batch of offers and append it to the workbook.

    The endpoint returns JSON whose top-level 'data' key wraps the payload
    that ``saveData`` expects.
    """
    response = requests.get(page_url, cookies=cookies, headers=headers)
    payload = json.loads(response.text)
    saveData(file_path=file_path, data=payload['data'])


if __name__ == '__main__':
    # Output workbook path, handed to every save call.
    file_path = '../商品信息.xlsx'
    headers = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Origin': 'https://s.1688.com',
        'Referer': 'https://s.1688.com/',
        'Sec-Ch-Ua': '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
        'Sec-Ch-Ua-Mobile': '?0',
        'Sec-Ch-Ua-Platform': '"Windows"',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'none',
        'Sec-Fetch-User': '?1',
        # BUG FIX: the header name was 'Upgrade - Insecure - Requests'
        # (spaces around the hyphens), which is not a valid HTTP header.
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
    }
    original_cookies = ''  # fill in your own cookies here
    cookies_dic = parse_cookie_string(original_cookies)
    # Crawl result pages 51 through 99 (the original comment claimed
    # "50 pages"; range(51, 100) actually covers 49 pages).
    for i in range(51, 100):
        url = 'https://s.1688.com/selloffer/offer_search.htm?keywords=%B2%DD%C3%B1&spm=a26352.13672862.searchhover.0&beginPage={}'.format(i)
        # First 20 products come embedded in the HTML page itself.
        request_id, session_id = parseFirstPage(page_url=url, cookies=cookies_dic, headers=headers, file_path=file_path)
        # The remaining 40 products are served by the async endpoint in two
        # batches of 20, addressed by startIndex=20 and startIndex=40.
        next_20_url = 'https://search.1688.com/service/marketOfferResultViewService?keywords=%B2%DD%C3%B1&spm=a26352.13672862.searchhover.0' \
                      '&beginPage={}&async=true&asyncCount=20&pageSize=60&requestId={}' \
                      '&startIndex=20&pageName=major&sessionId={}'.format(i, request_id, session_id)
        next_40_url = 'https://search.1688.com/service/marketOfferResultViewService?keywords=%B2%DD%C3%B1&spm=a26352.13672862.searchhover.0' \
                      '&beginPage={}&async=true&asyncCount=20&pageSize=60&requestId={}' \
                      '&startIndex=40&pageName=major&sessionId={}'.format(i, request_id, session_id)
        parseNextPage(next_20_url, cookies_dic, headers, file_path)
        parseNextPage(next_40_url, cookies_dic, headers, file_path)
        print("已完成第{}页的数据爬取".format(i))
