import json
import csv
import time

import requests
import execjs
import base64
from Crypto.Cipher import DES
from Crypto.Util.Padding import unpad


def read_js_file(file_path):
    """Return the full contents of a UTF-8 encoded JavaScript file as a string."""
    with open(file_path, encoding='utf-8') as handle:
        return handle.read()


def compile_js(js_code):
    """Compile JavaScript source into an execjs context whose functions can be called."""
    context = execjs.compile(js_code)
    return context


def get_url(ctx, page_num):
    """Ask the compiled JS context to build the request URL for *page_num*.

    'get_type__1017' is the site's JavaScript function that produces the
    (signed) URL for a given page number.
    """
    page_url = ctx.call('get_type__1017', page_num)
    return page_url


def make_request(url, headers, cookies):
    """Perform an HTTP GET and return the response body as text.

    Raises requests.HTTPError for non-2xx responses so callers never try
    to decrypt an error page.
    """
    resp = requests.get(url, headers=headers, cookies=cookies)
    resp.raise_for_status()
    return resp.text


def decrypt_by_des(ciphertext):
    """Decrypt a base64-encoded DES/ECB ciphertext and return the UTF-8 plaintext.

    NOTE(review): DES in ECB mode is cryptographically weak, but the key and
    mode must mirror what the target site's JavaScript uses — do not "fix".
    """
    raw_bytes = base64.b64decode(ciphertext)
    # DES keys are exactly 8 bytes; truncate the shared secret accordingly.
    des_key = "1qaz@wsx3e".encode('utf-8')[:8]
    decryptor = DES.new(des_key, DES.MODE_ECB)
    padded = decryptor.decrypt(raw_bytes)
    return unpad(padded, DES.block_size).decode('utf-8')


def decrypt_data(ciphertext):
    """Thin wrapper kept for call-site readability; delegates to DES decryption."""
    plaintext = decrypt_by_des(ciphertext)
    return plaintext


def fetch_and_decrypt_data(js_file_path, page_num, headers, cookies):
    """Fetch one page's encrypted payload and return the decrypted text.

    Note: the JS file is re-read and re-compiled on every call, so callers
    looping over many pages pay that cost once per page.
    """
    context = compile_js(read_js_file(js_file_path))
    page_url = get_url(context, page_num)
    encrypted_payload = make_request(page_url, headers, cookies)
    return decrypt_data(encrypted_payload)


def save_to_csv(data_list, csv_file_path):
    """Write a list of record dicts to a CSV file.

    The columns are the union of keys across ALL rows (in first-seen order),
    not just the first row's keys — previously a row with an extra key made
    DictWriter raise ValueError. Rows missing a column are written with an
    empty string for that cell.

    Args:
        data_list: list of dicts; an empty list is reported and skipped.
        csv_file_path: destination path; the file is overwritten.
    """
    if not data_list:
        print("数据列表为空，未保存到CSV文件。")
        return

    # Union of all keys across rows, preserving first-appearance order.
    fieldnames = list(dict.fromkeys(k for row in data_list for k in row))

    with open(csv_file_path, mode='w', newline='', encoding='utf-8') as file:
        writer = csv.DictWriter(file, fieldnames=fieldnames, restval='')
        writer.writeheader()
        writer.writerows(data_list)

    print(f"数据已成功保存到 {csv_file_path}")


def main():
    """Entry point: fetch, decrypt, and aggregate 10 pages, then save to CSV.

    Fixes over the original: the name `data` was reused for three different
    things (decrypted text, parsed JSON dict, and each record) inside the
    loop; each now has its own name, and records are appended via extend.
    """
    js_file_path = 'qgzbgg加密.js'
    headers = {
        'Accept': 'application/json, text/plain, */*',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Referer': 'https://ctbpsp.com/',
        'Sec-Fetch-Dest': 'empty',
        'Sec-Fetch-Mode': 'cors',
        'Sec-Fetch-Site': 'same-origin',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.6261.95 Safari/537.36',
        'sec-ch-ua': '"Chromium";v="122", "Not(A:Brand";v="24", "Google Chrome";v="122"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
    }
    # NOTE(review): session cookies are hard-coded and will expire; refresh
    # them from a live browser session when requests start failing.
    cookies = {
        '_c_WBKFRo': 'sEEUbSMzZfAfE0WsrTKOfub3TNJKWkblx5IjW04i',
        'acw_tc': '1a0c642717408603642105249e004bd5449dd2bec3eab16e1867a073ed3752',
        'Hm_lvt_b966fe201514832da03dcf6cbf25b8a2': '1740860364',
        'HMACCOUNT': '879CD8B10F5128DF',
        'acw_sc__v3': '67c36bd330ff06d5b5e6339c09d7998c7473f419',
        'Hm_lpvt_b966fe201514832da03dcf6cbf25b8a2': '1740860388',
        'ssxmod_itna': 'Yq02Y57K0ImG8Dz=DUDGht+G7DyDA2+72REdgQSikqGX=oDZDiqAPGhDC3f/w08+pxNjlBrd+oBlGrLrsz=CrLtqKnRbLe3cDB3DEx06ejnDYACDt4DTD34DYDixibhxi5GRD0KDFzHNNZ+qDEDYpyDA3Di4D+zd=DmqG0DDUO/4G2D7tgjDDljYWdt0jPYBiKV4bl0nUe7Cw=zqDMneGXz0FdzwRkuc4V7lc7djpnDB6wxBQZ0MX00m4gDCX4vDfx/Y4G3Y+w9Yh=YBGi7iM4zBwPFBDYD8wNzA3IWvp6Xy4DG4+YntxxD=',
        'ssxmod_itna2': 'Yq02Y57K0ImG8Dz=DUDGht+G7DyDA2+72REdgQS7DA=95dD//F9DFOM0RKt/BIh6eNlcrBG1c+hn95hRz7kyhteFt0KGF9DmYy1OVBluGpLn92B+q2DE1u4hzTAcg+RrqdNKZ6MKKu=OP5GkDKC=6rQelKFs0K7tf7FghPGSpP1NKTF76uDdlxvqiv54o8Fe75hecg1BnxFemDvKRB42rKvN0B5r1o+qlkLr6hToYStyl0Iyju53KHv=aI2827htpprYXnhFbW88ZD1DIi44t0wBmiOs07IbUWpjKAnqnaGZoQcui5UV3=1GThikGSKCOb0gD8gD7wYPiizACNBDjOqtidqGhkiaC3xQQa5OHKg7phbMkmhxzYf1smpeSmKgNveh7L8jubnSK4wr1B+72bad346=97w90D2BN+wdaMOwn8pLQGfowjfK8m6uKdGkWZTF0mdgI7kPcS=BGewRTfmOcrpiM8fed1+35aEQ+I9uKQnpkxOBi1B7hL1sqhXVGGbOUMiX1RotzDjcX/WYn3T4oHqCilBIeQxMi=3pUOcp0uu0dIxSi=rKnhIjDDwc2NjTphedakCAf1+iCs3SnxOmaq22yOCh=Ce8xwTDK3CDjKDeTKkesxSxNiDD',
    }

    total_pages = 10
    all_data_list = []
    for page in range(1, total_pages + 1):
        decrypted = fetch_and_decrypt_data(js_file_path, page, headers, cookies)
        print(f"Page {page} Data: {decrypted}")
        page_payload = json.loads(decrypted)
        records = page_payload['data']['dataList']
        for record in records:
            print(record)
        all_data_list.extend(records)
        time.sleep(5)  # throttle requests to avoid tripping the site's anti-bot checks

    # Persist everything scraped so far to a single CSV file.
    csv_file_path = 'qgzbgg_data.csv'
    save_to_csv(all_data_list, csv_file_path)


# Script entry point: only run the scrape when executed directly, not on import.
if __name__ == "__main__":
    main()
