import math
import time
import requests
from lxml import etree
import json

# Browser-mimicking request headers (copied from a real Chrome 138 session on
# Windows) so bjcourt.gov.cn serves the normal HTML listing to the scraper.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Connection": "keep-alive",
    "Referer": "https://www.bjcourt.gov.cn/cpws/index.htm",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "same-origin",
    "Sec-Fetch-User": "?1",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36",
    "sec-ch-ua": "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Google Chrome\";v=\"138\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Windows\""
}
# Cookies captured from the same browser session.
# NOTE(review): JSESSIONID and the anti-bot tokens below look session-bound and
# will presumably expire -- refresh them from a fresh browser session before
# running, or the site may reject/redirect the requests.
cookies = {
    "thguid-r": "1776957059204378624",
    "__jsluid_s": "95d42844131b870d84c149d43eb2cc7d",
    "JSESSIONID": "node01nv9duupafaszy9dlmh0pywv463762300.node0",
    "vh2PJqrvyx": "MDAwM2IyYWYxZTQwMDAwMDAwMDQwXmAbOQYxNzUxOTE5MTE2",
    "6JDgKK8lEy": "MDAwM2IyYWYxZTQwMDAwMDAwMDMwRnMvDEExNzUxOTE5MTE2"
}
# Search endpoint for judgement documents ("cpws" -- presumably 裁判文书,
# i.e. court judgements; matches the Referer header above).
url = "https://www.bjcourt.gov.cn/cpws/index.htm"
# Query-string parameters for the search form. Most filters are left empty;
# jbfyId and page are overwritten inside the crawl loop below.
params = {
    "st": "1",
    "q": "",
    "sxnflx": "0",
    "prompt": "",
    "dsrName": "",
    "ajmc": "",
    "ajlb": "",
    "jbfyId": "1",              # court id -- replaced per court in the crawl loop
    "zscq": "",
    "ay": "",
    "ah": "",
    "cwslbmc": "",
    "startCprq": "2021-01-04",  # start of the date window (presumably decision date)
    "endCprq": "2021-01-08",    # end of the date window
    "startFbrq": "",
    "endFbrq": "",
    "page": "1"                 # result page -- replaced in the crawl loop
}
# Load the court list (dicts carrying a "value_id" per court) produced by an
# earlier crawl step and saved as result.json.
with open("result.json", encoding="utf-8") as fp:
    court_list = json.loads(fp.read())


def _fetch_page(jbfy_id, page):
    """GET one listing page for the given court id and return the parsed lxml tree."""
    params["jbfyId"] = jbfy_id
    params["page"] = page
    response = requests.get(url, headers=headers, cookies=cookies, params=params)
    return etree.HTML(response.content.decode())


def _extract_items(root):
    """Return the <li> result nodes of a parsed listing page."""
    return root.xpath(
        "//div[@class='layer p5_0']/ul[@class='ul_news_long']/li[@class='refushCpws']"
    )


count = 0  # running total of scraped entries across all courts
# court_list[0] is skipped (presumably a header/placeholder entry -- confirm
# against the code that produced result.json).
for court_dict in court_list[1:]:
    jbfy_id = court_dict.get("value_id")
    print(jbfy_id)

    # Fetch page 1 first to read THIS court's own result total.
    # (Bug fix: the original computed `total` once, from the initial jbfyId=1
    # request, and reused it to paginate every court.)
    root = _fetch_page(jbfy_id, 1)
    total_text = "".join(
        root.xpath("//div[@class='table_list_info pt5']/span[@class='info']/em/text()")
    )
    # Guard against a missing/non-numeric counter (the original int() would crash).
    total = int(total_text) if total_text.isdigit() else 0
    # Bug fix: range(1, ceil(total/20)) dropped the last page; include it.
    last_page = math.ceil(total / 20)

    for page in range(1, last_page + 1):
        # Page 1 was already fetched to read the total -- don't request it twice.
        if page > 1:
            root = _fetch_page(jbfy_id, page)
        for data in _extract_items(root):
            count += 1
            title = "".join(data.xpath("./a/text()")).strip()
            court_name = "".join(data.xpath(".//span[@class='sp_name']/text()"))
            date = "".join(data.xpath(".//span[@class='sp_time']/text()"))
            print(page, count, title, court_name, date)
            time.sleep(1)  # per-item throttle to stay polite to the server
        if page == 10:  # hard cap: never crawl more than 10 pages per court
            break

    if court_dict.get("value_id") == "12":  # stop once court id "12" is done
        break
    time.sleep(5)  # pause between courts