import json
import re
import time

import requests
from lxml import etree

# Request headers for gaokao.chsi.com.cn, captured from a browser session.
# NOTE(review): the cookie values below are session-bound and will expire;
# refresh them from a live browser session if requests start failing.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    # BUGFIX: key was "$Cookie" (curl-converter artifact), so the Cookie
    # header was never actually sent to the server.
    "Cookie": "JSESSIONID=DD0D7D2F4B429D64342E1CC5B5AF1054; Hm_lvt_9c8767bf2ffaff9d16e0e409bd28017b=1732091768; Hm_lpvt_9c8767bf2ffaff9d16e0e409bd28017b=1732091768; HMACCOUNT=AFD53293F7A60F46; _gid=GA1.3.2125136288.1732091768; aliyungf_tc=b077c374855b054210cceefe22e9888ae207380c130ec0d810d8f824ae50bb67; XSRF-CCKTOKEN=d2d9e073d22018db11a2eb52e430ec10; CHSICC_CLIENTFLAGGAOKAO=7485b4f67adcaa05ecb6f9348135796b; CHSICC01=\\u0021FRLL5sXyTgDirj0nVPBkiJOoJxwY2kr7kG3nQN7+PEqF4Rs71fENlGXn+9llEFUmoO4UjJwlhNDI; Hm_lvt_19141110b831c2c573190bb7a3b0ef3f=1732091824; CHSICC_CLIENTFLAGSPECIALITY=b388e0a4876c07d89b9cca8c54cf5b10; JSESSIONID=ABE72FFEE76B2EF9DB01CFD875F5D963; CHSICC_CLIENTFLAGCHSI=43fc98c2043067034d755fa6b965b98d; Hm_lpvt_19141110b831c2c573190bb7a3b0ef3f=1732091966; _ga=GA1.1.71236413.1732091768; _ga_5FHT145N0D=GS1.1.1732091824.1.1.1732091978.0.0.0; _ga_8YMQD1TE48=GS1.1.1732091768.1.1.1732092039.0.0.0",
    "Pragma": "no-cache",
    "Referer": "https://gaokao.chsi.com.cn/zyk/pub/myd/schAppraisalTop.action?start=2560",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "same-origin",
    "Sec-Fetch-User": "?1",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
    "sec-ch-ua": "\"Google Chrome\";v=\"131\", \"Chromium\";v=\"131\", \"Not_A Brand\";v=\"24\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Windows\""
}
url = "https://gaokao.chsi.com.cn/zyk/pub/myd/schAppraisalTop.action"

# Page through the school-appraisal ranking, 20 rows per request, printing
# each school's name and detail URL. Stops after the first short page.
num = -20   # server-side "start" offset; first request uses 0
page = 0    # 1-based page counter, used only for logging
while True:
    page += 1
    num += 20
    params = {
        "start": str(num)
    }
    # BUGFIX: added a timeout — without one, requests can block forever.
    response = requests.get(url, headers=headers, params=params, timeout=10)

    tree = etree.HTML(response.text)
    # First <tr> is the table header row; skip it.
    information_list = tree.xpath("//table[@class='cnt_table']//tr")[1:]
    for information in information_list:
        school_url = information.xpath(".//div[@class='item-yxmc']/a/@href")
        school_name = information.xpath(".//div[@class='item-yxmc']/a/text()")
        if not school_url:
            # BUGFIX: previously a single row without a link raised
            # IndexError and aborted the remaining rows of the page.
            # Dump the raw page for later inspection and keep going.
            with open('阳光高考.html', 'a', encoding='utf-8') as f:
                f.write(response.text)
            continue
        school_detail_url = "https://gaokao.chsi.com.cn" + school_url[0]
        print(page, num, school_name, school_detail_url)
    # A page with fewer than 20 rows is the last page of results.
    if len(information_list) < 20:
        break
    time.sleep(1)  # be polite to the server between page fetches
