import scrapy
from fake_useragent import UserAgent
import json

# One random User-Agent string, sampled once at import time and reused for
# every request this process sends.
# NOTE(review): because this is module-level, all requests share the same UA
# for the lifetime of the crawl — confirm that is intended (re-sampling per
# request would require calling UserAgent().random inside the spider).
ua = UserAgent().random


class DatasSpider(scrapy.Spider):
    """Crawl the governbond.org.cn bond-issuance list and follow each bond's
    detail endpoint.

    Flow: ``start_requests`` pages through the ZQFXLISTBYAD list API,
    ``parse`` extracts each bond's ``ZQ_CODE`` and requests the ZQDETAILS
    endpoint, and ``Mistakes`` receives the detail response.
    """

    name = "datas"

    def start_requests(self):
        """Yield one request per list page.

        The page range is currently ``range(1, 2)`` — a single page;
        widen the range to crawl more pages.
        """
        urls = [
            f"https://www.governbond.org.cn:4443/api/loadBondData.action?timeStamp=1744715104979&dataType=ZQFXLISTBYAD&adList=&adCode=87&zqlx=&year=&fxfs=&qxr=&fxqx=&zqCode=&zqName=&page={page}&pageSize=10"
            for page in range(1, 2)
        ]
        headers = {"User-Agent": ua}
        for url in urls:
            yield scrapy.Request(url, headers=headers)

    def parse(self, response):
        """Parse a JSON list page and request the detail API per bond.

        Assumes the response body is JSON whose ``data`` key holds a list
        of dicts each containing at least ``ZQ_CODE`` — TODO confirm the
        full schema against the live API.
        """
        # Renamed from `all`, which shadowed the builtin.
        records = json.loads(response.text)["data"]
        for record in records:
            zq_code = record["ZQ_CODE"]
            # BUG FIX: the original URL literal contained spaces around
            # "://", the port and the query parameters, producing an
            # invalid URL — every detail request was broken.
            next_url = (
                "https://www.governbond.org.cn:4443/api/loadBondData.action"
                f"?dataType=ZQDETAILS&pcCode={zq_code}"
            )
            yield scrapy.Request(
                next_url,
                headers={"User-Agent": ua},
                callback=self.Mistakes,
            )

    def Mistakes(self, response):
        """Detail-endpoint callback; currently just dumps the raw body."""
        print(response.text)

    def parse_next(self, response):
        """Placeholder for a future processing step."""
        pass
