import json
import os
from urllib.parse import urlparse

import scrapy
from lxml import etree

from localbond.items import LocalbondItem


class LocalbondsSpider(scrapy.Spider):
    """Crawl local-government bond issues from governbond.org.cn.

    Pipeline: paginated listing API -> one LocalbondItem per bond ->
    per-bond JSON detail dump (./json_detil/) -> disclosure pages on
    celma.org.cn -> pdf/zip attachment downloads (./downloads/<ext>/).
    """

    name = "localbonds"

    # Listing fields copied verbatim from the API payload into the item.
    ITEM_FIELDS = (
        'AD_CODE_GK', 'SET_YEAR_GK', 'FXPC_GK', 'ZQ_PC_CODE', 'FXPC_NAME',
        'AD_CODE', 'AD_NAME', 'SET_YEAR', 'ZQ_NAME', 'ZQ_CODE', 'ZQ_JC',
        'ZQQX_ID', 'ZQQX_NAME', 'FX_AMT', 'XZZQ_AMT', 'ZHZQ_AMT',
        'ZRZZQ_AMT', 'QX_DATE', 'ZQLX_ID', 'ZQLX_NAME', 'LL', 'ZQ_FXTIME',
        'LXFS_ID', 'FXFS', 'SHMS',
    )

    def start_requests(self):
        """Yield one listing request per page (pages 1-10).

        NOTE(review): the timeStamp query value is a frozen snapshot carried
        over from the original code — confirm the API does not require a
        current timestamp.
        """
        for page in range(1, 11):
            self.logger.info("Requesting listing page %d", page)
            url = (
                "https://www.governbond.org.cn:4443/api/loadBondData.action"
                "?timeStamp=1744717852195&dataType=ZQFXLISTBYAD&adList=&adCode=87"
                "&zqlx=&year=&fxfs=&qxr=&fxqx=&zqCode=&zqName="
                f"&page={page}&pageSize=10"
            )
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Parse one listing page: emit an item per bond, then follow the
        JSON-detail endpoint and the disclosure page for each bond.
        """
        try:
            payload = response.json()
        except json.JSONDecodeError as e:
            self.logger.error(f"Failed JSON: {e}")
            return
        for record in payload.get('data', []):
            item = LocalbondItem()
            for key in self.ITEM_FIELDS:
                item[key] = record.get(key)
            yield item
            zq_code = record.get('ZQ_CODE')
            if not zq_code:
                continue
            detail_url = (
                "https://www.governbond.org.cn:4443/api/loadBondData.action"
                f"?dataType=ZQDETAILS&pcCode={zq_code}"
            )
            # NOTE(review): the listing was queried with adCode=87, but this
            # disclosure URL hardcodes adCode=11 — confirm this is intended.
            douc_url = f'https://www.celma.org.cn/zqgkxq/index.jhtml?pcCode={zq_code}&adCode=11'
            yield scrapy.Request(url=detail_url, callback=self.parse_new_data,
                                 meta={'item': item})
            yield scrapy.Request(url=douc_url, callback=self.parse_douc_data,
                                 meta={'item': item})

    def parse_new_data(self, response):
        """Dump the bond's JSON detail payload to ./json_detil/<ZQ_CODE>_details.json."""
        try:
            details = response.json()["data"]
            zq_code = response.meta['item']['ZQ_CODE']
            # Directory spelling ("json_detil") kept from the original so any
            # existing tooling that reads it keeps working.  Creating it here
            # fixes a crash on first run when the directory was absent.
            os.makedirs('./json_detil', exist_ok=True)
            file_name = f"{zq_code}_details.json"
            with open(f'./json_detil/{file_name}', 'w', encoding='utf-8') as f:
                json.dump(details, f, ensure_ascii=False, indent=4)
            self.logger.info("Data saved to %s", file_name)
        except Exception as e:
            self.logger.error(f"Error saving data to file: {e}")

    def parse_douc_data(self, response):
        """Follow the first two attachment-list links on the disclosure page.

        The original indexed hrefs[0] and hrefs[1] unconditionally, so a page
        with fewer than two links raised IndexError and followed nothing;
        iterating over whatever is present still visits every available link.
        """
        try:
            tree = etree.HTML(response.text)
            hrefs = tree.xpath('//div[@class="col-md-11"]/a/@href')
            for href in hrefs[:2]:
                if href:
                    yield scrapy.Request(url=href, callback=self.parse_douc_data_detail,
                                         meta={'item': response.meta['item']})
        except Exception as e:
            self.logger.error(f"Error parsing douc data: {e}")

    def parse_douc_data_detail(self, response):
        """Queue a download request for every attachment link on the page."""
        tree = etree.HTML(response.text)
        for url in tree.xpath('//div[@class="content-fj"]/ul/li/a/@href'):
            yield scrapy.Request(url=url, callback=self.save_file,
                                 meta={'item': response.meta['item']})

    def save_file(self, response):
        """Persist zip/pdf attachments under ./downloads/<ext>/, prefixed
        with the bond's ZQ_CODE.
        """
        zq_code = response.meta['item']['ZQ_CODE']
        # Take the extension from the URL *path* so query strings
        # ("...x.pdf?v=1") don't defeat the zip/pdf filter (the original
        # split the whole URL on '.').
        url_path = urlparse(response.url).path
        file_ext = os.path.splitext(url_path)[1].lstrip('.').lower()
        if file_ext in ('zip', 'pdf'):
            file_name = f"{zq_code}_{os.path.basename(url_path)}"
            save_path = f'./downloads/{file_ext}'
            # exist_ok avoids the check-then-create race under Scrapy's
            # concurrent downloads.
            os.makedirs(save_path, exist_ok=True)
            full_path = os.path.join(save_path, file_name)
            with open(full_path, 'wb') as f:
                f.write(response.body)
            self.logger.info("File saved to %s", full_path)



