import scrapy
import json
import os


class GzlwSpider(scrapy.Spider):
    """Spider for the CBIRC (www.cbirc.gov.cn) document-listing API.

    Requests pages 1-6 of item 934 (18 entries per page). Each response is
    a JSON envelope whose payload rows live under ``data.rows``.
    """

    name = 'gzlw'
    allowed_domains = ['www.cbirc.gov.cn']
    start_urls = [
        f'https://www.cbirc.gov.cn/cbircweb/DocInfo/SelectDocByItemIdAndChild'
        f'?itemId=934&pageSize=18&pageIndex={i}'
        for i in range(1, 7)
    ]

    # NOTE(review): Scrapy only invokes `parse` by default and nothing in
    # this file registers `parse1` as a callback — presumably it is meant to
    # be passed via Request(callback=...); confirm before deleting.
    def parse1(self, response):
        """Append this page's ``data.rows`` to ``results.json``.

        Fix: the previous implementation wrote a bare ``[`` plus
        comma-joined, bracket-stripped fragments and never closed the
        array, so ``results.json`` was never valid JSON. This version
        re-reads the accumulated array, extends it with the new rows, and
        rewrites the whole file, keeping it parseable after every call.
        """
        rows = json.loads(response.text)['data']['rows']
        path = 'results.json'
        accumulated = []
        if os.path.exists(path) and os.stat(path).st_size > 0:
            try:
                with open(path, encoding='utf-8') as f:
                    accumulated = json.load(f)
            except (json.JSONDecodeError, OSError):
                # A leftover file from the old broken writer (or any
                # corruption) should not crash the crawl: start fresh.
                accumulated = []
        accumulated.extend(rows)
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(accumulated, f, ensure_ascii=False)

    def parse(self, response):
        """Default callback: yield the decoded JSON envelope for each page."""
        yield json.loads(response.text)