import scrapy


class SsSpider(scrapy.Spider):
    """Spider that POSTs paginated disclosure queries to cninfo.com.cn.

    Issues one form POST per result page to the disclosure endpoint and
    logs which page each response belongs to.
    """

    name = 'ss'
    # allowed_domains = ['ss.com']
    start_urls = ['http://www.cninfo.com.cn/new/disclosure']
    # Pagination knobs; defaults reproduce the original hard-coded
    # range(1, 10) (pages 1..9) and pageSize "30".
    max_page = 9
    page_size = 30

    def start_requests(self):
        """Yield one FormRequest (POST) per result page.

        The endpoint expects every form value as a string, so numeric
        fields are stringified. The page number travels in request meta
        so ``parse`` can report which page a response came from.
        """
        for page in range(1, self.max_page + 1):
            formdata = {
                "column": "szse_gem_latest",
                "pageNum": str(page),
                "pageSize": str(self.page_size),
                "sortName": "",
                "sortType": "",
                "clusterFlag": "true",
            }
            yield scrapy.FormRequest(
                url=self.start_urls[0],
                formdata=formdata,
                # Explicit callback (same as Scrapy's default) for clarity.
                callback=self.parse,
                meta={'page': formdata['pageNum']},
            )

    def parse(self, response):
        """Log the page number carried in the response's request meta."""
        page = response.meta.get('page')
        # Use the spider logger instead of print() so the message goes
        # through Scrapy's logging configuration.
        self.logger.info(f'正在抓取第{page}页的数据')