import scrapy
from scrapy.http import HtmlResponse
from scrapy import Request
from ssqSpider.items import SsqspiderItem


class SsqSpider(scrapy.Spider):
    """Scrape Shuangseqiu (双色球) lottery draw results from the official
    China Welfare Lottery JSON API at www.cwl.gov.cn.

    Yields one ``SsqspiderItem`` per historical draw, covering pages
    1..MAX_PAGE of the ``findDrawNotice`` endpoint (PAGE_SIZE draws each).
    """

    name = "ssq"
    allowed_domains = ["www.cwl.gov.cn"]

    # Pagination defaults; override on a subclass (or via custom settings)
    # to widen/narrow the crawl without touching start_requests().
    MAX_PAGE = 54   # pages 1..54 inclusive
    PAGE_SIZE = 30  # draws per page, as accepted by the API

    def start_requests(self):
        """Yield one request per result page of the draw-notice API.

        ``dont_filter=True`` is required because the server may answer with a
        302 redirect; without it the duplicate filter can drop the follow-up
        request and silently skip pages.
        """
        base = (
            'http://www.cwl.gov.cn/cwl_admin/front/cwlkj/search/kjxx/'
            'findDrawNotice?name=ssq&issueCount=&issueStart=&issueEnd='
            '&dayStart=&dayEnd=&pageNo={page}&pageSize={size}'
            '&week=&systemType=PC'
        )
        for page in range(1, self.MAX_PAGE + 1):
            url = base.format(page=page, size=self.PAGE_SIZE)
            yield Request(url=url, dont_filter=True)

    def parse(self, response: HtmlResponse):
        """Parse one JSON page of draw notices into items.

        Each entry of the payload's ``result`` list describes a single draw;
        a missing/empty ``result`` (e.g. an API error payload) yields nothing
        instead of crashing the callback with a KeyError.
        """
        data = response.json()
        for draw in data.get('result') or []:
            item = SsqspiderItem()
            item['qihao'] = draw['code']             # draw/issue number
            item['riqi'] = draw['date']              # draw date
            item['kaijianghaoma_red'] = draw['red']  # winning red balls
            item['kaijianghaoma_blue'] = draw['blue']  # winning blue ball
            item['jiangchijiner'] = draw['poolmoney']  # prize-pool amount
            item['xiaoshouer'] = draw['sales']         # total sales
            yield item