# -*- coding: utf-8 -*-
import scrapy, json
from gkzxscores.items import GkzxscoresItem

class ScrocesspiderSpider(scrapy.Spider):
    """Spider that pages through the eol.cn gaokao score API and yields one
    GkzxscoresItem per province-level admission score record (year 2019)."""

    name = 'scrocesSpider'
    # allowed_domains = ['api.eol.cn/gkcx/api/?access_token=']

    start_urls = []

    # Browser-like headers; the Cookie carries the tool_ip* values the API
    # appears to expect (copied from a real session — TODO confirm still valid).
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36',
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'Cookie':
        'tool_ipuse=172.16.30.144; tool_ipprovince=99'
    }

    # API record keys copied verbatim into the item, one per item field.
    _ITEM_FIELDS = (
        'average', 'year', 'min', 'school_id', 'max',
        'local_province_name', 'local_batch_name', 'filing',
        'proscore', 'name', 'local_type_name', 'province_id',
    )

    def __init__(self, *args, **kwargs):
        # Call the base Spider initializer so Scrapy's spider-argument
        # handling (-a name=value) and internal bookkeeping keep working.
        super().__init__(*args, **kwargs)
        self.start_urls = self.gkzxscroceUrl()

    def start_requests(self):
        """Override Scrapy's default so every request carries our headers."""
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse, headers=self.headers)

    def parse(self, response):
        """Parse one JSON API page and yield an item per score record.

        :param response: Scrapy response whose body is the API's JSON payload.
        :raises KeyError: if the payload lacks the data.item structure.
        """
        contents = json.loads(response.text)
        for record in contents['data']['item']:
            # Create a FRESH item for every record. Reusing one instance
            # (as before) mutates items already handed to the pipeline,
            # so all yielded items could end up holding the last record.
            im = GkzxscoresItem()
            for field in self._ITEM_FIELDS:
                im[field] = record[field]
            yield im

    def gkzxscroceUrl(self):
        """Build the paginated API URL list (pages 1..3943, 20 rows each)."""
        return [
            f'https://api.eol.cn/gkcx/api/?access_token='
            f'&admissions=&central=&department=&dual_class=&f211=&f985='
            f'&is_dual_class=&keyword=&local_batch_id=&page={page}'
            f'&province_id=&school_type=&signsafe='
            f'&size=20&type=&uri=apidata/api/gk/score/province&year=2019'
            for page in range(1, 3944)
        ]
            # if page == page:
            #     break
        # for u in urls:
        #     print(u)

if __name__ == '__main__':
    # Quick manual smoke test: build the spider and generate the URL list
    # (result deliberately discarded — just checks construction works).
    spider = ScrocesspiderSpider()
    spider.gkzxscroceUrl()