import json
import urllib.parse as urlparse
from pathlib import Path

import scrapy


class RepoSpider(scrapy.Spider):
    """Page through the Gitee v5 repository-search API and dump each JSON
    page of results to ``downloads/PHPRepo_Page<N>.json``.

    Crawl arguments (``scrapy crawl repo -a queryKey=... -a beginPage=...``):
        queryKey:  search keyword sent as the ``q`` parameter (default 'java').
        beginPage: first results page to fetch, 1-based (default 1).
        endPage:   last results page to fetch, inclusive (default 100).
    """

    name = 'repo'
    allowed_domains = ['gitee.com']

    def __init__(self, queryKey='java', beginPage=1, endPage=100, **kwargs):
        super().__init__(**kwargs)
        # Arguments supplied via ``scrapy crawl -a`` arrive as strings;
        # coerce them so the page arithmetic/comparison in parse() works
        # whether the caller passed ints or strings.
        self.start_page = int(beginPage)
        self.end_page = int(endPage)
        self.base_url = 'https://gitee.com/api/v5/search/repositories?'
        self.params = {
            'q': queryKey,
            'page': self.start_page,
            'order': 'desc',
            'per_page': 100,
        }

    def start_requests(self):
        """Issue the request for the first results page."""
        url = self.base_url + urlparse.urlencode(self.params)
        self.logger.info('Begin crawling url %s', url)
        yield scrapy.Request(url, callback=self.parse)

    def parse(self, response):
        """Persist one page of search results, then request the next page
        until ``end_page`` (inclusive) has been fetched."""
        # Portable path handling: the original hard-coded a Windows-style
        # '.\downloads\' prefix whose backslashes are invalid escape
        # sequences and break on other platforms. Also make sure the
        # output directory exists before opening the file.
        out_dir = Path('downloads')
        out_dir.mkdir(parents=True, exist_ok=True)
        filename = out_dir / f"PHPRepo_Page{self.params['page']}.json"
        # utf-8 + ensure_ascii=False keep non-ASCII repository names
        # readable instead of \uXXXX-escaped.
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(response.json(), f, ensure_ascii=False)

        self.logger.info('已经爬完了第%s个页面', self.params.get('page'))
        # Advance to the next page and keep crawling until end_page.
        self.params['page'] += 1
        if self.params['page'] <= self.end_page:
            next_page = self.base_url + urlparse.urlencode(self.params)
            yield scrapy.Request(next_page, callback=self.parse)