import scrapy
import time
import json


# GitHub search API endpoint template: %s is a star-range filter (e.g.
# '744..*'), %d is the page number.  The trailing backslash continues the
# string literal, so the resulting URL contains no newline.
QUERY_FORMAT = "https://api.github.com/search/repositories?\
q=language:Java+stars:%s&sort=stars&order=desc&page=%d&per_page=100"
# Zip-archive download URL: %s/%s = repository full_name / branch name.
DOWNLOAD_FORMAT = "https://codeload.github.com/%s/zip/%s"
# Star-count ranges used to slice the search space into separate queries.
# NOTE(review): some ranges overlap slightly (e.g. '400..744' vs '250..403');
# presumably hand-tuned so each query stays under GitHub's 1000-result
# search cap — confirm before editing these values.
QUERY_STAR = ['744..*', '400..744', '250..403', '172..253',
              '127..175', '97..130', '78..100', '64..81', '55..67',
              '48..58', '42..50', '37..44', '33..39', '30..35',
              '28..32', '22..29', '22..27', '22..25', '23..25',
              '21..23', '22', '21', '20', '19', '18', '17', '16',
              '15', '14', '13', '12', '11', '10']
# Pages fetched per star range (at per_page=100 results each).
MAX_PAGE = 10


class githubSpider(scrapy.Spider):
    """Crawl GitHub's repository-search API for Java projects by stars.

    Iterates over the star ranges in QUERY_STAR, fetching up to MAX_PAGE
    pages (100 results each) per range, and yields one item per repository
    with a 'file_urls' entry pointing at the zip archive of its default
    branch (consumable by a Scrapy files pipeline).
    """

    name = "github_api"
    allowed_domains = ["github.com"]

    # Crawl cursor: index into QUERY_STAR and the current result page.
    # Mutated from parse(); this relies on each callback scheduling exactly
    # one follow-up request, so the requests form a sequential chain.
    query = 0
    page = 1
    full_url = QUERY_FORMAT % (QUERY_STAR[query], page)

    start_urls = [full_url]

    def parse(self, response):
        """Emit an item per repository, then schedule the next page/range.

        Yields dicts with the repo's 'project' full name and a 'file_urls'
        list, followed by at most one scrapy.Request for the next search
        page (or the first page of the next star range).
        """
        # Fix: response.body_as_unicode() was deprecated in Scrapy 2.1 and
        # removed in 2.6; response.text is the supported equivalent.
        projects = json.loads(response.text)
        for p in projects['items']:
            yield {
                'project': p['full_name'],
                'file_urls': [DOWNLOAD_FORMAT % (p['full_name'], p['default_branch'])],
            }

        # NOTE(review): time.sleep() blocks Scrapy's event loop entirely;
        # the DOWNLOAD_DELAY setting or the AutoThrottle extension would be
        # the idiomatic way to rate-limit without stalling the reactor.
        if self.page % 6 == 0:
            time.sleep(66)  # sleep to avoid HTTP 429

        self.page += 1
        if self.page <= MAX_PAGE:
            # Next page within the current star range.
            self.full_url = QUERY_FORMAT % (QUERY_STAR[self.query], self.page)
            yield scrapy.Request(self.full_url, callback=self.parse)
        else:
            time.sleep(66)  # sleep to avoid HTTP 429
            self.query += 1
            # TODO: crawl every range (self.query >= len(QUERY_STAR));
            # currently deliberately capped at the first 3 star ranges.
            if self.query >= 3:
                return
            # Restart pagination for the next star range.
            self.page = 1

            self.full_url = QUERY_FORMAT % (QUERY_STAR[self.query], self.page)
            yield scrapy.Request(self.full_url, callback=self.parse)
