import scrapy
import time


# (page-URL prefix, page-URL suffix) pairs; the current page number is
# spliced between them to form a GitHub repository-search URL.
URL_QUERY = [
    ['https://github.com/search?l=&p=', '&q=language%3AJava+stars%3A%3E700&ref=advsearch&type=Repositories'],
    ['https://github.com/search?p=', '&q=language%3AJava+stars%3A"398+..+735"&ref=searchresults&type=Repositories'],
]
# codeload ZIP template; filled with an "owner/repo" slug.
DOWNLOAD_FORMAT = "https://codeload.github.com/%s/zip/master"
# Last search-result page to crawl for each query.
MAX_PAGE = 100


class githubSpider(scrapy.Spider):
    """Crawl GitHub repository-search result pages and yield, for each
    repository found, its "owner/repo" slug plus a codeload ZIP URL.

    Pages 1..MAX_PAGE of each query in URL_QUERY are walked sequentially:
    each parsed page schedules a request for the next page, and when a
    query is exhausted the spider advances to the next query.
    """
    name = "github"
    allowed_domains = ["github.com"]

    # Throttle through Scrapy instead of time.sleep(): sleeping inside a
    # callback blocks the Twisted reactor and stalls the whole crawler.
    # AutoThrottle additionally backs off when the server pushes back
    # (e.g. HTTP 429), which is what the sleeps were trying to avoid.
    custom_settings = {
        "DOWNLOAD_DELAY": 10,
        "AUTOTHROTTLE_ENABLED": True,
    }

    # Crawl-progress state: index into URL_QUERY, the active query's URL
    # prefix/suffix pair, and the current page number.
    query = 0
    url_page = URL_QUERY[query]
    page = 1
    full_url = url_page[0] + str(page) + url_page[1]

    start_urls = [full_url]

    def parse(self, response):
        """Yield one item per repository on this result page, then
        schedule the next page (or the next query's first page)."""
        for node in response.css(".repo-list-name a").xpath("text()"):
            slug = node.extract()  # extract once, reuse for both fields
            yield {
                'project': slug,
                'file_urls': [DOWNLOAD_FORMAT % slug],
            }

        self.page += 1
        if self.page > MAX_PAGE:
            # Current query exhausted -- move on to the next one, if any.
            self.query += 1
            if self.query >= len(URL_QUERY):
                return
            self.page = 1
            self.url_page = URL_QUERY[self.query]

        self.full_url = self.url_page[0] + str(self.page) + self.url_page[1]
        yield scrapy.Request(self.full_url, callback=self.parse)


# NOTE: The commented-out code below is an earlier, superseded version of this
# spider that crawled a fixed page range (11-15) for a single hard-coded query.
# It is kept here for reference only and is not executed.

# URL_FORMAT = "https://github.com/search?l=&p=%d&q=language%3AJava+stars%3A%3E700&ref=advsearch&type=Repositories"
# DOWNLOAD_FORMAT = "https://codeload.github.com/%s/zip/master"


# class githubSpider(scrapy.Spider):
#     name = "github"
#     allowed_domains = ["github.com"]
#     temp_urls = []
#     for page in range(11, 16):
#         temp_urls.append("https://github.com/search?l=&p=" + str(page) +
#                          "&q=language%3AJava+stars%3A%3E700&ref=advsearch&type=Repositories")
#     start_urls = temp_urls

#     def parse(self, response):
#         project = response.css(".repo-list-name a").xpath("text()")
#         file_urls = []
#         for p in project:
#             file_urls.append(DOWNLOAD_FORMAT % p.extract())
#             yield {
#                 'project': p.extract(),
#                 'file_urls': [DOWNLOAD_FORMAT % p.extract()],
#             }