# -*- coding: utf-8 -*-

import re
import json
import time
from datetime import datetime
from requests.utils import parse_header_links
from urllib.parse import quote
from issue_crawler.items import IssueItem
import scrapy

# Category constants: identifiers of the supported repo-hosting services.
CATEGORY_GITHUB = "github"
CATEGORY_GOOGLE_CODE = "google code"

# All valid values for IssueSpider's ``category`` argument.
CATEGORIES = (CATEGORY_GITHUB, CATEGORY_GOOGLE_CODE)


# IssueSpider crawls issues that contain specified keywords from open-source
# repositories. Two hosting categories are supported: GitHub and Google Code.
class IssueSpider(scrapy.Spider):
    """Crawl issues that mention given keywords in a repository.

    Two hosting categories are supported:

    * GitHub -- issues are found through the GitHub issue-search API,
      which performs the keyword filtering server-side.
    * Google Code -- issue pages are fetched from the Google Code
      Archive JSON dumps and keyword filtering is done locally on the
      issue comments.
    """

    name = 'issue'
    allowed_domains = [
        'api.github.com', 'www.googleapis.com', 'code.google.com'
    ]
    # start_urls is populated in __init__ once the category is known.
    start_urls = []

    def __init__(self,
                 category=CATEGORY_GITHUB,
                 repo=None,
                 keywords=None,
                 username=None,
                 password=None,
                 *args,
                 **kwargs):
        """
        :param category: one of ``CATEGORIES`` (default: github).
        :param repo: repository identifier (required), e.g. ``owner/name``
            for GitHub or the project name for Google Code.
        :param keywords: comma-separated list of keywords (required).
        :param username: optional GitHub user for HTTP basic auth.
        :param password: optional GitHub password for HTTP basic auth.
        :raises scrapy.exceptions.CloseSpider: if ``repo`` or ``keywords``
            is missing, or ``category`` is unknown.
        """
        super(IssueSpider, self).__init__(*args, **kwargs)
        if repo is None:
            raise scrapy.exceptions.CloseSpider('repo name should be given.')
        if keywords is None:
            raise scrapy.exceptions.CloseSpider('Keywords should be given.')

        self.category = category
        self.keywords = keywords.split(',')
        self.repo = repo

        # Compile the keyword pattern once instead of on every call to
        # check_keywords().  Keywords are escaped so regex metacharacters
        # (e.g. in "c++") are matched literally.
        self._keywords_re = re.compile(
            r'\b({0})\b'.format('|'.join(map(re.escape, self.keywords))),
            re.IGNORECASE)

        # Credentials are picked up by Scrapy's HttpAuthMiddleware.
        if category == CATEGORY_GITHUB and username is not None and password is not None:
            self.http_user = username
            self.http_pass = password

        # Category-specific response parser, used by parse().
        self.parse_ = {
            CATEGORY_GITHUB: self.parse_github,
            CATEGORY_GOOGLE_CODE: self.parse_google_code
        }.get(category)

        if self.parse_ is None:
            # BUG FIX: the original concatenated a str with a tuple here,
            # raising TypeError instead of the intended CloseSpider.
            raise scrapy.exceptions.CloseSpider(
                'Invalid category: must be one of ' + ', '.join(CATEGORIES))

        # For github the query embeds the keywords; for google code the
        # query simply addresses the first issue page (keyword filtering
        # happens later in parse_google_code_issue_detail).
        self.page_num = None  # total number of pages, discovered lazily
        self.page_no = 1      # next page to request (1-based)
        self.issue_url_constructor = {
            CATEGORY_GITHUB: self.github_issue_url,
            CATEGORY_GOOGLE_CODE: self.google_code_issue_url,
        }.get(category)

        # spider properties
        self.start_urls = [self.next_issue_url()]

    def next_issue_url(self):
        """Return the URL of issue page ``self.page_no``."""
        return self.issue_url_constructor(self.page_no)

    # query syntax of github: https://help.github.com/articles/searching-issues/
    # this is a simple version of issue search
    @classmethod
    def construct_github_issue_query(cls, repo, keywords):
        """Build a GitHub issue-search query matching any of *keywords*."""
        return 'repo:%s is:issue %s' % (repo, ' OR '.join(keywords))

    @classmethod
    def github_issue_base_url_template(cls):
        """URL template of the GitHub issue-search API (query, page)."""
        return "https://api.github.com/search/issues?q=%s&page=%d"

    def github_issue_url(self, page_no):
        """Return the GitHub search URL for result page *page_no*."""
        return self.github_issue_base_url_template() % (
            quote(self.construct_github_issue_query(self.repo, self.keywords)),
            page_no)

    @classmethod
    def google_code_issue_page_url_template(cls):
        """URL template of an archived Google Code issue-list page."""
        return r"https://www.googleapis.com/storage/v1/b/google-code-archive/o/v2%2Fcode.google.com%2F{0}%2Fissues-page-{1}.json?alt=media&stripTrailingSlashes=false"

    @classmethod
    def google_code_issue_detail_url_template(cls):
        """URL template of a single archived Google Code issue."""
        return r"https://www.googleapis.com/storage/v1/b/google-code-archive/o/v2%2Fcode.google.com%2F{0}%2Fissues%2Fissue-{1}.json?alt=media&stripTrailingSlashes=false"

    def google_code_issue_url(self, page_no):
        """Return the URL of Google Code issue-list page *page_no*."""
        return self.google_code_issue_page_url_template().format(
            self.repo, page_no)

    def google_code_issue_detail_url(self, issue_id):
        """Return the URL of the Google Code issue *issue_id*."""
        return self.google_code_issue_detail_url_template().format(
            self.repo, issue_id)

    def get_page_num_from_github_issue(self, response):
        """Extract the total page count from the ``Link`` response header.

        GitHub paginates search results via an RFC 5988 ``Link`` header;
        the ``page`` parameter of the ``rel="last"`` link is the total
        number of pages.  Returns 1 when the header is absent (all
        results fit on one page).

        :raises ValueError: if the header is present but no usable
            ``rel="last"`` link can be found.
        """
        links_header = response.headers.get('Link')
        if links_header is None:
            return 1

        # BUG FIX: Scrapy header values are bytes; the original passed
        # str(bytes) to parse_header_links, which prepends the "b'...'"
        # repr to the first URL.  Decode explicitly instead.
        if isinstance(links_header, bytes):
            links_header = links_header.decode('latin-1')

        # BUG FIX: page_num was previously left unbound (UnboundLocalError)
        # when no rel="last" link was present; the asserts are gone as
        # well, since they vanish under ``python -O``.
        page_num = None
        for link in parse_header_links(links_header):
            if link.get('rel') == 'last':
                match = re.search(r'.+page=(\d+)', link['url'])
                if match is None:
                    raise ValueError(
                        'no page number in rel="last" link: %s' % link['url'])
                page_num = int(match.group(1))

        if page_num is None:
            raise ValueError('Link header has no rel="last" link')
        return page_num

    def get_page_num_from_google_code_issue(self, response):
        """Return the page count reported by a Google Code issue page."""
        json_response = json.loads(response.body_as_unicode())
        return int(json_response['totalPages'])

    def get_page_num(self, response):
        """Determine and cache the total number of issue pages."""
        self.page_num = {
            CATEGORY_GITHUB: self.get_page_num_from_github_issue,
            CATEGORY_GOOGLE_CODE: self.get_page_num_from_google_code_issue,
        }[self.category](response)
        self.logger.info('Num of issue pages is %d', self.page_num)
        return self.page_num

    def check_github_search_limit(self, response):
        """Return GitHub's ``(limit, remaining, reset)`` rate-limit triple.

        Values come from the ``X-RateLimit-*`` response headers; ``reset``
        is a UTC epoch timestamp.
        """
        http_headers = response.headers
        limit = int(http_headers['X-RateLimit-Limit'])
        remaining = int(http_headers['X-RateLimit-Remaining'])
        reset = int(http_headers['X-RateLimit-Reset'])
        return limit, remaining, reset

    def parse_github(self, response):
        """Yield an IssueItem per search hit; throttle on rate-limit."""
        json_response = json.loads(response.body_as_unicode())

        for issue in json_response['items']:
            issue_item = IssueItem()
            issue_item['id'] = issue['number']
            issue_item['title'] = issue['title']
            issue_item['link'] = issue['html_url']
            yield issue_item

        # Sleep until the search-API quota resets so the next request
        # does not come back as 403.
        _, remaining, reset = self.check_github_search_limit(response)
        if remaining == 0:
            time_remaining = (datetime.utcfromtimestamp(reset) -
                              datetime.utcnow()).total_seconds()
            if time_remaining >= 0:
                self.logger.info(
                    'API limit reached, sleep for %f seconds', time_remaining)
                time.sleep(time_remaining + 2)

    def parse_google_code_issue_page(self, response):
        """Request the detail page of every issue listed on this page."""
        json_response = json.loads(response.body_as_unicode())

        for issue in json_response['issues']:
            issue_id = int(issue['id'])
            yield scrapy.Request(
                self.google_code_issue_detail_url(issue_id),
                callback=self.parse_google_code_issue_detail)

    def check_keywords(self, paragraph):
        """Return a truthy match if any keyword occurs in *paragraph*.

        Matching is case-insensitive and on whole words, using the
        pattern pre-compiled in ``__init__``.
        """
        return self._keywords_re.search(paragraph)

    def parse_google_code_issue_detail(self, response):
        """Yield an IssueItem if any comment mentions a keyword."""
        json_response = json.loads(response.body_as_unicode())

        # any() short-circuits on the first comment containing a keyword.
        keywords_found = any(
            self.check_keywords(comment['content'])
            for comment in json_response['comments'])

        if keywords_found:
            issue_item = IssueItem()
            issue_item['id'] = json_response['id']
            issue_item['title'] = json_response['summary']
            issue_item[
                'link'] = "https://code.google.com/archive/p/%s/issues/%d" % (
                    self.repo, issue_item['id'])
            yield issue_item

    def parse_google_code(self, response):
        """Dispatch a Google Code response to the page or detail parser."""
        if re.search(r'(.+)issues-page(.+)', response.request.url):
            yield from self.parse_google_code_issue_page(response)
        else:
            yield from self.parse_google_code_issue_detail(response)

    def parse(self, response):
        """Default callback: parse the page, then paginate until done."""
        if self.page_num is None:
            self.get_page_num(response)

        yield from self.parse_(response)

        if self.page_no < self.page_num:
            self.page_no += 1
            yield scrapy.Request(self.next_issue_url(), callback=self.parse)
