# -*- coding: utf-8 -*-
import http.client
import json
import time
import random
import socket
import urllib.request as api_request
import urllib.error as api_error

from crawler.tools import get_github_access_token, get_random_user_agent


class GithubRepoCrawler:
    """Crawls repository, commit and issue metadata via the GitHub REST API.

    The constructor only parses the repo URL; each ``crawl_*`` method performs
    the actual network requests. Transient failures (rate limiting, dropped
    connections, timeouts) are retried forever with a fixed 10-second delay,
    matching the original behavior.
    """

    # Seconds to wait before retrying a failed/rate-limited request.
    _RETRY_DELAY = 10

    def __init__(self, repo_url):
        """Store ``repo_url`` and split out its trailing ``<user>/<repo>`` parts."""
        self.repo_url = repo_url
        parts = repo_url.split("/")
        self.repo_user = parts[-2]
        self.repo_name = parts[-1]

    def _request_json(self, url):
        """GET ``url`` with auth headers and return the decoded JSON body.

        Propagates any network/HTTP exception to the caller. Uses the
        response as a context manager so the underlying socket is always
        closed (the original code leaked every response object).
        """
        header = {
            'User-Agent': get_random_user_agent(),
            'Authorization': f'token {get_github_access_token()}',
        }
        request = api_request.Request(url, headers=header)
        with api_request.urlopen(request, timeout=30) as response:
            return json.load(response)

    def crawl_repo_info(self):
        """Fetch the repository's metadata object from the API.

        Returns:
            The parsed JSON object on success, or ``None`` when the API
            answers with a non-429 HTTP error (e.g. the repo does not exist).

        Retries forever on HTTP 429 and on socket errors. The retry is a
        loop rather than the original's self-recursion, so a long outage can
        no longer blow the stack with ``RecursionError``.
        """
        url = f"https://api.github.com/repos/{self.repo_user}/{self.repo_name}"
        print(url)
        while True:
            try:
                return self._request_json(url)
            except api_error.HTTPError as e:
                if e.code == 429:  # rate-limited: back off and retry
                    print(f"错误429: 爬取过快，尝试重新爬取Repo信息 {self.repo_url}")
                    time.sleep(self._RETRY_DELAY)
                else:  # repo missing or other hard HTTP failure: give up
                    print(f"错误{e.code}: 无法通过API请求获取Repo信息 {self.repo_url}")
                    return None
            except socket.error:  # transient network problem: retry
                print(f"出错啦! 尝试重新爬取Repo信息 {self.repo_url}")
                time.sleep(self._RETRY_DELAY)

    def _crawl_paginated(self, endpoint, label, max_pages=None):
        """Collect items from a paginated list endpoint of this repository.

        Args:
            endpoint: path segment under the repo, e.g. ``"commits"``.
            label: human-readable name interpolated into retry log messages.
            max_pages: stop after this many pages; ``None`` means keep going
                until an empty page is returned.

        Returns:
            The concatenated list of items across all fetched pages.

        Every transient error sleeps and retries the same page, exactly as
        the original per-method loops did. Exception order matters:
        ``HTTPError`` must precede its superclass ``URLError``.
        """
        items = []
        page = 1
        while True:
            url = (f"https://api.github.com/repos/{self.repo_user}/"
                   f"{self.repo_name}/{endpoint}?page={page}&per_page=100")
            print(url)
            try:
                batch = self._request_json(url)
            except api_error.HTTPError:
                print(f"网络异常，尝试重新爬取{label}")
                time.sleep(self._RETRY_DELAY)
                continue
            except api_error.URLError:
                print(f"爬取过快，尝试重新爬取{label}")
                time.sleep(self._RETRY_DELAY)
                continue
            except http.client.IncompleteRead:
                print(f"数据读取异常，尝试重新爬取{label}")
                time.sleep(self._RETRY_DELAY)
                continue
            except http.client.RemoteDisconnected:
                print(f"连接异常，尝试重新爬取{label}")
                time.sleep(self._RETRY_DELAY)
                continue
            except TimeoutError:
                print(f"超时异常，尝试重新爬取{label}")
                time.sleep(self._RETRY_DELAY)
                continue

            # An empty page means pagination is exhausted.
            if not batch:
                break
            items.extend(batch)
            page += 1
            # Honor the optional hard page cap (commits use 10 pages max).
            if max_pages is not None and page > max_pages:
                break
        return items

    def crawl_commits_info(self):
        """Fetch the repo's commit list, capped at 10 pages (1000 commits)."""
        commits_info_list = self._crawl_paginated("commits", "Commits", max_pages=10)
        print(f"{self.repo_url} commits数目: {len(commits_info_list)}（最大暂设为1000）")
        return commits_info_list

    def crawl_issues_info(self):
        """Fetch every page of the repo's open issues.

        Bug fix vs. the original: the RemoteDisconnected/TimeoutError retry
        messages here mistakenly said "Commits"; the shared helper now labels
        all of them "Issues".
        """
        issues_info_list = self._crawl_paginated("issues", "Issues")
        print(f"{self.repo_url} issues数目: {len(issues_info_list)}")
        return issues_info_list
