import requests
import json
import datetime
import time
import math
import os
from zipfile import is_zipfile

# Shared auth header for every GitHub API request in this script.
# SECURITY NOTE(review): this is a hard-coded personal access token committed
# to source control -- it should be revoked and loaded from an environment
# variable or a config file outside version control instead.
headers = {
		"Authorization": "token 46a8dd2ec922c9a006368c6756d46c9bba40c6d0"
	}

def _get_json_with_retry(url):
	"""
	GET *url* with the module-level auth headers and return the decoded
	JSON body, sleeping 10 minutes and retrying whenever GitHub answers
	403 (API rate limit exhausted).
	:param url: full request URL
	:return: parsed JSON response (dict)
	"""
	while True:
		r = requests.get(url, headers=headers)
		if r.status_code == 403:
			# Rate limit exhausted: wait 10 minutes, then retry.
			time.sleep(60 * 10)
		else:
			return r.json()


def get_download_url_of_oneday(datestr):
	"""
	Fetch every search-result page for Java repositories (>=1 star) created
	on one day and dump each page to Jsons/<page>__<datestr>.json.
	:param datestr: the date to query, e.g. '2018-10-24'
	:return: None
	"""
	url = ('https://api.github.com/search/repositories'
	       '?q=language:java stars:>=1 created:%s..%s&per_page=100'
	       % (datestr, datestr))
	res = _get_json_with_retry(url)
	total_count = res['total_count']
	print('Total count: ', total_count)
	with open('Jsons/1__%s.json' % datestr, 'w', encoding='utf-8') as fw:
		json.dump(res, fw, indent=4)
	if total_count > 100:
		# The search API serves at most 1000 results (10 pages of 100),
		# hence the cap of page index 10 (range upper bound 11).
		top = math.ceil(total_count / 100) + 1
		for i in range(2, top if top <= 11 else 11):
			page_url = '%s&page=%d' % (url, i)
			print(page_url)
			page_res = _get_json_with_retry(page_url)
			with open('Jsons/%d__%s.json' % (i, datestr), 'w', encoding='utf-8') as fw:
				json.dump(page_res, fw, indent=4)


def get_download_url():
	"""
	Walk every day from 2009-01-01 through 2016-12-31 (inclusive) and fetch
	that day's repository metadata via get_download_url_of_oneday.
	:return: None
	"""
	one_day = datetime.timedelta(days=1)
	current = datetime.date(2009, 1, 1)
	last = datetime.date(2016, 12, 31)
	while current <= last:
		day_str = current.strftime('%Y-%m-%d')
		get_download_url_of_oneday(day_str)
		print('%s download finished.' % day_str)
		current += one_day

def download_repo(url, name):
	'''
	Download one repository's zipball into E:\\ZipRepos, skipping archives
	already present and archives larger than 10 MB.
	:param url: repository API url (zipball endpoint is url + '/zipball')
	:param name: repository name; '/' is replaced by '+' in the file name
	:return: None
	'''
	file_name = r"E:\ZipRepos" + os.sep + name.replace("/","+") + ".zip"
	# is_zipfile returns False for a missing or truncated file, so an
	# earlier partial download gets retried rather than skipped.
	if is_zipfile(file_name):
		print("%s has existed." %name)
		return
	while True:
		r = requests.get('%s/zipball' %url, stream=True)
		if r.status_code == 403:
			# API rate limit exhausted: wait 20 minutes, then retry.
			print("Rate limit, sleep 20 minute.")
			time.sleep(60 * 20)
			continue
		# Filter out archives larger than 10 MB (0 when the server
		# sends no Content-Length, i.e. no filtering in that case).
		if int(r.headers.get('Content-Length', 0)) > 1024 * 1024 * 10:
			print("Filter %s because its size greater than 10M." %name)
			return
		# `with` guarantees the handle is flushed and closed even if a
		# chunk read raises mid-download (the original leaked it then).
		with open(file_name, 'wb') as f:
			for chunk in r.iter_content(chunk_size=1024):
				if chunk:
					f.write(chunk)
		print('Download from %s and write to E:\ZipRepos/%s done.' % (url, name))
		return


def download_repos(url_and_name, start_index, end_index):
	"""
	Download repositories in batch.
	:param url_and_name: sequence of (download url, repository name) pairs
	:param start_index: index of the first entry to download
	:param end_index: index one past the last entry to download
	:return: None
	"""
	file_count = len(url_and_name)
	# enumerate from start_index + 1 so the progress message is 1-based;
	# the original printed i + start_index from 0, i.e. "The 0 / N" first.
	for i, (repo_url, repo_name) in enumerate(url_and_name[start_index:end_index],
	                                          start=start_index + 1):
		download_repo(repo_url, repo_name)
		print("The %d / %d zip file has been downloaded." %(i, file_count))
	print('Download finished.')

def analyze_repositories_json(path):
	"""
	Parse one JSON file produced by get_download_url and extract, for each
	repository item, its API url and a filesystem-safe name.
	:param path: path of the JSON file to parse
	:return: list of (url, full_name with '/' replaced by '__') tuples
	"""
	res = []
	# Encoding belongs on open(): json.load() dropped its `encoding`
	# parameter in Python 3.9, so passing it there raises TypeError.
	with open(path, 'r', encoding='utf-8') as fr:
		j = json.load(fr)
	try:
		for item in j['items']:
			res.append((item['url'], item['full_name'].replace('/', '__')))
	except (KeyError, TypeError):
		# Error responses (e.g. rate-limit pages) carry no 'items' key;
		# keep whatever was collected, matching the original best-effort.
		pass
	return res

def analyze_repositories_jsons():
	"""
	Parse every JSON file under Jsons/ and write the combined list of
	(url, name) pairs to RepoJson/all_repo.json.
	:return: None
	"""
	res = []
	file_list = os.listdir('Jsons')
	file_count = len(file_list)
	# 1-based counter so the final file reads "N / N"; the original
	# 0-based index showed "N-1 / N" for the last file.
	for i, file in enumerate(file_list, start=1):
		path = os.path.join('Jsons', file)
		res.extend(analyze_repositories_json(path))
		print('Finished update from: %s, %d / %d' %(file, i, file_count))
	with open(os.path.join('RepoJson', 'all_repo.json'), 'w') as fw:
		json.dump(res, fw, indent=4)



if __name__ == '__main__':
	# Load the previously assembled (url, name) list, then download all of it.
	repo_list_path = os.path.join('RepoJson', 'all_repo.json')
	with open(repo_list_path, 'r') as fr:
		url_and_name = json.load(fr)
	download_repos(url_and_name, start_index=0, end_index=len(url_and_name))
