#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ResoucesPrj.py
# Author: gfcocos

search_ = 'game'
# language = 'python'

import os,re,sys,math
import requests
import json
import re
import threading
import time
import urllib2
from   urllib     import urlopen
from   Queue      import Queue
from   threading  import Thread
from   time       import strftime
from   git        import Repo


threadNum=10         # number of page-fetch worker threads
threadList=[]        # thread list: page-fetch workers (WorkThread)
threadList2=[]       # thread list: clone workers (WorkThread2)



#=========================== repository totals =============================
g_total_repositories     = 0               # total number of repositories reported by the search API
g_current_repositories   = 0               # number of repositories handed to a clone worker so far
global g_mutex_current_repositories        # mutex guarding the counter above (NOTE: 'global' at module level is a no-op; the lock is created in main())
g_repositories = []                        # list of per-repository metadata dicts built by git_clone_url





# Log file: records clone failures (git_url + target dir), name stamped with the start time.
logfilename = search_+'_log%s.txt'%(strftime('%y_%m_%d_%H_%M_%S'))
if os.path.isfile(logfilename):
   os.remove(logfilename)
logfile = open(logfilename,'a');
logfile.write("===========Start Clone==============\n")



def url_user_agent(url):
    """Fetch *url* through the local HTTP proxy and return the response body.

    Installs a urllib2 opener that routes HTTP via 127.0.0.1:8087 (a local
    GoAgent-style proxy) and sends a browser-like User-Agent header so the
    GitHub API does not answer 403 Forbidden.

    Returns the page body as a string, or None when the server redirected
    us away from the requested URL.  Network errors propagate to the
    caller (git_clone_url retries on them).
    """
    # Route requests through the local proxy.
    proxy = {'http':'127.0.0.1:8087'}
    proxy_support = urllib2.ProxyHandler(proxy)
    opener = urllib2.build_opener(proxy_support)
    urllib2.install_opener(opener)

    # Browser-like User-Agent: works around 403 responses to script clients.
    i_headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.48'}
    req = urllib2.Request(url,headers=i_headers)
    html = urllib2.urlopen(req)
    try:
        if url == html.geturl():
            return html.read()
        # We were redirected somewhere else -- treat as a failed fetch.
        print 'urllib2.geturl error'
        return None
    finally:
        # Always close the response so the socket is not leaked
        # (the original returned without closing it).
        html.close()

def git_clone_url(search_repository,item,language=''):
  global g_repositories
  global mutex
  download_url = ''
  repositories_url = []  
  if language:
      download_url = 'https://api.github.com/search/repositories?q='+search_repository+'+language:'+language+'&page='+str(item)
  else:
      #https://api.github.com/search/repositories?q=tetris+sort:stars&order=desc
      download_url = 'https://api.github.com/search/repositories?q='+search_repository+'+sort:stars'+'&page='+str(item)
  doc = []
  while True:
    try:
      doc = url_user_agent(download_url)  
    except Exception, e:
      print e
    else:
      pass
    finally:
      pass
    
    if doc:
      break
    else:
      print '[*] [%s]'%strftime('%X')+' [download_url %s error]'%download_url  
      time.sleep(5) # 休眠5秒
  if doc:
    repoItem = json.loads(doc)
    for item in repoItem['items']:
      dictionary = {}
      dictionary['git_url']  = item['clone_url']
      dictionary['language'] = item['language']
      dictionary['forks']    = item['forks_count']
      if(item['language']):
        dictionary['language']    = item['language']
      else:
        dictionary['language']    = 'None'

      dictionary['repo_dir']      = './'+search_repository+'/'+dictionary['language']+'/'+str(item['forks_count']).zfill(5)+re.sub('/','-',re.sub('https://','',item['clone_url']))
      dictionary['description']   = '<dt><a href="%s" target="_blank">%s</a></dt>'%(dictionary['repo_dir'],item['description'])
      dictionary['forks_watchers']         = '<dt>forks:%s watchers:%s</dt>'%(str(item['forks_count']),str(item['watchers']))
      repositories_url.append(dictionary)
    print '[*] [%s]'%strftime('%X')+' [download_url %s]'%download_url+'[len:%s]'%str(len(repositories_url))
    # 取得锁
    mutex.acquire()
    g_repositories += repositories_url
    # 释放锁
    mutex.release()
  else:
    print '[*] [%s]'%strftime('%X')+' [download_url %s error]'%download_url

def init(search_repository,language=''):
  global g_total_repositories
  download_url = ''  
  if language:
    download_url = 'https://api.github.com/search/repositories?q='+search_repository+'+language:'+language
  else:
    download_url = 'https://api.github.com/search/repositories?q='+search_repository+'+sort:stars'
  print download_url
  try:
    r = url_user_agent(download_url)
  except Exception, e:
    print e
  else:
    print '[*] [error]'
  finally:
    print '[*] [error]'
  
  if r:
    repoItem = json.loads(r)
    g_total_repositories = repoItem['total_count']
    return math.ceil(repoItem['total_count']/30.0)
  else:
    print '[*] [%s]'%strftime('%X')+' [download_url %s error]'%download_url    



class WorkThread(Thread):
    '''
    Page-fetch worker: drains a queue of page numbers, calling
    git_clone_url for each one until the queue is empty.
    '''
    def __init__(self,q,language):
        # NOTE: despite the name, main() passes the search keyword here;
        # it is forwarded as git_clone_url's search_repository argument.
        Thread.__init__(self)
        self.q=q
        self.language=language
    def run(self):
        # get_nowait() instead of empty()+get(): the original check-then-get
        # could block forever when another worker took the last item between
        # the two calls.
        from Queue import Empty
        while True:
            try:
                page = self.q.get_nowait()
            except Empty:
                break
            git_clone_url(self.language,page)

 
class WorkThread2(Thread):
    '''
    work thread
    '''
    def __init__(self,q):
        Thread.__init__(self)
        self.q=q
    def run(self):
        global g_mutex_current_repositories
        global g_current_repositories
        while True:
            if self.q.empty()==True:
                break
            item_ = self.q.get()
            #更改当前索引值
            g_mutex_current_repositories.acquire()
            g_current_repositories += 1
            #输出调试信息
            print '[*] [%s] [current:%d,total:%d]\n[clone %s]'%(strftime('%X'),g_current_repositories,g_total_repositories,item_['git_url'])
            g_mutex_current_repositories.release()
            try:
              Repo.clone_from(item_['git_url'], item_['repo_dir'])
            except Exception, e:
              logfile.write(item_['git_url']+'\t'+item_['repo_dir']+'\n')
            else:
              pass
            finally:
              pass
            #print '[*] [%s] [clone %s] [end:%s]'%(strftime('%X'),item_['git_url'],r)
def key_for_line(line):
    """Sort key for the HTML report: order repositories by fork count."""
    forks = line['forks']
    return forks

def main():
    global mutex
    global g_mutex_current_repositories
     # 创建一个锁
    mutex = threading.Lock()
    
    #创建当前索引锁
    g_mutex_current_repositories = threading.Lock()

    '''
    main function
    '''     
    print '[+] [%s] [Start]'%strftime('%X')
    print '[*] [%s] [link email:gfcocos@gmail.com]'%strftime('%X')
    print '[*} [%s] [download the repositories...]'%(strftime('%X'))
    while True:
      pages = init(search_)
      if pages:
        break
      else:
        time.sleep(5)

    q=Queue(maxsize=0)
    for i in xrange(1,int(pages)+1):
      q.put(i)

    print '[*] [%s] [get pages succeed]'%strftime('%X')

    for i in xrange(threadNum):
        t=WorkThread(q,search_)
        threadList.append(t)
    
    for i in threadList:
        i.start()
    
    for i in threadList:
        i.join()

    print len(g_repositories)

    # threadList = []
    #download .... respons
    q2 = Queue(maxsize=0)
    for iten in g_repositories:
      q2.put(iten)

    for i in xrange(100):
        t=WorkThread2(q2)
        threadList2.append(t)
    
    for i in threadList2:
        i.start()
    
    for i in threadList2:
        i.join()

    print '\n[+] [%s] [End] [All Done!]'%strftime('%X')
    
    logfile.write("===========Start Clone==============\n")
    logfile.close()

    #genarat html doc
    html_file_name = search_+'%s.html'%(strftime('%y_%m_%d_%H_%M_%S'))
    f=open(html_file_name,'ab')
    f.write('<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\n')
    f.write('<title>Freebuf Tools List</title>\n')
    f.write('<center><h1><b>Freebuf Tools List</b></h1>\n'+'Time:'+str(strftime("%Y-%b-%d %X"))+'  Count:'+str(len(g_repositories))+'</center><hr/>\n<h5>\n')
    sorted(g_repositories,key=key_for_line)
    for item in g_repositories:
      f.write(unicode(item['description']).encode('utf-8') +'\n</br>\n')
      f.write(unicode(item['forks_watchers']).encode('utf-8') +'\n</br>\n')
    f.write('</h5><hr/><center><a href="http://hi.baidu.com/l34rn">Powered By L34Rn</a></center>')
    f.close()


if __name__=='__main__':
    main()
