#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ResoucesPrj.py
# Author: gfcocos

# Search query and language filter used for the GitHub repository search.
search_ = 'lua'
language = 'c++'

import os,re,sys,math
import requests
import json
import re
import threading
import time
import urllib2
from   urllib     import urlopen
from   Queue      import Queue
from   threading  import Thread
from   time       import strftime
from   git        import Repo


threadNum=10         # number of scraper worker threads
threadList=[]        # phase-1 (page-scraping) thread list
threadList2=[]       # phase-2 (cloning) thread list



#=========================== repository totals =============================
g_total_repositories     = 0               # total number of repositories found
g_current_repositories   = 0               # number of repositories whose clone has started
# NOTE(review): `global` at module scope is a no-op; the lock itself is
# created in main() and this line only documents the name.
global g_mutex_current_repositories        # mutex guarding g_current_repositories
g_repositories = []                        # list of repository info dicts ({'addr', 'local_dir'})




# Log file: one fresh, timestamped log per run.
logfilename = search_+'_log%s.txt'%(strftime('%y_%m_%d_%H_%M_%S'))
if os.path.isfile(logfilename):
   os.remove(logfilename)
logfile = open(logfilename,'a');
logfile.write("===========Start Clone==============\n")

# https://github.com/search?p=2&q=game&ref=cmdform&type=Repositories

def url_user_agent(url):
    """Fetch *url* through the local HTTP proxy and return the page body.

    Installs an urllib2 opener that routes HTTP traffic through the local
    proxy and sends a browser-like User-Agent header (GitHub answers 403
    to the default urllib2 agent).

    Returns the response body as a string, or None when the request was
    redirected away from *url* (callers treat None as a failed download).
    Network errors propagate to the caller as exceptions.
    """
    # Route requests through a local proxy (presumably a local HTTP
    # tunnel on 8087 -- TODO confirm it is running).
    proxy = {'http':'127.0.0.1:8087'}
    proxy_support = urllib2.ProxyHandler(proxy)
    opener = urllib2.build_opener(proxy_support)
    urllib2.install_opener(opener)

    # Impersonate a desktop browser to avoid 403 Forbidden responses.
    i_headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.48'}
    req = urllib2.Request(url,headers=i_headers)
    html = urllib2.urlopen(req)
    try:
        # A different final URL means we were redirected (login page,
        # rate-limit interstitial, ...) -- treat that as a failure.
        if url == html.geturl():
            return html.read()
        print 'urllib2.geturl error'
        return None
    finally:
        # Always release the connection (the original leaked it).
        html.close()

def get_repos_adders_with_page(language,page_index,search_repository,forks):
  global g_repositories
  global mutex

  repositories_url = []  
  download_url = 'https://github.com/search?l=%s&p=%s&q=%s+forks:>%s&ref=searchresults&type=Repositories'%(language,page_index,search_repository,forks)
  # <h3 class="repolist-name">
  #   <a href="/okamstudio/godot" class="css-truncate css-truncate-target">okamstudio/godot</a>
  # </h3>
  print download_url
  html_date = []
  while True:
    try:
      html_date = url_user_agent(download_url)  
    except Exception, e:
      print e
    else:
      break
    finally:
      pass

  if html_date:
    print html_date
    find_reps = re.findall('<a href=".*" class="css-truncate css-truncate-target">.*</a>',html_date)
    for it in find_reps:
      # https://github.com/NYTimes/objective-c-style-guide.git
      repos = re.sub('<a href="|" class.*','',it)
      dic = {}
      dic['addr'] = 'https://github.com'+repos+'.git'
      dic['local_dir'] = '.'+repos
      repositories_url.append(dic)
    # 取得锁
    mutex.acquire()
    g_repositories += repositories_url
    # 释放锁
    mutex.release()
  else:
    print '[*] [%s]'%strftime('%X')+' [download_url %s error]'%download_url

def getpages_and_totalrepositories(search_repository,language='c++',forks='10'):
  # https://github.com/search?l=C%2B%2B&q=game+forks%3A%3E10&ref=searchresults&type=Repositories
  # <span class="counter">68</span>
  global g_total_repositories
  download_url = 'https://github.com/search?l=%s&q=%s+forks:>%s&ref=searchresults&type=Repositories'%(language,search_repository,forks)
  print download_url
  try:
    r = url_user_agent(download_url)
  except Exception, e:
    print e
  else:
    obj = re.search('<span class="counter">\d+</span>',r)
    g_total_repositories = int(re.sub('<span class="counter">|</span>','',obj.group()))
    return g_total_repositories/10



class WorkThread(Thread):
    """Worker that drains a queue of page indexes and scrapes each page.

    Each item pulled from *q* is a page index passed to
    get_repos_adders_with_page together with the fixed search
    parameters.  The thread exits once the queue is empty.
    """
    def __init__(self,q,language,search_name,forks):
        Thread.__init__(self)
        self.q=q
        self.language=language
        self.search_repos = search_name
        self.forks = forks
    def run(self):
        from Queue import Empty  # local import: module only imports Queue
        while True:
            # get_nowait() instead of empty()+get(): with several
            # workers the queue can be drained between the two calls,
            # which would leave the blocking get() stuck forever.
            try:
                page_index = self.q.get_nowait()
            except Empty:
                break
            get_repos_adders_with_page(self.language,page_index,self.search_repos,self.forks)

 
class WorkThread2(Thread):
    '''
    work thread
    '''
    def __init__(self,q):
        Thread.__init__(self)
        self.q=q
    def run(self):
        global g_mutex_current_repositories
        global g_current_repositories
        while True:
            if self.q.empty()==True:
                break
            item_ = self.q.get()
            #更改当前索引值
            g_mutex_current_repositories.acquire()
            g_current_repositories += 1
            #输出调试信息
            # print '[*] [%s] [current:%d,total:%d]\n[clone %s]'%(strftime('%X'),g_current_repositories,g_total_repositories,item_['git_url'])
            g_mutex_current_repositories.release()
            try:
              print item_
              Repo.clone_from(item_['addr'],item_['local_dir'])
            except Exception, e:
              print e
              # logfile.write(item_['git_url']+'\t'+item_['repo_dir']+'\n')
            else:
              pass
            finally:
              pass
            #print '[*] [%s] [clone %s] [end:%s]'%(strftime('%X'),item_['git_url'],r)
def key_for_line(line):
    """Sort key for a repository record: its 'forks' value."""
    forks_value = line['forks']
    return forks_value

def main():
    global mutex
    global g_mutex_current_repositories
     # 创建一个锁
    mutex = threading.Lock()
    
    #创建当前索引锁
    g_mutex_current_repositories = threading.Lock()

    '''
    main function
    '''     
    print '[+] [%s] [Start]'%strftime('%X')
    print '[*] [%s] [link email:gfcocos@gmail.com]'%strftime('%X')
    print '[*} [%s] [download the repositories...]'%(strftime('%X'))
    while True:
      pages = getpages_and_totalrepositories(search_,language)
      if pages:
        break
      else:
        time.sleep(5)
    print pages
    q=Queue(maxsize=0)
    for i in xrange(1,2):
      q.put(i)

    print '[*] [%s] [get pages succeed]'%strftime('%X')

    for i in xrange(threadNum):
        t=WorkThread(q,search_,language,5)
        threadList.append(t)
    
    for i in threadList:
        i.start()
    
    for i in threadList:
        i.join()

    print len(g_repositories)

    # threadList = []
    #download .... respons
    q2 = Queue(maxsize=0)
    for iten in g_repositories:
      q2.put(iten)

    for i in xrange(100):
        t=WorkThread2(q2)
        threadList2.append(t)
    
    for i in threadList2:
        i.start()
    
    for i in threadList2:
        i.join()

    print '\n[+] [%s] [End] [All Done!]'%strftime('%X')
    
    logfile.write("===========Start Clone==============\n")
    logfile.close()

    #genarat html doc
    # html_file_name = search_+'%s.html'%(strftime('%y_%m_%d_%H_%M_%S'))
    # f=open(html_file_name,'ab')
    # f.write('<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\n')
    # f.write('<title>Freebuf Tools List</title>\n')
    # f.write('<center><h1><b>Freebuf Tools List</b></h1>\n'+'Time:'+str(strftime("%Y-%b-%d %X"))+'  Count:'+str(len(g_repositories))+'</center><hr/>\n<h5>\n')
    # sorted(g_repositories,key=key_for_line)
    # for item in g_repositories:
    #   f.write(unicode(item['description']).encode('utf-8') +'\n</br>\n')
    #   f.write(unicode(item['forks_watchers']).encode('utf-8') +'\n</br>\n')
    # f.write('</h5><hr/><center><a href="http://hi.baidu.com/l34rn">Powered By L34Rn</a></center>')
    # f.close()

if __name__=='__main__':
    main()
