'''
One-off utility script: scrapes Go-language repository URLs from GitHub
and Google Code search results and writes them to files under repo_urls/.
'''

from lib.BeautifulSoup import BeautifulSoup
from urllib2 import urlopen
import re

# Scrape repository URLs from GitHub search results

repo_urls = []
soup = BeautifulSoup(urlopen('https://github.com/search?type=Repositories&q=language%3AGo').read())
results = soup.findAll(attrs={'class': 'result'})
pages = soup.findAll(attrs={'class': 'pager_link'})

for result in results:
  repo_urls.append('https://github.com' + str(result.find('a')['href']))

# Result pages to paginate through
page_count = int(pages[-1].string)
print str(page_count) + ' github result pages total'

for i in range(2, page_count+1):
  print 'parsing github result page ' + str(i)
  soup = BeautifulSoup(urlopen('https://github.com/search?type=Repositories&q=language%3AGo&start_value=' + str(i)))
  results = soup.findAll(attrs={'class': 'result'})
  for result in results:
    repo_urls.append('https://github.com' + str(result.find('a')['href']))

output = open('repo_urls/github.txt', 'w')
for url in repo_urls:
  output.write(url + '\n')

# Scrape repository URLs from Google Code search results

repo_urls = set()
repo_pages = set()
soup = BeautifulSoup(urlopen('https://code.google.com/hosting/search?q=label:Go').read())
results = soup.findAll(href=re.compile('/p/.*/'))
pages = soup.find('td', text=re.compile('Results 1'))

for result in results:
  repo_pages.add('https://code.google.com' + str(result['href']) + 'source/checkout')

# Result pages to paginate through
page_count = int(pages.string.split(' ')[-1])/10 + 1
print str(page_count) + ' googlecode result pages total'

for i in range(1, page_count):
  print 'parsing googlecode result page ' + str(i)
  soup = BeautifulSoup(urlopen('https://code.google.com/hosting/search?q=label:Go&start=' + str(i*10)))
  results = soup.findAll(href=re.compile('/p/.*/'))
  for result in results:
    repo_pages.add('https://code.google.com' + str(result['href']) + 'source/checkout')

count = 0
for repo_page in repo_pages:
  count += 1
  print 'parsing repo page ' + str(count)
  soup = BeautifulSoup(urlopen(repo_page).read())
  #print repo_page
  checkoutcmd = soup.findAll('tt', id='checkoutcmd')[0].contents
  tag = re.compile(r'<.*?>')
  checkoutcmd = map(lambda x: tag.sub('', str(x)), checkoutcmd)
  checkoutcmd = ''.join(checkoutcmd)
  #print checkoutcmd
  repo_url = filter(lambda x: x.startswith('http'), checkoutcmd.split(' '))[0]
  repo_url = repo_url.replace('/trunk/', '')
  repo_urls.add(repo_url)

output = open('repo_urls/googlecode.txt', 'w')
for url in repo_urls:
  output.write(url + '\n')

