'''
Checks out source code repositories from seeds specified in defaults.cfg.
Stores code in dump specified in config file.
'''

# sys is imported because 'print' is a statement in Python 2 and
# cannot be used inside a lambda (sys.stdout.write can)
import sys
# Superior to ConfigParser
from lib.configobj import ConfigObj
# HTML parsing
from lib.BeautifulSoup import BeautifulSoup
from urllib2 import urlopen
import re
# For executing rcs clones/checkouts
import os
from repohandlers import RepoHandler
# For handling urls without schemes
from urlparse import urlparse
import logging
import logging.config

# Read config information
# NOTE(review): 'tools.cfg' is resolved relative to the current working
# directory, so the script presumably must be launched from its own
# directory -- TODO confirm.
cfg = ConfigObj('tools.cfg')
# seeds: pages to scrape for repository links
seeds = cfg['collect']['seeds']
# repo_urls: files of pre-scraped repository urls, one per line
repo_urls = cfg['collect']['repo_urls']
# repos: substrings identifying supported repository hosts; also joined
# into repo_regex below as regex alternatives
repos = cfg['collect']['repos']
# logcfg: path to a logging.config.fileConfig-style ini file
logcfg = cfg['collect']['logcfg']
# dump: absolute path of ../sources (relative to this file) with a
# trailing slash appended
dump = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../sources')) + '/'

# Logging - debug, info, warn, error, critical
logging.config.fileConfig(logcfg)
logger = logging.getLogger('root')

# Normalized urls that have already been handed to a visitor function
encountered_urls = set()

def visit(url, func):
  '''Normalize url and apply func to it exactly once.

  Trailing slashes are stripped and a missing scheme defaults to
  http, so equivalent spellings of a page collapse to one url.
  Already-seen urls are silently skipped.
  '''
  # http://.../path and http://.../path/ are the same page
  normalized = url.rstrip('/')
  if not urlparse(normalized).scheme:
    normalized = 'http://' + normalized
  if normalized in encountered_urls:
    return
  # func runs first; the url is only recorded after it returns
  func(normalized)
  encountered_urls.add(normalized)

# Get HTML of each seed page
# Python 2 map returns a list, so every seed page is fetched eagerly
# here; a failing urlopen aborts the whole script with an exception.
soups = map(lambda url: BeautifulSoup(urlopen(url).read()), seeds)

# Create regex for whether/not to checkout link: an alternation of the
# configured repo fragments.  NOTE(review): fragments are not
# re.escape()d, so any regex metacharacters in them are interpreted --
# presumably intentional; verify against tools.cfg.
repo_regex = re.compile('|'.join(repos))
handler = RepoHandler(dump)
if not handler.supports(repos):
  # Call form instead of the Python-2-only 'raise Exception, msg'
  # statement -- identical behavior, forward-compatible syntax.
  raise Exception('Handler does not support all repos: ' + str(repos))
def checkout(link):
  '''Clone or update the repository behind link via the shell.

  Finds the first configured repo fragment contained in link, asks the
  handler for the matching clone/checkout command and executes it from
  inside the dump directory.  Links matching no fragment are skipped.
  '''
  logger.debug('Checking ' + link)
  for repo in repos:
    if repo in link:
      # NOTE(review): link is interpolated into a shell command
      # unescaped -- a crafted url could inject shell commands.
      cmd = 'cd ' + dump + '/; ' + handler.getCommand(link)
      logger.debug('Cmd returned: ' + cmd)
      # Bug fix: previously the bare 'cd <dump>' command was executed
      # via os.system even when no repo fragment matched the link.
      sts = os.system(cmd)
      logger.debug('Exit status: ' + str(sts))
      break

# For each page, visit appropriate links and clone/update repositories
for soup in soups:
  anchors = soup.findAll('a')
  links = map(lambda a: a['href'], anchors)
  #---------------------------------------------------
  # Hack :( since Go packages page has code.google.com
  # in href which does not tell whether SVN or Hg repo
  #---------------------------------------------------
  # Anchor text is scanned as well, since it may carry the fuller url
  links.extend(map(lambda a: a.string, anchors))
  # 'is not None' instead of '!= None' (PEP 8); a.string can be None
  links = filter(lambda l: l is not None and repo_regex.search(l), links)
  for link in links:
    visit(link, checkout)

# Also visit scraped repo urls (one url per line in each listed file)
for url_file in repo_urls:
  # 'with' guarantees the handle is closed; the original never closed
  # it and shadowed the 'file' builtin with its loop variable.
  with open(url_file, 'r') as f:
    for line in f:
      visit(line.strip(), checkout)
