#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.


"""spiral's deb packages crawler module.

spiral's crawler gets packages metadata using germinate and writes its output
in CSV format to be imported into the Google App Engine datastore.
"""

__author__ = 'stratus@google.com (Gustavo Franco)'

import sys
sys.path.append('/usr/lib/germinate/')
from Germinate import germinator
from Germinate.Archive import tagfile
import apt_pkg
import csv
import gzip
import os
import string
import syck
import urllib2


class Crawler(object):
  """Class containing spiral's crawl methods.

  The crawl steps cache their results on the instance (self.contents,
  self.votes, self.removals) so Writecsv() can later combine them into
  one CSV row per package.
  """

  def Packages(self, mirror, source_mirror, dists, components, arch):
    """Get Packages files from a given deb repository.

    Feeds germinate with the repository indexes; germinate writes the
    per-dist/component Packages lists into the current directory, where
    Writecsv() later picks them up.

    Args:
      mirror: mirror URI.
      source_mirror: source mirror URI.
      dists: list of distribution releases (ie: hardy).
      components: list of distribution release components (ie: main).
      arch: architecture (ie: amd64)
    """
    germ = germinator.Germinator()
    tagfile.TagFile(mirror, source_mirror).feed(germ, dists, components, arch)

  def Contents(self, contentsuri):
    """Retrieve the list of files in the packages listed in a given Contents.

    Args:
      contentsuri: URI for a gzip-compressed Contents index file.

    Returns:
      pkg_to_files: dict mapping package name to the list of file paths
      shipped by that package.
    """
    pkg_to_files = {}
    header = True

    # Spool the gzip payload to disk before decompressing.  Binary mode:
    # text mode would corrupt the gzip stream on platforms that translate
    # line endings.
    contentsgz = urllib2.urlopen(contentsuri).read()
    fd = open('contents.tmp', 'wb')
    try:
      fd.write(contentsgz)
    finally:
      fd.close()

    contents = gzip.open('contents.tmp').read()

    # Bug fix: iterate over the *lines* of the decompressed text.  The old
    # code mapped string.strip over the string itself, which iterates
    # characters, so the parsing below never ran correctly.
    for line in contents.splitlines():
      line = line.strip()
      # Everything up to the 'FILE' column header is free-form preamble.
      if line.startswith('FILE'):
        header = False
        continue
      if header:
        continue
      # Each entry is '<filepath>   <section>/<package>'.  str.rsplit
      # replaces the nonexistent string.rsplit, and the positional
      # maxsplit replaces the keyword form Python 2 str.split rejects.
      (filepath, location) = line.rsplit(None, 1)
      # rsplit keeps the last path component so packages in
      # sub-hierarchies (e.g. 'non-free/utils/foo') resolve correctly.
      (_, pkg) = location.rsplit('/', 1)
      if pkg not in pkg_to_files:
        pkg_to_files[pkg] = []
      pkg_to_files[pkg].append(filepath)

    self.contents = pkg_to_files
    return pkg_to_files

  def Votes(self, votes_uri):
    """Retrieve the list of votes for all the packages listed in a given file.

    Args:
      votes_uri: URI for a by_vote (as in popcon).

    Returns:
      votes: dict mapping package name to its popcon vote count (string).
    """
    votes = {}

    by_vote = urllib2.urlopen(votes_uri).read()
    fd = open('votes.tmp', 'w')
    try:
      fd.write(by_vote)
    finally:
      fd.close()

    fd = open('votes.tmp', 'r')
    try:
      for line in fd:
        line = line.strip()
        # '#' lines are comments; '-' lines are the footer separator.
        if line.startswith('#') or line.startswith('-'):
          continue
        # by_vote columns: rank, name, inst, vote, ... -- keep name/vote.
        popcon = line.split(None, 4)
        votes[popcon[1]] = popcon[3]
    finally:
      fd.close()

    self.votes = votes
    return votes

  def GetRemovals(self, rmfile='removals.txt'):
    """Retrieve the list of packages to be removed from a given removals file.

    Args:
      rmfile: (optional) Full path for a removals file.

    Returns:
      removals: dict mapping package name to the version slated for removal.
    """
    removals = {}
    fd = open(rmfile, 'r')
    try:
      for line in fd:
        line = line.strip()
        if line.startswith('#'):
          continue
        entry = line.split(None, 1)
        # Skip malformed lines that lack a version column.
        if len(entry) == 2:
          removals[entry[0]] = entry[1]
    finally:
      fd.close()

    self.removals = removals
    return removals

  def Writecsv(self, directory=None):
    """Convert Packages file to CSV.

    Looks for every list of packages in the given directory and writes a
    corresponding file in the CSV format.

    NOTE(review): relies on the module-level globals dists, components,
    mirror and arch set by the __main__ driver -- confirm before reusing
    this class as a library.

    Args:
      directory: If set will use the given directory to look for Packages
      files, otherwise will use current directory.

    Returns:
      nothing.
    """
    if directory is None:
      # Resolved per call; a default of os.getcwd() in the signature would
      # be frozen once at import time.
      directory = os.getcwd()
    for dist in dists:
      for component in components:
        packagesfile = directory + '/%s_%s_Packages' % (dist, component)
        pkgfd = open(packagesfile, 'r')
        parse = apt_pkg.ParseTagFile(pkgfd)

        fd = open(packagesfile + '.csv', 'wt')
        try:
          # One writer for the whole file; the old code rebuilt it on
          # every stanza.
          writer = csv.writer(fd)
          while parse.Step():
            pkg = parse.Section.get('Package')
            version = parse.Section.get('Version')

            # Skip packages slated for removal.  A membership test avoids
            # the KeyError that self.removals[pkg] raised for the (common)
            # packages that are not being removed.
            if pkg in self.removals:
              continue
            # TODO(stratus): no version match yet, fix removals file!
            #if pkgsremovals[pkg] == version:
            #  continue

            maintainer = parse.Section.get('Maintainer')
            # converts from unicode to ascii replacing unknown chars
            maintainer = maintainer.decode('utf8').encode('ascii', 'replace')

            # .get() keeps the original truthiness fallback but no longer
            # raises KeyError for packages absent from the Contents or
            # popcon data.
            contents = self.contents.get(pkg) or 'NA'
            votes = self.votes.get(pkg) or 999999

            writer.writerow((pkg,
                             parse.Section.get('Description'),
                             version,
                             parse.Section.get('Architecture'),
                             maintainer,
                             parse.Section.get('Source', pkg),
                             parse.Section.get('Priority'),
                             parse.Section.get('Section'),
                             parse.Section.get('Filename'),
                             votes, contents, mirror, dist, component, arch))
        finally:
          fd.close()
          pkgfd.close()


if __name__ == '__main__':
  # Read the whole config up front and close the handle; the old code
  # leaked the file descriptor for the life of the process.
  yamlfile = open('crawler.yaml', 'r')
  try:
    yamldata = yamlfile.read()
  finally:
    yamlfile.close()

  for details in syck.load_documents(yamldata):
    for conf in details:
      # One lookup of the 'spiral' section instead of seven.
      spiral = conf['spiral']
      mirror = spiral['mirror']
      source_mirror = spiral['source_mirror']
      dists = spiral['dists']
      components = spiral['components']
      arch = spiral['arch']
      contentsuri = spiral['contentsuri']
      votes_uri = spiral['votesuri']

      crawl = Crawler()

      # Single-argument call form: identical output under Python 2's
      # print statement, and forward-parseable.
      print('Fetching %s / %s / %s packages from %s ...' % (dists, components,
                                                            arch, mirror))
      crawl.Packages(mirror, source_mirror, dists, components, arch)

      print('Computing removals.')
      crawl.GetRemovals()

      print('Fetching contents file from %s ...' % contentsuri)
      crawl.Contents(contentsuri)

      print('Fetching votes file from %s ...' % votes_uri)
      crawl.Votes(votes_uri)

      print('Writing CSV file(s).')
      crawl.Writecsv()
