#!/usr/bin/python2.5
# Copyright (c) 2008 MalFind

__author__ = "mavrommatis@gmail.com (Panayiotis Mavrommatis)"

import logging
import md5
import urlparse
import url_lookup
from google.appengine.api import urlfetch
from google.appengine.ext import db
from BeautifulSoup import BeautifulSoup
from model import models

'''Fetches a URL, parses its HTML, and compares the URLs it extracts
against the Safe Browsing (SB) blacklist.
'''

# Globals are cached between requests on each web server
# HTML tag names that Assessor.Parse() extracts as Findings; these are the
# tags most commonly used to embed remote (potentially malicious) content.
TAGS = ['script', 'iframe', 'object', 'frame']

class Assessor(object):
  '''Assesses a URL against the Safe Browsing digest blacklist.

  Fetches the page, extracts embedded resources (see TAGS), and checks both
  the page URL and each embedded resource URL against the blacklist of MD5
  digests stored in the datastore.
  '''

  def DigestLookup(self, digest):
    '''Returns True iff digest is in the blacklist.

    Args:
      digest: hex-encoded MD5 digest of a canonicalized URL expression.
    '''
    logging.info('Looking up %s', digest)
    result = models.Digest.gql("WHERE hash = :1", digest).get()
    if result is not None:
      logging.info("*** MATCH ***")
    return result is not None

  def MatchesSB(self, url):
    '''Returns True iff the url is in the Safe Browsing blacklist.

    Each host/path expression generated for the URL is hashed and looked
    up individually; the first hit short-circuits.
    '''
    for path in url_lookup.LookupUrlGenerator().Generate(url):
      digest = md5.new(path).hexdigest()
      if self.DigestLookup(digest):
        return True
    return False

  def Parse(self, url, html):
    '''Parses HTML and gathers all TAGS occurrences into Findings.

    Args:
      url: base URL of the document, used to absolutize relative src links.
      html: raw HTML body.

    Returns:
      A list of models.Finding; findings whose tag carries a 'src'
      attribute get an absolute .url set.
    '''
    logging.info("Parsing Document ...")
    findings = []
    soup = BeautifulSoup(html)
    for tag in TAGS:
      # findAll returns a (possibly empty) list; no emptiness guard needed.
      for match in soup.findAll(tag):
        finding = models.Finding(sort=tag, data=unicode(match))
        if match.has_key('src'):
          finding.url = urlparse.urljoin(url, match['src'])
        findings.append(finding)
    return findings

  def ParseUrl(self, url):
    '''Returns a normalized http URL from the given string, or None.

    Prepends http:// when no scheme is present; rejects non-http schemes
    and URLs with an empty host.
    '''
    if url.find('://') == -1:
      url = 'http://' + url
    u = urlparse.urlparse(url)
    if u.scheme != 'http' or u.netloc == '':
      return None
    return u.geturl()

  def Assess(self, url):
    '''Fetches a URL and assesses it against the blacklist.

    Args:
      url: URL string to assess (scheme optional; normalized via ParseUrl).

    Returns:
      A (assessment, findings) tuple: a models.Assessment with
      top_blacklisted set iff the URL itself matches, and a list of
      models.Finding with url_blacklisted set on embedded resources that
      match.  The findings list is empty on an ill-formed URL or a
      download failure.
    '''
    logging.info("Request starting ...")
    url = self.ParseUrl(url)
    assessment = models.Assessment(top_blacklisted=False)
    errors = []
    if not url:
      errors.append("Illformed url.")
      # Previously the errors list was silently discarded; log it so
      # failures are visible in the request logs.
      self._ReportErrors(errors)
      return (assessment, [])

    assessment.url = url
    # Compare it against Google's blacklist.
    if self.MatchesSB(url):
      assessment.top_blacklisted = True

    # Fetch the url.
    logging.info("Fetching Remote URL...")
    try:
      httpresponse = urlfetch.fetch(url, allow_truncated=True)
      assessment.response_body = httpresponse.content
      if httpresponse.content_was_truncated:
        # Best-effort: proceed with the truncated body, but record it.
        errors.append("Error fetching %s: Too Large" % url)
    except urlfetch.DownloadError:
      errors.append("Could not download %s" % url)
      self._ReportErrors(errors)
      return (assessment, [])

    # Parse the body.
    # TODO assessment.findings = [f.key() for f in findings]
    findings = self.Parse(url, assessment.response_body)

    # Mark findings that match our blacklist.
    contains_blacklist = False
    for finding in findings:
      if finding.url and self.MatchesSB(finding.url):
        finding.url_blacklisted = True
        contains_blacklist = True

    logging.info('Adding Query to DB...')
    query = models.Query(url=url,
        matches_blacklist=assessment.top_blacklisted,
        contains_blacklist=contains_blacklist)
    query.put()
    logging.info('Query added.')

    self._ReportErrors(errors)
    # TODO: Add assessment, findings to the DB, return a reference
    return (assessment, findings)

  def _ReportErrors(self, errors):
    '''Logs each accumulated error message (fixes silently-dropped errors).'''
    for error in errors:
      logging.error(error)
