#!/usr/bin/python2.4
#
#Copyright 2009 Google Inc.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#

"""Module that implements HTMLParser to extract textual content from html."""


import formatter
import htmllib
import logging
import re
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import base_request_handler
import constants
import digital_content_wizard_exceptions


class TextParser(htmllib.HTMLParser):
  """Extracts plain text and the title from a published Google document.

  Usage: construct with a document id and type, call Process(), then read
  the `title`, `output_text` and `valid` attributes.
  """

  def __init__(self, doc_id, doc_type):
    """The constructor.

    Args:
      doc_id: Document Id string, whose content needs to be extracted.
      doc_type: String representing type of document DOC/SPREADSHEET.
    """
    self.document_id = doc_id
    self.doc_type = doc_type
    # NullFormatter discards formatting events; the text itself is collected
    # manually in handle_data().
    htmllib.HTMLParser.__init__(self, formatter.NullFormatter())

  def ConstructUrl(self):
    """Constructs url from the specified document id.

    Returns:
      Url string.

    Raises:
      InvalidDocumentException: If doc_type is not a supported content type.
    """
    if not self.document_id:
      logging.error('Document Id not specified.')

    if self.doc_type == constants.CONTENT_TYPES[3]:
      prefix_url = 'http://docs.google.com/View?id='
      suffix_url = '&hl=en&hgd=1'
    elif self.doc_type == constants.CONTENT_TYPES[5]:
      prefix_url = 'http://spreadsheets.google.com/pub?key='
      suffix_url = ''
    else:
      # Bug fix: previously an unsupported doc_type left prefix_url unbound
      # and the return below raised a bare NameError. Raise the module's
      # document exception instead, which callers already handle.
      raise digital_content_wizard_exceptions.InvalidDocumentException(
          'Unsupported document type: %s' % self.doc_type, self.document_id)
    return prefix_url + self.document_id + suffix_url

  def FetchUrl(self, url):
    """Fetches the url content.

    Args:
      url: Url string.

    Returns:
      HTML Content string, or None on any fetch failure.
    """
    try:
      logging.debug('Trying to fetch url: ' + url)
      result = urlfetch.fetch(url)
      if result.status_code == 200:
        logging.debug('Url fetched successfully')
        return result.content
      # Was logged as 'None 200 HTTP response.'; report the real status.
      logging.error('Non-200 HTTP response: %d', result.status_code)
      return None
    except urlfetch.Error, e:
      logging.error('Unable to fetch the document url.')
      logging.error(e)
      return None

  def Init(self):
    """Initializes the member variables required during text parsing."""
    self.in_body = False        # Inside <body>: data may be collected.
    self.in_title = False       # Inside <title>: data goes to self.title.
    self.in_non_text = False    # Inside <script>/<style>: data is skipped.
    self.valid = True           # False once a 'page not found' title is seen.
    self.title = ''
    self.output_text = ''
    self.footer_started = False  # True once the footer <div> is reached.

  def Process(self):
    """Fetches the document and feeds its HTML through this parser.

    Raises:
      InvalidDocumentException: If the document cannot be fetched or parsed.
    """
    document_content = self.FetchUrl(self.ConstructUrl())
    if not document_content:
      raise digital_content_wizard_exceptions.InvalidDocumentException(
          'Could not fetch writely document content', self.document_id)
    try:
      logging.debug('Starting the parsing')
      self.Init()
      self.feed(document_content)
    except htmllib.HTMLParseError, e:
      logging.error(e)
      raise digital_content_wizard_exceptions.InvalidDocumentException(
          'Could not parse writely document content', self.document_id)

  def handle_data(self, data):
    """Handles data segment. Only required data is copied to output_text.

    Args:
      data: Data string.
    """
    data = data.strip()
    if self.in_body and data:
      # Skip script/style content and anything after the footer div.
      if not self.in_non_text and not self.footer_started:
        self.output_text += data
    if self.in_title and data:
      self.title = data
      # A 'Page Not Found' title means the document id is invalid.
      result = re.search(r'page not found', data.lower())
      if result:
        logging.error('Page Not Found error')
        self.valid = False

  def handle_starttag(self, tag, method, attrs):
    """Handles start of known tag. Used to check body and title tags.

    Args:
      tag: Tag string.
      method: Method string.
      attrs: Array of attribute tuples for this tag.
    """
    # Normalize case consistently for both tags (htmllib already lowercases
    # tag names, so this is belt-and-braces).
    if tag.lower() == 'body':
      self.in_body = True
    if tag.lower() == 'title':
      self.in_title = True

  def handle_endtag(self, tag, method):
    """Handles end of known tags like title, body.

    Args:
      tag: Tag string.
      method: Method string.
    """
    if tag.lower() == 'title':
      self.in_title = False

  def unknown_starttag(self, tag, attrs):
    """Handles start of unknown tags. Used to check style and script tag.

    Args:
      tag: Tag string.
      attrs: Array of attribute tuples for this tag.
    """
    if tag == 'div':
      # A div whose id contains 'footer' (e.g. google-view-footer) marks the
      # start of boilerplate; text beyond it is not part of the document.
      for attr in attrs:
        if attr[0] == 'id':
          match = re.search(r'footer', attr[1])
          if match:
            self.footer_started = True
            return
    elif tag == 'script':
      self.in_non_text = True
    elif tag == 'style':
      self.in_non_text = True

  def unknown_endtag(self, tag):
    """Handles end of unknown tags like div, script and style.

    Args:
      tag: Tag string.
    """
    if tag == 'script':
      self.in_non_text = False
    elif tag == 'style':
      self.in_non_text = False


class ParseHandler(base_request_handler.BaseRequestHandler):
  """Handler to parse writely document and extract textual content."""

  def get(self):
    """GET method for this request handler."""
    try:
      document_id = self.request.get('id')
      doc_type = self.request.get('type')
      if not document_id:
        logging.error('Document id not provided')
        self.error(404)
      if not doc_type or doc_type.upper() not in constants.CONTENT_TYPES:
        logging.warning('Type Not provided')
        logging.warning('Default type is DOC')
        doc_type = constants.CONTENT_TYPES[3]
      doc_type = doc_type.upper()
      parser = TextParser(document_id, doc_type)
      parser.Process()
      if parser.valid:
        template_values = {}
        template_values ['page_text'] = parser.title + ' ' + parser.output_text
        template_values['redirect_url'] = parser.ConstructUrl()
        self.response.out.write(self.GenerateHtml('delayed_redirection.html',
                                                  template_values))
      else:
        self.error(404)
    except digital_content_wizard_exceptions.InvalidDocumentException, e:
      logging.error('Error while parsing the document.')
      self.error(404)


# WSGI application wiring the /parse endpoint to its handler.
application = webapp.WSGIApplication([('/parse', ParseHandler)], debug=True)


def main():
  """Entry point: hands the WSGI application to the App Engine runtime."""
  run_wsgi_app(application)


# Run the CGI entry point when this module is invoked directly.
if __name__ == '__main__':
  main()
