#!/usr/bin/python
# Editor settings for vim: fileencoding=utf-8 shiftwidth=4 tabstop=4 expandtab

# Python interface to Google Sites data (pages, attachments, revisions, etc.).
#
# Note: I modified gdata.client.Query to accept additional
# query strings, such as path, parent, and ancestor.
# (We should probably subclass Query instead...)

# We use a Python library to simplify using the Google data API. 
# See http://code.google.com/p/gdata-python-client/.
import gdata.sites.client


# Which Google Sites site are we working with?
# (Hard-coded site name; change here to point at a different Sites site.)
site_name = "cedutd3"
print "Google Sites site '" + site_name + "'"

# Allow the user to enter (paste in) an existing authorization token.
# (E.g. a ClientLogin token saved from a previous session; avoids
# re-authenticating every run.)
print "Enter an authorization token if you have one; otherwise just press Enter."
auth_token = raw_input("Token: ")

# An empty reply means "no token": pass None so the SitesClient starts
# out unauthenticated and the user can log in later.
if auth_token == '':
    auth_token = None

# Create a client that will send requests to the Sites data server for our site.
site = gdata.sites.client.SitesClient(site_name, auth_token)

# Print a short interactive cheat sheet showing how to query the site
# from the Python prompt using the Query class defined below.
print "'site' is your interface to site data."
print "You can create a query, e.g.,"
print
print "  >>> q = Query(max_results = 1)"
print "  >>> feed = site.get_content_feed(q=q)"
print "  >>> entry = feed.entry[0]"
print "  >>> entry.kind()"
print "  'webpage' (for example)"
print "  >>> entry.page_name.text"
print "  'name'"
print "  >>> feed.entry[0].content.html.ToString()"
print "  '<html...'"
print

# List pages in a content feed.
def lFeed(feed):
    for e in feed.entry:
        print e.kind() + ' ' + e.page_name.text + '   ' + (e.find_url('alternate') or '?')

def alternateURL(entry):
    """Return the Sites URL for this page if we find one, else None.

    Scans the entry's child elements for a link element whose rel
    attribute is 'alternate' and returns its href.
    """
    matches = (el.href for el in entry.get_elements()
               if el.tag == 'link' and el.rel == 'alternate')
    return next(matches, None)


# Enumerate HTML content.
def pHTML(element, indent = 0):
    """Print HTML content. Pass in entry.content.html or some child thereof."""
    print ' ' * indent + element.tag + '  ' + (element.text or '')
    for el in element.get_elements():
        pHTML(el, indent + 4)


# Modify the gdata.sites Query class to accept more Sites query parameters.
class Query(object):
    def __init__(self, text_query=None, categories=None, author=None, alt=None,
            updated_min=None, updated_max=None, pretty_print=False,
            published_min=None, published_max=None, start_index=None,
            max_results=None, strict=False, path=None, parent=None,
            ancestor=None, kind=None):
        """Constructs a Google Data Query to filter feed contents serverside.
        Args:
        text_query: Full text search str (optional)
        categories: list of strings (optional). Each string is a required
          category. To include an 'or' query, put a | in the string between
          terms. For example, to find everything in the Fitz category and
          the Laurie or Jane category (Fitz and (Laurie or Jane)) you would
          set categories to ['Fitz', 'Laurie|Jane'].
        author: str (optional) The service returns entries where the author
          name and/or email address match your query string.
        alt: str (optional) for the Alternative representation type you'd like
          the feed in. If you don't specify an alt parameter, the service
          returns an Atom feed. This is equivalent to alt='atom'.
          alt='rss' returns an RSS 2.0 result feed.
          alt='json' returns a JSON representation of the feed.
          alt='json-in-script' Requests a response that wraps JSON in a script
            tag.
          alt='atom-in-script' Requests an Atom response that wraps an XML
            string in a script tag.
          alt='rss-in-script' Requests an RSS response that wraps an XML
            string in a script tag.
        updated_min: str (optional), RFC 3339 timestamp format, lower bounds.
          For example: 2005-08-09T10:57:00-08:00
        updated_max: str (optional) updated time must be earlier than timestamp.
        pretty_print: boolean (optional) If True the server's XML response will
          be indented to make it more human readable. Defaults to False.
        published_min: str (optional), Similar to updated_min but for published
          time.
        published_max: str (optional), Similar to updated_max but for published
          time.
        start_index: int or str (optional) 1-based index of the first result to
          be retrieved. Note that this isn't a general cursoring mechanism.
          If you first send a query with ?start-index=1&max-results=10 and
          then send another query with ?start-index=11&max-results=10, the
          service cannot guarantee that the results are equivalent to
          ?start-index=1&max-results=20, because insertions and deletions
          could have taken place in between the two queries.
        max_results: int or str (optional) Maximum number of results to be
          retrieved. Each service has a default max (usually 25) which can
          vary from service to service. There is also a service-specific
          limit to the max_results you can fetch in a request.
        strict: boolean (optional) If True, the server will return an error if
          the server does not recognize any of the parameters in the request
          URL. Defaults to False.
        path: str (optional) Path to desired page, e.g., "/finance/news".
        parent: str (optional) ID key of parent page, e.g., if parent's id.text
          is 'http://sites.google.com/feeds/content/site/foobar/8950174928365',
          the ID key is '8950174928365'. Retrieve only immediate children of
          the specified page.
        ancestor: str (optional) ID key of ancestor page, e.g., if ancestor's id.text
          is 'http://sites.google.com/feeds/content/site/foobar/8950174928365',
          the ID key is '8950174928365'. Retrieve all descendants of the
          specified page.
        kind: list of strings, or a single comma-separated string (optional)
          types of entries (pages) to return. Possible values are
          announcement, announcementspage, attachment, comment, filecabinet,
          listitem, listpage, webpage, webattachment.
        """
        self.text_query = text_query
        self.categories = categories or []
        self.author = author
        self.alt = alt
        self.updated_min = updated_min
        self.updated_max = updated_max
        self.pretty_print = pretty_print
        self.published_min = published_min
        self.published_max = published_max
        self.start_index = start_index
        self.max_results = max_results
        self.strict = strict
        self.path = path
        self.parent = parent
        self.ancestor = ancestor
        # Normalize kind to a list. The docstring advertises a comma-separated
        # string, but modify_request does ','.join(self.kind), which would
        # split a plain string into single characters ('webpage' ->
        # 'w,e,b,p,a,g,e'). Accept both forms.
        if kind is None:
            self.kind = []
        elif isinstance(kind, (list, tuple)):
            self.kind = list(kind)
        else:
            self.kind = str(kind).split(',')

    def modify_request(self, http_request):
        """Add each non-empty query parameter to http_request's URI query."""
        add_param = gdata.client._add_query_param
        add_param('q', self.text_query, http_request)
        if self.categories:
            http_request.uri.query['categories'] = ','.join(self.categories)
        add_param('author', self.author, http_request)
        add_param('alt', self.alt, http_request)
        add_param('updated-min', self.updated_min, http_request)
        add_param('updated-max', self.updated_max, http_request)
        if self.pretty_print:
            http_request.uri.query['prettyprint'] = 'true'
        add_param('published-min', self.published_min, http_request)
        add_param('published-max', self.published_max, http_request)
        if self.start_index is not None:
            http_request.uri.query['start-index'] = str(self.start_index)
        if self.max_results is not None:
            http_request.uri.query['max-results'] = str(self.max_results)
        if self.strict:
            http_request.uri.query['strict'] = 'true'
        # Sites-specific parameters (not in the stock gdata.client.Query):
        if self.path is not None:
            http_request.uri.query['path'] = str(self.path)
        if self.parent is not None:
            http_request.uri.query['parent'] = str(self.parent)
        if self.ancestor is not None:
            http_request.uri.query['ancestor'] = str(self.ancestor)
        if self.kind:
            http_request.uri.query['kind'] = ','.join(self.kind)

    # Alias kept for callers using the older CamelCase gdata naming style.
    ModifyRequest = modify_request


# NOTE(review): the triple-quoted string below is commented-out draft code —
# a sketch of a 'Site' wrapper class (token save/load, ClientLogin, query
# helpers) plus interactive usage notes. It is a bare string expression,
# never executed or referenced, and its indentation is inconsistent, so it
# would not compile as real code. Kept byte-identical here; consider
# deleting it and relying on version control history instead.
'''
class Site():
    """A convenient reading and writing channel to Google Sites."""

    def __init__(self,
         site_name = 'cedutd3',  # default value here for convenience
         token_save_path = r'Z:\education-data-ma\sites\python\td3-site-auth.txt'):
    self.site_name = site_name
    self.token_save_path = token_save_path
    # Set up a client object to communicate with the site.
    self.client = gdata.sites.client.SitesClient(site_name)
    # See if we have a saved token to try.
    # [It might be a good idea to discard saved tokens
    # more than a day old.]
    token = self.saved_token()
    if len(token) > 0:
        # store it in our feed client
        self.client.auth_token = gdata.gauth.ClientLoginToken(token)

    # Make our own exception.
    class WrongNumberError(Exception):
    def __init__(self, expected, received):
        self.expected = expected
        self.received = received

    # Do we have a saved token?
    def saved_token(self):
    try:
        token_file = open(self.token_save_path)
    except IOError as (errno, strerror):
        if errno == 2:
        # file not found, return an empty string
        return ''
        else:
        # some other exception
        raise
    token = token_file.readline()
    token_file.close
    if token[-1] == '\n':
        token = token[:-1]
    return token

    def save_token(self, path=''):
    if path == '':
        path = self.token_save_path
    self.token_save_path = path
    f = open(path, 'w')
    f.write(self.client.token.token_string + '\n')
    f.close

    # Get authorization to act as you with the site.
    def authorize(self):
    user = raw_input('User (Google email address): ')
    password = raw_input('Password: ')
    self.client.ClientLogin(user, password, 'CEDU-TD3Maint-1', 'jotspot', 'GOOGLE')

    # Define a query specification to pass into GET functions.
    # E.g.
    # >>> just1 = site.query(max_results=1)
    # >>> entry = site.get(q=just1)
    def query(self, **spec):
    if len(spec) > 0:
        return gdata.client.Query(**spec)
    else:
        print """  """

    def key(self, **query):
    feed = self.client.get_content_feed(**query)
    if len(feed.entry) != 1:
        raise WrongNumberError(1, len(feed.entry))
    key = feed.entry[0].id.text.split('/')[-1]

content=td3.get_content_feed(q=q)
a=content.entry[0]
a.title.text # 'David Olsson'

nw = gdata.sites.data.ContentEntry('webpage')
nw.content = gdata.sites.data.Content('<p>You are here.</p>')
nw.title = atom.data.Title()
nw.title.text = 'Where are you?'
td3.post(nw, td3.make_content_feed_uri()) # works

# or maybe just
td3.create_page('webpage', title='Where?', html='<p>Here.</p>', page_name='location', parent=<parent_entry>)
# plus any other parameters for gdata.client.post()
'''
