#!/usr/bin/python2.5
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import config
import datetime
import logging
import pfif
import StringIO
import time
import uuid
import urllib

from google.appengine.api import urlfetch
import tipfy

# Person Finder API configuration.  The dev and prod deployments use
# different API keys, endpoints, and record-id domains; tipfy reports
# which environment this app instance is running in.
PF = config.PF
if tipfy.Tipfy.app.dev:
  KEY = config.PF_DEV_KEY
  URL = config.PF_DEV_URL
  SUBDOMAIN = config.PF_DEV_SUBDOMAIN
  DOMAIN = config.PF_DEV_DOMAIN
else:
  KEY = config.PF_PROD_KEY
  URL = config.PF_PROD_URL
  SUBDOMAIN = config.PF_PROD_SUBDOMAIN
  DOMAIN = config.PF_PROD_DOMAIN

# PFIF 1.2 serializer used for all person/note (de)serialization below.
SERIALIZER = pfif.PFIF_VERSIONS['1.2']

class PfApiError(Exception):
  """Raised when a Person Finder API request fails or returns an error."""
  pass

class Person(object):
  """A PFIF person record.

  Truthy keyword arguments become instance attributes.  A record id is
  generated when none is supplied; entry/source dates default to "now".
  """

  def __init__(self, **kwargs):
    self.notes = []  # Note objects attached to this person.
    for key, value in kwargs.items():
      if value:
        setattr(self, key, value)
    if not hasattr(self, 'person_record_id'):
      # '%d' truncates the float timestamp; the uuid keeps ids unique
      # even within the same second.
      self.person_record_id = '%s/%d%s' % (DOMAIN, time.time(), uuid.uuid4())
    # Mirror Note: respect caller-supplied dates instead of clobbering them.
    if not hasattr(self, 'entry_date'):
      self.entry_date = datetime.datetime.now()
    # Bug fix: this was previously assigned to 'source_data' (a typo),
    # which left the PFIF source_date field unset on every person.
    if not hasattr(self, 'source_date'):
      self.source_date = self.entry_date

  def to_dict(self):
    """Returns the PFIF dict for this person, with non-empty notes embedded."""
    d = SERIALIZER.person_to_dict(self)
    # getattr: notes constructed without a 'text' kwarg have no such
    # attribute at all, which previously raised AttributeError here.
    d['notes'] = [n.to_dict() for n in self.notes if getattr(n, 'text', None)]
    return d

class Note(object):
  """A single PFIF note.

  Every keyword argument becomes an instance attribute.  A record id,
  entry date, and source date are filled in with defaults when missing.
  """

  def __init__(self, **fields):
    self.__dict__.update(fields)
    attrs = self.__dict__
    if 'note_record_id' not in attrs:
      self.note_record_id = '%s/%d%s' % (DOMAIN, time.time(), uuid.uuid4())
    if 'entry_date' not in attrs:
      self.entry_date = datetime.datetime.now()
    if 'source_date' not in attrs:
      self.source_date = self.entry_date

  def to_dict(self):
    """Returns the PFIF dict representation of this note."""
    return SERIALIZER.note_to_dict(self)

class PfApi(object):
  def __init__(self, key=KEY, url=URL, subdomain=SUBDOMAIN, pf=PF):
    self.key = key
    self.url = url
    self.subdomain = subdomain

    self.pf = PF
    self.default_args = {
        'key': self.key,
        'subdomain': self.subdomain,
        'version': '1.2'
        }

  def process_search_result(self, result):
    if result.status_code == 200:
      return pfif.parse(result.content)
    else:
      logging.debug('Error in search: %s' % result.content)
      raise PfApiError('Error in search')

  def get_args(self, extra_args):
    args = {}
    args.update(self.default_args)
    args.update(extra_args)
    return args

  def search(self, query, num, async=False):
    data = self.get_args({
        'q': query.encode('utf-8'),
        'max_results': num,
        })
    url = self.url + '/search?' + urllib.urlencode(data)
    for i in range(2,5):
      rpc = urlfetch.create_rpc(i)
      urlfetch.make_fetch_call(rpc, url)
      if async:
        return rpc
      else:
        try:
          return self.process_search_result(rpc.get_result())
        except urlfetch.DownloadError:
          logging.warning('RPC reaches deadline %i for %s' % (i, query))
    logging.error('Failed to get search result')
    raise PfApiError('Failed to get search result')

  def read(self, id_param):
    data = self.get_args({
        'id': id_param
        })
    result = None
    fetch_url = self.url + '/read?' + urllib.urlencode(data)
    for i in range(2,5):
      try:
        result = urlfetch.fetch(fetch_url,
                                method=urlfetch.GET,
                                deadline = i)
      except urlfetch.DownloadError:
        logging.warning('RPC reaches deadline %i' % i)

    if result and result.status_code == 200:
      return pfif.parse(result.content)
    else:
      content = None
      if result != None:
        content = result.content
      logging.error('Error in read: %s' % content)
      raise PfApiError('Error in read')

  def create(self, people=(), notes=()):
    xml_file = StringIO.StringIO();
    SERIALIZER.write_file(xml_file, people, lambda p: p['notes'], notes)
    xml = xml_file.getvalue()
    result = urlfetch.fetch(self.url +
                            '/write?subdomain=%s&key=%s&version=1.2' %
                            (self.subdomain, self.key),
                            method=urlfetch.POST,
                            payload=xml,
                            headers={'Content-Type': 'application/xml'})
    if result.status_code == 200:
      return result.content
    else:
      logging.error('Error in create: %s' % result.content)
      raise PfApiError('Error in create')

  def subscribe(self, person_id, email):
    data = {
        'id': person_id,
        'lang': 'ja',
        'subscribe_email': email,
        }
    result = urlfetch.fetch(self.url +
                            '/subscribe?subdomain=%s&key=%s' %
                            (self.subdomain, self.key),
                            method=urlfetch.POST,
                            payload=urllib.urlencode(data))
    if result.status_code == 200:
      return result.content
    else:
      logging.error('Error in subscribe: %s' % result.content)
      raise PfApiError('Error in subscribe')
