'''
Adapter classes that give the GAE datastore and Google Fusion Tables a
similar table-like query interface: `ft` wraps the Fusion Tables client
(with caching and throttling), `GaeTable` mimics that interface on top
of the datastore.
'''

import ftclient
from authorization.clientlogin import ClientLogin
from sql.sqlbuilder import SQL
import logging, time, datetime
from google.appengine.api import memcache
import urllib2
from errors import *
#import model
from google.appengine.api import users
from google.appengine.ext import db
import model

class ft():
    '''
    Thin wrapper around the Fusion Tables client adding request
    throttling, memcache-backed read caching and uniform error wrapping.
    '''

    def __init__(self, username, password):
        '''
        Build the Fusion Tables client, falling back to module-level
        defaults when credentials are not supplied.

        NOTE(review): USERNAME, PASSWORD and getFtClient are not defined
        or imported in this module -- presumably provided elsewhere at
        runtime; confirm before relying on the defaults.
        '''
        if username is None:
            username = USERNAME
        if password is None:
            password = PASSWORD
        self.client = getFtClient(username, password)

    def _fetch(self, sql, t=None):
        '''
        Execute *sql* against Fusion Tables and return the processed
        response.

        t: optional transaction reference, recorded with failures only.
        Raises DLZError (carrying the offending statement) on any error.
        NOTE(review): processResponse is not defined in this module --
        verify where it comes from.
        '''
        try:
            # Crude rate limiting: pause before every request so we stay
            # under the service's request quota.
            time.sleep(0.3)
            response = processResponse(self.client.query(sql))
            logging.info('response from fusion table: %s'  % (str(response)[:100]))
            return response
        except urllib2.HTTPError as he:
            logging.fatal(str(he))
            raise DLZError(sql, str(he))
        except Exception as e:
            # Previously a bare except that discarded the root cause;
            # log it before wrapping so the failure is diagnosable.
            logging.fatal('Failed to execute: %s (%s)' % (sql, str(e)))
            raise DLZError(sql)

    def SqlQuery(self, sql, t=None):
        '''
        Execute *sql*, serving read-only statements from memcache when
        possible.  Any write (UPDATE/DELETE/INSERT) flushes the entire
        cache first so later reads are never stale.
        '''
        logging.info('execute: %s ' % sql)

        if sql.split()[0].upper() in ['UPDATE', 'DELETE', 'INSERT']:
            # A write invalidates every cached read result.
            memcache.flush_all()
            response = self._fetch(sql, t)
        else:
            data = memcache.get(sql)
            if data is not None:
                logging.info('hit memcache: %s' % str(data)[:100])
                return data
            response = self._fetch(sql, t)
            memcache.set(sql, response)
        return response

    def getTableList(self):
        '''Return the list of tables visible to this client.'''
        return self.SqlQuery(SQL().showTables())

    def getTableId(self, tablename):
        '''Return the id of the table named *tablename*, or None.'''
        for table in self.getTableList():
            if table['name']==tablename:
                return table['table id']
            

class GaeTable(ft):
    '''
    Adapter exposing a Fusion-Table-like interface on top of the GAE
    datastore.

    tableid: the model (kind) name used in generated statements.

    Now subclasses ft: __init__ always called ft.__init__(self, ...),
    which raises TypeError in Python 2 unless GaeTable actually is a
    subclass of ft.
    '''

    def SqlQuery(self, sql, t=None, fields=None):
        '''
        Run *sql* as a GQL query and post-process the results.

        t: accepted and ignored here, so callers that forward a
           transaction reference (insert/update/delete below) no longer
           crash with a TypeError.
        fields: previously an undefined name (NameError at runtime);
           now an explicit optional argument passed through to
           model.process_query.
        '''
        q = db.GqlQuery(sql)
        return model.process_query(q, fields)

    def get_records(self, options, include_rowid=False):
        '''
        Fetch records described by *options*:
        {'where': 'where a = b',
         'sort': fld_c,
         'dir': 'ASC',
         'tableid': 456751,
         'start': None
         'limit': None
        }
        'where', 'sort' and 'dir' must be present (they are interpolated
        unconditionally); 'tableid' is overwritten with self.tableid.
        When include_rowid is true the entity key is selected explicitly
        alongside every field; otherwise '*' is used.
        '''
        if include_rowid:
            fields = [fld['name'] for fld in self.fields()]
            options['fields'] = ','.join(['__key__'] + fields)
        else:
            options['fields'] = '*'
        options['tableid'] = self.tableid

        sql1 = 'select %(fields)s from %(tableid)s  %(where)s ORDER BY %(sort)s  %(dir)s ' % options
        if options.get('start'):
            sql1 = sql1 + ' OFFSET %(start)s '  % options
        if options.get('limit'):
            sql1 = sql1 + ' LIMIT %(limit)s ' % options
        else:
            # The datastore returns only the first 1000 records anyway;
            # make the cap explicit.
            sql1 = sql1 + 'LIMIT 1000'

        return self.SqlQuery(sql1)

    def __init__(self, username, password, tableid):
        '''Set up the underlying client and the query state.'''
        ft.__init__(self, username, password)
        self.tableid = tableid
        self.where = None    # condition set by filter()
        self.orderby = None  # ordering set by order()

    def fields(self):
        '''Return the field descriptions of this table.'''
        return self.SqlQuery('describe %s' % self.tableid)

    def all(self):
        '''Return every record of this table.'''
        sql = 'select * from %s' % self.tableid
        return self.SqlQuery(sql)

    def filter(self, condition):
        '''Set the where-condition for a later fetch(); returns self.'''
        self.where = condition
        return self

    def order(self, order):
        '''Set the sort field ("-" prefix = DESC) for fetch(); returns self.'''
        self.orderby = order
        return self

    def get(self, rowid):
        '''Get a single record by rowid, or None when not exactly one matches.'''
        sql = "select * from %s where rowid='%s'" % (self.tableid, rowid)
        logging.info(sql)
        response = self.SqlQuery(sql)
        if len(response)==1:
            return response[0]

    def fetch(self, limit=None, offset=None, cols=None):
        '''
        Run a select built from the state set via filter()/order().

        NOTE(review): `order` is computed below but never passed to
        SQL().select, so self.orderby currently has no effect here --
        confirm the sqlbuilder API and wire it through.
        '''
        if self.where:
            condition = self.where
        else:
            condition = None

        if self.orderby:
            if self.orderby.startswith('-'):
                order = '%s DESC' % self.orderby
            else:
                order = self.orderby
        if limit:
            limit = 'limit %s' % limit
        if offset:
            offset = 'offset %s' % offset

        sql = SQL().select(self.tableid, cols, condition, offset, limit)
        return self.SqlQuery(sql)

    def count(self):
        '''Return the number of records matching the current filter().'''
        if self.where:
            sql = 'select count() as count from %s where %s' % (self.tableid, self.where)
        else:
            sql = 'select count() as count from %s' % self.tableid
        response = self.SqlQuery(sql)
        return response[0]['count']

    def insert(self, record, t=None):
        '''Insert *record* (a dict) and return the new rowid.'''
        sql = SQL().insert(self.tableid, record)
        rowid = self.SqlQuery(sql, t)
        return rowid

    def update(self, record, rowid, t=None):
        '''Update the record at *rowid* with *record*; returns 'OK' on success.'''
        cols = record.keys()
        values = record.values()
        sql = SQL().update(self.tableid, cols, values, rowid)
        status = self.SqlQuery(sql, t)
        return status

    def delete_all(self, t=None):
        '''Delete every record of this table.'''
        sql = "delete from %s" % self.tableid
        logging.info(self.SqlQuery(sql, t))

    def batch_insert(self, records):
        '''
        Insert *records* in batches of at most 500 statements joined
        with ';'; returns the list of per-batch results.
        '''
        max_per_batch = 500
        current_row = 0
        sqls = []
        results = []
        for record in records:
            current_row += 1
            sqls.append(SQL().insert(self.tableid, record))
            if current_row == max_per_batch:
                sql = ';'.join(sqls)
                results.append(self.SqlQuery(sql))
                sqls = []
                current_row = 0
        # Flush the final, partial batch.
        if len(sqls)>0:
            sql = ';'.join(sqls)
            results.append(self.SqlQuery(sql))
        return results

    def delete(self, rowid, t=None):
        '''Delete the record at *rowid*; returns 'ok' on success.'''
        sql = "delete from %s where rowid='%s'" % (self.tableid, rowid)
        status = self.SqlQuery(sql, t)
        return status

    def batch_delete(self, rowids):
        '''
        Delete all *rowids* as one ';'-joined statement.

        (Was documented as "not supported" with a no-op `pass` before the
        implementation -- the code below always executed regardless.)
        '''
        sqls = []
        for rowid in rowids:
            sqls.append("delete from %s where rowid='%s'" % (self.tableid, rowid))
        sql = ';'.join(sqls)
        return self.SqlQuery(sql)
    
        
            
            

    

        