from google.appengine.ext import db
from google.appengine.api import memcache
import model, logging, datetime, StringIO, csv
'''
managing all the tables saved in gae data store

Basically it is a data conversion utility to import and export data from the GAE datastore.

Data format:
- gae datastore: the native storage of GAE
- records: This is the universal format for a collection of table rows. The format is [{fld1: v1, fld2: v2....}]. All the data should be consistent with the GAE table definition.
- csvtext: a string that represents a csv text file. The format is defined in dlz_table_schema
- csvfiltered: a string that represents a csv text file with only selected fields.
- extjson: the extjs json format with meta definition
- iformjson: the iform json file format

The process is to convert all the formats into records, then convert the records to any other format.

Currently, only the following conversion is needed.

gae=>records=>records
gae=>records=>csvtext
gae=>records=>extjson

iformjson=>records=>gae
csvtext=>records=>gae
'''





def download_whole_table(tbl):
    '''Dump an entire datastore table as CSV text.

    tbl is the name of a model class in model.py. The header row is taken
    from the keys of the first record returned by query() (this includes
    the synthetic google_key column). An empty table yields an empty
    string instead of crashing (previously records[0] raised IndexError).
    '''
    gdm = GaeDbManager(tbl)
    records = gdm.query({'where': '', 'sort': '', 'dir': '', 'limit': '', 'start': ''}, 'object')
    logging.info(len(records))
    if not records:
        # nothing to export; no header can be derived from zero records
        return ''
    out = StringIO.StringIO()
    writer = csv.writer(out, lineterminator='\n')
    header = records[0].keys()
    writer.writerow(header)
    for row in records:
        # emit the values in header order so columns line up
        writer.writerow([row[fld] for fld in header])
    results = out.getvalue()
    out.close()
    return results

# Maps dlz_table_schema field_type values to the ExtJS field/column type
# names used in the grid metadata ('str' and 'location' both render as String;
# both 'datetime' and 'date' map to the ExtJS 'date' type).
EXT_TYPES_MAPPING = {'str': 'String', 'float': 'Float', 'datetime':'date', 'location': 'String', 'int': 'int', 'boolean': 'boolean', 'date': 'date'}

def get_fields(table_name):
    '''Return the ExtJS-style field definitions for table_name.

    Schema rows are read from the dlz_table_schema kind (memcached under
    "<table_name>-fields"); each row is mapped to a dict holding the field
    name, ExtJS type, header label, dateFormat (date fields only),
    conversion/format functions and the exportable flag.
    '''
    mkey = '%s-fields' % table_name

    # Fetch once; the original called memcache.get(mkey) twice in a row.
    results = memcache.get(mkey)
    if results:
        logging.info('%s Schema from memcache' % table_name)
    else:
        q = model.dlz_table_schema.gql("where table_name = :1 order by field_position asc", table_name)
        results = []
        for r in q:
            result = {}
            for fld in r.fields().keys():
                result[fld] = getattr(r, fld)
            results.append(result)
        memcache.set(mkey, results)
    fields = []
    for fld in results:
        d = {'name': fld['field_name'], 'type': EXT_TYPES_MAPPING[fld['field_type']], 'header': fld['field_label'], 'dataIndex': fld['field_name']}
        if EXT_TYPES_MAPPING[fld['field_type']] == 'date':
            # ExtJS date columns need an explicit parse format string
            if fld['field_format'] == 'mm/dd/yyyy':
                d['dateFormat'] = 'm/d/Y'
            elif fld['field_format'] == 'yyyy-mm-dd hh:mm:ss':
                d['dateFormat'] = "Y-m-d H:i:s"
        d['conFn'] = get_conversion_function(fld)
        d['formatFn'] = get_format_function(fld)
        d['exportable'] = fld['exportable']
        fields.append(d)
    logging.info('%s Schema' % (table_name))
    return fields

def get_conversion_function(field):
    '''
    Return a callable that casts a raw (usually string) value to the
    field's declared type.

    field is a dictionary:
    {
        field_name: xxx
        field_type: int/str/datetime
        field_format: mm/dd/yyyy
        etc...
    }

    Unknown types -- and datetime fields with an unrecognized
    field_format, which previously produced fn=None and a TypeError the
    first time the caller invoked it -- fall back to the identity
    function.
    '''
    fn = lambda v: v  # safe default: pass the value through unchanged
    field_type = field['field_type']

    if field_type=='float':
        fn = float
    elif field_type=='int':
        fn = int
    elif field_type=='str':
        fn = str
    elif field_type=='datetime':
        field_format = field['field_format']
        if field_format=='mm/dd/yyyy':
            fn = lambda v: datetime.datetime.strptime(v, '%m/%d/%Y')
        elif field_format=='yyyy-mm-dd hh:mm:ss':
            fn = lambda v: datetime.datetime.strptime(v, '%Y-%m-%d %H:%M:%S')
    return fn

def get_format_function(field):
    '''Return a callable turning a stored value into its display string.

    Priority: no field_format -> identity; printf-style format ("%...")
    -> string interpolation; known datetime formats -> strftime.
    Previously a datetime field with an unrecognized non-"%" format left
    fn unbound and raised UnboundLocalError; it now falls back to the
    identity function.
    '''
    field_type = field['field_type']
    field_format = field['field_format']
    fn = lambda v: v  # default: show the value as-is
    if not field_format:
        return fn
    if field_format.startswith('%'):
        return lambda v: field['field_format'] % v
    if field_type=='datetime':
        if field_format=='mm/dd/yyyy':
            fn = lambda v: v.strftime('%m/%d/%Y')
        elif field_format=='yyyy-mm-dd hh:mm:ss':
            fn = lambda v: v.strftime('%Y-%m-%d %H:%M:%S')
    return fn
            
    
   
#def list_table():
#    '''
#    get a list of tables defined in model.py
#    '''
#    table_list = []
#    for p in dir(model):
#        if type(getattr(model, p))==type(db.Model) or type(getattr(model, p))==type(db.Expando):
#            table_list.append(p)
#    return table_list




def formatValue(fld, v):
    '''Format value v using the field definition. If it fails, return the
    original value.

    Falsy values (None, '', 0) and fields without a formatFn pass through
    unchanged; a formatting failure is logged and the raw value returned,
    so display formatting can never lose data.
    '''
    if v:
        if fld['formatFn']:
            try:
                return fld['formatFn'](v)
            except Exception:
                # best-effort: was a bare except, narrowed so ^C etc. still propagate
                logging.warning('failed to format field: "%s" value: %s, %s' % (fld['name'], v, str(type(v))))
                return v
    return v


def castValue(fld, v):
    '''Cast a raw value v with the field's conFn.

    Returns None for falsy input or when every cast attempt fails; the
    failure is logged but deliberately non-fatal so one bad cell does not
    abort a whole import. Date fields get a second chance as plain
    "yyyy-mm-dd" strings. (Bare excepts narrowed to Exception; the
    implicit None returns are now explicit.)
    '''
    if not v:
        return None
    try:
        return fld['conFn'](v)
    except Exception:
        if 'date' in fld['type']:
            # fallback: ISO-style date without a time part
            try:
                y, m, d = v.split('-')
                return datetime.datetime(int(y), int(m), int(d))
            except Exception:
                logging.warning('Failed to cast value(%s) for field:"%s"' % (v, fld['name']))
        else:
            logging.warning('Failed to cast value(%s) for field:"%s"' % (v, fld['name']))
    return None

def isDuplicate(table, values):
    '''Return True when the table already holds a row matching every
    key/value pair in values (an AND-combined GQL equality query).'''
    conditions = []
    params = []
    for position, (field, value) in enumerate(values.items(), 1):
        conditions.append('%s = :%s' % (field, position))
        params.append(value)
    gql_args = ['where ' + ' and '.join(conditions)] + params
    logging.info(gql_args)
    return table.gql(*gql_args).count() > 0
    
class GaeDbManager(object):
    '''
    Data-access manager for one GAE datastore kind (table).

    "records" are the universal interchange format used throughout this
    module: a list of dicts, one per datastore row, keyed by field name.
    '''
    def __init__(self, tbl, fields=None):
        # tbl is the name of a model class defined in model.py; fields may
        # be a pre-built schema list (as produced by get_fields) to skip
        # the dlz_table_schema lookup.
        self.table = getattr(model, tbl)
        self.table_name = tbl
        if fields:
            self.fields = fields
        else:
            self.fields = get_fields(tbl)
    def insert_iformjson(self, json):
        '''Convert iform JSON items to records and insert them, skipping
        records whose "ID" value already exists in the table.'''
        records = self._iformjson2records(json)
        self.insert(records, False, ['ID'])
    def update_schema(self):
        '''when the dlz_table_schema is updated, the following things need to be done
        1. data type change: cast the data to new type
        2. new column: add new column and set value to None
        '''
        memcache.flush_all()
        logging.info('Start updating %s using the schema definition in dlz_table_schema:' % self.table_name)
        t = self.table
        q = t.all()
        batch = []
        for r in q:
            for fld in self.fields:
                n = fld['name']
                if hasattr(r, n):
                    #an existing field
                    
                    v = getattr(r, n)
                    if v is None:
                        if n=='del':
                            # 'del' is the soft-delete marker; default it to 'NO'
                            setattr(r, 'del', 'NO')
                            logging.info('mark del as no')
                    else:
                        
                        if type(v)==datetime.datetime and 'date' in fld['type']:
                            #if it is a date field and value is already casted.
                            pass
                        else:
                            try:
                                setattr(r, n, fld['conFn'](v))
                            except:
                                # if it is a date field, in the database it is already converted, so there is no need to convert.
                                logging.warning('Failed to cast "%s" for field "%s" value "%s"' % (type(v), fld['type'], v))
                        if n=='del' and v!='NO':
                            # any non-'NO' value in 'del' is normalized to 'YES'
                            setattr(r, 'del', 'YES')
                            logging.info('mark del as yes')
                else:
                    #a new field
                    if n=='del':
                        setattr(r, 'del', 'NO')
                        logging.info('mark del as no')
                    else:
                        setattr(r, n, None)
            batch.append(r)
            if len(batch)>500:
                #in case it is a giant table, don't overwhelm the memory
                db.put(batch)
                batch = []
        db.put(batch)
        logging.info('Table Schema Update Completed.')
    def _iformjson2records(self, json):
        '''convert iform json to records

        Each item is expected to carry a 'record' dict and optionally a
        'location' dict -- TODO confirm against the iform payload.'''
        results = []
        for item in json:
            result = {}
            record = item.get('record')
            location = item.get('location')
            for fld in self.fields:
                k = fld['name']
                
                if record.get(k):
                    result[k] = castValue(fld, record[k])
                else:
                    #set it to None if no value
                    result[k] = None
            #add location
            # NOTE(review): assumes the table has lat/lon fields when a
            # location is present -- verify against the model definition.
            if location:
                result['lat'] = location.get('lat')
                result['lon'] = location.get('lon')
            results.append(result)
        return results
    def _records2csvtext(self, records, header_rows=1, exclude_fields=[]):
        '''convert records to csvtext

        header_rows=2 adds a second header row with human-readable labels;
        exclude_fields names columns left out entirely. A trailing
        google_key column is always appended.'''
        out = StringIO.StringIO()
        header = []
        header_2 = [] #label of the header
        writer = csv.writer(out, lineterminator='\n')
        for fld in self.fields:
            if fld['name'] in exclude_fields:
                pass
            else:
                header.append(fld['name'])
                header_2.append(fld['header'])
        writer.writerow(header+['google_key'])
        if header_rows==2:
            writer.writerow(header_2)
        for r in records:
            row = []
            for fld in self.fields:
                if fld['name'] in exclude_fields:
                    pass
                elif fld['name']=='project_number':
                    # project numbers are exported with a 'p' prefix
                    # (update() strips it again on the way back in)
                    if r['project_number']:
                        row.append('p%s' % r['project_number'])
                    else:
                        row.append('')
                else:
                    row.append(formatValue(fld, r[fld['name']]))
            row.append(r['google_key'])
            writer.writerow(row)
        results = out.getvalue()
        out.close()
        return results
    def _csvtext2records(self, csvtext, skip=1):
        '''convert csvtext into records'''
        '''convert a string of a csv file and parse it into a list of records,
         assuming first line is header, skip is the number of rows not used as data'''
        fhandler = StringIO.StringIO(csvtext)
        reader = csv.reader(fhandler)
        h = reader.next()
        # skip counts the header itself, so skip-1 more rows are discarded
        for i in range(skip-1):
            reader.next()
        results = [] # a list each line represents a line in the csv file
        for l in reader:
            results.append(dict(zip(h, l)))
        fhandler.close()
        # cast every raw string cell to its schema-defined type
        for r in results:
            for fld in self.fields:
                k = fld['name']
                v = r.get(k)
                r[k] = castValue(fld, v)
        return results
    
        
    def insert(self, records, allowDup=True, key_fields=[]):
        '''
        insert records into database

        When allowDup is False, key_fields names the columns that identify
        a duplicate; matching records are skipped, not updated.
        '''
        memcache.flush_all()
        
        batch = []
        for r in records:
            if allowDup==False:
                values = {}
                for fld in key_fields:
                    values[fld] = r[fld]
                if isDuplicate(self.table, values):
                    #duplicate
                    logging.info('Skip record: %s=%s' % (key_fields[0], str(r.get(key_fields[0]))))
                else:
                    t = self.table()
                    for fld in self.fields:
                        #special field called del, mark a field as deleted
                        if fld['name']=='del':
                            setattr(t, 'del', 'NO')
                        else:
                            setattr(t, fld['name'], r.get(fld['name']))
                    batch.append(t)
            else:
                t = self.table()
                for fld in self.fields:
                    setattr(t, fld['name'], r[fld['name']])
                batch.append(t)
        logging.info('insert %s records into %s' % (len(batch), self.table_name))
        db.put(batch)
        logging.info('insert operation is successful.')
    def insert_csv(self, csvtext, skip=1):
        '''Parse csvtext and insert the resulting records (duplicates allowed).'''
        records = self._csvtext2records(csvtext, skip)
        self.insert(records)
    def empty(self):
        '''empty a table'''
        memcache.flush_all()
        logging.info('Emptying Table %s' % self.table_name)
        q = self.table.all()
        db.delete(q)
        logging.info('table emptied')
    def update_csv(self, csvtext, skip_fields=[], skip=2):
        '''
        update the database with a csv file. It is preformatted using the query(format=csvfiltered)
        '''
        records = self._csvtext2records(csvtext, skip)
        self.update(records, skip_fields)
    def update(self, records, skip_fields):
        '''update a table with records, google_key is used to identify a record

        Only fields whose value actually changed are written; untouched
        entities are not re-put.'''
        memcache.flush_all()
        logging.info('Updating %s records for %s.' % (len(records), self.table_name))
        batch = []
        for record in records:
            #record in database
            r = db.get(record.get('google_key'))
            isDirty = False
            for fld in self.fields:
                k = fld['name']
                if k in skip_fields:
                    pass
                else:
                    v = record.get(k)
                    if v:
                        try:
                            new_v = fld['conFn'](v)
                        except:
                            new_v = v
                        if k=='project_number':
                            # strip the 'p' prefix added by _records2csvtext on export
                            if new_v[0]=='p':
                                new_v = new_v[1:]
                        old_v = getattr(r, k)
                        if old_v==new_v:
                            #no change
                            pass
                        else:
                            setattr(r, k, new_v)
                            isDirty = True
                            logging.info('%s: %s=>%s' % (k, old_v, new_v))
            if isDirty:
                batch.append(r)
        db.put(batch)
        logging.info('%s records updated.' % len(batch))
                
        
    def backup(self, tbl):
        '''backup a table into a table'''
        # not implemented yet
        pass
    def query(self, options, format):
        '''query the table

        options is a dict with where/sort/dir/limit/start (and optionally
        to_dt); format selects the output shape: 'records', 'csv',
        'csvfiltered', 'extjson', or anything else for raw record dicts.
        Results are memcached keyed on str(options) + kind + format.'''
        table_model = self.table
        tableid = table_model.kind()
        logging.info('Query Table: %s' % self.table_name)
        logging.info(options)
        mkey = str(options) + str(table_model.kind())+format
        
        results = memcache.get(mkey)
        
        if results:
            logging.info('query table, hit memcache.')
            return results

        rows = []
        sql1 = '%(where)s' % options
        if options['sort']:
            if options.get('to_dt'):
                #form_date is an inequality query, so the first ordered item must be this field.
                sql1 = sql1 + ' ORDER BY form_date, %(sort)s ' % options
            elif options['sort']=='test_no':
                #if sort by test_no, it should sort by date first.
                sql1 = sql1 + ' ORDER BY form_date, %(sort)s ' % options
            else:
                sql1 = sql1 + ' ORDER BY %(sort)s ' % options
            if options['dir']:
                sql1 = sql1 + ' %(dir)s ' % options
        if options['limit']:
            sql1 = sql1 + ' LIMIT %(limit)s ' % options
        if options['start']:
            sql1 = sql1 + ' OFFSET %(start)s '  % options
#        logging.info('where clause: %s' % sql1) 
        logging.info(table_model)
        # materialize the query into record dicts plus the entity key
        for r in table_model.gql(sql1):
            row = {}
            for fld in self.fields:
                row[fld['name']] = getattr(r, fld['name'])
            row['google_key'] = str(r.key())
            rows.append(row)
        
        ct = len(rows)
        
        if format=='records':
            results =  rows
        elif format=='csv':
            results = self._records2csvtext(rows)
        elif format=='csvfiltered':
            logging.info('render data as csvfiltered text.')
            ex_flds = [fld['name'] for fld in self.fields if fld['exportable']=='NO']
            logging.info('exclude fields from the results: %s' % str(ex_flds))
            results = self._records2csvtext(rows, 2, ex_flds)
        elif format=='extjson':
            # count the unpaged total (where-clause only) for the ExtJS
            # paging toolbar
            sql2 = '%(where)s' % options
            logging.info('total where clause: %s' % sql2)
            total = table_model.gql(sql2).count()
           
            meta = {
                "idProperty": "google_key",
                "root": "rows",
                "totalProperty": "total",
                "successProperty": "success",
                #"fields":[],
                # used by store to set its sortInfo
                #"sortInfo":[],
                # paging data (if applicable)
                #"start": paging['start'],
                #"limit": paging['limit'],
                #'groupField': 'project_name'
                # custom property
                #"foo": "bar"
            }
            
            if options.get('start') and options.get('limit'):
                meta['start'] = options.get('start')
                meta['limit'] = options.get('limit')
            
            if options.get('sort') and options.get('dir'):
                meta['sort'] = options.get('sort')
                meta['dir'] = options.get('dir')
            
            fields = []
            # format values for display before shipping them to the grid
            for row in rows:
                for fld in self.fields:
                    row[fld['name']] = formatValue(fld, row[fld['name']])
            for fld in self.fields:
                field = dict(fld) # make a copy of fld
                # functions are not JSON-serializable; drop them from the meta
                del field['conFn']
                del field['formatFn']
                del field['exportable']
                fields.append(field)
            meta['fields'] = fields
            
            results = {'metaData': meta, 'success': True, 'total': total, 'rows': rows}
        else:
            results = rows
                
        logging.info(str(results)[:100])
        memcache.set(mkey, results)
        logging.info('query table, miss memcache. %s records' % ct)
        return results