import json
import argparse, os
import numbers
from elasticsearch import Elasticsearch
from elasticsearch import helpers


import logging
logging.basicConfig(
    format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)

# Elasticsearch reserved metadata field names. Tweet fields colliding with
# these would clash with ES internals; the (commented-out) driver below
# renames them with a 'tw' prefix before indexing.
Meta_Fields = ['_index', '_uid', '_type', '_id', '_source', '_size', '_all', \
                '_field_names', '_parent', '_routing', '_meta']
# Key_to_Remove = ['id', 'url', 'size', 'indices', 'place', 'color', 'profile', 'coord', \
#                 'truncated', 'default', 'source', 'filter', 'offset', 'notification', \
#                 'geo', 'contributor', 'translator', 'screen', 'background']
# Substrings marking a tweet field as noise: any key *containing* one of
# these terms (substring match, see removeExtraInfo) is dropped.
Key_to_Remove = ['id', 'url', 'sizes', 'place', 'color', 'coord', 'truncated', 'filter', \
                'offset', 'notification', 'contributor', 'translator', 'screen', 'background']
# Short prefixes for frequently nested objects.
# NOTE(review): not referenced anywhere in this chunk — confirm it is used
# elsewhere before relying on (or removing) it.
Key_to_Replace = {'user': 'usr_', 'entities':'ent_'}

def parse_args():
    """Parse command-line options for the tweet indexing script.

    Returns:
        argparse.Namespace with es_host, es_index, es_type and es_port.
    """
    parser = argparse.ArgumentParser(
        description='Index tweets to elasticsearch')
    # dest is inferred by argparse from each long option name, so it is
    # not spelled out explicitly.
    parser.add_argument('--es_host', type=str, default='localhost',
                        help='es sever host')
    parser.add_argument('--es_index', type=str, default='tweets',
                        help='index name')
    parser.add_argument('--es_type', type=str, default='obj',
                        help='index type')
    parser.add_argument('--es_port', type=int, default=9200,
                        help='es server port')
    return parser.parse_args()

def removeExtraInfo(key, doc, terms=None):
    """Delete ``doc[key]`` when the key name contains any unwanted term.

    Matching is by substring: e.g. the term 'id' also removes 'user_id'.

    Args:
        key: candidate key name.
        doc: dict the key belongs to; mutated in place on a match.
        terms: iterable of substrings to filter on; defaults to the
            module-level ``Key_to_Remove`` list.

    Returns:
        True if the key was removed, False otherwise (the original
        implicitly returned None on no match and left an unused local).
    """
    if terms is None:
        terms = Key_to_Remove
    for term in terms:
        if term in key:
            del doc[key]
            return True
    return False

# Maps flattened key name -> value type first seen for that key.
# Shared across all documents so field types stay consistent corpus-wide.
keysDict = {}

def preProcessKeys(oldKey, doc):
    """Recursively flatten and rename the keys of a nested dict in place.

    Every key is rewritten as '<oldKey>_<key>' so flattened field names are
    unique, and the first value type observed for each flattened key is
    recorded in ``keysDict``. When a later document carries a different
    type for an already-seen key, the value is coerced to the recorded
    type; values that cannot be coerced are logged and skipped.

    Args:
        oldKey: key-path prefix accumulated from the enclosing keys.
        doc: dict-like object, mutated in place. Non-mappings are ignored.
    """
    if not hasattr(doc, 'keys'):
        return
    # Snapshot the keys: entries are added and deleted while iterating
    # (mutating a dict during iteration is an error in Python 3).
    for key in list(doc.keys()):
        if removeExtraInfo(key, doc):
            continue
        nKey = oldKey + '_' + str(key)
        if nKey not in keysDict:
            # First sighting of this flattened key: record its type.
            keysDict[nKey] = type(doc[key])
            doc[nKey] = doc[key]
            # Bug fix: the original skipped recursion on first sight, so
            # nested keys of the first document were never flattened.
            _recurseIntoValue(nKey, doc[nKey])
        elif keysDict[nKey] != type(doc[key]) and keysDict[nKey] is not type(None) \
                and doc[key] is not None:
            # Type clash with the recorded type: try to coerce the value.
            try:
                doc[nKey] = keysDict[nKey](doc[key])
                _recurseIntoValue(nKey, doc[nKey])
            except Exception:
                # Coercion failed; log the mismatch and drop the value
                # (the original used a bare except with a py2 print).
                logger.warning('%s : %s >>>>>>> %s',
                               nKey, keysDict[nKey], type(doc[key]))
        else:
            doc[nKey] = doc[key]
            _recurseIntoValue(nKey, doc[nKey])
        del doc[key]

def _recurseIntoValue(nKey, value):
    """Flatten nested dicts found directly or inside a list of dicts."""
    if type(value) is dict:
        preProcessKeys(str(nKey), value)
    elif isinstance(value, list):
        for item in value:
            preProcessKeys(str(nKey), item)

if __name__ == '__main__':
    args = parse_args()

    bulk, actions = [], []
    count, num_docs = 0, 0
    es_index = args.es_index
    es_type = args.es_type

    # create elasticsearch client
    es = Elasticsearch(hosts='{}:{}'.format(args.es_host, args.es_port))

    with open("tweets_raw_3G.json") as json_file:
        # Stream the multi-GB dump one line at a time; readlines() would
        # materialize the whole file in memory before any processing.
        for line in json_file:
            num_docs += 1

            # protect from extra data strings
            if num_docs >= 750886:
                print(line)

            # if num_docs >= 750887: break
            # if num_docs >= 5: break
            # if num_docs >= 750886:
            #     print "size of fields: %d for %d documents" % (len(keysDict), num_docs)
            #     break

            # # create and pre-process documents
            # doc = json.loads(line)
            # keys = doc.keys()
            # for key in keys:
            #     if removeExtraInfo(key, doc): continue
            #     if key in Meta_Fields:
            #         doc['tw'+key] = doc[key]
            #         del doc[key]
            #         key = 'tw'+key
            #     preProcessKeys(key, doc[key])

            # print json.dumps(doc, indent=4, separators=(',', ': '))

        #     # create index action
        #     action = {
        #         "_index": es_index,
        #         "_type": es_type,
        #         "_source": doc
        #     }
        #     actions.append(action)

        #     # write to es
        #     if len(actions) == 2000:
        #         logger.info('Bulking {} docs to sever, indexed {}'
        #                     .format(len(actions), num_docs))
        #         helpers.bulk(es, actions)
        #         del actions[:]

        # if len(actions) > 0:
        #     helpers.bulk(es, actions)
        #     logger.info('Bulking {} docs to sever,  total {}'
        #                 .format(len(actions), num_docs))