from elasticsearch import Elasticsearch
from elasticsearch import helpers

# Address of the Elasticsearch node; port 9200 is appended where the client is built.
HOST = '192.168.31.200'


def create_doc(d, source, desc=None):
    """Build one Elasticsearch document dict from a parsed TSV record.

    Parameters:
        d: sequence of 4 strings —
           d[0] feature string (tokenized into fixed-size chunks),
           d[1] space-separated integer category ids,
           d[2] space-separated float category weights,
           d[3] image path (only the file name is kept).
        source: origin tag stored verbatim in the document.
        desc: optional mapping of image file name -> description text.

    Returns:
        dict ready to be used as the ``_source`` of a bulk action.
    """
    # Fix: the original used a mutable default argument (desc={}), which is
    # shared across calls; use None as the sentinel instead.
    if desc is None:
        desc = {}
    categories = [int(v) for v in d[1].split(' ')]
    weights = [float(v) for v in d[2].split(' ')]
    img_name = d[3].split('/')[-1]
    return {
        'feature': ' '.join(tokenize(d[0])),
        'category': categories[0],        # primary category id
        'category_weight': weights[0],    # weight of the primary category
        'categories': categories,
        'categories_weight': weights,
        'img_name': img_name,
        'source': source,
        'desc': desc.get(img_name, ''),
    }


def tokenize(b64_str, str_len=4):
    """Split *b64_str* into consecutive chunks of *str_len* characters.

    A trailing remainder shorter than one full chunk is dropped.  The
    int() casts are kept so a non-integer ``str_len`` keeps producing the
    same (possibly uneven) slice boundaries as before.
    """
    n = int(len(b64_str) / str_len)
    return [b64_str[int(i * str_len):int((i + 1) * str_len)] for i in range(n)]


if __name__ == '__main__':
    import sys

    # Usage: script.py <input_file> <source> [<desc_file>]
    input_file = sys.argv[1]
    src = sys.argv[2]
    # Fix: the optional description file lives at argv[3], which only exists
    # when more than 3 argv entries are present.  The original checked
    # ``len(sys.argv) > 2`` and raised IndexError whenever no desc file
    # was supplied.
    desc_file = sys.argv[3] if len(sys.argv) > 3 else ''

    if src not in ('instagram', 'pinterest', 'imagenet', 'jd'):
        print('source should be: instagram or pinterest or imagenet or jd')
        sys.exit(1)  # sys.exit is safe even when site's exit() is absent

    es = Elasticsearch(hosts='%s:9200' % HOST)

    # Optional "description<TAB>image-name" file -> {image name: description}.
    desc = {}
    if desc_file:
        with open(desc_file) as fd:
            for line in fd:
                parts = line.strip().split('\t')
                if len(parts) > 1:
                    desc[parts[1].strip()] = parts[0].strip()

    batch_size = 1000
    actions = []
    # Fix: open the input file with a context manager; the original never
    # closed it.
    with open(input_file) as fd:
        for line in fd:
            d = line.strip().split('\t')
            if len(d) != 4:  # skip malformed records
                continue

            doc = create_doc(d, src, desc)
            actions.append({
                "_index": 'img_search_v3',
                "_type": 'fulltext',
                "_source": doc,
                "_id": doc['img_name'],  # de-duplicate per image file name
            })
            if len(actions) >= batch_size:
                helpers.bulk(es, actions)
                del actions[:]

    # Flush the final partial batch, if any.
    if actions:
        helpers.bulk(es, actions)
