#!/usr/bin/env python
# encoding:utf-8
# @Time   : 2020/2/18
# @Author : 茶葫芦
# @Site   : 
# @File   : es_import.py

import json
import os
import pandas as pd
import pathlib
import sys
import time

from elasticsearch import Elasticsearch, helpers
from hashlib import md5
from settings import es_host, sep_every, update_every
from tools import load_xls_config

# Make the project root importable so the project-local `utility` package
# resolves regardless of the working directory this script is launched from.
project_path = str(pathlib.Path(os.path.dirname(__file__)).parent.parent)
sys.path.append(project_path)
from utility.import_log import flog

"""
加载配置文件
table_cols_dict：表列对应关系 {表：[字段列表],}字典
table_to_es_cols_json_list：table字段名es属性对应表集合 [{'value': '贷款金额', 'attr': 'amount', 'table': 'loan', 'is_attr': True},。。。]
value_es_cols_dict：各列值入es的格式dict {table:{column1:{'attr': 'name', 'table': 'personal', 'is_attr': False}...},...}
"""
# Module-level logger for this importer script.
log = flog(__file__)

# Load the Excel-driven configuration once at import time (see the note above):
#   table_cols_dict            -> {table: [column, ...]} column layout per table
#   table_to_es_cols_json_list -> list of per-column es attribute dicts
#   value_es_cols_dict         -> {table: {column: es-format dict}} for value import
log.info('开始加载Excel配置')
table_cols_dict, table_to_es_cols_json_list, value_es_cols_dict = load_xls_config()
log.info('加载Excel配置成功')


def ins_es():
    """Create and return an Elasticsearch client bound to the configured host."""
    client = Elasticsearch(host=es_host)
    return client


def csv_value_to_es(table, filepath, every=1000, action='add'):
    """字段值批量入es — bulk-import one CSV file's field values into Elasticsearch.

    :param table: logical table name; keys into value_es_cols_dict and is
        folded into each document's md5 id.
    :param filepath: path of an existing CSV chunk (as produced by file_split_csv).
    :param every: number of rows sent per bulk request.
    :param action: reserved for future op types; only indexing is performed now.
    :return: 0 on success, 1 when a bulk request fails, None if the table has
        no es column mapping.
    """
    assert os.path.isfile(filepath), '传入的文件路径不存在或者非文件目录'
    log.info(f"开始加载{filepath}")
    table_df = pd.read_csv(filepath, dtype=str)
    es_table = value_es_cols_dict.get(table, '')
    op_type = 'index'
    if not es_table:
        log.error('未找到该表导入es的字段信息')
        return
    # Keep only the es-mapped columns; dict_keys must be materialized to a
    # list for pandas column selection.
    table_df = table_df[list(es_table.keys())]
    # {column name: [values]} dict
    values_dict = table_df.to_dict(orient='list')
    # Create the es connection once per file; moving it into the loop would
    # hurt throughput (kept here deliberately — see on-site tuning note).
    es = ins_es()
    for col in values_dict:
        info_dict = es_table.get(col)
        # Copy so the shared config dict in value_es_cols_dict is not mutated.
        json_dict = info_dict.copy()
        value_list = values_dict[col]
        value_len = len(value_list)
        d_index = json_dict.get('index', '')
        del json_dict['fieldtype']
        # Flush to es every `every` rows.
        start_time = time.time()
        for i in range(0, value_len, every):
            end_num = min(i + every, value_len)
            # Renamed from `action`: the old name shadowed the parameter.
            bulk_actions = [{
                "_op_type": op_type,
                "_index"  : d_index,
                "_id"     : md5((table + str(col) + value_list[k]).encode(encoding='utf-8')).hexdigest(),
                "_source" : get_row_info_dict(value_list, k, json_dict)
            } for k in range(i, end_num)]
            res = ''
            try:
                res = helpers.bulk(es, actions=bulk_actions)
                log.info(f'成功处理{len(bulk_actions)}条值数据：请求返回 {res}')
            except Exception as e:
                # Log at error level and include the exception, matching col2es.
                log.error(f'值数据处理失败:请求返回 {res}')
                log.error(f'错误信息：{e}')
                return 1
        log.info(f'耗时{int(time.time() - start_time)} seconds 成功处理{value_len}条值数据')
    return 0


def col2es(action='add'):
    """字段名批量入es — bulk-import every table's column metadata into Elasticsearch.

    :param action: reserved for future op types; only indexing is performed now.
    :return: 0 on success, 1 when the bulk request fails.
    """
    op_type = 'index'
    es = ins_es()
    start_time = time.time()
    # Renamed from `action`: the old name shadowed the parameter.
    bulk_actions = []
    for cfg in table_to_es_cols_json_list:
        # Work on a copy: the previous in-place `del` mutated the shared
        # module-level config list, so a second col2es() call raised KeyError.
        json_dict = cfg.copy()
        d_index = json_dict.get('index', '')
        d_table = json_dict.get('table', ' ')
        del json_dict['fieldtype']
        del json_dict['index']
        bulk_actions.append({
            "_op_type": op_type,
            "_index"  : d_index,
            "_id"     : md5((d_table + json_dict['attr'] + json_dict['value']).encode(encoding='utf-8')).hexdigest(),
            "_source" : json_dict
        })
    res = ''
    try:
        res = helpers.bulk(es, actions=bulk_actions)
        log.info(f'成功处理{len(bulk_actions)}条数据：请求返回 {res}')
    except Exception as e:
        log.error(f'列信息处理失败:请求返回 {res}')
        log.error(f'错误信息：{e}')
        return 1
    log.info(f'耗时{int(time.time() - start_time)} seconds 成功处理{len(table_to_es_cols_json_list)}条列信息数据')
    return 0


def get_row_info_dict(values_list, row_no, json_dict):
    """Fill json_dict['value'] with the row_no-th value and return a copy.

    Note: json_dict is mutated in place (callers reuse it across rows); the
    returned dict is an independent shallow copy.
    """
    json_dict['value'] = values_list[row_no]
    return dict(json_dict)


def file_split_csv(file_path, sep, table_name, data_month, dir_path="", every_rows=2000, action=""):
    """Split a big delimited data file into numbered CSV chunks that keep
    only the es-mapped columns.

    :param file_path: source data file (no header; columns per table_cols_dict).
    :param sep: field separator of the source file.
    :param table_name: table name; must exist in both config dicts.
    :param data_month: month tag appended to the output directory/file names.
    :param dir_path: output parent directory; defaults to this script's directory.
    :param every_rows: rows per output chunk.
    :param action: unused; kept for interface compatibility with callers.
    :return: directory containing the generated CSV chunks.
    """
    assert table_name in table_cols_dict, '表名未定义具体列，dat文件列名将未知'
    assert table_name in value_es_cols_dict, '该表无字段值需要导入es'
    colus = table_cols_dict.get(table_name)
    # engine must be lowercase: pandas rejects 'Python' with ValueError.
    df_reader = pd.read_csv(file_path, sep=sep, header=None, names=colus, iterator=True, dtype=str, engine='python')
    save_dir_path = dir_path if dir_path else os.path.dirname(os.path.abspath(__file__))
    file_dir = os.path.join(save_dir_path, table_name + data_month)
    if not os.path.exists(file_dir):
        os.mkdir(file_dir)
    # Only the es-mapped columns are kept — hoisted out of the loop
    # (loop-invariant) and materialized to a list for pandas selection.
    es_need_columns = list(value_es_cols_dict[table_name].keys())
    itercount = 0
    while True:
        # Keep the try body minimal: only get_chunk raises StopIteration.
        try:
            itercount += 1
            df = df_reader.get_chunk(size=every_rows)
        except StopIteration:
            break
        df = df[es_need_columns]
        log.info(df.head(5))
        f_abs_name = f"{table_name}{data_month}-{str(itercount).zfill(5)}.csv"
        filename = os.path.join(file_dir, f_abs_name)
        df.to_csv(path_or_buf=filename, sep=',', encoding='utf-8', index=False)
    log.info(f'分割保存{table_name}{data_month} 文件完毕')
    return file_dir


def file2es(table_name, data_month, datafile, sep, action):
    """Split a raw data file into CSV chunks, then import each chunk's
    values into Elasticsearch, logging progress per file."""
    log.info(f'开始更新{table_name}{data_month}节点')
    try:
        files_dir = file_split_csv(datafile, sep, table_name, data_month, every_rows=sep_every, action=action)
    except Exception as e:
        log.error(f'{table_name}{data_month}分割csv失败，程序退出')
        log.error(e)
        raise e
    log.info(f'{table_name}{data_month}节点导出csv完毕，开始入Elasticsearch')
    csv_paths = [os.path.join(files_dir, name) for name in os.listdir(files_dir) if name.endswith('.csv')]
    csv_paths.sort()
    total = len(csv_paths)
    for file_num, filepath in enumerate(csv_paths, start=1):
        try:
            log.info(f'导入文件路径:{filepath},进度【{file_num}/{total}】')
            csv_value_to_es(table_name, filepath, every=update_every)
        except Exception as e:
            log.error(f'{table_name}{data_month}中{filepath} 值导入失败:{str(e)}')
            raise e
    log.info(f'============{table_name}{data_month}值导入完毕==============')


if __name__ == '__main__':
    # Entry point: currently only pushes the column metadata into es.
    # Value/file imports below are kept as commented-out usage examples.
    # csv_bulk2es('t0202_kgem_test', './test.csv', every=10000)
    col2es()

    # file2csv(file_path='./test.csv', sep=',', table_name='t0202_kgem_test', data_month='02')