"""
-------------------------------------------------
   File Name：     MultipleEsPath
   Description :
   Author :       willis
   date：          2019/4/24
-------------------------------------------------
   Change Activity:
                   2019/4/24:

-------------------------------------------------
    https://www.elastic.co/guide/en/elasticsearch/reference/6.2/docs-bulk.html
    https://elasticsearch-py.readthedocs.io/en/master/api.html
    py3 中文：
    https://python3-cookbook.readthedocs.io/zh_CN/latest/c02/p14_combine_and_concatenate_strings.html
    query:
    https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html
-------------------------------------------------
"""
__author__ = 'willis'

import logging.handlers
import logging
import sys
import os
import time
import json
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import types
import psutil


# import copyreg
#
# def _pickle_method(m):
#     if m.im_ is None:
#         return getattr, (m.im_class, m.im_func.func_name)
#     else:
#         return getattr, (m.im_, m.im_func.func_name)
# copyreg.pickle(types.MethodType, _pickle_method)




# Elasticsearch hosts, comma separated "ip:port" pairs
es_host = '172.22.56.34:9200,172.22.56.31:9200'
# Index name
es_index = 'multiples'
# Documents per bulk request
es_batch_size = 6000
# Total number of documents to generate/import (cumulative upper bound)
es_add_total = 1000000
# Number of ES primary shards
es_pshards_total = 64
# Number of ES replicas
es_rshards_total = 1
# Number of worker processes
es_proc_total = 4
# Index document type
es_doc_type = "type_doc_test"
# Index refresh interval (-1 disables periodic refresh during bulk load)
es_refresh_time = -1
# Number of data disks per node
# es_disk_num = 1
# Source files to import into ES
source_dir = 'input'
source_file = 'source.txt'
source_file_count = 20
source_start_id = 0
# JSON template file for generated documents
source_json = 'package.json'
# Report files, appended to across runs
report_file = 'reportup.txt'
report_outfile = 'reportout.txt'
# Script start timestamp; also names the per-run report file
create_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
# Disk IO delta record file
write_io_file = 'io_file.txt'


def Log_Out():
    '''
    Configure the root logger with a rotating file handler and a console
    handler, then return it.

    The log file is created next to the running script (``<script>.log``),
    rotates at 10 MiB and keeps 20 backups.

    :return: the configured root ``logging.Logger``
    '''
    log_file = os.path.realpath(sys.argv[0]) + '.log'

    # Use the root logger so the module-wide `logs` handle inherits both
    # handlers everywhere.
    logs = logging.getLogger()
    logs.setLevel(logging.INFO)

    # Rolling file handler: 10 MiB per file, 20 backups.
    logrota = logging.handlers.RotatingFileHandler(
        log_file, maxBytes=10 * 1024 * 1024, backupCount=20)
    logrota.setLevel(logging.INFO)

    # Mirror records to the console as well.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)

    # Named `log_format` so the builtin `format` is not shadowed.
    log_format = ('%(asctime)s|%(name)s|%(levelname)s|'
                  '%(thread)s|%(process)s|%(lineno)d| %(message)s')
    formatter = logging.Formatter(log_format)

    logrota.setFormatter(formatter)
    console.setFormatter(formatter)

    logs.addHandler(logrota)
    logs.addHandler(console)

    logs.info('Loging start ...')
    return logs

def InitEs(es_host):
    '''
    Build an Elasticsearch client from a comma separated "ip:port" list.

    :param es_host: e.g. "10.0.0.1:9200,10.0.0.2:9200"
    :return: an `Elasticsearch` client with sniffing enabled
    '''
    host_list = [
        {'host': entry.split(':')[0], 'port': entry.split(':')[1]}
        for entry in es_host.split(',')
    ]
    logs.info("连接ES主机：{}".format(host_list))

    myes = Elasticsearch(
        host_list,
        # sniff before doing anything
        sniff_on_start=True,
        # refresh nodes after a node fails to respond
        sniff_on_connection_fail=True,
        # and also every 60 seconds
        sniffer_timeout=60,
        bulk_size=10000,
        timeout=20
    )
    logs.info("初始化es:{}".format(myes))
    return myes

def Report_Out(msg):
    '''
    Emit an import report to the log and to the report files.

    :param msg: dict produced by a bulk-import worker; ``msg['tag']``
                selects the per-process layout (0) or the summary layout
                (anything else, which is also appended to the cumulative
                report files).
    '''
    logs.info('输出报告 ...')

    if msg['tag'] == 0:
        # Per-process layout: logged and written to the per-run file only.
        result = '{0}, 进程:[{1}-{2}-{3}], 批次:{4}, 文件:[{5}], 增量:{6}, ES总量:{7}, 批量:{8}, 磁盘:[{9}]{10}, ES副本:{11}, 耗时:{12}s计{13}m'.format(
            msg['now_time'], msg['proc_total'], msg['proc_name'],
            msg['proc_id'], msg['run_total'], msg['import_file'],
            msg['line_total'], msg['es_total'], msg['es_batch_size'],
            msg['dir_count'], msg['dir_list'], msg['es_rshards_total'],
            msg['bulk_import_use_sec'], msg['bulk_import_use_min'])
        logs.info(result)
    else:
        # Summary layout: logged plus appended to the cumulative files.
        result = '{0}, 进程:[{1}-{2}-{3}], 批次状态:{4}-run{5}, 增量:{6}, ES总量:{7}, 批量:{8}, 磁盘:[{9}]{10}, ES副本:{11}, 分片:{12}, 耗时:{13}s计{14}m'.format(
            msg['now_time'], msg['proc_total'], msg['proc_name'],
            msg['proc_id'], msg['run_total'], msg['running_total'],
            msg['line_total'], msg['es_total'], msg['es_batch_size'],
            msg['dir_count'], msg['dir_list'], msg['es_rshards_total'],
            msg['es_pshards_total'], msg['bulk_import_use_sec'],
            msg['bulk_import_use_min'])
        result_out = '{0}, 进程:[{1}], 批次状态:{2}-run{3}, 增量:{4}, ES总量:{5}, 批量:{6}, 磁盘:[{7}], ES副本:{8}, 分片:{9}, 耗时:{10}s计{11}m'.format(
            msg['now_time'], msg['proc_total'], msg['run_total'],
            msg['running_total'], msg['line_total'], msg['es_total'],
            msg['es_batch_size'], msg['dir_count'], msg['es_rshards_total'],
            msg['es_pshards_total'], msg['bulk_import_use_sec'],
            msg['bulk_import_use_min'])
        logs.info(result)

        # Cumulative detailed report.
        with open(report_file, 'a+') as report_f_one:
            report_f_one.write(result)
            report_f_one.write('\n')

        # Cumulative condensed report.
        with open(report_outfile, 'a+') as report_f_two:
            report_f_two.write(result_out)
            report_f_two.write('\n')

    # Per-run report file, named after the script start timestamp
    # ("YYYY-MM-DD HH:MM:SS" -> "YYYYMMDDHHMMSS" spliced into report_file).
    stamp = ''.join(create_time.split()[0].split('-')) + ''.join(create_time.split()[1].split(':'))
    with open(report_file.replace('up', stamp), 'a+') as report_f:
        report_f.write(result)
        report_f.write('\n')

def TimeCal(start=None, end=None):
    '''
    Two-mode stopwatch helper.

    ``TimeCal(start=<truthy>)``            -> current ``time.time()`` stamp.
    ``TimeCal(start=<stamp>, end=True)``   -> ``(seconds, minutes)`` elapsed
                                              since the stamp, each rounded
                                              to one decimal place.
    Any other combination (e.g. no arguments) returns None.

    :param start: truthy flag to take a stamp, or a previous stamp value
    :param end: truthy to compute the elapsed time since ``start``
    '''
    # 'is None' instead of '== None': identity check per PEP 8.
    if start and end is None:
        return time.time()
    if start and end:
        elapsed = time.time() - start
        return round(elapsed, 1), round(elapsed / 60, 1)

def CreateFileThread():
    '''
    Generate `source_file_count` pipe-delimited source files under
    `source_dir`, `es_add_total` rows in total.  Row ids continue from the
    current maximum id in ES (GetMaxId), so repeated runs produce new ids.
    '''
    start_time = TimeCal(start=True)
    logs.info("开始生成数据：")

    # Recreate the source directory: purge old files, or create it fresh.
    if os.path.exists(source_dir):
        for f in os.listdir(source_dir):
            logs.info("清理历史数据[{}]".format(os.path.join(source_dir, f)))
            os.remove(os.path.join(source_dir, f))
    else:
        logs.info("创建源数据目录[{}]".format(source_dir))
        os.mkdir(source_dir)

    # Rows per file; the division remainder is appended to the last file.
    input_file_total = es_add_total // source_file_count
    input_file_add = es_add_total % source_file_count
    input_list = [input_file_total, input_file_total + input_file_add]
    logs.info('文件信息：{}'.format(input_list))

    # Fixed column values shared by every generated row.
    num = 0
    name = 'index'
    ip = '192.168.1.1'
    port = 0
    dep_path = '/var/bh/dep'
    put_path = '/var/bh/put'
    bak_path = '/var/bh/bak'
    data_create_time = create_time
    data_update_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())

    input_id = 0
    for f in range(1, source_file_count + 1):
        # The last file also receives the remainder rows.
        if f == source_file_count:
            input_max_line = input_list[1]
        else:
            input_max_line = input_list[0]

        # First file starts after the current ES max id; later files
        # continue where the previous one stopped.
        # NOTE(review): GetMaxId() returns an ES `_id` — assumed to be
        # numeric (int()-convertible); confirm against the index contents.
        if f == 1:
            input_id = int(GetMaxId())
        else:
            input_id = int(input_list[0]) + input_id

        logs.debug('当前文件行数：{}'.format(input_max_line))

        # Output file: source1.txt, source2.txt, ...
        file = os.path.join(source_dir, source_file.replace('.txt', str(f) + '.txt'))
        logs.info('生成文件[{}],行数:[{}]'.format(file, input_max_line))
        with open(file, 'w') as f_show:
            # One '|'-joined row per id in this file's id range.
            for line in range(input_id + 1, int(input_max_line) + input_id + 1):
                data_line = [
                    line,
                    name + str(line),
                    ip,
                    port + line,
                    dep_path,
                    put_path,
                    bak_path,
                    data_create_time,
                    data_update_time
                ]
                w_line = '|'.join(str(d) for d in data_line)
                f_show.write(w_line)
                f_show.write('\n')
    use_sec, use_min = TimeCal(start=start_time, end=True)
    logs.info("生成数据[{}]完成,用时[{}]秒,计[{}]分".format(es_add_total, use_sec, use_min))
    
def ReadJson(json_file=None):
    '''
    Load and return the JSON document template.

    :param json_file: path of the JSON file to read; defaults to the
                      module-level ``source_json`` when omitted, so the
                      original no-argument call keeps working.
    :return: the parsed JSON object
    '''
    path = source_json if json_file is None else json_file
    # json.load parses directly from the file object; utf-8 is forced so
    # the result does not depend on the platform locale.
    with open(path, 'r', encoding='utf-8') as sf_show:
        return json.load(sf_show)
    
def CreateFileFromJson():
    '''
    Generate `source_file_count` JSON-lines source files under `source_dir`,
    `es_add_total` rows in total, based on the template in `source_json`.
    Only the 'num' field varies per row; ids continue from the ES max id.
    '''
    start_time = TimeCal(start=True)
    logs.info("开始生成数据：")

    # Recreate the source directory: purge old files, or create it fresh.
    if os.path.exists(source_dir):
        for f in os.listdir(source_dir):
            logs.info("清理历史数据[{}]".format(os.path.join(source_dir, f)))
            os.remove(os.path.join(source_dir, f))
    else:
        logs.info("创建源数据目录[{}]".format(source_dir))
        os.mkdir(source_dir)

    # region rows per file (the division remainder goes to the last file)
    input_file_total = es_add_total // source_file_count
    input_file_add = es_add_total % source_file_count
    input_list = [input_file_total, input_file_total + input_file_add]
    logs.info('文件信息：{}'.format(input_list))
    # endregion

    input_id = 0
    for f in range(1, source_file_count + 1):
        # region the last file also receives the remainder rows
        if f == source_file_count:
            input_max_line = input_list[1]
        else:
            input_max_line = input_list[0]
        # endregion

        # region first file starts after the ES max id (+1 applied when
        # writing rows); later files continue where the previous stopped.
        # NOTE(review): GetMaxId() returns an ES `_id` — assumed numeric.
        if f == 1:
            input_id = int(GetMaxId())
        else:
            input_id = int(input_list[0]) + input_id
        # endregion

        # region resolve the output file name (source1.txt, source2.txt, ...)
        logs.debug('当前文件行数：{}'.format(input_max_line))
        file = os.path.join(source_dir, source_file.replace('.txt', str(f) + '.txt'))
        logs.info('生成文件[{}],行数:[{}]'.format(file, input_max_line))
        # endregion

        # Reload the JSON template for each file; the dict is mutated below.
        json_data = ReadJson()

        # One JSON document per line; only 'num' differs from the template.
        with open(file, 'w') as f_show:
            for line in range(input_id + 1, int(input_max_line) + input_id + 1):
                json_data['num'] = line

                json.dump(json_data, f_show)
                f_show.write('\n')

    use_sec, use_min = TimeCal(start=start_time, end=True)
    logs.info("生成数据完成,用时[{}]秒,计[{}]分".format(use_sec, use_min))

def GetDiskNum(cfg='/home/willislong/app/elasticsearch-6.2.4/cfg/config-node-1/elasticsearch.yml'):
    '''
    Read the `path.data` entry from an elasticsearch.yml and return the
    configured data directories.

    :param cfg: config file path; the historical hard-coded location is
                kept as the default for backward compatibility.
    :return: list of directory paths with trailing '/' removed; empty list
             when no `path.data` line is present.
    '''
    disk_list = ''
    with open(cfg, 'r') as c_f:
        for line in c_f:
            # partition keeps everything after the FIRST ':' so values
            # containing colons are no longer truncated.
            key, sep, value = line.partition(':')
            if key == 'path.data':
                disk_list = value.split(',')
    if disk_list == '':
        return []

    # Normalise each entry: strip whitespace and one trailing slash.
    disk_list_result = []
    for d in disk_list:
        d = d.strip()
        disk_list_result.append(d[:-1] if d.endswith('/') else d)
    return disk_list_result

def GetDiskUse():
    '''
    Report used space for every configured data directory.

    Relies on the module-level ``disk_list`` (filled from GetDiskNum at
    startup); when it is the empty string a placeholder entry is returned.

    :return: list of "size:<used MiB>m,<path>" strings
    '''
    if disk_list == '':
        return ['size:0m,Null']
    # Named `usage_list` so the builtin `list` is not shadowed; the dead
    # use_msg/use_msg2/use_msg3 debug locals are removed.
    usage_list = []
    for entry in disk_list:
        path = entry.strip()
        # psutil reports bytes; convert to whole MiB.
        disk_use = round(psutil.disk_usage(path).used / 1024 / 1024)
        usage_list.append('size:' + str(disk_use) + "m," + path)
    return usage_list

def GetDiskIoDelta(start=None, end=None):
    '''
    Snapshot per-device write counters and, given a previous snapshot,
    compute the delta in MiB.

    :param start: previous snapshot ({device: bytes_written}); pass ''
                  or None (first call) to only take a snapshot.
    :param end: unused; kept for signature compatibility.
    :return: ``(snapshot, delta)`` — `delta` is empty on the first call,
             otherwise {device: MiB written since `start`}.
    '''
    disk_io = psutil.disk_io_counters(perdisk=True)
    partitions = psutil.disk_partitions()

    # Map each configured data directory (mountpoint) to its block device
    # name, e.g. /dev/sda1 -> sda1.  GetDiskNum already strips whitespace,
    # so the old redundant re-stripping pass is gone.
    data_dirs = GetDiskNum()
    io_device_list = [
        part[0].split('/')[-1]
        for part in partitions
        if part[1] in data_dirs
    ]

    # Index 3 of a disk_io_counters tuple is write_bytes.
    io_result = {device: disk_io[device][3] for device in io_device_list}

    # Bug fix: the old `start == ''` test never matched the declared
    # default of None; `not start` covers both first-call spellings.
    if not start:
        return io_result, {}

    io_device_delta = {}
    for device, write_b in start.items():
        delta = io_result[device] - write_b
        io_device_delta[device] = round(delta / 1024 / 1024, 2)

    for d, w in io_device_delta.items():
        logs.info("分区{}写入数据：{}m".format(d, w))
    return io_result, io_device_delta

def WriteDiskIoDelta(p, msg):
    '''
    Append one IO-delta record as a JSON line to the IO log file.

    :param p: worker process name (for the log message only)
    :param msg: {device: MiB_written} dict; falsy values are ignored
    '''
    if not msg:
        return
    with open(write_io_file, 'a+') as io_f:
        io_f.write(json.dumps(msg))
        io_f.write('\n')
    logs.info('进程{}记录IO信息完成'.format(p))

def GetDiskIoAverage(io_file=None):
    '''
    Average the per-device IO deltas recorded by WriteDiskIoDelta.

    The old implementation called ``.keys()`` on the ``readlines()`` list
    and crashed with AttributeError; this version parses each JSON line
    and returns the per-device mean.

    :param io_file: path of the JSON-lines IO log; defaults to the
                    module-level ``write_io_file``.
    :return: {device: average MiB written per recorded batch, 2 decimals}
    '''
    path = write_io_file if io_file is None else io_file
    totals = {}
    counts = {}
    with open(path, 'r') as io_f:
        # One JSON object per line: {device: MiB_written}
        for raw in io_f:
            raw = raw.strip()
            if not raw:
                continue
            for device, written in json.loads(raw).items():
                totals[device] = totals.get(device, 0.0) + written
                counts[device] = counts.get(device, 0) + 1
    return {device: round(totals[device] / counts[device], 2)
            for device in totals}



def GetSysLoad():
    '''Placeholder: system-load sampling is not implemented yet.'''
    pass

def GetMaxId():
    '''
    Return the highest document id in the index, or 0 when the index is
    empty or the query fails.

    Sorts by 'num' descending, takes a single hit, and reads its `_id`.
    '''
    query_data = {
        "sort": [
            {
                "num": {
                    "order": "desc"
                }
            }
        ],
        "size": 1,
        # NOTE(review): the literal string "{num}" is probably meant to be
        # the field list ["num"]; as written ES treats it as one field
        # pattern named "{num}" — confirm against _source filtering syntax.
        "_source": "{num}",
        "query": {
            "match_all": {}
        }
    }

    try:
        res = myes.search(index=es_index, doc_type=es_doc_type, body=query_data)
        logs.info("MAX ID res:{}".format(res))
        if res['hits']['hits']:
            # NOTE(review): sorts by 'num' but reads '_id' — this assumes
            # documents are indexed with _id == num (true for
            # IndexBulkImportData); verify for the JSON import path, which
            # lets ES auto-assign ids.
            maxId = res['hits']['hits'][0]['_id']
        else:
            maxId = 0
        logs.info("MAX ID :{}".format(maxId))
    except Exception as e:
        # Any search failure (missing index, connection error) silently
        # falls back to 0 so file generation can start from scratch.
        maxId = 0

    return maxId

def GetEsTotal():
    '''
    Return the document count of the target index.

    When some shards failed to answer, their failure count is subtracted
    from the total (same intent as before, but reading the failure count
    from its actual location).
    '''
    count = myes.count(index=es_index)
    logs.info("索引总量：{}".format(count))
    if count['_shards']['failed'] == 0:
        return count['count']
    # Bug fix: 'failed' lives under '_shards' in the count API response;
    # the old top-level lookup raised KeyError on any shard failure.
    return count['count'] - count['_shards']['failed']

def CreateIndex(es_index):
    '''
    Create the benchmark index with an explicit mapping when it does not
    exist yet.

    String columns (name/ip/paths) are mapped as ``keyword`` — the file
    generator writes strings into them, so the previous ``long`` mapping
    would have rejected every document.  Numeric columns stay ``long`` and
    the timestamps use the generator's date format.
    '''
    if myes.indices.exists(index=es_index) is not True:
        logs.info("开始创建索引：{}".format(es_index))
        mappings = {
            "mappings": {
                es_doc_type: {
                    "properties": {
                        "num": {
                            "type": "long"
                        },
                        "name": {
                            "type": "keyword"
                        },
                        "ip": {
                            "type": "keyword"
                        },
                        "port": {
                            "type": "long"
                        },
                        "dep_path": {
                            "type": "keyword"
                        },
                        "put_path": {
                            "type": "keyword"
                        },
                        "bak_path": {
                            "type": "keyword"
                        },
                        "createTime": {
                            "type": "date",
                            "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"
                        },
                        "updateTime": {
                            "type": "date",
                            "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"
                        }
                    }
                }
            },
            "settings": {
                "index": {
                    "mapping": {
                        "nested_fields": {
                            "limit": "2000"
                        }
                    },
                    "refresh_interval": es_refresh_time,
                    "number_of_shards": es_pshards_total,
                    "number_of_replicas": es_rshards_total
                }
            }
        }

        indexing = myes.indices.create(index=es_index, body=mappings)
        # Bug fix: compare against the actual index name instead of the
        # hard-coded 'multiples' literal, so a renamed index still passes.
        if indexing["acknowledged"] and \
                indexing["shards_acknowledged"] and \
                indexing["index"] == es_index:
            logs.info("创建索引：{}成功".format(es_index))
        else:
            logs.error("创建索引：{}失败".format(es_index))

def CreateEmptyIndex():
    '''
    Create the index with settings only (dynamic mapping) when it does
    not exist yet.
    '''
    if myes.indices.exists(index=es_index) is not True:
        logs.info("开始创建索引：{}".format(es_index))
        mappings = {
            "settings": {
                "index": {
                    "mapping": {
                        "nested_fields": {
                            "limit": "2000"
                        }
                    },
                    "refresh_interval": es_refresh_time,
                    "number_of_shards": es_pshards_total,
                    "number_of_replicas": es_rshards_total
                }
            }
        }

        indexing = myes.indices.create(index=es_index, body=mappings)
        # Bug fix: compare against the actual index name instead of the
        # hard-coded 'multiples' literal.
        if indexing["acknowledged"] and \
                indexing["shards_acknowledged"] and \
                indexing["index"] == es_index:
            logs.info("创建索引：{}成功".format(es_index))
        else:
            logs.error("创建索引：{}失败".format(es_index))

def DelIndex(es_index):
    '''Delete the given index, logging success or failure.'''
    logs.info("开始删除索引：{}".format(es_index))
    try:
        if myes.indices.delete(index=es_index)["acknowledged"]:
            logs.info("删除索引[{}]成功".format(es_index))
        else:
            logs.error("删除索引[{}]失败".format(es_index))
    except Exception as e:
        # Index may not exist, or the cluster may be unreachable.
        logs.error("删除索引[{}]失败,{}".format(es_index, e))

def RefreshEs():
    '''Force a refresh of the index and log the shard-level outcome.'''
    refresh_start_time = TimeCal(start=True)
    logs.info("开始刷新ES")
    res = myes.indices.refresh(index=es_index)
    logs.info("res:{}".format(res))

    shards = res["_shards"]
    if shards:
        # Success means every shard answered and none failed.
        if shards['total'] == shards['successful'] and shards['failed'] == 0:
            logs.info("刷新成功:{}".format(shards))
        else:
            logs.error("res:{}".format(shards))
    else:
        logs.error("刷新失败:{}".format(res))

    refresh_use_sec, refresh_use_min = TimeCal(start=refresh_start_time, end=True)
    logs.info("refresh 共计用时：{}s计{}分".format(refresh_use_sec, refresh_use_min))

def GetRShards():
    '''
    Return the current number_of_replicas of the index (as a string).

    The settings API response is shaped like::

        {es_index: {'settings': {'index': {'number_of_replicas': '1',
                                           'number_of_shards': '4', ...}}}}
    '''
    settings = myes.indices.get_settings(index=es_index)
    rshards_num = settings[es_index]['settings']['index']['number_of_replicas']
    logs.info("副本数量：{}".format(rshards_num))
    return rshards_num

def UpdataRShards():
    '''Set number_of_replicas to es_rshards_total when it differs.'''
    rshards_num = int(GetRShards())
    if rshards_num == es_rshards_total:
        # Already at the desired replica count; nothing to do.
        return
    body = {"index": {"number_of_replicas": es_rshards_total}}
    result = myes.indices.put_settings(index=es_index, body=body)
    if result['acknowledged']:
        logs.info("副本更新：{}->{} 完成.".format(rshards_num, es_rshards_total))
    else:
        logs.error("副本更新：{}->{} 失败.".format(rshards_num, es_rshards_total))

def IndexBulkImportData(n, file):
    '''
    Worker: bulk-index one pipe-delimited source file into ES.

    :param n: worker sequence number (used in logs and the report)
    :param file: file name inside `source_dir`
    :return: report dict in the shape Report_Out expects for tag == 0, or
             None when the source file is missing/empty.
    '''
    start_time = TimeCal(start=True)

    # Each worker process builds its own client (connections don't fork).
    myes = InitEs(es_host)

    pname = multiprocessing.current_process().name
    logs.info("进程 [{}:{}] 开始导入[{}]数据到ES".format(pname, n, file))
    file = os.path.join(source_dir, file)

    if not os.path.exists(file) or os.path.getsize(file) == 0:
        logs.error("进程 [{}] 源文件不存在或为空文件!!!".format(n))
        return

    # Size of the most recent bulk request (reported below).  Initialised
    # here so a file whose line count is an exact multiple of
    # es_batch_size no longer raises UnboundLocalError at report time.
    batch_size = 0
    with open(file, 'r', encoding='utf-8') as f_show:
        lines = f_show.readlines()

        logs.info("进程 [{}] 源数据文件总量：{}".format(n, len(lines)))
        actions = []
        for data in lines:
            line = data.strip().split('|')
            logs.debug("进程 [{}] 导入数据：{}".format(n, line))

            action = {
                "_index": es_index,
                "_type": es_doc_type,
                "_id": line[0],
                "_source": {
                    "num": line[0],
                    "名称": line[1],
                    "ip地址": line[2],
                    "端口": line[3],
                    "部署路径": line[4],
                    "上传路径": line[5],
                    "备份路径": line[6],
                    "创建时间": line[7],
                    "更新时间": line[8]
                }
            }
            actions.append(action)

            # Flush a full batch.
            if len(actions) == es_batch_size:
                logs.info("进程 [{}] batch size {}".format(n, es_batch_size))
                helpers.bulk(myes, actions)
                batch_size = es_batch_size
                del actions[:]

        # Flush the trailing partial batch.
        if len(actions) > 0:
            logs.info("进程 [{}] batch size {}".format(n, es_batch_size))
            helpers.bulk(myes, actions)
            batch_size = len(actions)
            del actions[:]

    logs.info("进程 [{}] 导入数据到ES完成.".format(n))
    bulk_import_use_sec, bulk_import_use_min = TimeCal(start=start_time, end=True)
    logs.info("indexing 进程 [{}] 共计用时：{}s,计{}m".format(n, bulk_import_use_sec, bulk_import_use_min))

    # Bug fix: return the dict layout Report_Out consumes (it indexes
    # msg['tag'] etc.) — the old positional list crashed the callback.
    # This also matches IndexBulkImportJsonData.
    disk_msg = GetDiskNum()
    return {
        "tag": 0,
        "now_time": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
        "proc_total": es_proc_total,
        "proc_name": multiprocessing.current_process().name,
        "proc_id": multiprocessing.current_process().pid,
        "run_total": n,
        "line_total": len(lines),
        "es_total": GetEsTotal(),
        "es_batch_size": batch_size,
        "dir_count": len(disk_msg),
        "dir_list": disk_msg,
        "es_rshards_total": es_rshards_total,
        "bulk_import_use_sec": bulk_import_use_sec,
        "bulk_import_use_min": bulk_import_use_min,
        "import_file": file
    }

def IndexBulkImportJsonData(n, file):
    '''
    Worker: bulk-index one JSON-lines source file into ES, recording the
    per-batch disk IO delta.

    :param n: worker sequence number (used in logs and the report)
    :param file: file name inside `source_dir`
    :return: report dict in the shape Report_Out expects for tag == 0, or
             None when the source file is missing/empty.
    '''
    start_time = TimeCal(start=True)

    # Each worker process builds its own client (connections don't fork).
    es_init = InitEs(es_host)

    pname = multiprocessing.current_process().name
    file = os.path.join(source_dir, file)
    logs.info("进程 [{}:{}] 开始导入[{}]数据到ES".format(pname, n, file))

    if not os.path.exists(file) or os.path.getsize(file) == 0:
        logs.info("进程 [{}] 源文件不存在或为空文件!!!".format(n))
        return

    count = 0
    # Size of the most recent bulk request (reported below).  Initialised
    # here so a file whose line count is an exact multiple of
    # es_batch_size no longer raises UnboundLocalError at report time.
    batch_size = 0
    with open(file, 'r', encoding='utf-8') as f_show:
        lines = f_show.readlines()

        logs.info("进程 [{}] 源数据文件总量：{}".format(n, len(lines)))
        actions = []
        start_io = ''
        for data in lines:
            line = json.loads(data)
            logs.debug("进程 [{}] 导入数据：{}".format(n, line))

            # _id left to ES auto-assignment, as before.
            actions.append({
                "_index": es_index,
                "_type": es_doc_type,
                "_source": line
            })
            # Flush a full batch and record its IO delta.
            if len(actions) == es_batch_size:
                count = count + es_batch_size
                res = helpers.bulk(es_init, actions)
                batch_size = es_batch_size
                del actions[:]
                start_io, delta = GetDiskIoDelta(start_io)
                logs.info(
                    "进程 [{}] batch size {}, all {}, indexed {}, code:{}, write {}".format(n, es_batch_size, len(lines),
                                                                                          count, res, delta))
                WriteDiskIoDelta(pname, delta)

        # Flush the trailing partial batch.
        if len(actions) > 0:
            # Bug fix: the trailing batch holds len(actions) documents,
            # not a full es_batch_size — the old code over-counted here.
            count = count + len(actions)
            res = helpers.bulk(es_init, actions)
            start_io, delta = GetDiskIoDelta(start_io)
            logs.info(
                "进程 [{}] batch size {}, all {}, indexed {}, code:{}, write {}".format(n, es_batch_size, len(lines),
                                                                                      count, res, delta))
            batch_size = len(actions)
            del actions[:]
            WriteDiskIoDelta(pname, delta)

    logs.info("进程 [{}] 导入数据到ES完成.".format(n))
    bulk_import_use_sec, bulk_import_use_min = TimeCal(start=start_time, end=True)
    logs.info("indexing 进程 [{}] 共计用时：{}s,计{}m".format(n, bulk_import_use_sec, bulk_import_use_min))

    # Disk-usage sampling deliberately disabled (GetDiskUse is expensive).
    disk_msg = []
    logs.info("进程 [{}] disk_msg:{}".format(n, disk_msg))

    result = {
        "tag": 0,
        "now_time": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
        "proc_total": es_proc_total,
        "proc_name": multiprocessing.current_process().name,
        "proc_id": multiprocessing.current_process().pid,
        "run_total": n,
        "line_total": len(lines),
        "es_total": GetEsTotal(),
        "es_batch_size": batch_size,
        "dir_count": len(disk_msg),
        "dir_list": disk_msg,
        "es_rshards_total": es_rshards_total,
        "bulk_import_use_sec": bulk_import_use_sec,
        "bulk_import_use_min": bulk_import_use_min,
        "import_file": file
    }
    logs.info("进程 [{}] Report result:{}".format(n, result))
    return result

def IndexBulkImportJsonPTData(n, file):
    '''
    Worker: bulk-index one JSON-lines source file into ES (variant without
    per-batch IO sampling).

    :param n: worker sequence number (used in logs and the report)
    :param file: file name inside `source_dir`
    :return: positional report list, or None when the file is missing/empty.
    '''
    start_time = TimeCal(start=True)

    # Each worker process builds its own client (connections don't fork).
    es_init = InitEs(es_host)

    pname = multiprocessing.current_process().name
    file = os.path.join(source_dir, file)
    logs.info("进程 [{}:{}] 开始导入[{}]数据到ES".format(pname, n, file))

    if not os.path.exists(file) or os.path.getsize(file) == 0:
        logs.info("进程 [{}] 源文件不存在或为空文件!!!".format(n))
        return

    # Size of the most recent bulk request; initialised so a file whose
    # line count is an exact multiple of es_batch_size no longer raises
    # UnboundLocalError at report time.
    batch_size = 0
    with open(file, 'r', encoding='utf-8') as f_show:
        lines = f_show.readlines()

        logs.info("进程 [{}] 源数据文件总量：{}".format(n, len(lines)))
        actions = []
        for data in lines:
            line = json.loads(data)
            logs.debug("进程 [{}] 导入数据：{}".format(n, line))

            # _id left to ES auto-assignment, as before.
            actions.append({
                "_index": es_index,
                "_type": es_doc_type,
                "_source": line
            })
            # Flush a full batch.
            if len(actions) == es_batch_size:
                logs.info("进程 [{}] batch size {}".format(n, es_batch_size))
                helpers.bulk(es_init, actions)
                batch_size = es_batch_size
                del actions[:]

        # Flush the trailing partial batch.
        if len(actions) > 0:
            logs.info("进程 [{}] batch size {}".format(n, es_batch_size))
            helpers.bulk(es_init, actions)
            batch_size = len(actions)
            del actions[:]

    logs.info("进程 [{}] 导入数据到ES完成.".format(n))
    bulk_import_use_sec, bulk_import_use_min = TimeCal(start=start_time, end=True)
    logs.info("indexing 进程 [{}] 共计用时：{}s,计{}m".format(n, bulk_import_use_sec, bulk_import_use_min))

    # Bug fix: disk_msg was referenced below but never assigned (NameError
    # on every call); restore the GetDiskNum() sampling the commented-out
    # line intended.
    disk_msg = GetDiskNum()
    return [
        0,
        time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
        es_proc_total,
        multiprocessing.current_process().name,
        multiprocessing.current_process().pid,
        n,
        len(lines),
        GetEsTotal(),
        batch_size,
        len(disk_msg),
        disk_msg,
        es_rshards_total,
        bulk_import_use_sec,
        bulk_import_use_min,
        file
    ]

def process_pool_import():
    """Fan a bulk-import job out over a small process pool.

    Submits one ``IndexBulkImportData`` task per (hard-coded) source-file
    name, each receiving a managed proxy copy of the global ``myes`` dict,
    then drains the pool and logs every task's return value.

    Relies on module globals: ``logs`` (logger), ``myes`` (ES client info
    dict) and the sibling function ``IndexBulkImportData``.
    """
    logs.info('启动多进程...')
    import multiprocessing
    from multiprocessing import Manager

    pp = multiprocessing.Pool(processes=3)

    # One Manager for all tasks. The previous version called Manager()
    # inside the submit loop, spawning (and leaking) a separate manager
    # server process for every single task.
    manager = Manager()

    s_file_list = ['a', 'b', 'c', 'd']
    t_list = []
    for count, sf in enumerate(s_file_list):
        res = pp.apply_async(IndexBulkImportData, args=(manager.dict(myes), count, sf))
        t_list.append(res)
    logs.info("队列 信息：{},{}".format(pp, dir(pp)))

    # Stop accepting new tasks, then block until all submitted tasks finish.
    pp.close()
    pp.join()

    # .get() re-raises any exception that occurred inside a worker.
    for r in t_list:
        logs.info(r.get())

def work(myes, n, name):
    """Debug helper: log the worker index, task name and the shared ES handle."""
    message = "procees {}, name: {}, myes:{}".format(n, name, myes)
    logs.info(message)

if __name__ == '__main__':
    # NOTE(review): `global` at module top level is a no-op in Python 3 —
    # these names are module globals already. Harmless but redundant.
    global logs, myes, disk_list

    # Configure logging (file + console, per Log_Out above).
    logs = Log_Out()

    # Snapshot of the data-disk list; used later for the per-run report.
    disk_list = GetDiskNum()
    # print(disk_list)
    # print(len(disk_list))
    # print(GetDiskUse())
    # sys.exit()

    # Connect to the ES cluster defined by the module-level es_host.
    myes = InitEs(es_host)
    logs.info(myes)

    # DelIndex(es_index)
    CreateEmptyIndex()
    # CreateIndex(es_index)
    # sys.exit()
    UpdataRShards()
    # sys.exit()

    # CreateFileThread()
    # CreateFileFromJson()
    # sys.exit()

    # Wall-clock start for the whole import run.
    start_time = TimeCal(start=True)

    logs.info('启动多进程...')
    from multiprocessing import Pool, Manager, Process

    # NOTE(review): this try/except around a plain stdlib import cannot
    # realistically fail; it looks like leftover scaffolding.
    try:
        import multiprocessing
    except:
        pass

    # One import task per file found under source_dir, in sorted order.
    s_file_list = os.listdir(source_dir)
    s_file_list.sort()
    t_list = []
    t_dict = {}
    count = 1
    # for sf in s_file_list:
    #     pname = 'pp' + str(count)
    #     pname = multiprocessing.Process(target=IndexBulkImportData, args=(count, sf))
    #     count += 1
    #     # pname.daemon = True
    #     pname.start()
    #     pname.join()
    #     logs.info("names:{}".format(pname.name))
    #     logs.info("pid:{}".format(pname.pid))

    # NOTE(review): maxtasksperchild is set to the REPLICA-shard count —
    # these are unrelated concepts; confirm this reuse is intentional.
    pools = multiprocessing.Pool(processes=es_proc_total, maxtasksperchild=es_rshards_total)
    for f in s_file_list:
        # pools.apply_async(IndexBulkImportData, args=(count, f), callback=Report_Out)
        # Each worker's dict result is appended to the report via the callback.
        res = pools.apply_async(IndexBulkImportJsonData, args=(count, f, ), callback=Report_Out)
        # logs.info(res)
        # t_list.append(res)
        t_dict[count] = res
        count += 1
        # time.sleep(2)
        # logs.info("name:{}".format(multiprocessing.current_process().name))
        # logs.info("pid:{}".format(multiprocessing.current_process().pid))

    pools.close()
    pools.join()

    # Tally successful batches; each successful task result is expected to be
    # a dict carrying run_total / proc_name / proc_id / import_file keys.
    run_num = []
    for p, c in t_dict.items():
        if c.successful():
            logs.info("批次[{}]导入成功.进程{}-PID-{},文件{}".format(c.get()['run_total'], c.get()['proc_name'], c.get()['proc_id'], c.get()['import_file']))
            run_num.append(c.get()['run_total'])
        else:
            logs.info("批次[{}]导入失败.".format(p))

    # Force a refresh so GetEsTotal() below sees all just-indexed docs.
    RefreshEs()

    # Build the aggregate run report (tag 1 = whole-run summary row).
    disk_msg = GetDiskUse()
    bulk_import_use_sec, bulk_import_use_min = TimeCal(start=start_time, end=True)
    result = {
        "tag": 1,
        "now_time": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
        "proc_total": es_proc_total,
        "proc_name": multiprocessing.current_process().name,
        "proc_id": multiprocessing.current_process().pid,
        "run_total": len(s_file_list),
        "running_total": len(run_num),
        "line_total": es_add_total,
        "es_total": GetEsTotal(),
        "es_batch_size": es_batch_size,
        "dir_count": len(disk_msg),
        "dir_list": disk_msg,
        "es_rshards_total": es_rshards_total,
        "es_pshards_total": es_pshards_total,
        "bulk_import_use_sec": bulk_import_use_sec,
        "bulk_import_use_min": bulk_import_use_min
    }
    Report_Out(result)
    # Report_Out([
    #     1,
    #     time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
    #     es_proc_total,
    #     multiprocessing.current_process().name,
    #     multiprocessing.current_process().pid,
    #     len(s_file_list),
    #     len(run_num),
    #     es_add_total,
    #     GetEsTotal(),
    #     es_batch_size,
    #     len(disk_msg),
    #     disk_msg,
    #     es_rshards_total,
    #     es_pshards_total,
    #     bulk_import_use_sec,
    #     bulk_import_use_min
    # ])
    # Report_Out('{}, 进程ID:[{}:{}], 增量:{}, ES总量:{}, 批量:{}, 进程:{}run{}, 磁盘:{}, ES副本:{}, 耗时:{}s计{}m'.format(
    #             time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
    #             multiprocessing.current_process().name,
    #             multiprocessing.current_process().pid,
    #             es_add_total,
    #             GetMaxId(),
    #             es_batch_size,
    #             es_proc_total,
    #             len(run_num),
    #             es_disk_num,
    #             es_rshards_total,
    #             bulk_import_use_sec,
    #             bulk_import_use_min
    #             ))