"""
-------------------------------------------------
   File Name：     MultipleEsPath
   Description :
   Author :       willis
   date：          2019/4/24
-------------------------------------------------
   Change Activity:
                   2019/4/24:

-------------------------------------------------
    https://www.elastic.co/guide/en/elasticsearch/reference/6.2/docs-bulk.html
    https://elasticsearch-py.readthedocs.io/en/master/api.html
    py3 中文：
    https://python3-cookbook.readthedocs.io/zh_CN/latest/c02/p14_combine_and_concatenate_strings.html
    query:
    https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html
-------------------------------------------------
"""
__author__ = 'willis'

import logging.handlers
import logging
import sys
import os
import time
import json
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import types
# import copyreg
#
# def _pickle_method(m):
#     if m.im_self is None:
#         return getattr, (m.im_class, m.im_func.func_name)
#     else:
#         return getattr, (m.im_self, m.im_func.func_name)
# copyreg.pickle(types.MethodType, _pickle_method)


class Main:
    def __init__(self, es_disk_num=2, shards=1, process=1, es_batch_size=1500, es_add_total=1000000):
        """Initialize run configuration and set up logging.

        :param es_disk_num: data disks per node (used only in report output)
        :param shards: ES replica count (passed to number_of_replicas)
        :param process: worker (thread/process) count
        :param es_batch_size: documents per bulk request
        :param es_add_total: total number of documents to add this run
        """
        self.Log_Out()
        # ES address(es), comma separated "host:port" pairs
        self.es_host = '172.22.56.34:9200,172.22.56.31:9200'
        # index name
        self.es_index = 'multiples'
        # documents per bulk request
        self.es_batch_size = es_batch_size
        # total number of documents to add (cumulative cap)
        self.es_add_total = es_add_total
        # ES replica count (note: named *_shards_total but used as
        # number_of_replicas when creating the index)
        self.es_shards_total = shards
        # worker count
        self.es_proc_total = process
        # document type
        self.es_doc_type = "type_doc_test"
        # data disks per node (report field only)
        self.es_disk_num = es_disk_num
        # source data for the ES import
        self.source_dir = 'input'
        self.source_file = 'source.txt'
        self.source_file_count = 10
        # JSON-format source file
        self.source_json = 'package.json'
        # cumulative report file (appended across runs)
        self.report_file = 'reportup.txt'
        # run timestamp; also keys the per-run report file name
        self.create_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())


    def Log_Out(self):
        '''
        日志输出
        :return:
        '''

        log_file = os.path.realpath(sys.argv[0]) + '.log'

        # 定义对应的程序模块名name，默认是root
        self.logs = logging.getLogger()
        self.logs.setLevel(logging.INFO)

        # 定义日志回滚
        logrota = logging.handlers.RotatingFileHandler(log_file, maxBytes=10 * 1024 * 1024, backupCount=20)

        # 设置日志等级
        logrota.setLevel(logging.DEBUG)

        # 日志输出到屏幕控制台
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)

        # 定义日志格式
        format = ('%(asctime)s|%(name)s|%(levelname)s|'
                  # '%(pathname)s|'
                  '%(thread)s|%(process)s|%(lineno)d|%(message)s')

        # 实例化handler
        formatter = logging.Formatter(format)

        # 格式化handler
        logrota.setFormatter(formatter)
        console.setFormatter(formatter)

        # 添加handler
        self.logs.addHandler(logrota)
        self.logs.addHandler(console)
        #
        # self.logs.debug('my is debug')
        # self.logs.info('my is info')
        # self.logs.warning('my is warning')
        # self.logs.error('my is error')
        # self.logs.critical('my is critical')
        self.logs.info('Loging start ...')

    def Report_Out(self, msg):
        '''
        报告输出
        :return:
        '''
        self.logs.info('输出报告 ...')
        self.logs.info(msg)
        file_time_name = ''.join(self.create_time.split()[0].split('-')) + ''.join(self.create_time.split()[1].split(':'))
        with open(self.report_file.replace('up', file_time_name), 'w') as report_f:
            report_f.write('时间, 总量, 批量, 耗时')
            report_f.write('\n')
            report_f.write(msg)
            report_f.write('\n')
        with open(self.report_file, 'a+') as report_f_one:
            report_f_one.write(msg)
            report_f_one.write('\n')

    def TimeCal(self, start=None, end=None):
        if start and end == None:
            return time.time()
        if start and end:
            # if not isinstance(start, float):
            #     return
            sec = time.time() - start
            mint = sec / 60
            return round(sec, 1), round(mint, 1)
        # if end and start == None:
        #     return

    def OutputTotal(self):
        self.input_total = int(self.maxId) + int(self.es_add_total)

        self.logs.info("本次更新后总量：{}".format(self.input_total))

    def ReadJson(self):
        with open(self.source_json, 'r') as sf_show:
            sf_data = sf_show.read()
            self.source_json_data = json.loads(sf_data)

    def CreateFileFromJson(self):
        start_time = self.TimeCal(start=True)
        self.logs.info("开始生成数据：")

        self.ReadJson()
        # 输出文件
        with open(self.source_file, 'w') as f_show:
            self.OutputTotal()
            for line in range(int(self.maxId) + 1, int(self.input_total) + 1):
                input_line = self.source_json_data
                input_line['id'] = line
                json.dump(input_line, f_show)
                f_show.write('\n')

        use_sec, use_min = self.TimeCal(start=start_time, end=True)
        self.logs.info("生成数据完成,用时[{}]秒,计[{}]分".format(use_sec, use_min))

    def CreateFile(self):
        start_time = self.TimeCal(start=True)
        self.logs.info("开始生成数据：")

        num = 0
        name = 'index'
        ip = '192.168.1.1'
        port = 0
        dep_path = '/var/bh/dep'
        put_path = '/var/bh/put'
        bak_path = '/var/bh/bak'
        data_create_time = self.create_time
        data_update_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())

        # 输出文件
        with open(self.source_file, 'w') as f_show:
            self.OutputTotal()
            for line in range(int(self.maxId) + 1, int(self.input_total) + 1):
                data_line = [
                    line,
                    name + str(line),
                    ip,
                    port + line,
                    dep_path,
                    put_path,
                    bak_path,
                    data_create_time,
                    data_update_time
                ]
                w_line = '|'.join(str(d) for d in data_line)
                # self.logs.info("插入数据：{}".format(w_line))
                f_show.write(w_line)
                f_show.write('\n')

        use_sec, use_min = self.TimeCal(start=start_time, end=True)
        self.logs.info("生成数据完成,用时[{}]秒,计[{}]分".format(use_sec, use_min))

    def CreateFileThread(self):
        start_time = self.TimeCal(start=True)
        self.logs.info("开始生成数据：")

        if os.path.exists(self.source_dir):
            for f in os.listdir(self.source_dir):
                self.logs.info("清理历史数据[{}]".format(f))
                os.remove(os.path.join(self.source_dir, f))
        else:
            self.logs.info("创建源数据目录[{}]".format(self.source_dir))
            os.mkdir(self.source_dir)

        input_file_total =round(self.es_add_total / self.source_file_count)
        input_file_add = self.es_add_total % self.source_file_count
        input_list = [input_file_total, input_file_total + input_file_add]
        self.logs.info('文件信息：{}'.format(input_list))

        num = 0
        name = 'index'
        ip = '192.168.1.1'
        port = 0
        dep_path = '/var/bh/dep'
        put_path = '/var/bh/put'
        bak_path = '/var/bh/bak'
        data_create_time = self.create_time
        data_update_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())

        input_id = 0
        for f in range(1, self.source_file_count + 1):
            # 输出文件
            if f == self.source_file_count:
                input_max_line = input_list[1]
            else:
                input_max_line = input_list[0]

            if f == 1:
                input_min_line = 0
            else:
                input_min_line = input_list[0]

            self.logs.debug('当前文件行数：{}'.format(input_max_line))

            file = os.path.join(self.source_dir, self.source_file.replace('.txt', str(f) + '.txt'))
            self.logs.info('生成文件[{}],行数:[{}]'.format(file, input_max_line))
            with open(file, 'w') as f_show:
                for line in range(input_id + 1, int(input_max_line) + input_id + 1):
                    self.logs.info('')
                    data_line = [
                        line,
                        name + str(line),
                        ip,
                        port + line,
                        dep_path,
                        put_path,
                        bak_path,
                        data_create_time,
                        data_update_time
                    ]
                    w_line = '|'.join(str(d) for d in data_line)
                    # self.logs.info("插入数据：{}".format(w_line))
                    f_show.write(w_line)
                    f_show.write('\n')

        use_sec, use_min = self.TimeCal(start=start_time, end=True)
        self.logs.info("生成数据[{}]完成,用时[{}]秒,计[{}]分".format(self.es_add_total, use_sec, use_min))

    def InitEs(self):
        '''
        Parse es_host ("ip:port,ip:port") into a host list and open the
        Elasticsearch client on self.myes with sniffing enabled.
        '''
        host_list = []
        for entry in self.es_host.split(','):
            node_ip = entry.split(":")[0]
            node_port = entry.split(":")[1]
            host_list.append({'host': node_ip, 'port': node_port})
        self.logs.info("连接ES主机：{}".format(host_list))

        self.myes = Elasticsearch(
            host_list,
            # sniff the cluster before doing anything
            sniff_on_start=True,
            # re-sniff whenever a node fails to respond
            sniff_on_connection_fail=True,
            # ...and also every 60 seconds
            sniffer_timeout=60,
            bulk_size=10000,
            timeout=20
        )
        self.logs.info("初始化es:{}".format(self.myes))

        self.logs.info("dir myes: {}".format(dir(self.myes)))

    def GetIndex(self):
        '''
        Fetch the mapping of self.es_index and keep it on
        self.index_get_msg; errors are logged, not raised.
        '''
        self.logs.info("获取索引[{}]信息".format(self.es_index))

        try:
            self.index_get_msg = self.myes.indices.get_mapping(index=self.es_index)
        except Exception as e:
            self.logs.error("获取索引信息异常：{}".format(e))
        else:
            self.logs.info("索引信息：{}".format(self.index_get_msg))

    def CreateIndex(self):
        if self.myes.indices.exists(index=self.es_index) is not True:
            self.logs.info("开始创建索引：{}".format(self.es_index))
            mappings = {
                "mappings": {
                    self.es_doc_type: {
                        "properties": {
                            "num": {
                                "type": "long",
                                "index": "true"
                            },
                            "name": {
                                "type": "long",
                                "index": "true"
                            },
                                "ip": {
                                "type": "long",
                                "index": "true"
                            },
                            "port": {
                                "type": "long",
                                "index": "true"
                            },
                            "dep_path": {
                                "type": "long",
                                 "index": "true"
                            },
                            "put_path": {
                                "type": "long",
                                "index": "true"
                            },

                            "bak_path": {
                              "type": "long",
                              "index": "true"
                            },

                            "createTime": {
                                "type": "date",
                                "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"
                            },
                            "updateTime": {
                                "type": "date",
                                "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"
                            }
                        }
                    }
                },
                "settings": {
                    "index": {
                        "mapping": {
                            "nested_fields": {
                                "limit": "2000"
                            }
                        },
                        "refresh_interval": "-1",
                        "number_of_shards": "4",
                        "number_of_replicas": self.es_shards_total
                    }
                }
            }

            indexing = self.myes.indices.create(index=self.es_index, body=mappings)
            if indexing["acknowledged"] and \
                indexing["shards_acknowledged"] and \
                indexing["index"] == 'multiples':

                self.logs.info("创建索引：{}成功".format(self.es_index))
            else:
                self.logs.error("创建索引：{}失败".format(self.es_index))

    def CreateEmptyIndex(self):
        if self.myes.indices.exists(index=self.es_index) is not True:
            self.logs.info("开始创建索引：{}".format(self.es_index))
            mappings = {
                "settings": {
                    "index": {
                        "mapping": {
                            "nested_fields": {
                                "limit": "2000"
                            }
                        },
                        "refresh_interval": "-1",
                        "number_of_shards": "4",
                        "number_of_replicas": self.es_shards_total
                    }
                }
            }

            indexing = self.myes.indices.create(index=self.es_index, body=mappings)
            if indexing["acknowledged"] and \
                indexing["shards_acknowledged"] and \
                indexing["index"] == 'multiples':

                self.logs.info("创建索引：{}成功".format(self.es_index))
            else:
                self.logs.error("创建索引：{}失败".format(self.es_index))

    def DelIndex(self):
        self.logs.info("开始删除索引：{}".format(self.es_index))
        try:
            result = self.myes.indices.delete(index=self.es_index)
            if result["acknowledged"]:
                self.logs.info("删除索引[{}]成功".format(self.es_index))
            else:
                self.logs.error("删除索引[{}]失败".format(self.es_index))
        except Exception as e:
            self.logs.error("删除索引[{}]失败,{}".format(self.es_index, e))

    def IndexImportData(self):
        f = open(self.source_file, 'r', encoding='utf-8')
        id = 1

        for data in f.readlines():
            line = data.strip().split('|')
            self.logs.debug("导入数据：{}".format(line))

            action = {
                # "_index": self.es_index,
                # "_type": "doc_type_test",
                # "_id": id,
                # "_source": {
                "num": line[0],
                "名称": line[1],
                "ip地址": line[2],
                "端口": line[3],
                "部署路径": line[4],
                "上传路径": line[5],
                "备份路径": line[6],
                "创建时间": line[7],
                "更新时间": line[8]
                # }
            }
            try:
                res = self.myes.index(index=self.es_index, doc_type=self.es_doc_type, body=action)

                self.logs.info("insert id {}".format(id))
            except Exception as e:
                self.logs.error("insert id {} retry ... [{}]".format(id, e))

            if id == 5:
                break

            id = id + 1

    def IndexBulkImportData(self, n, file):
        start_time = self.TimeCal(start=True)
        self.logs.info("线程 [{}] 开始导入[{}]数据到ES".format(n, file))
        file = os.path.join(self.source_dir, file)

        if not os.path.exists(file) or os.path.getsize(file) == 0:
            self.logs.error("线程 [{}] 源文件不存在或为空文件!!!".format(n))
            return

        es_max_id = int(self.maxId)
        with open(file, 'r', encoding='utf-8') as f_show:
            lines = f_show.readlines()
            first_line = int(lines[0].strip().split('|')[0])

            # ES中的总量为0时，指定起始值 为1，用于生成文件和导入ES的起始ID
            if first_line <= es_max_id:
                self.logs.error("线程 [{}] 文件起始ID必须大于ES最大ID, [{}] > [{}]".format(n, first_line, es_max_id))
                return
            if first_line > es_max_id + 1:
                # ES中的总量不为0时，指定起始值加1，避免第一条和最大值重复，用于生成文件和导入ES的起始ID
                self.logs.error("线程 [{}] 文件起始ID必须等于于ES最大ID+1, [{}] = [{}] + 1".format(n, first_line, es_max_id))
                return

            self.logs.info("线程 [{}] 源数据文件总量：{}".format(n, len(f_show.readlines())))
            actions = []
            id = int(self.maxId) + 1
            for data in lines:
                line = data.strip().split('|')
                self.logs.debug("线程 [{}] 导入数据：{}".format(n, line))

                action = {
                    "_index": self.es_index,
                    "_type": self.es_doc_type,
                    "_id": id,
                    "_source": {
                        "num": line[0],
                        "名称": line[1],
                        "ip地址": line[2],
                        "端口": line[3],
                        "部署路径": line[4],
                        "上传路径": line[5],
                        "备份路径": line[6],
                        "创建时间": line[7],
                        "更新时间": line[8]
                    }
                }

                actions.append(action)
                if len(actions) == self.es_batch_size:
                    res = helpers.bulk(self.myes, actions)
                    self.logs.info("线程 [{}] batch size {}, code: {}".format(n, id, res))
                    del actions[:]
                id = id + 1

            if len(actions) > 0:
                res = helpers.bulk(self.myes, actions)
                self.logs.info("线程 [{}] batch size {}, code: {}".format(n, id, res))
                del actions[:]

        self.logs.info("线程 [{}] 导入数据到ES完成.".format(n))
        bulk_import_use_sec, bulk_import_use_min = self.TimeCal(start=start_time, end=True)
        self.logs.info("indexing 线程 [{}] 共计用时：{}s,计{}m".format(n, bulk_import_use_sec, bulk_import_use_min))

        self.RefreshEs()

        self.GetMaxId()
        bulk_import_use_sec, bulk_import_use_min = self.TimeCal(start=start_time, end=True)
        self.Report_Out('{}, 线程:[{}], 请求总量:{}, 增量:{}, ES总量:{}, 批量:{}, 线程:{}, 磁盘:{}, ES副本:{}, 耗时:{}s计{}m'.format(
                                            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
                                            n,
                                            # self.input_total,
                                            n,
                                            self.es_add_total,
                                            self.maxId,
                                            self.es_batch_size,
                                            self.es_proc_total,
                                            self.es_disk_num,
                                            self.es_shards_total,
                                            bulk_import_use_sec,
                                            bulk_import_use_min
                                            ))

    def IndexBulkImportDataThread(self, n):
        start_time = self.TimeCal(start=True)
        self.logs.info("线程 [{}] 开始导入数据到ES".format(n))

        if not os.path.exists(self.source_file) or os.path.getsize(self.source_file) == 0:
            self.logs.error("源文件不存在或为空文件!!!")
            return

        es_max_id = int(self.maxId)
        with open(self.source_file, 'r', encoding='utf-8') as f_show:
            lines = f_show.readlines()
            first_line = int(lines[0].strip().split('|')[0])

            # ES中的总量为0时，指定起始值 为1，用于生成文件和导入ES的起始ID
            if first_line <= es_max_id:
                self.logs.error("文件起始ID必须大于ES最大ID, [{}] > [{}]".format(first_line, es_max_id))
                return
            if first_line > es_max_id + 1:
                # ES中的总量不为0时，指定起始值加1，避免第一条和最大值重复，用于生成文件和导入ES的起始ID
                self.logs.error("文件起始ID必须等于于ES最大ID+1, [{}] = [{}] + 1".format(first_line, es_max_id))
                return

            self.logs.info("源数据文件总量：{}".format(len(f_show.readlines())))
            actions = []
            id = int(self.maxId) + 1

            for data in lines:
                line = data.strip().split('|')
                self.logs.debug("导入数据：{}".format(line))

                action = {
                    "_index": self.es_index,
                    "_type": self.es_doc_type,
                    "_id": id,
                    "_source": {
                        "num": line[0],
                        "名称": line[1],
                        "ip地址": line[2],
                        "端口": line[3],
                        "部署路径": line[4],
                        "上传路径": line[5],
                        "备份路径": line[6],
                        "创建时间": line[7],
                        "更新时间": line[8]
                    }
                }

                actions.append(action)
                if len(actions) == self.es_batch_size:
                    res = helpers.bulk(self.myes, actions)
                    self.logs.info("batch size {}, code: {}".format(id, res))
                    del actions[:]
                id = id + 1

            if len(actions) > 0:
                res = helpers.bulk(self.myes, actions)
                self.logs.info("batch size {}, code: {}".format(id, res))
                del actions[:]

        self.logs.info("线程 [{}] 导入数据到ES完成.".format(n))
        bulk_import_use_sec, bulk_import_use_min = self.TimeCal(start=start_time, end=True)
        self.logs.info("indexing 线程 [{}] 共计用时：{}s,计{}m".format(n, bulk_import_use_sec, bulk_import_use_min))

        self.RefreshEs()

        self.GetMaxId()
        bulk_import_use_sec, bulk_import_use_min = self.TimeCal(start=start_time, end=True)
        self.Report_Out('{}, 线程:[{}], 请求总量:{}, 增量:{}, ES总量:{}, 批量:{}, 线程:{}, 磁盘:{}, ES副本:{}, 耗时:{}s计{}m'.format(
                                            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
                                            n,
                                            self.input_total,
                                            self.es_add_total,
                                            self.maxId,
                                            self.es_batch_size,
                                            self.es_proc_total,
                                            self.es_disk_num,
                                            self.es_shards_total,
                                            bulk_import_use_sec,
                                            bulk_import_use_min
                                            ))

    def IndexBulkImportJson(self):
        start_time = self.TimeCal(start=True)
        self.logs.info("开始导入数据到ES")

        if not os.path.exists(self.source_file) or os.path.getsize(self.source_file) == 0:
            self.logs.error("源文件不存在或为空文件!!!")
            return

        es_max_id = int(self.maxId)
        with open(self.source_file, 'r', encoding='utf-8') as f_show:
            lines = f_show.readlines()
            # first_line = int(lines[0].strip().split('|')[0])
            #
            # # ES中的总量为0时，指定起始值 为1，用于生成文件和导入ES的起始ID
            # if first_line <= es_max_id:
            #     self.logs.error("文件起始ID必须大于ES最大ID, [{}] > [{}]".format(first_line, es_max_id))
            #     return
            # if first_line > es_max_id + 1:
            #     # ES中的总量不为0时，指定起始值加1，避免第一条和最大值重复，用于生成文件和导入ES的起始ID
            #     self.logs.error("文件起始ID必须等于于ES最大ID+1, [{}] = [{}] + 1".format(first_line, es_max_id))
            #     return

            self.logs.info("源数据文件总量：{}".format(len(f_show.readlines())))
            actions = []
            id = int(self.maxId) + 1
            for data in lines:
                # line = data.strip().split('|')
                self.logs.debug("导入数据：{}".format(data))

                action = {
                    "_index": self.es_index,
                    "_type": self.es_doc_type,
                    "_id": id,
                    "_source": data
                }

                actions.append(action)
                if len(actions) == self.es_batch_size:
                    res = helpers.bulk(self.myes, actions)
                    self.logs.info("batch size {}, code: {}".format(id, res))
                    del actions[:]
                id = id + 1

            if len(actions) > 0:
                res = helpers.bulk(self.myes, actions)
                self.logs.info("batch size {}, code: {}".format(id, res))
                del actions[:]

        self.logs.info("导入数据到ES完成.")
        bulk_import_use_sec, bulk_import_use_min = self.TimeCal(start=start_time, end=True)
        self.logs.info("indexing 共计用时：{}s,计{}m".format(bulk_import_use_sec, bulk_import_use_min))

        self.RefreshEs()

        self.GetMaxId()
        bulk_import_use_sec, bulk_import_use_min = self.TimeCal(start=start_time, end=True)
        self.Report_Out('{}, 请求总量:{}, 增量:{}, ES总量:{}, 批量:{}, 线程:{}, 磁盘:{}, ES副本:{}, 耗时:{}s计{}m'.format(
                                            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()),
                                            self.input_total,
                                            self.es_add_total,
                                            self.maxId,
                                            self.es_batch_size,
                                            self.es_proc_total,
                                            self.es_disk_num,
                                            self.es_shards_total,
                                            bulk_import_use_sec,
                                            bulk_import_use_min
                                            ))

    def IndexRemoveData(self):
        id_list = []
        outf = open(self.source_file, 'r', encoding='utf-8')
        for lines in outf.readlines():
            id = lines.strip().split('|')[0]
            id_list.append(id)

        id_list.sort()

        minId = id_list[0]
        maxId = id_list[-1]

        query_data = {
            "query": {
                "bool": {
                    "must": [
                        {
                            "match_all": {}
                        },
                        {
                            "range": {
                                "num": {
                                    "gte": minId,
                                    "lte": maxId
                                }
                            }
                        }
                    ],
                    "must_not": []
                }
            }
        }

        try:
            res = self.myes.delete_by_query(index=self.es_index, doc_type=self.es_doc_type, body=query_data, _source=True)

            self.logs.info("删除ES数据完成, {}".format(res))
        except Exception as e:
            self.logs.error("删除ES数据失败, {}".format(e))

    def IndexCleanData(self):
        query_data = {
            "query": {
                "match_all": {

                }
            }
        }
        self.logs.info("开始清空 ES 数据")
        try:
            res = self.myes.delete_by_query(index=self.es_index, body=query_data)
            self.logs.info("res:{}".format(res))
            self.logs.info("清空 ES 数据完成:{}".format(res))
        except Exception as e:
            self.logs.info("清空 ES 数据失败:{}".format(e))
            raise e

    # 获取最大ID
    def GetMaxId(self):
        query_data = {
            "sort": [
                {
                    "num": {
                        "order": "desc"
                    }
                }
            ],
            "size": 1,
            "_source": "{num}",
            "query": {
                "match_all": {}
            }
        }

        try:
            res = self.myes.search(index=self.es_index, doc_type=self.es_doc_type, body=query_data)
            self.logs.info("MAX ID res:{}".format(res))
            if res['hits']['hits']:
                self.maxId = res['hits']['hits'][0]['_id']
            else:
                self.maxId = 0
            self.logs.info("MAX ID :{}".format(self.maxId))
        except Exception as e:
            self.maxId = 0

    # 刷新ES
    def RefreshEs(self):
        refresh_start_time  = self.TimeCal(start=True)
        self.logs.info("开始刷新ES")
        res = self.myes.indices.refresh(index=self.es_index)
        self.logs.info("res:{}".format(res))
        if res["_shards"]:
            if res["_shards"]['total'] == res["_shards"]['successful'] and res["_shards"]['failed'] == 0:
                self.logs.info("刷新成功:{}".format(res["_shards"]))
            else:
                self.logs.error("res:{}".format(res["_shards"]))
        else:
            self.logs.error("刷新失败:{}".format(res))

        refresh_use_sec, refresh_use_min  = self.TimeCal(start=refresh_start_time, end=True)
        self.logs.info("refresh 共计用时：{}s计{}分".format(refresh_use_sec, refresh_use_min))

    # 查询所有数据
    def IndexSearchAllData(self):
        result = self.myes.search(index=self.es_index, doc_type=self.es_doc_type)
        self.logs.info(result['hits']['hits'])

    # 查询一条数据
    def IndexSearchOneData(self):
        result = self.myes.get(index=self.es_index, doc_type=self.es_doc_type, id='1')
        self.logs.info(result)

    # 关键词查找
    def IndexSearchDocData(self):
        doc = {
            "query": {
                "match": {
                    "_id": "1"
                }
            }
        }
        result = self.myes.get(index=self.es_index, doc_type=self.es_doc_type, body=doc)
        self.logs.info(result)

    def ShardsSet(self, num=0):
        query_data = {
            "index": {
                "number_of_replicas": num
            }
        }
        retuslt = self.myes.indices

    def ShardsGet(self):
        result = self.myes.search_shards(index=self.es_index)
        r_shards_num = result['shards']
        self.logs.info("副本分片信息：{}".format(result))



    # 插入指定的单条数据内容
    def insert_single_data(self, index_name, doc_type, data):
        '''
        :param index_name: 索引名称
        :param doc_type: 文档类型
        :param data: 需要插入的数据内容
        :return: 执行结果
        '''
        res = self.es.index(index=index_name, doc_type=doc_type, body=data)
        return res

    # bulk-insert documents read from the configured data file
    def insert_datas(self, index_name):
        '''
        Read newline-delimited JSON documents from the configured data
        file and bulk-insert them into *index_name*.
        :param index_name: index name
        :return: bulk API response

        NOTE(review): this method appears copied from another class —
        self.createIndex, self.ini and self.es are not defined anywhere
        in Main (the client here is self.myes and index creation is
        CreateIndex). As written it raises AttributeError; confirm and
        port before use.
        '''
        insert_datas = []
        # ensure the target index exists
        self.createIndex(index_name=index_name)
        # resolve the data file path from config
        data_file_path = self.ini.get_key_value("datafile", "datafilepath")
        # collect the documents to insert
        with open(data_file_path, "r+") as data_file:
            # read every line of the file
            data_lines = data_file.readlines()
            for data_line in data_lines:
                # string to json
                data_line = json.loads(data_line)
                insert_datas.append(data_line)
        # bulk request
        res = self.es.bulk(index=index_name, body=insert_datas, raise_on_error=True)
        return res

    # 从ES中在指定的索引中删除指定数据（根据id判断）
    def delete_data_by_id(self, index_name, doc_type, id):
        '''
        :param index_name: 索引名称
        :param index_type: 文档类型
        :param id: 唯一标识id
        :return: 删除结果信息
        '''
        res = self.es.delete(index=index_name, doc_type=doc_type, id=id)
        return res

    # 根据条件删除数据
    def delete_data_by_query(self, index_name, doc_type, param, gt_time, lt_time):
        '''
        :param index_name:索引名称，为空查询所有索引
        :param doc_type:文档类型，为空查询所有文档类型
        :param param:过滤条件值
        :param gt_time:时间范围，大于该时间
        :param lt_time:时间范围，小于该时间
        :return:执行条件删除后的结果信息
        '''
        # DSL语句
        query_data = {
            # 查询语句
            "query": {
                "bool": {
                    "must": [
                        {
                            "query_string": {
                                "query": param,
                                "analyze_wildcard": True
                            }
                        },
                        {
                            "range": {
                                "@timestamp": {
                                    "gte": gt_time,
                                    "lte": lt_time,
                                    "format": "epoch_millis"
                                }
                            }
                        }
                    ],
                    "must_not": []
                }
            }
        }
        res = self.es.delete_by_query(index=index_name, doc_type=doc_type, body=query_data, _source=True)
        return res

    # 指定index中删除指定时间段内的全部数据
    def delete_all_datas(self, index_name, doc_type, gt_time, lt_time):
        '''
        :param index_name:索引名称，为空查询所有索引
        :param doc_type:文档类型，为空查询所有文档类型
        :param gt_time:时间范围，大于该时间
        :param lt_time:时间范围，小于该时间
        :return:执行条件删除后的结果信息
        '''
        # DSL语句
        query_data = {
            # 查询语句
            "query": {
                "bool": {
                    "must": [
                        {
                            "match_all": {}
                        },
                        {
                            "range": {
                                "@timestamp": {
                                    "gte": gt_time,
                                    "lte": lt_time,
                                    "format": "epoch_millis"
                                }
                            }
                        }
                    ],
                    "must_not": []
                }
            }
        }
        res = self.es.delete_by_query(index=index_name, doc_type=doc_type, body=query_data, _source=True)
        return res

    # 修改ES中指定的数据
    def update_data_by_id(self, index_name, doc_type, id, data):
        '''
        :param index_name: 索引名称
        :param doc_type: 文档类型，为空表示所有类型
        :param id: 文档唯一标识编号
        :param data: 更新的数据
        :return: 更新结果信息
        '''
        res = self.es.update(index=index_name, doc_type=doc_type, id=id, body=data)
        return res

    def thread_import(self):

        s_file_list = os.listdir(self.source_dir)
        s_file_list.sort()

        t_list = []
        start_count = 1
        count = 0
        for sf in s_file_list:
            t = threading.Thread(target=self.IndexBulkImportData, args=(count, sf))
            t_list.append(t)
            count += 1
        self.logs.info("队列 信息：{}".format(t_list))
        for n in range(len(s_file_list)):
            t_list[n].start()
        for n in range(len(s_file_list)):
            t_list[n].join()

    def process_import(self):
        # self.logs.info('启动多线程...')

        from multiprocessing import Pool, Manager
        # import multiprocessing
        # ps = multiprocessing.Pool(processes=3)

        ps = Pool(processes=3)
        s_file_list = os.listdir(self.source_dir)
        s_file_list.sort()

        p_list = []
        count = 0
        for sf in s_file_list:
            fun = self.work
            res = ps.apply_async(fun, args=(count, sf))
            count += 1
            p_list.append(res)
        # self.logs.info("队列 信息：{},{}".format(ps, dir(ps)))

        # 关闭进程池，停止接受其它进程
        ps.close()

        # 阻塞进程
        ps.join()

        for r in p_list:
            print(r)
            print(r.get())

    def work(self, n, name):
        print("procees {}, name: {}".format(n, name))




def Log_Out():
    """Configure and return the root logger.

    Output goes both to a size-rotated file (``<script path>.log``, 10 MB
    per file, 20 backups) and to the console, with a pipe-separated format
    carrying timestamp, level, thread/process ids and line number.

    :return: the configured root ``logging.Logger``
    """
    log_file = os.path.realpath(sys.argv[0]) + '.log'

    # Configure the root logger so every module shares the same handlers.
    logs = logging.getLogger()
    logs.setLevel(logging.INFO)

    # Guard against attaching duplicate handlers when Log_Out() is called
    # more than once (the original duplicated every log line on re-call).
    if not logs.handlers:
        # Size-based rotation: 10 MB per file, keep 20 backups.
        logrota = logging.handlers.RotatingFileHandler(
            log_file, maxBytes=10 * 1024 * 1024, backupCount=20)
        logrota.setLevel(logging.DEBUG)

        # Mirror everything to the console as well.
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)

        # Renamed from ``format`` to avoid shadowing the builtin.
        log_format = ('%(asctime)s|%(name)s|%(levelname)s|'
                      '%(thread)s|%(process)s|%(lineno)d|%(message)s')
        formatter = logging.Formatter(log_format)

        logrota.setFormatter(formatter)
        console.setFormatter(formatter)

        logs.addHandler(logrota)
        logs.addHandler(console)

    logs.info('Loging start ...')
    return logs


# NOTE(review): ``global`` at module level is a no-op; ``logs`` appears to be
# expected as a module-level logger before InitEs() runs — confirm where it
# is assigned (Log_Out() returns one but nothing here binds it).
global logs
# Elasticsearch hosts (comma-separated "host:port" pairs)
es_host = '172.22.56.34:9200,172.22.56.31:9200'
# Index name
es_index = 'multiples'
# Number of documents per bulk request
es_batch_size = 1500
# Total number of documents to import (accumulation cap)
es_add_total = 1000000
# Original comment says "replica count" despite the "shards" name — verify.
es_shards_total = 1
# Number of worker threads/processes
es_proc_total = 3
# Index document type
es_doc_type = "type_doc_test"
# Number of data disks per node
es_disk_num = 2
# Directory holding the source files to import into ES
source_dir = 'input'
source_file = 'source.txt'
source_file_count = 10
# Source file in JSON format
source_json = 'package.json'
# Report file, accumulated across runs
report_file = 'reportup.txt'
# Timestamp recorded for this run's report
create_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())

def InitEs(es_host):
    """Create an Elasticsearch client for a comma-separated host list.

    :param es_host: hosts as ``"ip:port,ip:port,..."``
    :return: a configured ``Elasticsearch`` client instance

    Relies on the module-level ``logs`` logger being configured first.
    """
    # Parse each "ip:port" pair into the host dict the client expects;
    # replaces the original build-then-reset dict loop.
    host_list = [
        {'host': pair.split(':')[0], 'port': pair.split(':')[1]}
        for pair in es_host.split(',')
    ]
    logs.info("连接ES主机：{}".format(host_list))

    myes = Elasticsearch(
        host_list,
        # sniff before doing anything
        sniff_on_start=True,
        # refresh nodes after a node fails to respond
        sniff_on_connection_fail=True,
        # and also every 60 seconds
        sniffer_timeout=60,
        bulk_size=10000,
        timeout=20
    )
    logs.info("初始化es:{}".format(myes))
    logs.info("dir myes: {}".format(dir(myes)))

    return myes

def process_import():
    """Demo: fan out work() over a small process pool and print results."""
    print('启动多线程...')
    # Single import; the original also did an unused
    # ``from multiprocessing import Pool`` alongside it.
    from multiprocessing import Pool

    pool = Pool(processes=3)

    sample_files = ['a', 'b', 'c', 'd']
    async_results = [
        pool.apply_async(work, args=(seq, name))
        for seq, name in enumerate(sample_files)
    ]
    print("队列 信息：{},{}".format(pool, dir(pool)))

    # Stop accepting new tasks...
    pool.close()

    # ...then block until all submitted tasks have completed.
    pool.join()

    for res in async_results:
        print(res.get())

def work(n, name):
    """Worker stub: print the task index and name it was given."""
    output = "procees {}, name: {}".format(n, name)
    print(output)


if __name__ == '__main__':
    # Entry point: build the importer with a 1500-doc bulk size, a
    # 1,000,888-document cap and 3 worker processes, then connect to ES
    # and start the file-creation/import pipeline.
    run = Main(es_batch_size=1500, es_add_total=1000888, process=3)
    run.InitEs()
    run.GetMaxId()
    run.CreateFileThread()
    # run.DelIndex()
    # run.CreateIndex()
    # process_import()



