# -*- coding:utf-8 -*-

# @File     :es_handler
# @Author   :Yupeng
# @Date     :2018/11/18 18:57
'''
这个文件用来处理和ES相关的东西：
1、postgresql和mongodb数据库和ES的同步
2、
'''

from elasticsearch_dsl import Search
from elasticsearch_dsl import connections
from elasticsearch.helpers import bulk, BulkIndexError
from .hte_formatter import DocumentLister
from .operator import DataOperator
from .cache import update_full_text_cache, update_template_cache
from typing import *
from mongoengine import DoesNotExist

from apps.storage.models.data import DataMeta, DataContent, DataFieldContainer, DataFieldTable, DataFieldTableRow
from apps.storage.models.template import TemplateFieldEnum, Template, RangePart
from apps.storage.models.dataset import DataSet
from apps.search.models import ElasticSearchImportRecord
from apps.search.utils.serilizers.json import JSONHandler
from apps.search.utils.serilizers.common import DataMode

# from apps.storage.models.template import

ES_QUERY_MAX_SIZE = 100000000

documentFormatter = DocumentLister()


class Transform:
    '''
    Builders that turn (field, value) pairs into Elasticsearch query-DSL
    fragments — plain, JSON-serialisable dicts.
    '''

    @staticmethod
    def term_dic(field: str, value):
        '''Exact match on a single field.'''
        return dict(term={field: value})

    @staticmethod
    def terms_dic(field: str, value: list):
        '''Match any of several exact values on one field.'''
        return dict(terms={field: value})

    @staticmethod
    def match_phrase_dic(field: str, value: str):
        '''Phrase match on an analysed field.'''
        return dict(match_phrase={field: value})

    @staticmethod
    def match_phrase_prefix_dic(field: str, value: str):
        '''Phrase match where the last term is treated as a prefix.'''
        return dict(match_phrase_prefix={field: value})

    @staticmethod
    def prefix(field: str, value: str):
        '''Prefix match on a (not analysed) field.'''
        return dict(prefix={field: value})

    @staticmethod
    def match_phrase_suffix_dic(field: str, value: str):
        '''Suffix phrase match via the '.reverse' sub-field (reversed text).'''
        return dict(match_phrase_prefix={field + '.reverse': value})

    @staticmethod
    def suffix_dic(field: str, value: str):
        '''Suffix match via a prefix query on the '.reverse' sub-field.'''
        return dict(prefix={field + '.reverse': value})

    @staticmethod
    def not_dic(query: dict):
        '''Negate a query (bool/must_not wrapper).'''
        return dict(bool={'must_not': [query]})

    @staticmethod
    def range_dic(field: str, value):
        '''Range query; *value* carries the gt/gte/lt/lte bounds.'''
        return dict(range={field: value})

    @staticmethod
    def exist_dic(field: str):
        '''Match documents where *field* exists.'''
        return dict(exists={'field': field})

    @staticmethod
    def match_dic(field: str, value: str):
        '''Full-text match on one field.'''
        return dict(match={field: value})

    @staticmethod
    def match_all_dic():
        '''Match every document.'''
        return dict(match_all={})

    @staticmethod
    def bool_dic(must: list = None, must_not: list = None, should: list = None):
        '''Combine clause lists into a bool query; None clauses are omitted.'''
        clauses = {name: value
                   for name, value in (('must', must),
                                       ('must_not', must_not),
                                       ('should', should))
                   if value is not None}
        return dict(bool=clauses)

    @staticmethod
    def query_dic(query: dict):
        '''Wrap a query fragment into a top-level request body.'''
        return dict(query=query)

    @staticmethod
    def nested_dic(path: str, query: dict):
        '''Nested query on documents under *path*.'''
        return dict(nested={'path': path, 'query': query})

    @staticmethod
    def multi_match_dic(query_string, field: list):
        '''Full-text match of *query_string* across several fields.'''
        return dict(multi_match={'query': query_string, 'fields': field})


class ElasticsearchManager(object):
    '''
    Management helpers for Elasticsearch: bulk import, removal and full
    re-synchronisation of the data kept in PostgreSQL and MongoDB.
    '''
    # Shared buffer of pending bulk actions, flushed by _base_bulk_import.
    buf = []
    buf_size = 12345  # flush threshold for the pending-action buffer
    step = 100

    @staticmethod  # static method: called on the class, no instance needed
    def _base_bulk_import(index=None, doc_type=None, body=None, end=True):
        '''
        Low-level bulk import into ES.

        Queues one document (when ``index`` is given) and flushes the shared
        buffer when ``end`` is True or the buffer exceeds ``buf_size``.

        :param index: target index; None means "flush only, queue nothing"
        :param doc_type: ES document type
        :param body: already-formatted document source
        :param end: force a flush of any pending documents
        :return: None
        '''
        if index is not None:
            ElasticsearchManager.buf.append({'_index': index, '_type': doc_type, '_source': body})
        if end or len(ElasticsearchManager.buf) >= ElasticsearchManager.buf_size:
            try:
                # (es connection, iterable of action dicts)
                bulk(connections.get_connection(), ElasticsearchManager.buf)
                print('Already insert %d docs.' % len(ElasticsearchManager.buf))
            except BulkIndexError as e:
                # Some documents were rejected; report instead of passing silently.
                print('Bulk import partially failed: %s' % e)
            except Exception as e:
                print('Bulk import failed: %r' % e)
            finally:
                # BUGFIX: the original cleared the buffer only on success, so a
                # failed batch stayed in `buf`, was re-sent on the next flush
                # (duplicates) and let the buffer grow without bound.
                ElasticsearchManager.buf[:] = []

    @staticmethod
    def _extract_data_meta(data_meta: 'DataMeta'):
        '''Flatten the searchable PostgreSQL DataMeta fields into a plain dict.'''
        return {
            '_meta_id': data_meta.id,
            'title': data_meta.title,
            'category_zh': data_meta.category.name_zh or '',
            'category_en': data_meta.category.name_en or '',
            'keywords': data_meta.keywords,
            'doi': data_meta.doi,
            'abstract': data_meta.abstract or '',
            'purpose': data_meta.purpose or '',
            'source': data_meta.source or '',
            'author': data_meta.author.real_name,
        }

    @staticmethod
    def _extract_dataset_meta(dataset: 'DataSet'):
        '''Flatten the searchable DataSet fields into a plain dict.'''
        return {
            '_dataset_id': dataset.id,
            'title': dataset.title,
            'file_name': dataset.file_name,
            'author': dataset.author.real_name,
            'ref_count': dataset.ref_count,
            # 'upload_time': dataset.update_time,
            # 'update_time': dataset.update_time,
            'cols': dataset.cols,
            'rows': dataset.rows,
            'purpose': dataset.purpose,
            'downloads': dataset.downloads,
        }

    @staticmethod
    def extract_data_string(data):
        '''
        Recursively flatten nested data into a space-separated string, e.g.
        {"a": {"b": [1, 2, 3, 4]}} ==> 'a b 1  2  3  4'
        (the extra spaces between items are intentional and harmless for
        full-text indexing; anything that is not dict/list/str/int/float
        contributes nothing).

        :param data: arbitrarily nested dict / list / scalar data
        :return: flattened string
        '''
        ret = ''
        if isinstance(data, dict):
            for k, v in data.items():
                ret += k + ' '
                ret += ElasticsearchManager.extract_data_string(v) + ' '
        elif isinstance(data, str):
            ret = data + ' '
        elif isinstance(data, (int, float)):
            ret = str(data) + ' '
        elif isinstance(data, list):
            for v in data:
                ret += ElasticsearchManager.extract_data_string(v) + ' '
        return ret

    @staticmethod
    def insert(meta_ids: Union[int, List[int]]):
        '''
        Mirror the MongoDB and PostgreSQL records identified by ``meta_ids``
        into ES (wrapper around _base_bulk_import).

        :param meta_ids: a single id or a list of ids
        :return: None
        '''
        if not isinstance(meta_ids, list):
            meta_ids = [meta_ids]
        ElasticSearchImportRecord.objects.filter(meta_id__in=meta_ids).update(status='IMPORTING')
        try:
            # MongoDB collections -> ES indexes.
            # BUGFIX: the original zip paired DataFieldTable with
            # 'data_field_table_row' and DataFieldTableRow with
            # 'data_field_table' — the last two index names were swapped.
            for collection, index in zip(
                    (DataContent, DataFieldContainer, DataFieldTable, DataFieldTableRow),
                    ('data_content', 'data_field_container', 'data_field_table', 'data_field_table_row')):
                docs = collection.objects.filter(_meta_id__in=meta_ids)
                print('Begin to insert %d docs in %s.' % (docs.count(), index))
                for doc in docs:
                    ElasticsearchManager._base_bulk_import(index=index, doc_type='_doc',
                                                           body=documentFormatter.format_document(doc), end=False)
                print('%d docs in %s inserted.' % (docs.count(), index))
            ElasticsearchManager._base_bulk_import(end=True)
            # PostgreSQL metadata -> ES 'data_meta' index.
            data_metas = DataMeta.objects.filter(id__in=meta_ids)
            print('%d docs in %s are in the insert list.' % (data_metas.count(), 'data_meta'))
            for data_meta in data_metas:
                ElasticsearchManager._base_bulk_import(index='data_meta', doc_type='_doc',
                                                       body=documentFormatter.format_document(ElasticsearchManager._extract_data_meta(data_meta)),
                                                       end=False)
            ElasticsearchManager._base_bulk_import()
            print('%d docs in %s are in the insert list.' % (data_metas.count(), 'data_meta'))

            # Rebuild the merged full-text snapshots, then refresh caches.
            ElasticsearchManager.param_merge_insert(meta_ids)
            ElasticSearchImportRecord.objects.filter(meta_id__in=meta_ids).update(status='SUCCESS')
            update_full_text_cache()
            template_id_list = [x['tid'] for x in
                                DataMeta.objects.filter(id__in=meta_ids).values('tid')]
            update_template_cache(template_id_list)

        except Exception:
            # NOTE(review): on failure the import record stays 'IMPORTING';
            # consider marking it failed here (valid status values are not
            # visible in this file — confirm against ElasticSearchImportRecord).
            raise

    @staticmethod
    def remove(meta_ids: Union[int, List[int]]):
        '''
        Delete every ES document belonging to the given meta id(s) across all
        per-datum indexes.

        :param meta_ids: a single id or a list of ids
        :return: None
        '''
        if not isinstance(meta_ids, list):
            meta_ids = [meta_ids]
        s = Search.from_dict(Transform.query_dic(Transform.terms_dic('_meta_id', meta_ids)))
        # NOTE: writes the private `_index` attribute; Search.index(...) is the
        # public equivalent in elasticsearch-dsl.
        s._index = ['data_meta', 'data_content', 'data_field_container', 'data_field_table', 'data_field_table_row']
        s.delete()

    # @staticmethod
    # def dataset_remove(dataset_ids:Union[int, List[int]]):
    #     if not isinstance(dataset_ids, list):
    #         dataset_ids = [dataset_ids]
    #     s = Search.from_dict(Transform.query_dic(Transform.terms_dic('_')))

    @staticmethod
    def update(meta_id):
        '''Re-import one datum: remove its ES documents, then insert afresh.'''
        ElasticsearchManager.remove(meta_id)
        ElasticsearchManager.insert(meta_id)

    @staticmethod
    def update_all():
        '''
        Push everything from MongoDB/PostgreSQL into ES again: first wipe the
        'data_meta' index, then re-import every DataMeta.
        :return: None
        '''
        meta_ids = list(DataMeta.objects.all().values_list('pk', flat=True))
        s = Search.from_dict(Transform.query_dic(Transform.match_all_dic()))
        # s._index = ['data_meta', 'data_content', 'data_field_container', 'data_field_table', 'data_field_table_row']
        s._index = ['data_meta']
        s.delete()
        ElasticsearchManager.insert(meta_ids)
        # dataset handling (disabled):
        # dataset_ids = [x for x in DataSet.objects.all().values_list('pk', flat=True)]
        # s._index = ['dataset_meta', ]
        # s.delete()
        # ElasticsearchManager.dataset_insert(dataset_ids)

    @staticmethod
    def delete_all():
        '''
        Delete every document from all ES indexes used by this app.
        :return: None
        '''
        s = Search.from_dict(Transform.query_dic(Transform.match_all_dic()))
        s._index = ['data_meta', 'data_content', 'data_field_container', 'data_field_table',
                    'data_field_table_row', 'data_snapshot']
        s.delete()

    @staticmethod
    def _merge_snapshot_body(data_meta):
        '''Build the merged full-text 'data_snapshot' document for one DataMeta
        (shared by param_merge_insert and param_merge_insert_all).'''
        template = Template.objects.get(pk=data_meta.tid)
        serializer = JSONHandler(mode=DataMode.WRITE, template=template)
        data_dict = {
            '_content': serializer.data_to_dict(data_meta),               # MongoDB content fields
            '_meta': ElasticsearchManager._extract_data_meta(data_meta),  # PostgreSQL meta fields
        }
        return {'_meta_id': data_meta.id,
                '_data': ElasticsearchManager.extract_data_string(data_dict)}

    @staticmethod
    def param_merge_insert(meta_ids):
        '''
        Field-merged import used for full-text search: for each datum in
        ``meta_ids``, merge all of its fields into one text blob and bulk
        import it into 'data_snapshot'. Any previously imported snapshots for
        these ids are deleted first.

        :param meta_ids: id or list of ids
        :return: None
        '''
        if isinstance(meta_ids, (int, str)):
            meta_ids = [meta_ids]

        # Drop the stale snapshots before re-importing.
        s = Search.from_dict(Transform.query_dic(Transform.terms_dic('_meta_id', meta_ids)))
        s._index = ['data_snapshot']
        s.delete()

        data_metas = DataMeta.objects.filter(id__in=meta_ids)
        print('%d docs in %s are in the insert list.' % (data_metas.count(), 'data_snapshot'))
        cnt = 1
        for data_meta in data_metas:
            ElasticsearchManager._base_bulk_import(index='data_snapshot',
                                                   doc_type='_doc',
                                                   body=ElasticsearchManager._merge_snapshot_body(data_meta),
                                                   end=False)
            print(cnt)
            cnt += 1
        ElasticsearchManager._base_bulk_import(end=True)

    @staticmethod
    def param_merge_insert_all():
        '''
        Field-merged import for ALL data: wipe 'data_snapshot' and rebuild it
        from every DataMeta.
        :return: None
        '''
        s = Search.from_dict(Transform.query_dic(Transform.match_all_dic()))
        s._index = ['data_snapshot']
        s.delete()

        data_metas = DataMeta.objects.all()
        print('%d docs in %s are in the insert list.' % (data_metas.count(), 'data_snapshot'))
        cnt = 1
        for data_meta in data_metas:
            ElasticsearchManager._base_bulk_import(index='data_snapshot',
                                                   doc_type='_doc',
                                                   body=ElasticsearchManager._merge_snapshot_body(data_meta),
                                                   end=False)
            print(cnt)
            cnt += 1
        ElasticsearchManager._base_bulk_import(end=True)

    # @staticmethod
    # def dataset_insert(dataset_ids: Union[int, List[int]]):
    #     '''
    #     Mirror the PostgreSQL dataset records into ES (wrapper around
    #     _base_bulk_import).
    #     :param dataset_ids: a single id or a list of ids
    #     :return:
    #     '''
    #     if not isinstance(dataset_ids, list):
    #         dataset_ids = [dataset_ids]
    #     ElasticSearchImportRecord.objects.filter(meta_id__in=dataset_ids).update(status='IMPORTING')
    #     try:
    #         datasets = DataSet.objects.filter(id__in=dataset_ids)
    #         for dataset in datasets:
    #             ElasticsearchManager._base_bulk_import(index='dataset_meta', doc_type='_doc',
    #                                                    body=documentFormatter.format_document(ElasticsearchManager._extract_dataset_meta(dataset)),
    #                                                    end=False)
    #         ElasticsearchManager._base_bulk_import()
    #         print('%d docs in %s are in the insert list.' % (datasets.count(), 'dataset_meta'))
    #         ElasticsearchManager.dataset_param_merge_insert(dataset_ids)
    #
    #         update_full_text_cache()
    #
    #     except Exception:
    #         raise

    # @staticmethod
    # def dataset_param_merge_insert(dataset_ids):
    #     '''
    #     Field-merged import for the given datasets (full-text search).
    #     :param dataset_ids:
    #     :return:
    #     '''
    #     if isinstance(dataset_ids, (int, str)):
    #         dataset_ids = [dataset_ids]
    #
    #     s = Search.from_dict(Transform.query_dic(Transform.terms_dic('_dataset_id', dataset_ids)))
    #     s._index = ['dataset_snapshot']
    #     s.delete()
    #
    #     datasets = DataSet.objects.filter(id__in=dataset_ids)
    #     print('%d docs in %s are in the insert list.' % (datasets.count(), 'data_snapshot'))
    #     cnt = 1
    #     for dataset in datasets:
    #         body = dict()
    #         body['_dataset_id'] = dataset.id
    #         data_dict = {}
    #         data_dict['_meta'] = ElasticsearchManager._extract_dataset_meta(dataset)
    #         body['_data'] = ElasticsearchManager.extract_data_string(data_dict)
    #         ElasticsearchManager._base_bulk_import(index='dataset_snapshot',
    #                                                doc_type='_doc',
    #                                                body=body,
    #                                                end=False)
    #         print(cnt)
    #         cnt += 1
    #     ElasticsearchManager._base_bulk_import(end=True)
    #
    # @staticmethod
    # def dataset_param_merge_insert_all():
    #     '''
    #     Field-merged import for ALL datasets.
    #     :return:
    #     '''
    #     s = Search.from_dict(Transform.query_dic(Transform.match_all_dic()))
    #     s._index = ['dataset_snapshot']
    #     s.delete()
    #
    #     datasets = DataSet.objects.all()
    #     print('%d docs in %s are in the insert list.' % (datasets.count(), 'dataset_snapshot'))
    #     cnt = 1
    #     for dataset in datasets:
    #         body = dict()
    #         body['_dataset_id'] = dataset.id
    #         data_dict = {}
    #         data_dict['_meta'] = ElasticsearchManager._extract_dataset_meta(dataset)
    #         body['_data'] = ElasticsearchManager.extract_data_string(data_dict)
    #         ElasticsearchManager._base_bulk_import(index='dataset_snapshot',
    #                                                doc_type='_doc',
    #                                                body=body,
    #                                                end=False)
    #         print(cnt)
    #         cnt += 1
    #     ElasticsearchManager._base_bulk_import(end=True)
