import sys,os
import json
import time
from datetime import datetime
import elasticsearch
from elasticsearch import Elasticsearch
from elasticsearch import helpers
from  loguru  import logger
logger.add("for-debug-import-json.log", level="DEBUG")

from pymongo import MongoClient

def get_next_sequence(name):
    """Atomically fetch-and-increment a named counter stored in MongoDB.

    Implements the classic "counters" collection pattern: a single
    find_one_and_update with $inc and upsert=True, returning the
    post-increment value so concurrent callers never collide.

    Args:
        name: the counter document's _id (e.g. "global").

    Returns:
        The next integer sequence value, or None if the lookup failed.
    """
    # NOTE(review): credentials are hard-coded; move to config/env vars.
    host = "119.254.155.105"
    port = 27017
    username = "root"
    password = "yyccYOUyun"

    result = None
    mongo_app = None
    try:
        mongo_app = MongoClient(host, port, username=username, password=password)
        collection_name = "counters"
        collection = mongo_app["test"][collection_name]
        result = collection.find_one_and_update(
                  {"_id": name},
                  {"$inc": {"sequence_value": 1}},
                  upsert=True,
                  # truthy == ReturnDocument.AFTER: return the value AFTER $inc
                  return_document=True
                )
        logger.trace('MongoDB 用于记录ID的集合为 {},本次获取的目标是{},获取结果是{} .'.format(collection_name,name,result["sequence_value"]))
    except Exception as e:
        logger.error("Get Seq ID Failed For {}".format(e))
    finally:
        # Bug fix: the original closed unconditionally; if MongoClient()
        # itself raised, mongo_app was unbound and close() raised NameError.
        if mongo_app is not None:
            mongo_app.close()

    if not result:
        return None
    return result["sequence_value"]


def insert_docs(es_instance, index="test", docs=None, id_to_unique='id'):
    """Bulk-index documents into Elasticsearch, chunking large batches.

    Each doc's value at `id_to_unique` becomes its ES `_id`, so re-running
    the import overwrites rather than duplicates. The target index is
    refreshed afterwards so the documents are immediately searchable.

    Args:
        es_instance: connected Elasticsearch client.
        index: destination index name.
        docs: list of source documents (defaults to an empty list).
        id_to_unique: key in each doc whose value is used as the ES _id.

    Raises:
        Re-raises any bulk/refresh exception after logging it.
    """
    if docs is None:  # avoid the shared-mutable-default-argument pitfall
        docs = []
    start_commit = time.time()
    actions = []  # pre-bind so the except-logger can reference it even if the build fails
    try:
        actions = [{"_index": index, "_source": doc, "_id": doc[id_to_unique]} for doc in docs]
        logger.debug("insert_docs bulk start for index {}, id_to_unique is {}".format(index,id_to_unique))
        commit_len = len(actions)
        if commit_len > 2000:
            logger.debug("insert_docs bulk for index {}, id_to_unique is {},commit_len is {}".format(index,id_to_unique,commit_len))
            # submit in slices of 1000 to keep each bulk request small
            step = 1000
            for current in range(0, commit_len, step):
                helpers.bulk(es_instance, actions[current:current + step])
        else:
            helpers.bulk(es_instance, actions)
        end_commit = time.time()
        logger.debug("insert_docs bulk end for index {}, id_to_unique is {}, commit_len is {},cost is {}ms.".format(index,id_to_unique,commit_len,1000*(end_commit-start_commit)))
        # make the new docs visible to searches immediately
        es_instance.indices.refresh(index=index)
    except Exception as e:
        logger.error("insert_docs bulk for index {},actions {},exception {}".format(index,actions,e))
        raise e
    return

def update_docs(es_instance, index="test", docs=None, id_to_unique='_id'):
    """Bulk partial-update documents in Elasticsearch.

    Builds `_op_type: update` bulk actions. A per-doc '_index' key overrides
    the function-level `index` default; every touched index is refreshed
    afterwards.

    WARNING: mutates the passed-in doc dicts (pops '_id' and '_index').

    Args:
        es_instance: connected Elasticsearch client.
        index: default destination index when a doc carries no '_index'.
        docs: list of partial documents (defaults to an empty list).
        id_to_unique: key in each doc whose value identifies the ES doc.

    Returns:
        (successful, failed) counts as reported by helpers.bulk.

    Raises:
        Re-raises any bulk/refresh exception after logging it.
    """
    if docs is None:  # avoid the shared-mutable-default-argument pitfall
        docs = []
    try:
        indexs_to_refresh = []
        actions = []
        for doc in docs:
            id_to_use = doc[id_to_unique]
            if id_to_unique == '_id':
                # '_id' is ES metadata, not a source field; strip it from the partial doc
                doc.pop('_id')
            doc_index = doc.pop('_index', None)
            if doc_index is None:
                doc_index = index
            indexs_to_refresh.append(doc_index)
            actions.append({"_index": doc_index, "doc": doc, "_id": id_to_use, "_op_type": "update"})
        successful, failed = helpers.bulk(es_instance, actions)
        # refresh each distinct index exactly once
        for in_ in list(set(indexs_to_refresh)):
            es_instance.indices.refresh(index=in_)
    except Exception as e:
        logger.error(f"Error update docs to index in Elasticsearch: {str(e)}.")
        raise e
    return successful, failed


# --- Import job configuration -------------------------------------------
# Destination ES index and the directory tree of JSON files to import.
dst_index_name = "test_180项目"
directory = '/data/es_import/人参网站'
# Field that uniquely identifies a document (used as the ES _id) to prevent
# duplicate inserts when the import is re-run.
src_increase_field = "internal_id_elasticsearch" 
handle_file_paths = []       # files fully processed so far
handle_file_count_map = {}   # file path -> number of docs inserted from it

# NOTE(review): credentials are hard-coded and TLS verification is disabled;
# move secrets to config/env and supply ca_certs in production.
es_instance = Elasticsearch(hosts=["https://119.254.155.105:19202"], request_timeout=3600, basic_auth=('elastic', 'EPPso10r8Ja'),verify_certs=False) #,ca_certs='/path/to/your/ca.pem')  # path of the CA certificate, if there is one
# Index settings applied at creation time: ik_smart Chinese analyzer as the
# default, very large result/term/highlight limits, and a custom analyzer
# that drops single-character tokens (see the "len" length filter below).
index_settings = {
    "settings": {
                    "index.analysis.analyzer.default.type": "ik_smart",
                    "index.highlight.max_analyzed_offset":50000000,
                    "index.max_terms_count":50000000,
                    "index.max_result_window":50000000,
                    "index.max_inner_result_window":50000000,
                    "number_of_shards": 2,
                    "number_of_replicas": 0,
                    # custom analyzer: ik_smart tokenizer + the length filter
                    "index.analysis.analyzer.my_ik_analyzer":{
                        'type':"custom",
                        "tokenizer":"ik_smart",
                        "filter":["len"]
                    },
                    # drop tokens shorter than 2 characters
                    "index.analysis.filter":{
                        "len": {
                            "type": "length",
                            "min": 2
                        }
                    }

        },
}

# Create the destination index; if it already exists ES raises an error,
# which is logged and deliberately ignored so re-runs can proceed.
try:
    es_instance.indices.create(index=dst_index_name, body=index_settings)
except Exception as e:
    print("Exception {} when create index ,ignore and continue".format(e))
    logger.error("exception {}".format(e))

MAX_COMMIT_LIMIT = 2000  # flush a bulk insert to ES once this many docs accumulate

# Date formats accepted for the time fields, tried in order until one parses.
# Replaces the original 7-level nested try/except pyramid with a flat loop.
TIME_FORMATS = (
    "%Y-%m-%d",
    "%Y-%m-%d %H:%M",
    "%Y-%m-%d %H:%M:%S",
    "%Y-%m",
    "%Y-%m-%d %H:%M:%S.%f",
    "%Y年%m月%d日",
    "%Y-%m-%dT%H:%M:%SZ",
)

# Candidate document fields that may hold a date/time needing normalization
# ('成文日期' appeared twice in the original list; deduplicated here).
TIME_FIELD_KEYS = ['时间','采集时间','更新日期','出版日期','application_date','成文日期','发文时间','发文日期','发布日期','发布时间','实施日期','失效日期','发布时间_实施时间','备案日期','批准日期','发表时间','确认日期','实施或试行日期','publish_date','publication_date']


def _normalize_time_field(doc_data, time_field_key):
    """Normalize doc_data[time_field_key] in place to 'YYYY-MM-DDTHH:MM:SSZ'.

    Behavior (unchanged from the original inline pyramid):
      * missing field     -> left untouched
      * empty string      -> replaced with the plain date '1970-01-01'
      * parseable value   -> reformatted to ISO-8601 Zulu
      * unparseable value -> logged, replaced with '1970-01-01T00:00:00Z'
    """
    time_field_value = doc_data.get(time_field_key, None)
    if time_field_value is not None:
        # strip ordinary and non-breaking spaces scraped from web pages
        time_field_value = time_field_value.strip(' ').strip('\xa0')
    if time_field_value is None:
        logger.trace("原始字段{} 值 {},不进行处理。".format(time_field_key,time_field_value))
        return
    if time_field_value == '':
        logger.warning("文档 {} 原始字段{} 值 {},替换为 1970-01-01。".format(doc_data.get(src_increase_field,None),time_field_key,time_field_value))
        # NOTE: kept as a plain date (no ISO conversion) to match prior behavior.
        doc_data[time_field_key] = '1970-01-01'
        return
    logger.debug("原始字段{} 值{},开始进行处理。".format(time_field_key,time_field_value))
    parsed = None
    last_error = None
    for fmt in TIME_FORMATS:
        try:
            parsed = datetime.strptime(time_field_value, fmt)
            logger.trace("原始字段值 {} 被转变为 {}".format(time_field_value,parsed))
            break
        except ValueError as e:  # strptime on a str only raises ValueError
            last_error = e
    if parsed is None:
        logger.error("Exception try is {} for field {} value {},current dos is {}".format(last_error,time_field_key,time_field_value,doc_data))
        parsed = datetime.strptime('1970-01-01', "%Y-%m-%d")
        logger.error("Exception try is {} for field {} value {},changed to {}".format(last_error,time_field_key,time_field_value,parsed))
    doc_data[time_field_key] = "{}".format(parsed.strftime('%Y-%m-%dT%H:%M:%SZ'))
    logger.trace("Format from {} to {}".format(time_field_value,doc_data[time_field_key]))


try:
    file_path = None
    # Walk the import directory recursively; every file must be a JSON array of docs.
    for root, dirs, files in os.walk(directory):
        for file_name in files:
            file_path = os.path.join(root, file_name)
            logger.info("当前正在处理的文件是 {}".format(file_path))
            # Bug fix: the original reused the loop variable 'file' as the
            # open handle, shadowing it; renamed to 'fh'.
            with open(file_path, 'r') as fh:
                json_data = json.load(fh)
                # may need adjusting depending on the export format:
                #json_data = json_data.get('RECORDS')
                to_handle = len(json_data)
                logger.debug("当前正在处理的文件的条目 {}个".format(to_handle))
            logger.debug("Write to elasticsearch for index {}".format(dst_index_name))
            insert_data_length_all = 0
            docs_to_add_fields = []
            for doc_data in json_data:
                src_increase_field_value = doc_data.get(src_increase_field, None)
                if src_increase_field_value is None:
                    # no unique id present -> allocate one from the MongoDB counter
                    doc_data[src_increase_field] = get_next_sequence("global")
                logger.trace("当前的 src_increase_field(id_to_unique) 为 {}".format(doc_data.get(src_increase_field,None)))
                for time_field_key in TIME_FIELD_KEYS:
                    _normalize_time_field(doc_data, time_field_key)
                logger.trace("Write to elasticsearch for index {} Current Doc is {}".format(dst_index_name,doc_data.get('篇名',doc_data.get('主题',None))))
                docs_to_add_fields.append(doc_data)
                # flush a full batch
                if len(docs_to_add_fields) >= MAX_COMMIT_LIMIT:
                    insert_docs(es_instance, dst_index_name, docs_to_add_fields, id_to_unique=src_increase_field)
                    insert_data_length_all = len(docs_to_add_fields) + insert_data_length_all
                    logger.debug("elasticsearch_to_elasticsearch commit to es index {} for {} insert reached {}.".format(dst_index_name,len(docs_to_add_fields),insert_data_length_all))
                    docs_to_add_fields = []
            # flush the final partial batch
            if docs_to_add_fields:
                insert_docs(es_instance, dst_index_name, docs_to_add_fields, id_to_unique=src_increase_field)
                insert_data_length_all = len(docs_to_add_fields) + insert_data_length_all
                logger.debug("elasticsearch_to_elasticsearch commit to es index {} for {} insert reached {}.".format(dst_index_name,len(docs_to_add_fields),insert_data_length_all))
                docs_to_add_fields = []
            logger.trace("当前已处理的文件的条目 {},处理完成 {}".format(to_handle,insert_data_length_all))
            # sanity check: every entry in the file must have been committed
            if to_handle != insert_data_length_all:
                logger.error("not match,当前已经处理的文件{} 的条目 {},处理完成 {}".format(file_path,to_handle,insert_data_length_all))
                break
            else:
                logger.info("当前已经处理的文件{} 的条目 {},处理完成 {}".format(file_path,to_handle,insert_data_length_all))
            handle_file_paths.append(file_path)
            handle_file_count_map[file_path] = insert_data_length_all
    print('所有的文件已经处理完成，具体文件包括 {}，文件和处理数量的映射结果为 {}'.format(handle_file_paths,handle_file_count_map))
except Exception as e:
    import traceback
    traceback.print_exc()
    logger.error("exception {} for current path {}, handled_path {},已经处理的文件数量映射 {}".format(traceback.format_exc(),file_path,handle_file_paths,handle_file_count_map))
