import json
import logging
import time

import requests
from elasticsearch import Elasticsearch, helpers
import pymysql


logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


# Open the MySQL connection used for reading service configuration.
def connect_to_db():
    """Connect to the data_browser MySQL database.

    Returns:
        A pymysql connection with DictCursor rows on success, or None
        when the connection attempt fails (the error is logged).
    """
    try:
        conn = pymysql.connect(
            host='rds50g3807a68zwc9soo969.mysql.rds.aliyuncs.com',
            user='data_browser',
            password='D6a_T3_Brs',
            database='data_browser',
            charset='utf8mb4',
            # DictCursor so callers can read columns by name.
            cursorclass=pymysql.cursors.DictCursor,
        )
    except Exception as e:
        logging.error(f"连接到数据库时出错: {e}")
        return None
    logging.info("连接到数据库成功")
    return conn


# Ensure the chart-library index exists with an explicit mapping.
def create_index(es, index_name):
    """Create *index_name* with the chart-library field mapping if absent.

    User-searched text fields use the ik_smart Chinese analyzer; the other
    fields are stored as exact-match keywords. Creation failures are logged,
    not raised.

    Args:
        es: connected Elasticsearch client.
        index_name: name of the index to create.
    """
    mappings = {
        "properties": {
            "title": {"type": "text", "analyzer": "ik_smart"},
            "creator": {"type": "keyword"},
            "type": {"type": "keyword"},
            "accessCount": {"type": "keyword"},
            "appId": {"type": "keyword"},
            "createdAt": {"type": "keyword"},
            "dataSetId": {"type": "keyword"},
            "dataSetTitle": {"type": "keyword"},
            "isDelete": {"type": "keyword"},
            "path": {"type": "text", "analyzer": "ik_smart"},
            "folderId": {"type": "text", "analyzer": "ik_smart"},
            "typeCode": {"type": "keyword"},
            "folderName": {"type": "keyword"}
        }
    }

    request_body = {
        "mappings": mappings
    }

    # Guard clause: nothing to do when the index already exists.
    if es.indices.exists(index=index_name):
        logging.info(f"索引 {index_name} 已经存在")
        return

    try:
        response = es.indices.create(index=index_name, body=request_body)
        logging.info(f"创建索引 {index_name} 成功: {response}")
    except Exception as e:
        # BUG FIX: the previous `except Elasticsearch.ElasticsearchException`
        # referenced a non-existent attribute of the Elasticsearch class, so
        # any creation failure raised AttributeError inside the handler.
        # A single Exception handler covers all client/transport errors.
        logging.error(f"创建索引时出错 (未知错误): {e}")


# Build the Elasticsearch client and verify the cluster is reachable.
def create_es_client(es_url):
    """Create an Elasticsearch client for *es_url* and verify it with ping().

    Returns:
        The connected client, or None when construction or the ping check
        fails (the error is logged).
    """
    try:
        client = Elasticsearch(
            [es_url],  # full URL including the scheme
            basic_auth=('elastic', 'elaKib#1111'),  # cluster credentials
        )
        # ping() returns False instead of raising, so promote it to an error.
        if not client.ping():
            raise ValueError("Connection failed")
        logging.info("连接到Elasticsearch成功")
        return client
    except Exception as e:
        logging.error(f"连接到Elasticsearch时出错: {e}")
        return None


# Fetch a single configuration value from MySQL.
def read_data_from_mysql(connection, query):
    """Execute *query* and return the 'param_value' column of the first row.

    Args:
        connection: open DB connection whose cursors yield dict rows.
        query: SQL expected to select a 'param_value' column.

    Returns:
        The first row's param_value, or None when no row matches or the
        query fails (both cases are logged).
    """
    try:
        with connection.cursor() as cursor:
            cursor.execute(query)
            record = cursor.fetchone()
            if record:
                return record['param_value']
            logging.warning("未找到匹配的记录")
            return None
    except Exception as e:
        logging.error(f"从 MySQL 数据库读取数据时出错: {e}")
        return None


# Snapshot every document currently stored in the index.
def get_existing_ids(es, index_name):
    """Return a dict mapping each document _id in *index_name* to its _source."""
    match_all = {"query": {"match_all": {}}}
    return {
        hit['_id']: hit['_source']
        for hit in helpers.scan(es, index=index_name, query=match_all)
    }


# Synchronize the chart documents into Elasticsearch with bulk operations.
def import_data_to_es_bulk(es, data, index_name, batch_size):
    """Make the index *index_name* mirror *data*.

    Documents absent from *data* are deleted, new documents are indexed,
    and existing documents whose _source changed are re-indexed. Operations
    are sent via helpers.bulk in batches of *batch_size*. All failures are
    logged; nothing is raised to the caller.

    Args:
        es: connected Elasticsearch client.
        data: list of {'_id': str, '_source': dict} items (see process_data).
        index_name: target index.
        batch_size: number of bulk actions per request.
    """
    try:
        # Current state of the index: _id -> _source.
        existing_docs = get_existing_ids(es, index_name)
        # Desired state from the source system.
        current_docs = {item['_id']: item['_source'] for item in data}

        # Delete documents that no longer exist upstream.
        actions = [
            {"_op_type": "delete", "_index": index_name, "_id": doc_id}
            for doc_id in set(existing_docs) - set(current_docs)
        ]
        # Index new documents AND re-index documents whose content changed.
        # (Previously only brand-new ids were indexed, so updates to an
        # existing chart were never propagated even though the sources were
        # fetched for comparison.)
        actions.extend(
            {
                "_op_type": "index",
                "_index": index_name,
                "_id": doc_id,
                "_source": source,
            }
            for doc_id, source in current_docs.items()
            if existing_docs.get(doc_id) != source
        )

        if not actions:
            logging.info(f"数据库和Elasticsearch中的数据已经一致，无需执行任何操作")
            return

        # Ceiling division: number of bulk requests needed.
        total_batches = (len(actions) + batch_size - 1) // batch_size
        logging.info(f"总共有 {len(actions)} 条操作需要执行，每次批量导入 {batch_size} 条，共需 {total_batches} 批次")
        logging.debug(f"前几个动作: {actions[:5]}")

        for i in range(total_batches):
            batch_actions = actions[i * batch_size:(i + 1) * batch_size]

            delete_count = sum(1 for action in batch_actions if action["_op_type"] == "delete")
            new_count = len(batch_actions) - delete_count

            # raise_on_error=False so partial failures are returned in
            # `errors` and logged below instead of aborting the whole sync
            # (with the default True, the error-handling code was dead).
            success, errors = helpers.bulk(
                es, batch_actions, chunk_size=batch_size, raise_on_error=False
            )
            logging.info(f"第 {i + 1} 批次: 成功删除 {delete_count} 条记录，成功新增 {new_count} 条记录到 {index_name}")

            for error in errors:
                logging.error(f"导入数据时出现错误: {error}")
                logging.error(f"具体错误信息: {json.dumps(error, indent=2)}")
                # Bulk error items are keyed by their op type; the old code
                # assumed 'index' and raised KeyError for failed deletes.
                op_info = error.get('index') or error.get('delete') or {}
                failed_id = op_info.get('_id')
                failed_doc = next((doc for doc in batch_actions if doc['_id'] == failed_id), None)
                if failed_doc:
                    logging.error(f"失败的文档: {json.dumps(failed_doc, indent=2)}")

    except Exception as e:
        logging.error(f"导入数据到Elasticsearch时出错: {e}")


# Shape raw chart records into bulk-import items.
def process_data(data):
    """Wrap each record in the {'_id', '_source'} form used by the bulk sync.

    The _id combines appId and id ("<appId>_<id>") so a chart id is unique
    across applications.
    """
    return [
        {'_id': f"{record['appId']}_{record['id']}", '_source': record}
        for record in data
    ]


# Entry point: read config from MySQL, fetch charts over HTTP, sync into ES.
def main():
    """Sync chart metadata from the chart service into Elasticsearch.

    Steps: connect to MySQL, look up the ES_URL_DEV config value, connect
    to Elasticsearch, ensure the index exists, fetch all charts from the
    chart service, and bulk-sync them into the index.
    """
    db_connection = connect_to_db()

    if db_connection is None:
        return

    # try/finally so the DB connection is closed on every exit path
    # (the old code leaked it on the early returns below).
    try:
        query = (
            "select param_value from service_config where param_code = 'ES_URL_DEV'"
        )
        # Read the ES_URL_DEV value from the database.
        es_url = read_data_from_mysql(db_connection, query)

        if es_url:
            logging.info(f"获取到的 ES_URL_DEV 值是: {es_url}")
        else:
            logging.info("未获取到 ES_URL_DEV 的值")
            # Without an ES URL there is nothing to sync; previously the code
            # fell through and called create_es_client(None).
            return

        es = create_es_client(es_url)
        if es is None:
            return

        index_name = "edb_chart_library"  # ES index name

        create_index(es, index_name)

        # Fetch all chart records from the chart service.
        url = 'http://10.2.25.4:9011/data_browser/myChart/hsApi/queryAllChart'
        response = requests.get(url)
        response.raise_for_status()  # fail fast on HTTP errors
        data = response.json()

        if data is None:
            return

        processed_data = process_data(data)

        import_data_to_es_bulk(es, processed_data, index_name, batch_size=5000)

        # Give ES a moment to make the writes visible before counting.
        time.sleep(1)

        count = es.count(index=index_name)['count']
        logging.info(f"ES中的总数是: {count}")
    finally:
        db_connection.close()


if __name__ == "__main__":
    main()
