from elasticsearch8 import Elasticsearch, helpers
from elasticsearch8.exceptions import ConnectionError
from pymysql import connect
from pymysql.cursors import SSCursor
import json
from datetime import datetime


class ElasticsearchDataTransfer:
    """Move data between Elasticsearch, JSON/JSONL files and MySQL tables.

    Supported operations:
      * ``export_data``       - scroll an index into a newline-delimited JSON file
      * ``import_data``       - re-import a file produced by ``export_data``
      * ``import_jsonlines``  - import a plain ``.jsonl`` file
      * ``import_mysql_table``- copy selected columns of a MySQL table
    """

    def __init__(self, host="http://localhost:9200", username=None, password=None):
        """
        Create the Elasticsearch client and verify connectivity.

        :param host: ES host address, e.g. ``http://localhost:9200``
        :param username: optional basic-auth username
        :param password: optional basic-auth password
        :raises ConnectionError: if the cluster does not answer a ping
        """
        # Only enable basic auth when both credentials were supplied.
        auth = (username, password) if username and password else None
        self.es = Elasticsearch(
            hosts=[host],
            basic_auth=auth  # auth=None means no basic auth is configured
        )
        try:
            # ping() swallows transport errors and returns False, so a
            # failed connection surfaces as our own ConnectionError here.
            if not self.es.ping():
                raise ConnectionError("❌ 连接 Elasticsearch 失败，请检查配置。")
        except ConnectionError as e:
            print(e)
            raise

    # ===================== bulk helper =====================
    def _bulk_index(self, actions, chunk_size):
        """
        Feed *actions* through ``helpers.streaming_bulk`` and count results.

        ``raise_on_error=False`` keeps iteration going on per-document
        failures so the failure counter actually works — with the default
        (``True``) the first failed document raises ``BulkIndexError`` and
        the ``failed`` count could never be non-zero.

        :param actions: iterable of bulk action dicts
        :param chunk_size: number of actions per bulk request
        :return: ``(success, failed)`` document counts
        """
        success, failed = 0, 0
        for ok, _ in helpers.streaming_bulk(
                client=self.es,
                actions=actions,
                chunk_size=chunk_size,
                raise_on_error=False):
            if ok:
                success += 1
            else:
                failed += 1
        return success, failed

    # ===================== data export =====================
    def export_data(self, index_name, output_file, query=None, scroll="2m", size=1000):
        """
        Export documents from an index to a newline-delimited JSON file.

        Each output line is one complete search hit (``_index``, ``_id``,
        ``_source``, ...), so the file can be fed straight back into
        :meth:`import_data`.

        :param index_name: source index name
        :param output_file: output file path
        :param query: query body, defaults to match_all
        :param scroll: scroll context lifetime (e.g. ``"2m"``)
        :param size: number of hits fetched per scroll round-trip
        """
        query = query or {"query": {"match_all": {}}}

        response = self.es.search(index=index_name, body=query, scroll=scroll, size=size)
        scroll_id = response["_scroll_id"]
        hits = response["hits"]["hits"]

        try:
            with open(output_file, "w", encoding="utf-8") as f:
                while hits:
                    for hit in hits:
                        f.write(json.dumps(hit, ensure_ascii=False) + "\n")
                    # Pull the next batch from the scroll context.
                    response = self.es.scroll(scroll_id=scroll_id, scroll=scroll)
                    scroll_id = response["_scroll_id"]
                    hits = response["hits"]["hits"]
        finally:
            # Release the server-side scroll context even if writing failed.
            self.es.clear_scroll(scroll_id=scroll_id)

        print(f"✅ 数据已成功导出至 {output_file}")

    # ===================== data import =====================
    def import_data(self, input_file, target_index=None, chunk_size=500, override_index=True):
        """
        Import a file produced by :meth:`export_data`, preserving
        ``_id`` / ``_index`` / ``_source``.

        :param input_file: JSON file path (one complete hit per line)
        :param target_index: force a target index; ``None`` keeps each
                             document's original ``_index``
        :param chunk_size: number of documents per bulk request
        :param override_index: whether to override the original ``_index``;
                               only effective when *target_index* is given
        """

        def generate_actions():
            # Lazily stream the file so arbitrarily large exports fit in memory.
            with open(input_file, "r", encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    try:
                        doc = json.loads(line)
                    except json.JSONDecodeError as e:
                        print(f"❌ JSON 解码失败: {e}")
                        continue
                    if "_id" not in doc or "_source" not in doc:
                        print(f"⚠️ 跳过非法记录: {doc}")
                        continue
                    index_name = target_index if override_index and target_index else doc.get("_index")
                    yield {
                        "_op_type": "index",
                        "_index": index_name,
                        "_id": doc["_id"],
                        "_source": doc["_source"]
                    }

        print(f"🚀 开始导入数据到索引 '{target_index or '原索引'}'...")
        success, failed = self._bulk_index(generate_actions(), chunk_size)
        print(f"✅ 导入完成！成功: {success} 条，失败: {failed} 条")

    # ===================== JSONL file import =====================
    def import_jsonlines(self, file_path, target_index, id_field=None, chunk_size=500):
        """
        Import a ``.jsonl`` file into the given index.

        :param file_path: JSONL file path (one JSON object per line)
        :param target_index: target index name
        :param id_field: optional document field whose value is used as ``_id``
        :param chunk_size: number of documents per bulk request
        """

        def generate_actions():
            with open(file_path, "r", encoding="utf-8") as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    try:
                        doc = json.loads(line)
                    except json.JSONDecodeError as e:
                        print(f"❌ JSON 解析错误: {e}")
                        continue

                    action = {
                        "_op_type": "index",
                        "_index": target_index,
                        "_source": doc
                    }
                    # Only set an explicit _id when the field is present;
                    # otherwise ES auto-generates one.
                    if id_field and id_field in doc:
                        action["_id"] = str(doc[id_field])
                    yield action

        print(f"🚀 开始导入 JSONL 数据到索引 '{target_index}'...")
        success, failed = self._bulk_index(generate_actions(), chunk_size)
        print(f"✅ 导入完成！成功: {success} 条，失败: {failed} 条")

    # ===================== MySQL table import =====================
    def import_mysql_table(self, host, user, password, port, database, table, index_name,
                           column_mapping, id_column=None, batch_size=1000):
        """
        Import rows of a MySQL table into Elasticsearch; only columns listed
        in *column_mapping* are transferred.

        :param host: MySQL host address
        :param user: MySQL username
        :param password: MySQL password
        :param port: MySQL port
        :param database: database name
        :param table: table name
        :param index_name: target index name (must already exist)
        :param column_mapping: ``{mysql_col: es_field}``; a ``None`` value
                               means the column is ignored
        :param id_column: optional MySQL column used as ``_id``
                          (must be defined in *column_mapping*)
        :param batch_size: rows fetched per MySQL batch and docs per bulk request
        :raises ValueError: on an invalid mapping/id_column or a missing index
        """
        # Fail fast before opening a MySQL connection if the index is absent.
        if not self.es.indices.exists(index=index_name):
            raise ValueError(f"❌ 索引 '{index_name}' 不存在，请先手动创建索引")

        conn = connect(
            host=host,
            user=user,
            password=password,
            port=port,
            database=database,
            cursorclass=SSCursor,  # server-side cursor: stream rows, low memory
            charset='utf8mb4'
        )
        cursor = conn.cursor()
        try:
            # Discover the table's column names.
            cursor.execute(f"SELECT * FROM `{table}` LIMIT 1")
            all_fields = [desc[0] for desc in cursor.description]

            # Validate that every mapped column exists in the table.
            invalid_cols = set(column_mapping.keys()) - set(all_fields)
            if invalid_cols:
                raise ValueError(f"❌ 错误：column_mapping 中包含不存在的列: {invalid_cols}")

            # Validate id_column.
            if id_column is not None:
                if id_column not in column_mapping:
                    raise ValueError(f"❌ 参数错误：id_column='{id_column}' 必须在 column_mapping 中定义")
                if column_mapping[id_column] is None:
                    raise ValueError(f"❌ 参数错误：id_column='{id_column}' 被标记为 None，不能用作 _id")

            # Columns to transfer (mapping value not None).
            selected_fields = [col for col in column_mapping if column_mapping[col] is not None]

            columns_str = ", ".join(f"`{col}`" for col in selected_fields)
            query = f"SELECT {columns_str} FROM `{table}`"

            print(f"📊 正在从 MySQL 表 `{table}` 读取数据...")

            def mysql_rows():
                # Stream the result set in batches through the SSCursor.
                cursor.execute(query)
                while True:
                    rows = cursor.fetchmany(batch_size)
                    if not rows:
                        break
                    yield from rows

            def build_actions():
                for row in mysql_rows():
                    doc = {}
                    for col, value in zip(selected_fields, row):
                        # Convert MySQL types that json cannot serialize.
                        if isinstance(value, datetime):
                            value = value.isoformat()
                        elif isinstance(value, bytes):
                            value = value.decode('utf-8')
                        doc[column_mapping[col]] = value

                    action = {
                        "_op_type": "index",
                        "_index": index_name,
                        "_source": doc
                    }

                    # Attach _id from the configured column, if requested.
                    if id_column is not None:
                        id_value = doc.get(column_mapping[id_column])
                        if id_value is None:
                            raise ValueError(f"❌ 文档缺少有效的 _id 值，字段: {column_mapping[id_column]}")
                        action["_id"] = str(id_value)

                    yield action

            print(f"🚀 开始将数据导入到 Elasticsearch 索引 '{index_name}'...")
            success, failed = self._bulk_index(build_actions(), batch_size)
            print(f"✅ MySQL 表 `{table}` 成功导入 Elasticsearch！共成功: {success} 条，失败: {failed} 条。")
        finally:
            cursor.close()
            conn.close()


if __name__ == "__main__":
    # NOTE(review): hosts and credentials are hardcoded below — consider
    # moving them into environment variables or a config file.
    #
    # Other supported operations (see the corresponding methods):
    #   transfer.export_data(index_name=..., output_file=...)
    #   transfer.import_data(input_file=..., target_index=...)
    #   transfer.import_jsonlines(file_path=..., target_index=..., id_field=...)

    transfer = ElasticsearchDataTransfer(
        host="http://192.168.0.91:59200/"
    )

    # MySQL column -> ES field mapping; a None value would skip the column.
    column_mapping = {
        "id": "id",
        "question": "question",
        "answer": "answer",
        "business_type": "type",
        "created_at": "create"
    }

    transfer.import_mysql_table(
        host="192.168.0.91",
        user="root",
        password="BWOYO0R7oDt40y0shH6t",
        port=13306,
        database="bot_demo",
        table="bot_knowledge",
        index_name="chatbot_kg",
        column_mapping=column_mapping,
        id_column="id"  # use the "id" column as document _id
    )
