# -*- coding: utf-8 -*-

from elasticsearch import Elasticsearch, helpers
from faker import Faker
import random
from datetime import datetime, timedelta
import time
import logging

# Configure logging: timestamped INFO-level messages to stderr.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Initialise Faker (Simplified Chinese locale) and the Elasticsearch client.
fake = Faker('zh_CN')
es = Elasticsearch(hosts=["http://localhost:9200"])  # adjust to match your ES deployment

# Pools of candidate values used to populate the keyword/array fields below.
SCENE_CODES = ["tech_doc", "dev_guide", "api_ref", "user_manual", "troubleshooting"]
VISIBLE_TAGS = ["public", "internal_rd", "confidential", "restricted"]
USABLE_TAGS = ["read", "download", "edit", "share", "comment"]
SOURCE_ORG_IDS = ["dept_001", "dept_002", "dept_003", "dept_004", "dept_005"]
SOURCE_SYSTEMS = ["KM-System", "CMS", "DocumentHub", "WikiSystem"]
CUSTOMIZED_TAGS = ["elasticsearch", "springboot", "tutorial", "java", "database",
                   "microservices", "cloud", "devops", "best_practices"]
KN_TYPES = ["guide", "reference", "tutorial", "faq", "concept"]
PUBLISHED_FLAGS = ["published", "draft", "archived"]


def generate_document(seq_num):
    """Build one fake document whose fields strictly match the index mapping.

    Args:
        seq_num: 1-based sequence number; drives the ids, titles and timestamps
            so that those fields are deterministic per document.

    Returns:
        dict suitable as the ``_source`` of a bulk-index action.
    """
    # Deterministic timestamp: one minute per document after the fixed base time.
    stamp = datetime(2024, 9, 11, 10, 0, 0) + timedelta(minutes=seq_num)
    iso_stamp = stamp.isoformat(timespec='milliseconds') + 'Z'

    boolean_keywords = ["true", "false"]  # mapping stores boolean-like flags as keyword strings

    return {
        # keyword fields
        "task_id": f"task_20240911_{seq_num:07d}",
        "file_id": f"file_{random.randint(100000, 999999)}",
        "para_id": f"para_{random.randint(1000000, 9999999)}",
        "vectorized_flag": random.choice(boolean_keywords),
        "scene_codes": random.sample(SCENE_CODES, random.randint(1, 3)),
        "visible_tags": random.sample(VISIBLE_TAGS, random.randint(1, 2)),
        "usable_tags": random.sample(USABLE_TAGS, random.randint(1, 3)),
        "source_org_id": random.choice(SOURCE_ORG_IDS),
        "source_system": random.choice(SOURCE_SYSTEMS),
        "customized_tags": random.sample(CUSTOMIZED_TAGS, random.randint(2, 5)),
        "published_flag": random.choice(PUBLISHED_FLAGS),
        "kn_type": random.choice(KN_TYPES),
        "latest_flag": random.choice(boolean_keywords),
        "del_flag": random.choice(boolean_keywords),
        # date fields (strict_date_optional_time format)
        "create_time": iso_stamp,
        "update_time": iso_stamp,
        # text fields
        "kn_title": f"关于Spring Data Elasticsearch的深度实践指南_{seq_num}",
        "para_title": f"段落标题_{seq_num}",
        "summary": f"本文详细介绍了如何在Spring Boot项目中集成并使用Spring Data Elasticsearch进行高效数据检索与分析，包括索引管理、复杂查询和性能优化等内容。版本: {seq_num}",
        "content": f"Spring Data Elasticsearch是Spring生态系统对Elasticsearch客户端库的封装，它简化了ES操作，允许开发者通过熟悉的Spring编程模型和Repository接口与ES交互。本文将通过实际案例，从项目搭建、实体映射、Repository编写到复杂查询构建，逐步讲解如何构建一个高效的全文搜索服务。同时，文章也会探讨一些高级主题，如动态模板、索引生命周期管理以及与现有JPA项目的协同工作。这是第{seq_num}篇文档。"
    }


def check_and_create_index(index_name="knowledge_index"):
    """Ensure the target index exists, creating it with the expected mapping.

    The mapping is ``dynamic: strict``, so every field written by
    ``generate_document`` must be declared here.

    Args:
        index_name: Name of the index to check/create. Defaults to
            "knowledge_index" so existing callers keep their behavior.

    Raises:
        ValueError: if Elasticsearch cannot be reached (ping fails).
        Exception: re-raised client errors from the exists/create calls.
    """
    index_mapping = {
        "settings": {
            "number_of_shards": 2,
            "number_of_replicas": 1,
            # Relaxed refresh interval: favors bulk-indexing throughput.
            "refresh_interval": "30s",
            "analysis": {
                "analyzer": {
                    # Default analyzer uses the IK plugin for Chinese text.
                    "default": {
                        "type": "ik_max_word"
                    }
                }
            },
            "index.mapping.total_fields.limit": 2000,
            "index.mapping.nested_fields.limit": 50
        },
        "mappings": {
            # Reject documents containing fields not declared below.
            "dynamic": "strict",
            "properties": {
                "task_id": {"type": "keyword"},
                "file_id": {"type": "keyword"},
                "para_id": {"type": "keyword"},
                "kn_title": {
                    "type": "text",
                    "analyzer": "ik_max_word",
                    "fields": {"keyword": {"type": "keyword", "ignore_above": 256}}
                },
                "para_title": {
                    "type": "text",
                    "analyzer": "ik_max_word",
                    "fields": {"keyword": {"type": "keyword", "ignore_above": 256}}
                },
                "summary": {
                    "type": "text",
                    "analyzer": "ik_max_word",
                    "fields": {"keyword": {"type": "keyword", "ignore_above": 256}}
                },
                "content": {"type": "text", "analyzer": "ik_max_word"},
                "vectorized_flag": {"type": "keyword"},
                "scene_codes": {"type": "keyword"},
                "visible_tags": {"type": "keyword"},
                "usable_tags": {"type": "keyword"},
                "source_org_id": {"type": "keyword"},
                "source_system": {"type": "keyword"},
                "customized_tags": {"type": "keyword"},
                "create_time": {"type": "date", "format": "strict_date_optional_time||epoch_millis"},
                "update_time": {"type": "date", "format": "strict_date_optional_time||epoch_millis"},
                "published_flag": {"type": "keyword"},
                "kn_type": {"type": "keyword"},
                "latest_flag": {"type": "keyword"},
                "del_flag": {"type": "keyword"}
            }
        }
    }

    try:
        if not es.ping():
            raise ValueError("无法连接到 Elasticsearch")

        if es.indices.exists(index=index_name):
            logger.info(f"索引 {index_name} 已存在，无需创建")
            # Optional: delete and recreate here if a clean rebuild is ever needed.
        else:
            es.indices.create(index=index_name, body=index_mapping)
            logger.info("索引创建成功")
    except Exception as e:
        # Also catches the ValueError above: log once, then propagate.
        logger.error(f"连接或检查索引时出错: {e}")
        raise


def generate_bulk_data(total_docs, index_name="knowledge_index"):
    """Yield bulk-index actions for ``helpers.streaming_bulk``.

    Args:
        total_docs: Number of documents to generate (sequence numbers
            1..total_docs inclusive).
        index_name: Target index name; defaults to "knowledge_index" so the
            previously hard-coded behavior is unchanged for existing callers.

    Yields:
        dict actions with "_index" and "_source" keys.
    """
    for seq in range(1, total_docs + 1):
        yield {
            "_index": index_name,
            "_source": generate_document(seq)
        }


def main():
    """Entry point: ensure the index exists, bulk-load fake documents, report stats."""
    total_documents = 100000  # how many documents to generate
    batch_size = 5000  # documents per bulk request chunk

    try:
        # Step 1: make sure the target index exists with the right mapping.
        logger.info("开始检查/创建索引...")
        check_and_create_index()

        # Step 2: stream the generated documents into Elasticsearch.
        logger.info("开始批量插入数据...")
        started = time.time()

        inserted = 0
        actions = generate_bulk_data(total_documents)
        for ok, item in helpers.streaming_bulk(
                es,
                actions,
                chunk_size=batch_size,
                raise_on_error=False,  # log per-document failures instead of aborting
                max_retries=2,
                initial_backoff=1,  # seconds before the first retry
                max_backoff=30  # cap on the retry delay
        ):
            if not ok:
                logger.error(f"插入失败: {item}")
                continue
            inserted += 1
            if inserted % batch_size == 0:
                logger.info(f"已成功插入 {inserted} 条文档")

        elapsed = time.time() - started

        # Step 3: summary statistics.
        logger.info(f"数据插入完成！总共成功插入 {inserted} 条文档")
        logger.info(f"总耗时: {elapsed:.2f} 秒")
        if inserted > 0 and elapsed > 0:
            logger.info(f"平均插入速度: {inserted / elapsed:.2f} 条/秒")

        # Step 4: sanity-check the document count reported by the index.
        count_result = es.count(index="knowledge_index")
        logger.info(f"索引中现有文档数量: {count_result['count']}")

    except Exception as e:
        logger.error(f"执行过程中发生错误: {str(e)}")
        raise


# Script entry point: run the full generate-and-index workflow.
if __name__ == "__main__":
    main()