import ast
import datetime
import json
from typing import List

import pandas as pd
from peewee import JOIN

from application.db.elastic_db.base_elastic import create_elastic_mapping, BaseElasticSearch
from application.db.mysql_db.nsfc.NsfcInfoAttachmentList import NsfcInfoAttachmentList
from application.db.mysql_db.nsfc.NsfcInfoList import NsfcInfoList
from application.db.mysql_db.nsfc.NsfcInfoSectionList import NsfcInfoSectionList
from application.db.mysql_db.nsfc.NsfcInfoTypeDict import NsfcInfoTypeDict
from application.db.mysql_db.nsfc.NsfcPublishProjectCodeDict import NsfcPublishProjectCodeDict
from application.db.mysql_db.nsfc.NsfcResourceSourceDict import NsfcResourceSourceDict
from application.utils.logger import get_logger

logger = get_logger("nsfc_to_es")


# ----------------------------- 工具函数 -----------------------------
def get_section_text(sections: List[dict]) -> str:
    """
    Flatten every section's ``src_text.children`` into one plain-text string.

    Each section contributes one line, built by concatenating the non-empty
    ``text`` values of its children; sections with no ``src_text`` or no
    usable children are skipped. Lines are joined with ``"\\n"``.
    """
    lines = []
    for section in sections:
        src = section.get("src_text")
        if not src:
            continue

        # Keep only dict children that carry a truthy "text" value.
        line = "".join(
            child["text"]
            for child in src.get("children", [])
            if isinstance(child, dict) and child.get("text")
        )
        if line:
            lines.append(line)

    return "\n".join(lines)


def get_section_translate(sections: List[dict]) -> List[dict]:
    """
    Convert each section's ``media_info`` from a Python-literal string to a dict.

    Sections are mutated in place and the same list is returned; a non-list
    input (e.g. NaN produced by ``Series.map`` misses) yields ``[]``.

    Bug fix: the original called ``ast.literal_eval`` on any truthy value,
    which raises ``TypeError``/``ValueError`` when ``media_info`` is already
    a dict or is malformed. Now only strings are parsed, and unparseable
    strings are left unchanged rather than aborting the whole batch.
    """
    if not isinstance(sections, list):
        return []
    for item in sections:
        media_info = item.get("media_info")
        # literal_eval (not eval) keeps this safe for untrusted DB content.
        if isinstance(media_info, str) and media_info:
            try:
                item['media_info'] = ast.literal_eval(media_info)
            except (ValueError, SyntaxError):
                # Malformed literal: keep the raw string for the caller to see.
                pass
    return sections


def get_into_es(row: dict) -> dict:
    """
    Map one DataFrame row (dict-like) to the Elasticsearch document layout.

    Nested objects (info type, source, apply code, area) are grouped under
    their own keys; missing fields become ``None`` (or ``[]`` for sections).
    """
    info_type = {
        "info_type_id": row.get("info_type_id"),
        "info_type_name": row.get("info_type_name"),
    }
    source_info = {
        "source_id": row.get("source_id"),
        "source_name": row.get("source_name"),
        "source_main_link": row.get("source_main_link"),
    }
    apply_info = {
        "apply_code": row.get("apply_code"),
        "code_name": row.get("code_name"),
    }
    area_info = {
        "area_id": row.get("area_id"),
        "area_name": row.get("area_name"),
    }

    return {
        "information_id": row.get("information_id"),
        "info_type": info_type,
        "source_info": source_info,
        "apply_info": apply_info,
        "area_info": area_info,
        "attachment": row.get("attachments"),
        "info_name": row.get("info_name"),
        "original_link": row.get("original_link"),
        "publish_date": row.get("publish_date"),
        "sections_text": row.get("section_text"),
        "sections": row.get("sections", []),
    }


# ----------------------------- ES 操作类 -----------------------------
class NsfcInfoListToEs(BaseElasticSearch):
    """Writes NSFC information documents into the ``information_index`` index."""

    index_name = "information_index"

    def create_index_from_mapping(self, mapping_filename: str = "nsfc_info.json", cover: bool = True) -> bool:
        """
        Create (or overwrite, when ``cover`` is True) the ES index from a
        JSON mapping file located next to the working directory.

        Returns True on success; False if the mapping file cannot be read
        or index creation is rejected.
        """
        mapping_path = f"./{mapping_filename}"
        try:
            with open(mapping_path, "r", encoding="utf-8") as f:
                mapping_info = json.load(f)
        except Exception:
            # Broad catch is deliberate: any read/parse failure aborts cleanly.
            logger.exception("读取 ES mapping 文件失败：%s", mapping_path)
            return False

        result = create_elastic_mapping(
            index_name=self.index_name,
            mapping_info=mapping_info,
            connect_sign="default",
            cover_sign=cover
        )

        if result.get("result"):
            logger.info("索引 %s 创建成功。", self.index_name)
            return True
        logger.error("索引创建失败：%s", result.get("msg"))
        return False

    def bulk_insert_to_es(self, nsfc_info_list: list) -> bool:
        """
        Index every document, using its ``information_id`` as the ES doc id.

        Failed writes are logged but do not stop the batch. Returns True
        when all documents were written (or the list was empty), else False.

        Bug fix: the original fell off the end of the loop and implicitly
        returned ``None`` despite the declared ``-> bool`` return type.
        """
        if not nsfc_info_list:
            logger.warning("没有要写入的文档。")
            return True

        all_ok = True
        for doc in nsfc_info_list:
            result = self.index_by_elastic(doc, doc["information_id"])
            if not result.get("result"):
                all_ok = False
                logger.error("写入 ES 失败：%s", result.get("msg"))
            else:
                logger.info("写入 ES 成功：%s", doc["information_id"])
        return all_ok


# ----------------------------- 查询与数据处理 -----------------------------
def fetch_nsfc_data():
    """
    Query NSFC data from MySQL and merge in sections and attachments.

    Returns a pandas DataFrame with one row per information item, carrying
    the joined dictionary-table columns plus derived columns:
    ``sections`` / ``attachments`` (lists of record dicts), ``section_text``
    (flattened plain text), fixed ``area_id``/``area_name``, and a
    normalized string ``publish_date``. Rows published before 2024-01-01
    or with no sections are dropped.
    """
    # Column selections for each table involved in the join.
    nsfc_info_fields = [
        NsfcInfoList.information_id, NsfcInfoList.info_type_id, NsfcInfoList.source_id,
        NsfcInfoList.province_id, NsfcInfoList.info_name, NsfcInfoList.apply_code,
        NsfcInfoList.original_link, NsfcInfoList.publish_date, NsfcInfoList.create_time
    ]  # main info table columns

    nsfc_section_fields = [
        NsfcInfoSectionList.information_id, NsfcInfoSectionList.section_attr, NsfcInfoSectionList.title_level,
        NsfcInfoSectionList.marc_code, NsfcInfoSectionList.src_text, NsfcInfoSectionList.dst_text,
        NsfcInfoSectionList.media_info
    ]  # section table columns
    nsfc_info_type_dict_fields = [NsfcInfoTypeDict.info_type_id, NsfcInfoTypeDict.info_type_name]  # info-type dict columns
    nsfc_publish_project_code_dict_fields = [NsfcPublishProjectCodeDict.apply_code,
                                             NsfcPublishProjectCodeDict.code_name]  # project-code dict columns
    nsfc_resource_source_dict_fields = [
        NsfcResourceSourceDict.source_id, NsfcResourceSourceDict.source_name, NsfcResourceSourceDict.source_main_link
    ]  # source dict columns
    nsfc_info_attachment_fields = [
        NsfcInfoAttachmentList.information_id, NsfcInfoAttachmentList.attachment_id,
        NsfcInfoAttachmentList.attachment_name, NsfcInfoAttachmentList.attachment_address,
        NsfcInfoAttachmentList.display_order
    ]  # attachment table columns

    # Main query: info list left-joined against the three dictionary tables.
    query = (
        NsfcInfoList
        .select(
            *nsfc_info_fields,
            *nsfc_info_type_dict_fields,
            *nsfc_publish_project_code_dict_fields,
            *nsfc_resource_source_dict_fields
        )
        # LEFT JOIN to the info-type dictionary table.
        .join(
            NsfcInfoTypeDict,
            JOIN.LEFT_OUTER,
            on=(NsfcInfoList.info_type_id == NsfcInfoTypeDict.info_type_id)
        )
        .switch(NsfcInfoList)

        # LEFT JOIN to the project-code dictionary table.
        # COLLATE on both sides avoids a mixed-collation comparison error
        # when the two columns use different utf8mb4 collations.
        .join(
            NsfcPublishProjectCodeDict,
            JOIN.LEFT_OUTER,
            on=(NsfcInfoList.apply_code.collate('utf8mb4_general_ci') ==
                NsfcPublishProjectCodeDict.apply_code.collate('utf8mb4_general_ci'))
        )
        .switch(NsfcInfoList)

        # LEFT JOIN to the resource-source dictionary table.
        .join(
            NsfcResourceSourceDict,
            JOIN.LEFT_OUTER,
            on=(NsfcInfoList.source_id == NsfcResourceSourceDict.source_id)
        )
        .dicts()
    )
    info_list = pd.DataFrame(query)
    # Separate queries for sections (kept in display order) and attachments.
    section_list = pd.DataFrame(
        NsfcInfoSectionList.select(*nsfc_section_fields)
        .order_by(NsfcInfoSectionList.information_id.asc(), NsfcInfoSectionList.section_order.asc())
        .dicts()
    )
    attachment_list = pd.DataFrame(
        NsfcInfoAttachmentList.select(*nsfc_info_attachment_fields).dicts()
    )

    # Group child rows per information_id into Series of record lists.
    # NOTE(review): ``include_groups=False`` requires pandas >= 2.2 — confirm
    # the deployed pandas version.
    section_list = section_list.groupby('information_id', group_keys=False).apply(
        lambda x: x.to_dict(orient='records'), include_groups=False
    )
    attachment_list = attachment_list.groupby('information_id', group_keys=False).apply(
        lambda x: x.to_dict(orient='records'), include_groups=False
    )

    # Attach child records to each main row; ids with no children map to NaN.
    info_list['sections'] = info_list['information_id'].map(section_list)
    info_list['attachments'] = info_list['information_id'].map(attachment_list)

    # Data cleaning and enrichment.
    info_list['sections'] = info_list['sections'].apply(get_section_translate)
    info_list['attachments'] = info_list['attachments'].apply(lambda x: x if isinstance(x, list) else [])
    info_list['code_name'] = info_list['code_name'].apply(lambda x: x if x else "其他")
    # Area is hard-coded to the nationwide value for NSFC data.
    info_list['area_id'] = "0"
    info_list['area_name'] = "全国"
    # Fall back to create_time when publish_date is missing.
    info_list['publish_date'] = info_list.apply(
        lambda x: x.get('publish_date') if x.get('publish_date') else x.get('create_time'), axis=1
    )
    info_list['publish_date'] = pd.to_datetime(info_list['publish_date']).dt.date
    info_list = info_list[info_list['publish_date'] >= datetime.date(2024, 1, 1)]# keep only items published from 2024 onward
    info_list['publish_date'] = info_list['publish_date'].apply(lambda x: x.strftime("%Y-%m-%d"))

    # NOTE(review): assumes source_name is a dict with a 'zh' key — confirm schema.
    info_list['source_name'] = info_list['source_name'].apply(lambda x: x.get('zh'))
    info_list['section_text'] = info_list['sections'].apply(get_section_text)
    # Drop rows whose sections ended up empty.
    info_list = info_list[info_list['sections'].apply(lambda x: bool(x) and len(x) > 0)]
    return info_list


# ----------------------------- 主流程 -----------------------------
def main():
    """Run the full pipeline: fetch rows, (re)create the index, bulk-write docs."""
    rows = fetch_nsfc_data()
    documents = rows.apply(get_into_es, axis=1).tolist()
    es_client = NsfcInfoListToEs()
    es_client.create_index_from_mapping()
    es_client.bulk_insert_to_es(documents)


if __name__ == "__main__":
    main()
