import random
import time as tt
from urllib.parse import urlparse
from typing import Any, Dict, List, Union
import uuid
from application.etl.write.base_batch_observer import BaseBatchObserver
from application.db import get_database_connection
from application.db.mysql_db.info.ResourceInformationAttachmentList import ResourceInformationAttachmentList
from application.db.mysql_db.info.ResourceInformationList import ResourceInformationList
from application.db.mysql_db.info.ResourceInformationSectionList import ResourceInformationSectionList
from application.db.mysql_db.info.ResourceInformationTagsRelation import ResourceInformationTagsRelation
from application.db.mysql_db.info.ResourceSourceDict import ResourceSourceDict


class KafkaToInfoWrite(BaseBatchObserver):
    """
    Batch observer that transforms incoming Kafka resource-information rows
    and bulk-inserts them into the four information tables (main record,
    tag relations, attachments, sections).
    """

    # Maps the incoming ``data_type`` field to the tag code stored in the
    # tags-relation table.
    tag_code = {
        "information_nsfc": "info_nsfc",
    }

    # NOTE: the decorator expression runs at class-definition time, so a DB
    # connection must be obtainable when this module is imported.
    @get_database_connection().atomic()  # run the whole batch in one transaction
    def process_batch(self, batch_data):
        """
        Convert each message dict in *batch_data* into row dicts and
        bulk-insert them.

        Each source row contributes one information record, one tag
        relation, zero or more attachments and zero or more sections.

        :param batch_data: list of Kafka message dicts; expected keys include
            ``uid``, ``name``, ``data``, ``metadata``, ``data_type``,
            ``tag_values`` and optionally ``link_data``.
        """
        print(f"\n[ResourceBatchObserver] 批量处理 {len(batch_data)} 行数据:")
        print("=" * 50)

        information_list = []
        information_tagging_relationships = []
        information_attachment = []
        information_section = []

        for value_dict in batch_data:
            data_dict = self.convert_to_dict(value_dict.get('data', {})) or {}
            metadata_dict = self.convert_to_dict(value_dict.get('metadata', {})) or {}
            uid = value_dict.get('uid')

            information_list.append({
                "information_id": uid,
                "information_name": {'zh': value_dict.get('name')},
                "information_description": {'zh': data_dict.get('description')},
                "original_link": metadata_dict.get('details_page'),
                "original_language": metadata_dict.get('marc_code'),
                "publish_date": data_dict.get('info_date'),
                "metadata": {"info_author": data_dict.get('info_author')},
                "source_id": self.find_source_id(metadata_dict.get('details_page')),
            })

            information_tagging_relationships.append({
                "information_id": uid,
                "tag_code": self.tag_code.get(value_dict.get('data_type')),
                "tag_value": value_dict.get('tag_values'),
            })

            # BUG FIX: the original indexed value_dict['link_data'] directly
            # (KeyError when the key is absent) and converted each link twice.
            for index, link in enumerate(value_dict.get('link_data') or []):
                link_dict = self.convert_to_dict(link) or {}
                information_attachment.append({
                    "information_id": uid,
                    "attachment_id": str(uuid.uuid4()),
                    "attachment_name": link_dict.get('accessory_name'),
                    "attachment_address": link_dict.get('attachment_address'),
                    "display_order": index + 1,
                })

            for index, item in enumerate(data_dict.get('info_section', []) or []):
                information_section.append({
                    "section_id": self._make_section_key(uid),
                    "information_id": uid,
                    "section_attr": item.get('section_attr'),
                    "section_order": index + 1,
                    "title_level": item.get('title_level', 0) or 0,
                    "marc_code": item.get("marc_code"),
                    "src_text": item.get("text_info"),
                    "dst_text": item.get("dst_text"),
                    "media_info": item.get("media_info"),
                    # HACK: despite the column name this is not an MD5 hash,
                    # just another time/random key — format kept for
                    # compatibility with existing rows.
                    "md5_encode": self._make_section_key(uid),
                })

        # Guard against empty row lists: insert_many([]) is wasteful and may
        # raise depending on the peewee version.
        if information_list:
            ResourceInformationList.insert_many(information_list).execute()
        if information_tagging_relationships:
            ResourceInformationTagsRelation.insert_many(information_tagging_relationships).execute()
        if information_attachment:
            ResourceInformationAttachmentList.insert_many(information_attachment).execute()
        if information_section:
            ResourceInformationSectionList.insert_many(information_section).execute()

    @staticmethod
    def _make_section_key(uid):
        """Build a quasi-unique key ``<uid>_<epoch-seconds>_<4-digit-random>``.

        NOTE(review): a second-granularity timestamp plus a 4-digit random
        number can collide within large same-uid batches — consider uuid4.
        """
        return f"{uid}_{int(tt.time())}_{random.randint(1000, 9999)}"

    @staticmethod
    def find_source_id(url):
        """
        Resolve the source_id for *url* by matching its network location
        against ResourceSourceDict.source_main_link.

        :param url: details-page URL; may be None or empty.
        :return: the matching source_id, or None when *url* is falsy or no
            source record matches (the original raised AttributeError on a
            missing record and TypeError on a None url).
        """
        if not url:
            return None
        domain = urlparse(url).netloc
        record = ResourceSourceDict.get_or_none(ResourceSourceDict.source_main_link == domain)
        return record.source_id if record is not None else None

    def convert_to_dict(self, obj):
        """
        Convert a Pathway Json object to a plain Python dict.

        BUG FIX: the original unconditionally called ``obj.as_dict()``, which
        crashed on the plain-dict ``{}`` defaults that process_batch passes
        and on None. Those inputs are now passed through / mapped to {}.
        """
        if obj is None:
            return {}
        if isinstance(obj, dict):
            return obj
        return obj.as_dict()
