# -*- coding: utf-8 -*-

import scrapy
import requests
import json
from datetime import datetime
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from ..base_parser import BasePageParser
from ...utils import ContentProcessor, FileUtils
from .common import update_media_urls, extract_current_page_menu
from fire_control_spider.config import DEBUG
# NOTE(review): module-level constant; not referenced anywhere in this file —
# presumably consumed by another module or left over. TODO confirm before removing.
categories = ['Home', 'About NFPA', 'Why NFPA Codes and Standards Matter']

# This parses the right-hand filter/listing pages
class CodeAndStandardListParser(BasePageParser):
    """Parser for NFPA listing/filter pages.

    Handles the "list of codes and standards" page plus several article
    listing pages (blogs, podcasts, news releases, journal articles,
    survivor stories).  The static page body is scraped with BeautifulSoup;
    the actual listing entries are fetched from NFPA's Coveo search API
    and stored on the yielded item under ``remark``.
    """

    def can_handle(self, response):
        """Return True when ``response.url`` contains one of the listing-page
        URL fragments this parser is responsible for."""
        url_fragments = (
            'codes-and-standards/list-of-codes-and-standards',
            'news-blogs-and-articles/NFPA-Blogs',
            'news-blogs-and-articles/Podcasts',
            'about-nfpa/press-room/news-releases',
            'news-blogs-and-articles/nfpa-journal/Journal-articles',
            'news-blogs-and-articles/Survivor-Stories',
        )
        return any(fragment in response.url for fragment in url_fragments)

    def parse(self, response):
        """Parse one listing page and yield a populated webpage item.

        Yields a single item with title, publish time (currently always
        empty — no selectors configured), the processed main body, and,
        when the Coveo API call succeeds, the listing data in ``remark``.
        """
        soup = BeautifulSoup(response.text, 'lxml')
        item = self.create_webpage_item(response)
        # NOTE(review): the breadcrumb category is hard-coded to the
        # codes-and-standards page even for blog/podcast/... URLs, and the
        # module-level ``categories`` constant is unused — confirm intended.
        item['category'] = ['Home', 'For Professionals', 'Codes and Standards', 'List of Codes and Standards']

        # Title: first selector that matches wins.
        title = ''
        for selector in ['h1']:
            title_elem = soup.select_one(selector)
            if title_elem:
                title = title_elem.get_text(strip=True)
                break
        item['title'] = title

        # Publish time: the selector list is empty for now, so this always
        # stays ''.  Add CSS selectors below once the pages expose a usable
        # date element.
        publish_time = ''
        for selector in []:
            time_elem = soup.select_one(selector)
            if time_elem:
                # Prefer the machine-readable datetime attribute if present.
                publish_time = time_elem.get('datetime') or time_elem.get_text(strip=True)
                break
        item['publish_time'] = publish_time

        # Main body: the last ``.container`` under ``#maincontent`` holds the
        # page content.  Guard against pages without it — the original code
        # indexed ``box_list[-1]`` unconditionally and could raise IndexError.
        box_list = soup.select('#maincontent > div.container')
        if box_list:
            content_elem = box_list[-1]
            # Process interleaved text/media content and collect media URLs.
            main_body, media_urls = ContentProcessor.process_content_with_media(
                str(content_elem), response.url, 'nfpa'
            )
            item['main_body'] = main_body
            update_media_urls(item, media_urls)

        # Map the URL to the Coveo search profile; anything unrecognised is
        # treated as the codes-and-standards listing ('standard').
        # ('postcast' is a historical typo kept for consistency with the
        # key_map in fetch_coveo_api.)
        fragment_to_page = {
            'news-blogs-and-articles/NFPA-Blogs': 'blog',
            'news-blogs-and-articles/Podcasts': 'postcast',
            'about-nfpa/press-room/news-releases': 'news_release',
            'news-blogs-and-articles/nfpa-journal/Journal-articles': 'journal',
            'news-blogs-and-articles/Survivor-Stories': 'survivor',
        }
        search_page = 'standard'
        for fragment, page in fragment_to_page.items():
            if fragment in response.url:
                search_page = page
                break

        # Fetch the listing entries from the Coveo API and attach them.
        api_data = self.fetch_coveo_api(search_page)
        if api_data:
            item['remark'] = api_data

        yield item

    def fetch_coveo_api(self, search_page='standard'):
        """Query NFPA's Coveo search API for listing data.

        Parameters
        ----------
        search_page : str
            One of ``'blog'``, ``'standard'``, ``'postcast'``,
            ``'news_release'``, ``'journal'``, ``'survivor'``; selects the
            Coveo search hub and the tag filter.  Unknown values fall back
            to ``'standard'``.

        Returns
        -------
        dict | None
            Parsed listing data (see :meth:`parse_coveo_response`), or
            ``None`` on any failure.
        """
        # Keep DEBUG runs small; production pulls up to 1000 results.
        page_size = 12 if DEBUG else 1000
        key_map = {
            'blog': {
                'hub': 'Prod-All_Search',
                'tagtype': 'Blogs',
            },
            'standard': {
                'hub': 'Prod-DocInfo_Search',
                'tagtype': 'Standards Development Process',
            },
            'postcast': {
                'hub': 'Prod-All_Search',
                'tagtype': 'Podcasts',
            },
            'news_release': {
                'hub': 'Prod-All_Search',
                'tagtype': 'News releases',
            },
            'journal': {
                'hub': 'Prod-All_Search',
                'tagtype': 'NFPA Journal',
            },
            'survivor': {
                'hub': 'Prod-All_Search',
                'tagtype': 'Human interest stories',
            }
        }
        # .get() with a fallback: the original key_map[search_page] could
        # raise KeyError outside the try block below for an unknown key.
        key_info = key_map.get(search_page, key_map['standard'])
        # Each content type filters on a different Coveo field.
        if search_page == 'journal':
            cq = f"@tagbrands==(\"{key_info['tagtype']}\")"
        elif search_page == 'survivor':
            cq = f"@subtopics==(\"{key_info['tagtype']}\")"
        else:
            cq = f"@tagtype==(\"{key_info['tagtype']}\")"
        try:
            url = 'https://platform.cloud.coveo.com/rest/search/v2?organizationId=nationalfireprotectionassociationnfpaproductionhl2h1ya2'

            # Browser-like headers captured from the NFPA site.
            # NOTE(review): the bearer token is hard-coded and may expire —
            # consider moving it to configuration.
            headers = {
                'accept': '*/*',
                'accept-language': 'zh-CN,zh;q=0.9',
                'authorization': 'Bearer xx82aace5d-d11b-4ddf-b834-f26464dea2f6',
                'content-type': 'application/json',
                'origin': 'https://www.nfpa.org',
                'priority': 'u=1, i',
                'referer': 'https://www.nfpa.org/',
                'sec-ch-ua': '"Google Chrome";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"macOS"',
                'sec-fetch-dest': 'empty',
                'sec-fetch-mode': 'cors',
                'sec-fetch-site': 'cross-site',
                'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36'
            }

            # Coveo Search API request body, captured from the site's own
            # search requests; only cq / searchHub / numberOfResults vary.
            data = {
                "locale": "en",
                "debug": False,
                "tab": "default",
                "referrer": "default",
                "timezone": "Asia/Shanghai",
                "visitorId": "f2d59551-93e1-4027-a10d-068f1c350ebf",
                "actionsHistory": [
                ],
                "aq": "@culture=",
                "cq": cq,
                "fieldsToInclude": [
                    "author", "language", "urihash", "objecttype", "collection", "source", "permanentid",
                    "date", "filetype", "parents", "ec_price", "ec_name", "ec_description", "ec_brand",
                    "ec_category", "ec_item_group_id", "ec_shortdesc", "ec_thumbnails", "ec_images",
                    "ec_promo_price", "ec_in_stock", "ec_rating", "codenumberprefiz120x", "cardbuttonlabel",
                    "publicationmainimage", "computedimageurl", "z95xtemplatename", "taglistingpage",
                    "z95xdisplayname", "tagproductcategories", "tagcustomersupport", "tagevents",
                    "tagbuyingstages", "tagwebpersonas", "tagcode", "tagcycle", "tagdevelopmentstage",
                    "tagtype", "tagvehiclemanufacturer", "tagbrands", "concepts", "topics", "subtopics",
                    "tags", "cardtez120xt", "CardText", "cardimage", "titleimageurl", "cardimageurl",
                    "computedtitle", "title", "culture", "cat_platform", "cat_condition", "cat_categories",
                    "cat_review_count", "cat_color", "productid", "publicationdate"
                ],
                "q": "",
                "enableQuerySyntax": False,
                "searchHub": key_info['hub'],
                "sortCriteria": "",
                "analytics": {},
                "facets": [
                    {
                        "delimitingCharacter": ">",
                        "filterFacetCount": True,
                        "injectionDepth": 1000,
                        "numberOfValues": 8,
                        "sortCriteria": "alphanumeric",
                        "type": "specific",
                        "currentValues": [
                            {"value": "Building and Life Safety", "state": "idle"},
                            {"value": "Electrical Safety", "state": "idle"},
                            {"value": "Emergency Response", "state": "idle"},
                            {"value": "Fire Protection Systems", "state": "idle"},
                            {"value": "Industrial and Chemical Hazards", "state": "idle"}
                        ],
                        "freezeCurrentValues": False,
                        "isFieldExpanded": False,
                        "preventAutoSelect": False,
                        "facetSearch": {"numberOfValues": 8},
                        "facetId": "a8e18935-f182-44cb-84d4-1de502c58d8d",
                        "field": "topics"
                    },
                    {
                        "delimitingCharacter": ">",
                        "filterFacetCount": True,
                        "injectionDepth": 1000,
                        "numberOfValues": 8,
                        "sortCriteria": "alphanumeric",
                        "type": "specific",
                        "currentValues": [
                            {"value": "Códigos aceptando aportes públicos", "state": "idle"},
                            {"value": "Standards accepting public input", "state": "idle"}
                        ],
                        "freezeCurrentValues": False,
                        "isFieldExpanded": False,
                        "preventAutoSelect": False,
                        "facetSearch": {"numberOfValues": 8},
                        "facetId": "1e30da07-fd03-4316-a203-2626fffb7d25",
                        "field": "tagdevelopmentstage"
                    },
                    {
                        "delimitingCharacter": ">",
                        "filterFacetCount": True,
                        "injectionDepth": 1000,
                        "numberOfValues": 8,
                        "sortCriteria": "occurrences",
                        "type": "specific",
                        "currentValues": [
                            {"value": "TBD", "state": "idle"},
                            {"value": "Fall 2026", "state": "idle"},
                            {"value": "Annual 2026", "state": "idle"},
                            {"value": "Annual 2027", "state": "idle"},
                            {"value": "Fall 2027", "state": "idle"},
                            {"value": "Fall 2025", "state": "idle"},
                            {"value": "Annual 2028", "state": "idle"},
                            {"value": "Custom ERRS A2027", "state": "idle"}
                        ],
                        "freezeCurrentValues": False,
                        "isFieldExpanded": False,
                        "preventAutoSelect": False,
                        "facetSearch": {"numberOfValues": 8},
                        "facetId": "962cd569-85ce-44a5-8061-c5425b8caaaf",
                        "field": "tagcycle"
                    }
                ],
                "numberOfResults": page_size,
                "firstResult": 0,
                "facetOptions": {"freezeFacetOrder": False},
                "groupBy": [{"field": "@commontab", "queryOverride": "", "constantQueryOverride": "@uri", "advancedQueryOverride": "@uri"}]
            }
            response = requests.post(url, headers=headers, json=data, timeout=30)

            if response.status_code == 200:
                return self.parse_coveo_response(response.json())
            self.logger.error(f"Coveo API 请求失败: {response.status_code}")
            return None

        except Exception as e:
            # Best-effort: the item is still yielded without remark data.
            self.logger.error(f"调用 Coveo API 异常: {e}")
            return None

    def parse_coveo_response(self, response_data):
        """Parse a Coveo API response body.

        Parameters
        ----------
        response_data : dict
            Decoded JSON response from the Coveo search endpoint.

        Returns
        -------
        dict | None
            ``{'page_data': [...], 'topics': [...], 'subtopics': [...]}``
            where ``page_data`` holds one entry per result and the topic
            lists are deduplicated across all results; ``None`` on error.
        """
        try:
            results = response_data.get('results', [])
            self.logger.info(f"results: {len(results)}")
            parsed_data = []

            # Accumulate the union of topics/subtopics over all results.
            all_topics = set()
            all_subtopics = set()

            for result in results:
                # Basic fields live at the top level of each result.
                title = result.get('title', '')
                click_uri = result.get('clickUri', '')

                # Everything else comes from the 'raw' sub-document.
                raw = result.get('raw', {})
                tagcode = raw.get('tagcode', [])
                date = raw.get('date', 0)
                topics = raw.get('topics', [])
                subtopics = raw.get('subtopics', [])

                # Strip whitespace and drop empty strings before collecting.
                if topics:
                    topics = [topic.strip() for topic in topics if topic.strip()]
                    all_topics.update(topics)
                if subtopics:
                    subtopics = [subtopic.strip() for subtopic in subtopics if subtopic.strip()]
                    all_subtopics.update(subtopics)

                # Convert the epoch-millisecond date to a readable string.
                formatted_date = self.format_timestamp(date)

                parsed_data.append({
                    'tagcode': tagcode,
                    'title': title,
                    'url': click_uri,
                    'date': formatted_date,
                    'topics': topics,
                    'subtopics': subtopics,
                })

            # Final payload stored on the item's 'remark' field.
            remark_data = {
                'page_data': parsed_data,
                'topics': list(all_topics),
                'subtopics': list(all_subtopics),
            }

            return remark_data

        except Exception as e:
            self.logger.error(f"解析 Coveo 响应数据异常: {e}")
            return None

    def format_timestamp(self, timestamp):
        """Format an epoch-millisecond timestamp as 'YYYY-MM-DD HH:MM:SS'.

        Returns '' for missing/zero/negative or unparseable values.
        Coveo's ``date`` field is assumed to be in milliseconds — TODO
        confirm against live API responses.
        """
        try:
            if timestamp and timestamp > 0:
                dt = datetime.fromtimestamp(timestamp / 1000)
                return dt.strftime('%Y-%m-%d %H:%M:%S')
            return ''
        except Exception as e:
            self.logger.error(f"格式化时间戳异常: {e}")
            return ''