# -*- coding: utf-8 -*-
"""
数据解析模块
负责解析HTML页面并提取结构化数据
"""

import json
import re
import logging
from typing import Dict, List, Any, Optional
from lxml import etree

logger = logging.getLogger(__name__)


class HouzzDataParser:
    """Houzz数据解析器"""
    
    def __init__(self, html_content: str, url: str):
        """初始化解析器"""
        self.url = url
        self.html_tree = etree.HTML(html_content)
        logger.debug(f"初始化数据解析器: {url}")
    
    @staticmethod
    def _extract_text(elements: List[Any]) -> str:
        """提取文本内容并清理空白字符"""
        if not elements:
            return ""
        return re.sub(r'\s+', ' ', " ".join(map(str, elements))).strip()
    
    @staticmethod
    def _extract_first(elements: List[Any]) -> Optional[str]:
        """提取第一个元素"""
        return elements[0].strip() if elements else None
    
    @staticmethod
    def _extract_number_from_text(text: str) -> Optional[int]:
        """从文本中提取数字"""
        if not text:
            return None
        # 处理带逗号的数字，如 "1,234"
        match = re.search(r'[\d,]+', str(text))
        return int(match.group(0).replace(',', '')) if match else None
    
    def _extract_number_from_list(self, text_list: List[str], target_field: str) -> Optional[int]:
        """从文本列表中提取特定字段的数字"""
        pattern = re.compile(rf'^\s*(\d+)\s+{target_field}\s*$', re.IGNORECASE)
        for text in text_list:
            clean_text = str(text).strip()
            if not clean_text:
                continue
            match = pattern.match(clean_text)
            if match:
                return int(match.group(1))
        return None
    
    def get_awards_and_recognition(self) -> str:
        """提取奖项和认可信息"""
        extracted = self.html_tree.xpath('//div[@data-testid="standouts-container"]//text()')
        targets = ["Houzz Pro Certified", "Best of Houzz winner", "Award winning", "PowerHouzzer Winner"]
        matched = {award for text in extracted if text.strip() for award in targets if award in text}
        return ", ".join(sorted(list(matched)))
    
    def get_json_ld_data(self) -> Optional[Dict[str, Any]]:
        """提取JSON-LD结构化数据"""
        scripts = self.html_tree.xpath('//script[@type="application/ld+json"]/text()')
        for content in scripts:
            try:
                data = json.loads(content)
                if isinstance(data, list) and data and "ProfessionalService" in data[0].get("@type", ""):
                    return data[0]
                if isinstance(data, dict) and "ProfessionalService" in data.get("@type", ""):
                    return data
            except (json.JSONDecodeError, IndexError):
                continue
        return None
    
    def parse_list_page(self) -> List[str]:
        """解析列表页，提取详情页URLs"""
        try:
            profile_urls = self.html_tree.xpath('//ul[contains(@class, "hz-pro-search-results")]//li/a/@href')
            logger.info(f"从列表页提取到 {len(profile_urls)} 个详情页URL")
            return profile_urls
        except Exception as e:
            logger.error(f"解析列表页失败: {e}")
            return []
    
    def parse_detail_page(self) -> Dict[str, Any]:
        """解析详情页，提取结构化数据"""
        try:
            json_data = self.get_json_ld_data() or {}
            pro_user_id = None
            
            # 提取pro_user_id
            info_str = self._extract_first(self.html_tree.xpath('//header[@data-extra-info]/@data-extra-info'))
            if info_str:
                try:
                    pro_user_id = json.loads(info_str).get('pro_user_id')
                except json.JSONDecodeError:
                    logger.warning(f"无法解析pro_user_id JSON: {self.url}")
            
            # 构建数据字典
            data = {
                "pro_user_id": pro_user_id,
                "company_name": self._extract_first(self.html_tree.xpath('//h1/text()')),
                "company_profile_url": self.url,
                "category_name": None,  # 将在详情页爬虫中设置
                "category_url": None,   # 将在详情页爬虫中设置
                "category_page": None,   # 将在详情页爬虫中设置
                "average_rating": self._extract_text(
                    self.html_tree.xpath('//span[@class="hz-star-rate__rating-number"]//text()')
                ),
                "number_of_reviews": self._extract_number_from_text(
                    self._extract_text(self.html_tree.xpath('//span[@class="hz-star-rate__review-string"]//text()'))
                ) or 0,
                "location": self._extract_text(
                    self.html_tree.xpath('//h3[contains(text(),"Address")]/following-sibling::div[1]//text()')
                ),
                "company_description": json_data.get("description"),
                "company_logo": self._extract_first(
                    self.html_tree.xpath('//header[@data-container="Basic Pro Info"]//img/@src')
                ),
                "verified_license": "yes" if self.html_tree.xpath('.//span[contains(text(),"Verified License")]') else "no",
                "awards_and_recognition": self.get_awards_and_recognition(),
                "phone_number": self._extract_text(
                    self.html_tree.xpath('//h3[contains(text(),"Phone Number")]/following-sibling::p[1]//text()')
                ),
                "website_url": self._extract_first(
                    self.html_tree.xpath('//h3[contains(text(),"Website")]/following-sibling::p[1]/a/@href')
                ),
                "address": self._extract_text(
                    self.html_tree.xpath('//h3[contains(text(),"Address")]/following-sibling::div[1]//text()')
                ),
                "typical_job_cost_usd": self._extract_text(
                    self.html_tree.xpath('//h3[contains(text(),"Typical Job Cost")]/following-sibling::p[1]//text()')
                ),
                "license_number": self._extract_text(
                    self.html_tree.xpath('//h3[contains(text(),"License Number")]/following-sibling::p[1]//text()')
                ),
                "followers": self._extract_number_from_text(self._extract_text(
                    self.html_tree.xpath('//h3[contains(text(),"Followers")]/following-sibling::div[1]//p//text()')
                )) or 0,
                "social_media_links": ';'.join(
                    self.html_tree.xpath('//h3[contains(text(),"Socials")]/following-sibling::p[1]//a/@href')
                ) or "",
                "services_provided": self._extract_text([item.get("name", "") for item in
                                                       json_data.get("hasOfferCatalog", {}).get("itemListElement", [])]) if json_data else None,
                "areas_served": self._extract_text(
                    [area.get("name", "") for area in json_data.get("areaServed", [])]
                ) if json_data else None,
                "projects": self._extract_number_from_list(
                    self.html_tree.xpath('//*[contains(text(),"Projects")]/text()'), "Projects"
                ) or 0,
                "videos": self._extract_number_from_list(
                    self.html_tree.xpath('//button[contains(text(),"Videos")]/text()'), "Videos"
                ) or 0,
                "houzz_awards": self._extract_number_from_text(
                    self._extract_text(self.html_tree.xpath('//div[contains(text(),"Houzz Awards")]/text()'))
                ) or 0,
                "houzz_badges": self._extract_number_from_text(
                    self._extract_text(self.html_tree.xpath('//div[contains(text(),"Houzz Badges")]/text()'))
                ) or 0,
                "affiliations": ";".join(self.html_tree.xpath(
                    '//div[contains(text(),"Affiliation")]/following-sibling::div//a/img/@title'
                )) or "",
            }
            
            # 清理空字符串
            for key, value in data.items():
                if value == '':
                    data[key] = None
            
            logger.info(f"成功解析详情页数据: {self.url}")
            return data
            
        except Exception as e:
            logger.error(f"解析详情页失败 {self.url}: {e}")
            return {}
    
    def parse_api_data(self, api_response: str, api_type: str) -> Optional[int]:
        """解析API响应数据"""
        try:
            json_data = json.loads(api_response)
            
            if api_type == "projects":
                projects = json_data.get("ctx", {}).get("data", {}).get("stores", {}).get("data", {}).get("ProjectStore", {}).get('data', {})
                return len(projects)
            elif api_type == "videos":
                videos_data = json_data.get("ctx", {}).get("data", {}).get("stores", {}).get("data", {}).get("SpaceStore", {}).get("data", {})
                return len(videos_data)
            
            return None
        except (json.JSONDecodeError, KeyError) as e:
            logger.error(f"解析API数据失败 {api_type}: {e}")
            return None


class DataValidator:
    """Validation and cleanup helpers for scraped profile records."""

    # Fields that must be present and truthy for a record to be usable.
    _REQUIRED_FIELDS = ('company_name', 'company_profile_url')

    @staticmethod
    def validate_detail_data(data: Dict[str, Any]) -> bool:
        """Return True when every required field carries a truthy value."""
        return all(data.get(field) for field in DataValidator._REQUIRED_FIELDS)

    @staticmethod
    def clean_data(data: Dict[str, Any]) -> Dict[str, Any]:
        """Return a copy of *data* with blank values mapped to None and strings stripped."""
        def _normalize(value: Any) -> Any:
            # Treat None and the empty string uniformly as "no value".
            if value is None or value == '':
                return None
            return value.strip() if isinstance(value, str) else value

        return {field: _normalize(raw) for field, raw in data.items()}

    @staticmethod
    def extract_category_from_url(url: str) -> str:
        """Pull the category segment out of a profile URL, or "unknown" on failure."""
        try:
            segments = url.split('/')
        except Exception:
            # Anything that is not a string ends up here.
            return "unknown"
        # Path layout: scheme // host / section / <category> / ...
        return segments[4] if len(segments) > 4 else "unknown"
