"""
数据解析器模块
Data Parser Module
"""

from bs4 import BeautifulSoup
from typing import List, Dict, Any, Optional
import re
import json
import logging
from datetime import datetime
from .models import PropertyListing


class DataParser:
    """Parse listing HTML embedded in API responses into PropertyListing objects.

    Workflow: pull an HTML fragment out of a (possibly nested) JSON response,
    iterate its ``<li>`` items, extract the fields declared in
    ``FIELD_SELECTORS``, clean and normalize the values, and run a quality
    check before accepting each listing. Success/error counters are kept for
    reporting via :meth:`get_parsing_stats`.
    """

    # CSS selectors for each field extracted from a listing <li> element.
    # NOTE(review): these are site-specific; verify against the live markup
    # if extraction starts returning empty fields.
    FIELD_SELECTORS = {
        'title': '.title',
        'detail_url': 'a[href]',
        'publish_date': '.issue_time',
        'district': '.item2 span:nth-child(1) span',
        'rent_range': '.item2 span:nth-child(2) span',
        'area_range': '.item3 span:nth-child(1) span',
        'transfer_fee': '.item3 span:nth-child(2) span',
        'industry': '.item4 span span'
    }

    def __init__(self):
        """Initialize with a module-level logger and zeroed statistics."""
        self.logger = logging.getLogger(__name__)
        self.parsed_count = 0   # listings that passed parsing + quality check
        self.error_count = 0    # items or responses that failed to parse

    def parse_response(self, response_data: Dict[str, Any], city: str,
                       business_type: str) -> List["PropertyListing"]:
        """Parse one API response into a list of listings.

        Args:
            response_data: JSON-decoded API response in any of the known shapes.
            city: City code, used to build absolute detail URLs.
            business_type: Business category tag attached to every listing.

        Returns:
            Parsed listings; an empty list when no usable HTML was found or
            parsing failed entirely (the error is logged, never raised).
        """
        try:
            html_content = self._extract_html_from_response(response_data)
            if not html_content:
                self.logger.warning("No HTML content found in response")
                return []

            listings = self._parse_html_content(html_content, city, business_type)
            self.logger.info("Successfully parsed %d listings", len(listings))
            return listings

        except Exception as e:
            # Boundary handler: one bad response must not kill the crawl loop.
            self.logger.error("Failed to parse response: %s", e)
            self.error_count += 1
            return []

    def _extract_html_from_response(self, response_data: Dict[str, Any]) -> str:
        """Locate the HTML fragment inside a response of any known shape.

        Handles: a top-level ``html`` field, ``data.html``, ``data`` as a
        JSON-encoded string containing ``html``, and fallback field names
        (``content``/``body``/``result``).

        Returns:
            The HTML string, or "" when no candidate field was found.
        """
        # Shape 1: {"html": "..."}
        if 'html' in response_data:
            return response_data['html']

        # Shape 2: {"data": {"html": "..."}} or {"data": "<json string>"}
        if 'data' in response_data:
            data = response_data['data']
            if isinstance(data, dict) and 'html' in data:
                return data['html']
            elif isinstance(data, str):
                try:
                    parsed_data = json.loads(data)
                    if isinstance(parsed_data, dict) and 'html' in parsed_data:
                        return parsed_data['html']
                except json.JSONDecodeError:
                    pass  # not JSON; fall through to the generic field names

        # Shape 3: alternate top-level field names.
        for field in ['content', 'body', 'result']:
            if field in response_data:
                content = response_data[field]
                if isinstance(content, str):
                    return content
                elif isinstance(content, dict) and 'html' in content:
                    return content['html']

        return ""

    def _parse_html_content(self, html_content: str, city: str,
                            business_type: str) -> List["PropertyListing"]:
        """Parse an HTML fragment into validated PropertyListing objects.

        Items that fail to parse or fail the quality check are logged and
        skipped; they never abort the whole batch.
        """
        if not html_content:
            return []

        try:
            soup = BeautifulSoup(html_content, 'html.parser')
            listings = []

            list_items = soup.find_all('li')
            self.logger.debug("Found %d list items in HTML", len(list_items))

            for i, item in enumerate(list_items):
                try:
                    listing_data = self._extract_listing_data(item, city, business_type)
                    if listing_data:
                        property_listing = PropertyListing(**listing_data)

                        if self._validate_listing_quality(property_listing):
                            listings.append(property_listing)
                            self.parsed_count += 1
                        else:
                            self.logger.warning(
                                "Listing %d failed quality check: %s",
                                i + 1, property_listing.title[:50])
                    else:
                        self.logger.debug("Skipped empty listing data for item %d", i + 1)

                except Exception as e:
                    # Keep going: one malformed item must not drop the batch.
                    self.logger.warning("Failed to parse listing item %d: %s", i + 1, e)
                    self.error_count += 1
                    continue

            return listings

        except Exception as e:
            self.logger.error("Failed to parse HTML content: %s", e)
            self.error_count += 1
            return []

    def _extract_listing_data(self, item, city: str, business_type: str) -> Optional[Dict[str, Any]]:
        """Extract one listing's fields from a ``<li>`` element.

        Args:
            item: BeautifulSoup Tag for the listing item.
            city: City code, stored on the listing and used for URL building.
            business_type: Business category tag stored on the listing.

        Returns:
            A field dict ready for ``PropertyListing(**data)``, or ``None``
            when the item lacks a title or detail URL.
            (FIX: annotation now reflects the ``None`` return.)
        """
        data = {
            'city': city,
            'business_type': business_type,
            'created_at': datetime.now().isoformat()
        }

        # Title and detail link come first: they are mandatory, and the link
        # also seeds the listing's unique ID.
        title_element = item.select_one(self.FIELD_SELECTORS['title'])
        if title_element:
            data['title'] = self._clean_text(title_element.get_text())

        link_element = item.select_one(self.FIELD_SELECTORS['detail_url'])
        if link_element:
            href = link_element.get('href', '')
            data['detail_url'] = self._convert_to_absolute_url(href, city)
            data['id'] = self._generate_id(data['detail_url'])

        # Remaining fields are extracted uniformly via the selector map.
        for field, selector in self.FIELD_SELECTORS.items():
            if field in ['title', 'detail_url']:
                continue

            element = item.select_one(selector)
            if element:
                data[field] = self._clean_text(element.get_text())

        self._clean_and_format_data(data)

        return data if data.get('title') and data.get('detail_url') else None

    def _convert_to_absolute_url(self, href: str, city: str) -> str:
        """Turn a (possibly relative) href into an absolute city-subdomain URL."""
        if not href:
            return ""

        # Already absolute: pass through unchanged.
        if href.startswith(('http://', 'https://')):
            return href

        # FIX: the original's "/..."-prefixed and bare-path branches computed
        # the same result; merged into one, which also collapses any leading
        # slashes so "//path" cannot produce a double slash.
        return f"https://{city}.puxiansheng.com/" + href.lstrip('/')

    def _clean_text(self, text: str) -> str:
        """Collapse every run of whitespace to a single space and trim the ends."""
        if not text:
            return ""
        return re.sub(r'\s+', ' ', text.strip())

    def _generate_id(self, detail_url: str) -> str:
        """Derive a stable 16-hex-char ID from the detail URL ("" for empty input)."""
        if not detail_url:
            return ""

        # MD5 is acceptable here: the hash is a dedup key, not a security token.
        import hashlib
        return hashlib.md5(detail_url.encode()).hexdigest()[:16]

    def _clean_and_format_data(self, data: Dict[str, Any]) -> None:
        """Normalize price, area, and date fields of ``data`` in place."""
        if 'rent_range' in data:
            data['rent_range'] = self._clean_price_range(data['rent_range'])

        if 'area_range' in data:
            data['area_range'] = self._clean_area_range(data['area_range'])

        if 'transfer_fee' in data:
            data['transfer_fee'] = self._clean_price_range(data['transfer_fee'])

        if 'publish_date' in data:
            data['publish_date'] = self._format_date(data['publish_date'])

    def _clean_price_range(self, price_text: str) -> str:
        """Strip everything except digits, '.', '-', and the expected CJK price units."""
        if not price_text:
            return ""
        return re.sub(r'[^\d\.\-万元/月平米㎡面议]', '', price_text)

    def _clean_area_range(self, area_text: str) -> str:
        """Strip everything except digits, '.', '-', and area units."""
        if not area_text:
            return ""
        return re.sub(r'[^\d\.\-平米㎡]', '', area_text)

    def _format_date(self, date_text: str) -> str:
        """Normalize a publish-date string to ``YYYY-MM-DD`` where possible.

        Handles relative Chinese dates (today / yesterday / day before
        yesterday) and several absolute formats; unrecognized text is
        returned cleaned but otherwise unchanged.
        """
        if not date_text:
            return ""

        # FIX: single import replaces the duplicated branch-local imports.
        from datetime import timedelta

        cleaned = self._clean_text(date_text)

        # Relative dates: keyword -> days in the past (checked in the same
        # priority order as the original if/elif chain).
        for keyword, days_ago in (('今天', 0), ('昨天', 1), ('前天', 2)):
            if keyword in cleaned:
                return (datetime.now() - timedelta(days=days_ago)).strftime('%Y-%m-%d')

        # Absolute formats, zero-padded to ISO; month-day-only forms assume
        # the current year.
        date_patterns = [
            (r'(\d{4})-(\d{1,2})-(\d{1,2})', lambda m: f"{m.group(1)}-{m.group(2).zfill(2)}-{m.group(3).zfill(2)}"),
            (r'(\d{4})年(\d{1,2})月(\d{1,2})日', lambda m: f"{m.group(1)}-{m.group(2).zfill(2)}-{m.group(3).zfill(2)}"),
            (r'(\d{1,2})-(\d{1,2})', lambda m: f"{datetime.now().year}-{m.group(1).zfill(2)}-{m.group(2).zfill(2)}"),
            (r'(\d{1,2})月(\d{1,2})日', lambda m: f"{datetime.now().year}-{m.group(1).zfill(2)}-{m.group(2).zfill(2)}"),
        ]

        for pattern, formatter in date_patterns:
            match = re.search(pattern, cleaned)
            if match:
                try:
                    return formatter(match)
                except Exception:  # FIX: narrowed from a bare except
                    continue

        return cleaned

    def _validate_listing_quality(self, listing: "PropertyListing") -> bool:
        """Apply model-level and parser-level quality checks to a listing.

        Args:
            listing: The candidate PropertyListing.

        Returns:
            True when every check passes; False otherwise (the failing check
            is logged at debug level).
        """
        try:
            # The model's own validation runs first.
            if not listing.is_valid_data_quality():
                return False

            # The title must carry real content.
            if len(listing.title.strip()) < 3:
                self.logger.debug("Title too short: %s", listing.title)
                return False

            # The detail URL must be absolute (see _convert_to_absolute_url).
            if not listing.detail_url or not listing.detail_url.startswith(('http://', 'https://')):
                self.logger.debug("Invalid URL: %s", listing.detail_url)
                return False

            # Core descriptive fields must be non-empty.
            required_fields = ['district', 'rent_range', 'area_range', 'industry']
            for field in required_fields:
                value = getattr(listing, field, '')
                if not value or value.strip() == '':
                    self.logger.debug("Missing required field: %s", field)
                    return False

            return True

        except Exception as e:
            self.logger.warning("Error validating listing quality: %s", e)
            return False

    def get_parsing_stats(self) -> Dict[str, int]:
        """Return cumulative parsing statistics.

        Returns:
            Dict with ``parsed_count``, ``error_count``, and ``success_rate``
            (a percentage rounded to 2 decimals; 0 when nothing was processed).
        """
        total = max(self.parsed_count + self.error_count, 1)
        return {
            'parsed_count': self.parsed_count,
            'error_count': self.error_count,
            'success_rate': round(self.parsed_count / total * 100, 2)
        }

    def reset_stats(self) -> None:
        """Reset the success/error counters to zero."""
        self.parsed_count = 0
        self.error_count = 0