# -*- coding: utf-8 -*-

import hashlib
import os
import re
import uuid
import mimetypes
from pathlib import Path
from urllib.parse import urljoin, urlparse, unquote
from typing import Optional, Tuple, List
import chardet
from bs4 import BeautifulSoup


class FileUtils:
    """File-handling utilities for the crawler (hashing, paths, directories)."""

    @staticmethod
    def get_sha256(url: str) -> str:
        """
        Compute the SHA256 hash of a URL.

        Args:
            url: The raw URL; no transformation is applied other than
                stripping leading/trailing whitespace.

        Returns:
            The SHA256 digest as a lowercase hexadecimal string.
        """
        # Coerce to str and strip surrounding whitespace before hashing.
        clean_url = str(url).strip()
        # hexdigest() is already lowercase; .lower() kept as a belt-and-braces guard.
        return hashlib.sha256(clean_url.encode('utf-8')).hexdigest().lower()

    @staticmethod
    def get_file_extension(url: str, content_type: Optional[str] = None) -> str:
        """
        Determine a file extension for a URL.

        Args:
            url: The file URL.
            content_type: Optional HTTP Content-Type header value.

        Returns:
            The extension including the leading dot (lowercase), or '' if
            neither the URL path nor the content type yields one.
        """
        # First try to take the extension straight from the (decoded) URL path.
        parsed_url = urlparse(url)
        path = unquote(parsed_url.path)
        _, ext = os.path.splitext(path)

        if ext:
            return ext.lower()

        # Fall back to guessing from the Content-Type header; strip any
        # parameters such as "; charset=utf-8" before the lookup.
        if content_type:
            ext = mimetypes.guess_extension(content_type.split(';')[0].strip())
            if ext:
                return ext.lower()

        return ''

    @staticmethod
    def get_media_path(sha256_hash: str, file_type: str, extension: str) -> str:
        """
        Build the relative storage path for a media file.

        Layout: "<file_type>/<first two hash chars>/<hash><extension>".
        Sharding by the first two hex characters keeps directories small.

        Args:
            sha256_hash: SHA256 digest of the media URL.
            file_type: Media category (image/video/audio).
            extension: File extension including the leading dot.

        Returns:
            The relative path for the file.
        """
        # Use the first two hash characters as a shard subdirectory.
        subdir = sha256_hash[:2]
        filename = sha256_hash + extension
        # BUG FIX: previously returned a literal "(unknown)" instead of the
        # computed filename, so every media path pointed at the same name.
        return f"{file_type}/{subdir}/{filename}"

    @staticmethod
    def create_directories(base_path: Path, site_name: str) -> Path:
        """Create the per-site directory tree under base_path and return the site dir."""
        site_dir = base_path / site_name
        dirs_to_create = [
            site_dir,
            site_dir / "image",
            site_dir / "video", 
            site_dir / "audio",
            site_dir / "main_file",
            site_dir / "attachment_file",
            site_dir / "image_urls",
            site_dir / "video_urls",
            site_dir / "jsonl"
        ]

        # parents=True + exist_ok=True makes repeated calls idempotent.
        for directory in dirs_to_create:
            directory.mkdir(parents=True, exist_ok=True)

        return site_dir


class ContentProcessor:
    """Content-processing utilities: media extraction, text cleanup, metadata."""

    @staticmethod
    def extract_media_urls(html: str, base_url: str) -> dict:
        """
        Extract media file URLs from HTML.

        Args:
            html: The HTML content.
            base_url: Base URL used to resolve relative links.

        Returns:
            Dict with 'images', 'videos', 'audios' (lists of absolute URLs)
            and 'main_files', 'attachment_files' (lists of file-info dicts
            with 'name', 'path', 'full_url' keys).
        """
        soup = BeautifulSoup(html, 'lxml')

        result = {
            'images': [],
            'videos': [],
            'audios': [],
            'main_files': [],
            'attachment_files': []
        }

        # Images: also honor common lazy-load attributes.
        for img in soup.find_all(['img', 'image']):
            src = img.get('src') or img.get('data-src') or img.get('data-original')
            if src:
                full_url = urljoin(base_url, src)
                result['images'].append(full_url)

        # Videos from <video>/<source> tags.
        for video in soup.find_all(['video', 'source']):
            src = video.get('src')
            if src:
                full_url = urljoin(base_url, src)
                result['videos'].append(full_url)

        # Embedded players in iframes.
        for iframe in soup.find_all('iframe'):
            src = iframe.get('src')
            if src and 'youtube.com/embed/' in src:
                # Normalize a YouTube embed to its canonical watch URL.
                video_id = src.split('/embed/')[-1].split('?')[0]
                youtube_url = f"https://www.youtube.com/watch?v={video_id}"
                result['videos'].append(youtube_url)
            elif src and 'brightcove.net' in src:
                # Brightcove embeds are kept as-is.
                result['videos'].append(src)

        # Audio: accept either an audio MIME type or a known audio extension.
        for audio in soup.find_all(['audio', 'source']):
            src = audio.get('src')
            if src and ('audio' in audio.get('type', '') or 
                       any(ext in src.lower() for ext in ['.mp3', '.wav', '.ogg', '.m4a'])):
                full_url = urljoin(base_url, src)
                result['audios'].append(full_url)

        # Downloadable documents linked via <a href>.
        for link in soup.find_all('a', href=True):
            href = link['href']
            file_exts = ('.pdf', '.docx', '.doc', '.xls', '.xlsx', '.txt', '.pptx', '.ppt')
            if href.lower().endswith(file_exts):
                # Prefer the link text as display name, else the URL basename.
                link_text = link.get_text(strip=True)
                file_name = link_text if link_text else href.split('/')[-1]

                # Hash the raw (possibly relative) href, mirroring
                # FileUtils.get_sha256's strip+utf-8 behavior.
                # NOTE(review): this hashes href, while image/video paths hash
                # the resolved full_url — verify the downloader uses the same
                # key, otherwise stored paths won't match.
                clean_href = str(href).strip()
                file_hash = hashlib.sha256(clean_href.encode('utf-8')).hexdigest().lower()

                # Extension from the last dot segment of the href.
                file_extension = '.' + href.split('.')[-1].lower()

                full_url = urljoin(base_url, href)
                # Classify as attachment when the link text contains a known
                # attachment keyword (Chinese or English).
                attachment_keywords = ['附件', '附录', '附表', 'attachment', 'appendix', 'annex', 'supplement']
                is_attachment = any(kw.lower() in link_text.lower() for kw in attachment_keywords)
                file_info = {
                    "name": file_name,
                    "path": f"{'attachment_file' if is_attachment else 'main_file'}/{file_hash}{file_extension}",
                    "full_url": full_url
                }
                if is_attachment:
                    result['attachment_files'].append(file_info)
                else:
                    result['main_files'].append(file_info)

        # Deduplicate plain URL lists; dict.fromkeys keeps first-seen order
        # (a plain set() would make the ordering nondeterministic).
        for key in ['images', 'videos', 'audios']:
            result[key] = list(dict.fromkeys(result[key]))

        # Deduplicate file-info dicts by their resolved full_url.
        for key in ['main_files', 'attachment_files']:
            seen_urls = set()
            unique_files = []
            for file_info in result[key]:
                if file_info['full_url'] not in seen_urls:
                    seen_urls.add(file_info['full_url'])
                    unique_files.append(file_info)
            result[key] = unique_files

        return result

    @staticmethod
    def process_content_with_media(html: str, base_url: str, site_name: str) -> Tuple[str, dict]:
        """
        Process mixed text/media content, replacing images and videos with
        special placeholder markers.

        Args:
            html: The HTML content.
            base_url: Base URL used to resolve relative links.
            site_name: Site name (currently unused in the body).

        Returns:
            (processed plain text, media URLs dict from extract_media_urls)
        """
        soup = BeautifulSoup(html, 'lxml')
        media_urls = ContentProcessor.extract_media_urls(html, base_url)

        # Replace image tags with <$<img>$>hash.ext<$<\img>$> markers.
        for img in soup.find_all(['img', 'image']):
            src = img.get('src') or img.get('data-src') or img.get('data-original')
            if src:
                full_url = urljoin(base_url, src)
                sha256_hash = FileUtils.get_sha256(full_url)
                extension = FileUtils.get_file_extension(full_url)
                print(f'conv: {full_url} -> {sha256_hash}')

                img_tag = f"<$<img>$>{sha256_hash}{extension}<$<\\img>$>"
                img.replace_with(img_tag)

        # Replace <video> tags with <$<vid>$> markers.
        for video in soup.find_all(['video']):
            src = video.get('src')
            if src:
                full_url = urljoin(base_url, src)
                sha256_hash = FileUtils.get_sha256(full_url)
                extension = FileUtils.get_file_extension(full_url)

                vid_tag = f"<$<vid>$>{sha256_hash}{extension}<$<\\vid>$>"
                video.replace_with(vid_tag)

        # Replace YouTube iframes; assume .mp4 since embeds carry no extension.
        for iframe in soup.find_all('iframe'):
            src = iframe.get('src')
            if src and 'youtube.com/embed/' in src:
                video_id = src.split('/embed/')[-1].split('?')[0]
                youtube_url = f"https://www.youtube.com/watch?v={video_id}"
                sha256_hash = FileUtils.get_sha256(youtube_url)

                vid_tag = f"<$<vid>$>{sha256_hash}.mp4<$<\\vid>$>"
                iframe.replace_with(vid_tag)

        # Extract plain text, one line per node, then collapse blank runs.
        text_content = soup.get_text(separator='\n', strip=True)
        text_content = re.sub(r'\n+', '\n', text_content)

        return text_content, media_urls

    @staticmethod
    def detect_charset(content: bytes) -> str:
        """Detect the character encoding of raw bytes; defaults to utf-8."""
        result = chardet.detect(content)
        # chardet may report encoding=None for undecidable input.
        return result.get('encoding', 'utf-8') or 'utf-8'

    @staticmethod
    def generate_track_id() -> str:
        """Generate a globally unique tracking ID (UUID4 string)."""
        return str(uuid.uuid4())

    @staticmethod
    def extract_title(html: str) -> str:
        """Extract the page title, or '' when there is no <title> tag."""
        soup = BeautifulSoup(html, 'lxml')
        title_tag = soup.find('title')
        if title_tag:
            return title_tag.get_text(strip=True)
        return ''

    @staticmethod
    def extract_base_url(html: str, default_url: str) -> str:
        """Return the document's <base href> if present, else default_url."""
        soup = BeautifulSoup(html, 'lxml')
        base_tag = soup.find('base', href=True)
        if base_tag:
            return base_tag['href']
        return default_url

    @staticmethod
    def parse_flexible_date(date_str: str) -> str:
        """
        Parse dates like 'July 29 • 2025' into ISO format '2025-07-29'.

        Accepts full English month names and abbreviations, in any letter
        case, with '•', ',' or whitespace separators. Returns '' when the
        input is missing, not a string, or does not match.
        """
        import calendar

        if not date_str or not isinstance(date_str, str):
            return ""

        # Matches e.g. 'July 29 • 2025' or 'July 29, 2025'.
        pattern = r'([A-Za-z]+)\s+(\d{1,2})\s*(?:[•,]|\s)*\s*(\d{4})'
        match = re.search(pattern, date_str)
        if not match:
            return ""

        month_str, day_str, year_str = match.groups()

        # Case-insensitive month lookup covering both full names and
        # abbreviations (index 0 in both tables is '' and is skipped).
        # FIX: the previous .index() lookup was case-sensitive, so inputs
        # like 'JULY 29 2025' were rejected despite matching the regex.
        month_lookup = {name.lower(): i
                        for i, name in enumerate(calendar.month_name) if name}
        month_lookup.update({abbr.lower(): i
                             for i, abbr in enumerate(calendar.month_abbr) if abbr})
        month_num = month_lookup.get(month_str.lower())
        if month_num is None:
            return ""

        day = int(day_str)
        year = int(year_str)
        return f"{year:04d}-{month_num:02d}-{day:02d}"
