"""文件信息数据模型"""

import hashlib
from datetime import datetime
from pathlib import Path
from typing import Any, ClassVar, Dict, List, Optional

from pydantic import BaseModel, ConfigDict, Field, field_validator


class FileInfo(BaseModel):
    """Metadata model for a single file on disk.

    Instances are normally built with :meth:`from_path`; the lazy fields
    (``file_hash``, ``keywords``, ``mime_type``, ``metadata``) can be
    filled in afterwards.
    """
    path: Path = Field(..., description="文件路径")
    name: str = Field(..., description="文件名")
    extension: str = Field(..., description="文件扩展名")
    size: int = Field(..., description="文件大小(字节)")
    created_time: datetime = Field(..., description="创建时间")
    modified_time: datetime = Field(..., description="修改时间")
    keywords: List[str] = Field(default_factory=list, description="提取的关键词")
    file_hash: Optional[str] = Field(None, description="文件哈希值")
    mime_type: Optional[str] = Field(None, description="MIME类型")
    metadata: Dict[str, Any] = Field(default_factory=dict, description="额外元数据")

    # NOTE(review): `json_encoders` is deprecated in Pydantic v2 (field
    # serializers are the replacement). Kept as-is because it still works
    # and callers may depend on the current JSON shape — confirm before
    # migrating.
    model_config = ConfigDict(
        json_encoders={
            Path: str,
            datetime: lambda v: v.isoformat()
        }
    )

    # Lookup tables built once at class-definition time instead of on every
    # method call. `ClassVar` keeps Pydantic from treating them as model
    # fields; `frozenset` gives O(1) membership tests.
    _EXTENSION_CATEGORIES: ClassVar[Dict[str, frozenset]] = {
        'documents': frozenset({'.pdf', '.docx', '.doc', '.txt', '.md', '.rtf'}),
        'images': frozenset({'.jpg', '.jpeg', '.png', '.gif', '.bmp', '.svg'}),
        'videos': frozenset({'.mp4', '.avi', '.mkv', '.mov', '.wmv', '.flv'}),
        'audio': frozenset({'.mp3', '.wav', '.flac', '.aac', '.ogg'}),
        'archives': frozenset({'.zip', '.rar', '.7z', '.tar', '.gz'}),
        'code': frozenset({'.py', '.js', '.html', '.css', '.java', '.cpp', '.c'}),
        'data': frozenset({'.xlsx', '.csv', '.json', '.xml', '.sql'}),
    }
    _TEXT_EXTENSIONS: ClassVar[frozenset] = frozenset({
        '.txt', '.md', '.py', '.js', '.html', '.css', '.json',
        '.xml', '.csv', '.log', '.ini', '.cfg', '.yaml', '.yml',
    })

    @field_validator('extension', mode='before')
    @classmethod
    def normalize_extension(cls, v):
        """Normalize the extension to lower case with a leading dot."""
        if not v.startswith('.'):
            v = '.' + v
        return v.lower()

    @field_validator('name')
    @classmethod
    def validate_name(cls, v):
        """Strip surrounding whitespace and reject empty file names."""
        v = v.strip()  # strip once; original stripped up to three times
        if not v:
            raise ValueError("文件名不能为空")
        return v

    @classmethod
    def from_path(cls, file_path: Path) -> "FileInfo":
        """Build a FileInfo from an existing file path.

        Args:
            file_path: path to an existing file.

        Raises:
            FileNotFoundError: if the path does not exist.

        NOTE(review): ``st_ctime`` is the inode *change* time on Linux,
        not the creation time — acceptable as an approximation, confirm
        if exact creation time matters. Timestamps are naive local times
        (no timezone attached).
        """
        if not file_path.exists():
            raise FileNotFoundError(f"文件不存在: {file_path}")

        stat = file_path.stat()
        return cls(
            path=file_path,
            name=file_path.name,
            # Files without a suffix (e.g. "README") get a sentinel value.
            extension=file_path.suffix or '.unknown',
            size=stat.st_size,
            created_time=datetime.fromtimestamp(stat.st_ctime),
            modified_time=datetime.fromtimestamp(stat.st_mtime)
        )

    def calculate_hash(self, algorithm: str = 'md5') -> str:
        """Compute the file digest, cache it in ``file_hash`` and return it.

        Args:
            algorithm: any name accepted by :func:`hashlib.new`. Defaults
                to md5, which is used here as a content checksum, not for
                security.

        Raises:
            FileNotFoundError: if the file no longer exists.
            ValueError: if ``algorithm`` is unknown to hashlib.
        """
        if not self.path.exists():
            raise FileNotFoundError(f"文件不存在: {self.path}")

        hash_obj = hashlib.new(algorithm)
        # Stream in 64 KiB chunks so large files are never fully loaded
        # into memory (original used 4 KiB; larger chunks mean fewer
        # syscalls with identical output).
        with open(self.path, 'rb') as f:
            while chunk := f.read(65536):
                hash_obj.update(chunk)

        self.file_hash = hash_obj.hexdigest()
        return self.file_hash

    def get_category_by_extension(self) -> str:
        """Return the coarse category for this file's extension.

        Returns:
            One of the keys of ``_EXTENSION_CATEGORIES`` ('documents',
            'images', ...) or 'other' when the extension matches no group.
        """
        for category, extensions in self._EXTENSION_CATEGORIES.items():
            if self.extension in extensions:
                return category
        return 'other'

    def is_text_file(self) -> bool:
        """Return True if the extension marks a plain-text file."""
        return self.extension in self._TEXT_EXTENSIONS