"""AI文件分析器 - 集成大模型的智能分析"""

import asyncio
import time
from typing import List, Dict, Any, Optional
from pathlib import Path
import jieba
import re
from datetime import datetime

from ..models.file_info import FileInfo
from ..models.directory_rule import DirectoryRule, AnalysisContext
from ..models.config import LLMConfig, UserPreferences
from .llm_client import LLMClient
from .prompt_engine import PromptEngine
from ..utils.logger import get_logger
from ..utils.database import Database


class AIAnalyzer:
    """AI文件分析器"""
    
    def __init__(self, llm_config: LLMConfig, 
                 user_preferences: Optional[UserPreferences] = None,
                 database: Optional[Database] = None):
        """Initialize the AI analyzer.

        Args:
            llm_config: LLM connection/behavior settings used by the client.
            user_preferences: Optional user preferences; defaults to a fresh
                ``UserPreferences()`` when omitted.
            database: Optional database used for analysis-result caching;
                caching is skipped entirely when this is ``None``.
        """
        self.llm_config = llm_config
        self.user_preferences = user_preferences or UserPreferences()
        self.database = database
        self.logger = get_logger()
        
        # Core components. Bug fix: pass the resolved preferences (default
        # applied) to PromptEngine instead of the raw argument, so the prompt
        # engine never receives None when the caller omitted preferences.
        self.llm_client = LLMClient(llm_config)
        self.prompt_engine = PromptEngine(self.user_preferences)
        
        # Warm up the jieba tokenizer and register domain vocabulary.
        self._init_jieba()
        
        # Extension/keyword rules used by the non-LLM fallback classifier.
        self._load_default_rules()
    
    def _init_jieba(self) -> None:
        """Warm up the jieba tokenizer and register domain-specific vocabulary.

        Failures are logged but never raised, since keyword extraction can
        still run with jieba's default dictionary.
        """
        try:
            # Eagerly build the dictionary so later cuts are fast.
            jieba.initialize()
            
            # Domain phrases that must be kept as single tokens.
            for word in ('文件归纳', '项目管理', '会议纪要', '工作报告',
                         '学习资料', '技术文档', '数据分析', '软件开发'):
                jieba.add_word(word)
            
            self.logger.info("jieba分词器初始化完成")
        except Exception as e:
            self.logger.error(f"jieba初始化失败: {e}")
    
    def _load_default_rules(self) -> None:
        """加载默认分类规则"""
        self.default_rules = {
            'documents': {
                'keywords': ['文档', '报告', '合同', '方案', '总结', '计划'],
                'extensions': ['.pdf', '.docx', '.doc', '.txt', '.md'],
                'category': '工作文档'
            },
            'study': {
                'keywords': ['教程', '学习', '课程', '笔记', '资料', '书籍'],
                'extensions': ['.pdf', '.epub', '.mobi'],
                'category': '学习资料'
            },
            'images': {
                'keywords': ['照片', '图片', '截图', '设计'],
                'extensions': ['.jpg', '.jpeg', '.png', '.gif', '.bmp'],
                'category': '图片文件'
            },
            'videos': {
                'keywords': ['视频', '录像', '影片', '电影'],
                'extensions': ['.mp4', '.avi', '.mkv', '.mov'],
                'category': '视频文件'
            },
            'code': {
                'keywords': ['代码', '程序', '脚本', '开发'],
                'extensions': ['.py', '.js', '.html', '.css', '.java'],
                'category': '代码文件'
            }
        }
    
    async def analyze_file(self, file_info: FileInfo, 
                          context: AnalysisContext) -> DirectoryRule:
        """Analyze a single file by delegating to the batch entry point."""
        rules = await self.analyze_files([file_info], context)
        if rules:
            return rules[0]
        # Batch analysis produced nothing usable; fall back to a stub rule.
        return self._create_fallback_rule(file_info)
    
    async def analyze_files(self, files: List[FileInfo], 
                           context: AnalysisContext) -> List[DirectoryRule]:
        """Analyze a batch of files and return directory rules.

        Pipeline: keyword extraction -> cache lookup -> LLM analysis ->
        validation/enhancement -> cache write -> preference learning. On any
        LLM failure the rule-based fallback is used instead of raising.
        """
        if not files:
            return []
        
        self.logger.info(f"开始分析文件", file_count=len(files), 
                        has_description=bool(context.user_description))
        
        # Make sure every file carries keywords before analysis/caching.
        self._extract_keywords_for_files(files)
        
        # Serve the whole batch from cache when every file has a hit.
        cache_hit = await self._check_cache(files, context)
        if cache_hit:
            return cache_hit
        
        try:
            raw_rules = await self.llm_client.analyze_files(files, context)
            
            # Clamp confidences, sanitize paths, inject keywords/date folders.
            final_rules = self._validate_and_enhance_rules(raw_rules, files, context)
            
            # Persist per-file results so identical files skip the LLM later.
            await self._cache_results(files, context, final_rules)
            
            # Hook for adapting to user behavior (currently a no-op).
            self._learn_from_results(final_rules, context)
            
            return final_rules
        except Exception as e:
            self.logger.error(f"LLM分析失败，使用降级方案: {e}")
            return self._fallback_analysis(files, context)
    
    def _extract_keywords_for_files(self, files: List[FileInfo]) -> None:
        """为文件提取关键词"""
        for file_info in files:
            if not file_info.keywords:
                file_info.keywords = self.extract_keywords(file_info.name)
    
    def extract_keywords(self, filename: str) -> List[str]:
        """Extract de-duplicated keywords from a file name.

        Combines jieba tokens (stop words and pure digits dropped, length > 1
        only), English words of length > 2, and a synthetic '日期相关' marker
        when the name embeds a date-like pattern. Order of the returned list
        is unspecified because of the final de-duplication.
        """
        # Work on the stem only; the extension carries no keyword signal.
        stem = Path(filename).stem
        
        # Replace punctuation/symbols with spaces before tokenizing.
        normalized = re.sub(r'[^\w\s\u4e00-\u9fff]', ' ', stem)
        
        stop_words = {'的', '了', '和', '与', '及', '或', '但', '而'}
        keywords = [
            token for token in (w.strip() for w in jieba.cut(normalized))
            if len(token) > 1 and token not in stop_words and not token.isdigit()
        ]
        
        # English words straight from the raw stem (length > 2 only).
        keywords.extend(w for w in re.findall(r'[A-Za-z]+', stem) if len(w) > 2)
        
        # Tag names that embed a date in any of the common formats.
        date_patterns = (
            r'(\d{4})[年\-](\d{1,2})[月\-](\d{1,2})',  # 2024年01月15日
            r'(\d{4})(\d{2})(\d{2})',                    # 20240115
            r'(\d{1,2})[月\-](\d{1,2})[日]?'           # 01月15日
        )
        if any(re.search(pattern, stem) for pattern in date_patterns):
            keywords.append('日期相关')
        
        return list(set(keywords))
    
    async def _check_cache(self, files: List[FileInfo], 
                          context: AnalysisContext) -> Optional[List[DirectoryRule]]:
        """Return cached rules for *files*, or None when a full hit is impossible.

        All-or-nothing: a single cache miss invalidates the whole batch so the
        LLM can analyze the files together. NOTE(review): the cache key is the
        file hash only — *context* is not part of the key, so a changed user
        description will not bust the cache; confirm that is intended.
        """
        if not self.database:
            return None
        
        try:
            # Hashes are the cache keys; compute any that are missing.
            for info in files:
                if not info.file_hash:
                    info.calculate_hash()
            
            hits = []
            for info in files:
                payload = self.database.get_analysis_cache(info.file_hash)
                if not payload:
                    return None  # one miss -> re-analyze the whole batch
                hits.append(DirectoryRule(**payload))
            
            return hits
        except Exception as e:
            self.logger.warning(f"缓存检查失败: {e}")
            return None
    
    async def _cache_results(self, files: List[FileInfo], 
                           context: AnalysisContext,
                           rules: List[DirectoryRule]) -> None:
        """Persist analysis results keyed by file hash; best-effort only.

        Files without a computed hash cannot be keyed and are skipped.
        Database errors are logged and swallowed — caching is an optimization,
        not a requirement.
        """
        if not self.database:
            return
        
        try:
            for info, rule in zip(files, rules):
                if not info.file_hash:
                    continue  # no key available for this file
                self.database.save_analysis_cache(
                    file_hash=info.file_hash,
                    file_name=info.name,
                    result=rule.to_dict(),
                    ttl=self.llm_config.cache_ttl
                )
        except Exception as e:
            self.logger.warning(f"缓存保存失败: {e}")
    
    def _validate_and_enhance_rules(self, rules: List[DirectoryRule], 
                                  files: List[FileInfo],
                                  context: AnalysisContext) -> List[DirectoryRule]:
        """Validate LLM rules and enhance them with locally derived data.

        Guarantees exactly one rule per file, in file order. Per rule:
        confidence is clamped to [0, 1]; unsafe paths are replaced (with a
        confidence penalty and an annotated reasoning); missing keywords are
        backfilled from the file; date-bearing files get a date subfolder.

        Bug fix: the previous ``zip(rules, files)`` silently dropped files
        when the LLM returned fewer rules than files, so callers relying on
        1:1 rule/file alignment (e.g. result caching) got misaligned data.
        Files without a matching rule now receive a low-confidence fallback.
        """
        if len(rules) < len(files):
            self.logger.warning("LLM返回的规则数量少于文件数量",
                                rule_count=len(rules), file_count=len(files))

        enhanced_rules: List[DirectoryRule] = []

        for index, file_info in enumerate(files):
            if index >= len(rules):
                # No LLM rule for this file; use the generic fallback rule.
                enhanced_rules.append(self._create_fallback_rule(file_info))
                continue
            rule = rules[index]

            # Clamp out-of-range confidences instead of rejecting the rule.
            if not 0.0 <= rule.confidence <= 1.0:
                rule.confidence = max(0.0, min(1.0, rule.confidence))

            # Replace illegal/over-deep paths and penalize the confidence.
            if not self._is_valid_path(rule.suggested_path):
                rule.suggested_path = self._create_safe_path(file_info, rule.category)
                rule.confidence = max(0.3, rule.confidence - 0.2)
                rule.reasoning += " (路径已调整为安全路径)"

            # Backfill keywords from the locally extracted ones.
            if not rule.keywords:
                rule.keywords = file_info.keywords

            # Route date-bearing files into a year-month subfolder.
            if self._contains_date_info(file_info.name) or rule.date_based:
                rule.date_based = True
                rule.suggested_path = self._add_date_to_path(rule.suggested_path, file_info)

            enhanced_rules.append(rule)

        return enhanced_rules
    
    def _is_valid_path(self, path: Path) -> bool:
        """验证路径是否合法"""
        try:
            # 检查路径长度
            if len(str(path)) > 250:
                return False
            
            # 检查非法字符
            invalid_chars = ['<', '>', ':', '"', '|', '?', '*']
            if any(char in str(path) for char in invalid_chars):
                return False
            
            # 检查层级深度
            if len(path.parts) > 5:
                return False
            
            return True
            
        except Exception:
            return False
    
    def _create_safe_path(self, file_info: FileInfo, category: str) -> Path:
        """创建安全路径"""
        safe_category = re.sub(r'[^\w\s\u4e00-\u9fff]', '', category) or '其他'
        safe_filename = re.sub(r'[^\w\s\u4e00-\u9fff\.]', '_', file_info.name)
        
        return Path(safe_category) / safe_filename
    
    def _contains_date_info(self, filename: str) -> bool:
        """检查文件名是否包含日期信息"""
        date_patterns = [
            r'\d{4}[年\-]\d{1,2}[月\-]\d{1,2}',
            r'\d{4}\d{2}\d{2}',
            r'\d{1,2}[月\-]\d{1,2}[日]?'
        ]
        
        return any(re.search(pattern, filename) for pattern in date_patterns)
    
    def _add_date_to_path(self, path: Path, file_info: FileInfo) -> Path:
        """为路径添加日期信息"""
        # 提取文件名中的日期
        date_match = re.search(r'(\d{4})[年\-]?(\d{1,2})[月\-]?(\d{1,2})?', file_info.name)
        
        if date_match:
            year = date_match.group(1)
            month = date_match.group(2).zfill(2)
            
            # 根据用户偏好格式化日期
            if self.user_preferences.date_format == "YYYY年MM月":
                date_folder = f"{year}年{month}月"
            else:
                date_folder = f"{year}-{month}"
            
            # 插入日期文件夹
            parts = list(path.parts)
            if len(parts) > 1:
                parts.insert(-1, date_folder)
            else:
                parts.insert(0, date_folder)
            
            return Path(*parts)
        
        return path
    
    def _fallback_analysis(self, files: List[FileInfo], 
                          context: AnalysisContext) -> List[DirectoryRule]:
        """Rule-based fallback used when LLM analysis fails.

        Classifies each file by extension, lets a keyword match override the
        extension category, and files everything under ``<category>/<name>``
        with a fixed confidence of 0.6.
        """
        fallback_rules = []
        
        for info in files:
            # Extension gives the baseline; a keyword match takes precedence.
            category = self._classify_by_extension(info)
            keyword_match = self._classify_by_keywords(info.keywords)
            if keyword_match:
                category = keyword_match
            
            # Every category shares the same <category>/<filename> layout.
            fallback_rules.append(DirectoryRule(
                category=category,
                suggested_path=Path(f"{category}/{info.name}"),
                confidence=0.6,
                keywords=info.keywords,
                reasoning=f"基于文件扩展名和关键词的规则分类：{category}"
            ))
        
        return fallback_rules
    
    def _classify_by_extension(self, file_info: FileInfo) -> str:
        """基于扩展名分类"""
        for rule_name, rule_data in self.default_rules.items():
            if file_info.extension in rule_data['extensions']:
                return rule_data['category']
        
        return '其他文件'
    
    def _classify_by_keywords(self, keywords: List[str]) -> Optional[str]:
        """基于关键词分类"""
        for rule_name, rule_data in self.default_rules.items():
            rule_keywords = rule_data['keywords']
            if any(keyword in keywords for keyword in rule_keywords):
                return rule_data['category']
        
        return None
    
    def _create_fallback_rule(self, file_info: FileInfo) -> DirectoryRule:
        """Build the low-confidence catch-all rule filing under '其他/'."""
        return DirectoryRule(
            category='其他',
            suggested_path=Path('其他') / file_info.name,
            confidence=0.3,
            keywords=file_info.keywords,
            reasoning="默认分类"
        )
    
    def _learn_from_results(self, rules: List[DirectoryRule], 
                           context: AnalysisContext) -> None:
        """Learn user preferences from analysis results.

        Placeholder hook invoked after every successful LLM analysis; it is
        intentionally a no-op for now.
        """
        # Possible future logic, e.g.:
        # - record classification patterns the user frequently confirms
        # - adjust keyword weights
        # - update the stored user-preference settings
        pass
    
    async def analyze_batch(self, files: List[FileInfo], 
                           context: AnalysisContext,
                           batch_size: int = 10) -> List[DirectoryRule]:
        """Analyze files in batches to stay within LLM request limits.

        Args:
            files: Files to analyze.
            context: Shared analysis context; ``batch_mode`` is set to True
                as a side effect.
            batch_size: Maximum files per LLM request. New optional parameter
                (generalizes the previously hard-coded 10; the default
                preserves the old behavior).

        Returns:
            One DirectoryRule per file, in the original order.
        """
        context.batch_mode = True
        
        # Small workloads go through in a single request.
        if len(files) <= batch_size:
            return await self.analyze_files(files, context)
        
        all_rules: List[DirectoryRule] = []
        for start in range(0, len(files), batch_size):
            batch = files[start:start + batch_size]
            all_rules.extend(await self.analyze_files(batch, context))
            
            # Brief pause between batches to avoid provider rate limits.
            await asyncio.sleep(0.1)
        
        return all_rules
    
    def _rule_based_analysis(self, files: List[FileInfo]) -> List[DirectoryRule]:
        """Pure rule-based analysis (used for tests and as a fallback).

        Unlike ``_fallback_analysis`` the suggested path is a directory only
        (no file name), and image/code extensions force dedicated
        subdirectories regardless of the keyword/extension classification.
        """
        results = []
        
        for info in files:
            # Extension baseline, overridden by a keyword match if any.
            category = self._classify_by_extension(info)
            keyword_match = self._classify_by_keywords(info.keywords or [])
            if keyword_match:
                category = keyword_match
            
            # Every category maps to a same-named top-level directory.
            suggested_path = Path(category)
            
            ext = info.extension.lower()
            if ext in ('.jpg', '.jpeg', '.png', '.gif'):
                # Images always land in the personal photos folder.
                suggested_path = Path("个人文件/照片")
                category = "图片"
            elif ext in ('.py', '.js', '.java', '.cpp', '.c'):
                # Source files always land under the project-code folder.
                suggested_path = Path("工作文档/项目代码")
                category = "代码"
            
            results.append(DirectoryRule(
                category=category,
                suggested_path=suggested_path,
                confidence=0.6,
                keywords=info.keywords or [],
                reasoning=f"基于规则分析的{category}分类"
            ))
        
        return results
    
    def _build_analysis_prompt(self, files: List[FileInfo], context: AnalysisContext) -> str:
        """构建分析提示"""
        prompt_parts = []
        
        # 系统提示
        prompt_parts.append("请分析以下文件并建议归档路径：")
        prompt_parts.append("")
        
        # 文件列表
        prompt_parts.append("文件列表：")
        for i, file_info in enumerate(files, 1):
            prompt_parts.append(f"{i}. {file_info.name}")
            if file_info.keywords:
                prompt_parts.append(f"   关键词: {', '.join(file_info.keywords)}")
        
        prompt_parts.append("")
        
        # 用户描述
        if context.user_description:
            prompt_parts.append(f"用户描述: {context.user_description}")
            prompt_parts.append("")
        
        # 现有结构
        if context.existing_structure:
            prompt_parts.append("现有目录结构：")
            for structure in context.existing_structure:
                prompt_parts.append(f"- {structure}")
            prompt_parts.append("")
        
        prompt_parts.append("请为每个文件提供归档建议。")
        
        return "\n".join(prompt_parts)
    
    def _extract_keywords(self, files: List[FileInfo]) -> List[str]:
        """提取文件关键词"""
        all_keywords = []
        
        for file_info in files:
            keywords = self.extract_keywords(file_info.name)
            all_keywords.extend(keywords)
        
        # 去重并返回
        return list(set(all_keywords))