"""
文本处理工具
包含文本预处理、提示词优化、多语言支持等功能
"""

import re
import json
import logging
from typing import List, Dict, Any, Optional, Tuple
from pathlib import Path
import numpy as np
from collections import Counter
import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import spacy
from transformers import pipeline, AutoTokenizer, AutoModel
import torch

logger = logging.getLogger(__name__)


class TextProcessor:
    """Text processor.

    Provides text cleaning, tokenization, keyword extraction, sentiment
    analysis, prompt optimization and multi-language (en/zh) support.
    Heavy NLP resources are loaded best-effort in ``__init__``; every
    public method degrades gracefully when a resource is missing.
    """

    def __init__(self, language: str = 'en'):
        """Set up NLP backends for *language* ('en', 'zh', or other).

        Failures to fetch NLTK data, load a spaCy pipeline or download
        BERT weights are logged as warnings, never raised: the methods
        below all have dependency-free fallbacks.
        """
        self.language = language
        self.nlp = None        # spaCy pipeline; None when unavailable
        self.tokenizer = None  # HuggingFace tokenizer; None when unavailable
        self.model = None      # HuggingFace encoder; None when unavailable

        # NLTK corpora used by tokenization / stopwords / lemmatization.
        try:
            nltk.download('punkt', quiet=True)
            nltk.download('stopwords', quiet=True)
            nltk.download('wordnet', quiet=True)
            nltk.download('averaged_perceptron_tagger', quiet=True)
        except Exception as e:
            logger.warning(f"NLTK下载失败: {e}")

        # spaCy pipeline (only en/zh models are attempted here).
        try:
            if language == 'en':
                self.nlp = spacy.load('en_core_web_sm')
            elif language == 'zh':
                self.nlp = spacy.load('zh_core_web_sm')
        except Exception as e:
            logger.warning(f"spaCy模型加载失败: {e}")

        # BERT encoder used by analyze_sentiment().
        try:
            if language == 'en':
                model_name = 'bert-base-uncased'
            elif language == 'zh':
                model_name = 'bert-base-chinese'
            else:
                model_name = 'bert-base-multilingual-cased'

            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.model = AutoModel.from_pretrained(model_name)
        except Exception as e:
            logger.warning(f"BERT模型加载失败: {e}")

        # Style fragments appended to prompts by optimize_prompt().
        self.prompt_templates = self._load_prompt_templates()

    def _load_prompt_templates(self) -> Dict[str, str]:
        """Return the style-name -> style-fragment mapping used by optimize_prompt()."""
        templates = {
            "realistic": "realistic, high quality, detailed, photorealistic, 8k resolution",
            "cartoon": "cartoon style, animated, colorful, vibrant, clean lines",
            "anime": "anime style, Japanese animation, detailed, expressive, stylized",
            "oil_painting": "oil painting style, artistic, textured, brush strokes, masterpiece",
            "watercolor": "watercolor style, soft, flowing, transparent, artistic",
            "sketch": "sketch style, black and white, line art, pencil drawing, minimalist",
            "cyberpunk": "cyberpunk style, futuristic, neon, dystopian, high tech",
            "vintage": "vintage style, retro, old fashioned, nostalgic, classic",
            "fantasy": "fantasy style, magical, mystical, ethereal, dreamlike",
            "sci_fi": "sci-fi style, futuristic, space, technology, advanced",
            "horror": "horror style, dark, scary, atmospheric, creepy",
            "comedy": "comedy style, funny, humorous, lighthearted, cheerful",
            "drama": "drama style, emotional, intense, serious, powerful",
            "action": "action style, dynamic, energetic, fast-paced, exciting",
            "romance": "romance style, romantic, love, beautiful, tender"
        }
        return templates

    def clean_text(self, text: str) -> str:
        """Normalize quotes, collapse whitespace and strip special characters.

        Bug fix: smart quotes are now mapped to ASCII *before* the
        special-character strip. Previously the strip ran first (deleting
        the curly quotes), which made the normalization dead code — and
        one of the original ``replace`` calls had been mangled into
        replacing a nonsense literal.
        """
        # Map typographic quotes to ASCII so the strip below keeps them.
        text = text.replace('\u201c', '"').replace('\u201d', '"')
        text = text.replace('\u2018', "'").replace('\u2019', "'")

        # Collapse runs of whitespace into single spaces.
        text = re.sub(r'\s+', ' ', text.strip())

        # Drop anything outside word characters and basic punctuation.
        text = re.sub(r'[^\w\s.,!?;:()\-\'\"]', '', text)

        return text

    def tokenize_text(self, text: str) -> List[str]:
        """Split *text* into tokens, per the configured language."""
        if self.language == 'zh':
            # Chinese: use the spaCy pipeline when available.
            if self.nlp:
                doc = self.nlp(text)
                return [token.text for token in doc]
            else:
                # Fallback: per-character split (includes whitespace chars).
                return list(text)
        else:
            # English and other languages: NLTK word tokenizer.
            return word_tokenize(text)

    def remove_stopwords(self, tokens: List[str]) -> List[str]:
        """Filter stopwords from *tokens* (case-insensitive match).

        Returns the input unchanged if the stopword list cannot be loaded.
        """
        try:
            if self.language == 'en':
                stop_words = set(stopwords.words('english'))
            elif self.language == 'zh':
                # Small built-in list of common Chinese function words.
                stop_words = set(['的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好', '自己', '这'])
            else:
                stop_words = set()

            return [token for token in tokens if token.lower() not in stop_words]
        except Exception as e:
            logger.warning(f"移除停用词失败: {e}")
            return tokens

    def lemmatize_tokens(self, tokens: List[str]) -> List[str]:
        """Lemmatize *tokens* with WordNet; returns input unchanged on failure."""
        try:
            lemmatizer = WordNetLemmatizer()
            return [lemmatizer.lemmatize(token) for token in tokens]
        except Exception as e:
            logger.warning(f"词形还原失败: {e}")
            return tokens

    def extract_keywords(self, text: str, top_k: int = 10) -> List[Tuple[str, float]]:
        """Return up to *top_k* (keyword, weight) pairs from *text*.

        Weights are frequencies normalized over the returned keywords, so
        they sum to 1.0 whenever any keyword is found. Returns [] on
        failure or when no token survives the filters.
        """
        try:
            # Normalize -> tokenize -> drop stopwords -> lemmatize.
            # (Local renamed from `clean_text`, which shadowed the method.)
            cleaned = self.clean_text(text)
            tokens = self.tokenize_text(cleaned)
            tokens = self.remove_stopwords(tokens)
            tokens = self.lemmatize_tokens(tokens)

            # Frequency count, ignoring very short tokens.
            word_freq = Counter(tokens)
            word_freq = {word: freq for word, freq in word_freq.items() if len(word) > 2}

            # Highest-frequency tokens first.
            keywords = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)[:top_k]
            if not keywords:
                return []

            # Normalize frequencies into weights.
            total_freq = sum(freq for _, freq in keywords)
            return [(word, freq / total_freq) for word, freq in keywords]

        except Exception as e:
            logger.error(f"关键词提取失败: {e}")
            return []

    def analyze_sentiment(self, text: str) -> Dict[str, float]:
        """Return a {'positive', 'negative', 'neutral'} score dict.

        Uses the BERT embedding heuristic when the model is loaded,
        otherwise falls back to counting simple English cue words.
        """
        try:
            if self.model and self.tokenizer:
                # Encode with BERT, mean-pool the last hidden state.
                inputs = self.tokenizer(text, return_tensors="pt", truncation=True, max_length=512)

                with torch.no_grad():
                    outputs = self.model(**inputs)
                    embeddings = outputs.last_hidden_state.mean(dim=1)

                # Crude heuristic: squash the mean activation into (0, 1).
                # NOTE(review): this is not a trained sentiment head —
                # replace with a proper classifier for real use.
                sentiment_score = torch.sigmoid(embeddings.mean()).item()

                return {
                    'positive': sentiment_score,
                    'negative': 1 - sentiment_score,
                    'neutral': 0.5
                }
            else:
                # Fallback: count occurrences of sentiment cue words.
                positive_words = ['good', 'great', 'excellent', 'amazing', 'wonderful', 'beautiful', 'love', 'happy', 'joy']
                negative_words = ['bad', 'terrible', 'awful', 'horrible', 'hate', 'sad', 'angry', 'disappointed']

                text_lower = text.lower()
                positive_count = sum(1 for word in positive_words if word in text_lower)
                negative_count = sum(1 for word in negative_words if word in text_lower)

                total_words = len(text.split())
                if total_words > 0:
                    positive_score = positive_count / total_words
                    negative_score = negative_count / total_words
                    # Clamp: many cue words in a short text could
                    # otherwise drive the neutral score negative.
                    neutral_score = max(0.0, 1 - positive_score - negative_score)
                else:
                    # Uniform prior for empty input (bug fix: was 0.33
                    # each, which summed to 0.99 instead of 1.0).
                    positive_score = negative_score = neutral_score = 1 / 3

                return {
                    'positive': positive_score,
                    'negative': negative_score,
                    'neutral': neutral_score
                }

        except Exception as e:
            logger.error(f"情感分析失败: {e}")
            return {'positive': 0.33, 'negative': 0.33, 'neutral': 0.34}

    def optimize_prompt(self, prompt: str, style: str = "realistic",
                       enhance_quality: bool = True, add_details: bool = True) -> str:
        """Clean *prompt* and append style, quality and detail fragments.

        Returns the original prompt unchanged if any step fails.
        """
        try:
            # Start from the normalized prompt text.
            optimized_prompt = self.clean_text(prompt)

            # Append the requested style fragment, if known.
            if style in self.prompt_templates:
                style_template = self.prompt_templates[style]
                optimized_prompt = f"{optimized_prompt}, {style_template}"

            # Append a few generic quality boosters.
            if enhance_quality:
                quality_enhancers = [
                    "high quality", "detailed", "sharp focus", "professional",
                    "masterpiece", "best quality", "ultra detailed"
                ]
                optimized_prompt = f"{optimized_prompt}, {', '.join(quality_enhancers[:3])}"

            # Echo up to three longer keywords back as extra detail.
            if add_details:
                keywords = self.extract_keywords(prompt, top_k=5)
                if keywords:
                    detail_words = [word for word, _ in keywords if len(word) > 3]
                    if detail_words:
                        optimized_prompt = f"{optimized_prompt}, {', '.join(detail_words[:3])}"

            return optimized_prompt

        except Exception as e:
            logger.error(f"提示词优化失败: {e}")
            return prompt

    def translate_prompt(self, prompt: str, target_language: str) -> str:
        """Translate *prompt* to *target_language*.

        TODO: integrate a translation API (e.g. Google Translate / Baidu);
        currently a logged pass-through that returns the prompt unchanged.
        """
        try:
            logger.info(f"翻译提示词到 {target_language}: {prompt}")
            return prompt

        except Exception as e:
            logger.error(f"提示词翻译失败: {e}")
            return prompt

    def generate_variations(self, prompt: str, num_variations: int = 3) -> List[str]:
        """Produce *num_variations* variants of *prompt*.

        Variants substitute known synonyms for top keywords and add a
        framing prefix to the 2nd/3rd variant. Falls back to repeating
        the original prompt on failure.
        """
        try:
            variations = []

            # Keywords drive the synonym substitutions below.
            keywords = self.extract_keywords(prompt, top_k=5)

            for i in range(num_variations):
                variation = prompt
                # Swap in a (rotating) synonym for each top keyword.
                for word, _ in keywords[:3]:
                    synonyms = self._get_synonyms(word)
                    if synonyms:
                        variation = variation.replace(word, synonyms[i % len(synonyms)])

                # Vary the camera framing for later variants.
                if i == 1:
                    variation = f"close-up of {variation}"
                elif i == 2:
                    variation = f"wide shot of {variation}"

                variations.append(variation)

            return variations

        except Exception as e:
            logger.error(f"生成提示词变体失败: {e}")
            return [prompt] * num_variations

    def _get_synonyms(self, word: str) -> List[str]:
        """Return synonyms for *word* from a small built-in dictionary ([] if unknown)."""
        synonyms_dict = {
            'cat': ['feline', 'kitten', 'kitty'],
            'dog': ['canine', 'puppy', 'hound'],
            'beautiful': ['gorgeous', 'stunning', 'lovely'],
            'big': ['large', 'huge', 'enormous'],
            'small': ['tiny', 'little', 'miniature'],
            'happy': ['joyful', 'cheerful', 'delighted'],
            'sad': ['unhappy', 'melancholy', 'sorrowful'],
            'fast': ['quick', 'rapid', 'swift'],
            'slow': ['sluggish', 'leisurely', 'gradual']
        }

        return synonyms_dict.get(word.lower(), [])

    def validate_prompt(self, prompt: str) -> Dict[str, Any]:
        """Heuristically validate *prompt*.

        Returns a dict with 'is_valid', 'length', 'word_count', and lists
        of 'issues' and 'suggestions'. A prompt is valid when no issue is
        recorded.
        """
        try:
            validation_result = {
                'is_valid': True,
                'length': len(prompt),
                'word_count': len(prompt.split()),
                'issues': [],
                'suggestions': []
            }

            # Length bounds.
            if len(prompt) < 10:
                validation_result['issues'].append('提示词太短')
                validation_result['suggestions'].append('添加更多描述性词汇')

            if len(prompt) > 500:
                validation_result['issues'].append('提示词太长')
                validation_result['suggestions'].append('简化描述，保留关键信息')

            # Vocabulary diversity (unique / total words).
            words = prompt.split()
            unique_words = set(words)
            diversity_ratio = len(unique_words) / len(words) if words else 0

            if diversity_ratio < 0.5:
                validation_result['issues'].append('词汇重复较多')
                validation_result['suggestions'].append('使用更多不同的描述词')

            # Presence of a subject and an action, via small cue-word lists.
            missing_elements = []

            if not any(word in prompt.lower() for word in ['cat', 'dog', 'person', 'object']):
                missing_elements.append('subject')

            if not any(word in prompt.lower() for word in ['running', 'sitting', 'flying', 'walking']):
                missing_elements.append('action')

            if missing_elements:
                validation_result['issues'].append(f'缺少关键元素: {", ".join(missing_elements)}')
                validation_result['suggestions'].append('添加主体、动作、风格等描述')

            # Valid iff no issue was recorded.
            validation_result['is_valid'] = len(validation_result['issues']) == 0

            return validation_result

        except Exception as e:
            logger.error(f"提示词验证失败: {e}")
            return {'is_valid': False, 'issues': ['验证过程出错'], 'suggestions': ['请检查提示词格式']}

    def batch_process_prompts(self, prompts: List[str],
                            style: str = "realistic",
                            optimize: bool = True) -> List[str]:
        """Validate and (optionally) optimize each prompt in *prompts*.

        Prompts that fail validation are passed through unchanged (with a
        warning); the whole input is returned unchanged on hard failure.
        """
        try:
            processed_prompts = []

            for i, prompt in enumerate(prompts):
                logger.info(f"处理提示词 {i+1}/{len(prompts)}")

                validation = self.validate_prompt(prompt)

                if validation['is_valid']:
                    if optimize:
                        processed_prompt = self.optimize_prompt(prompt, style)
                    else:
                        processed_prompt = prompt

                    processed_prompts.append(processed_prompt)
                else:
                    # Keep the original prompt, but record the problems.
                    logger.warning(f"提示词 {i+1} 验证失败: {validation['issues']}")
                    processed_prompts.append(prompt)

            return processed_prompts

        except Exception as e:
            logger.error(f"批量处理提示词失败: {e}")
            return prompts


class PromptTemplate:
    """Manager for named fill-in-the-blank prompt templates."""

    def __init__(self):
        # Compact spec table: name -> (description, format string). The
        # per-template variable list is derived from the placeholders, in
        # order of appearance.
        specs = {
            "character": ("角色描述模板",
                          "A {age} {gender} {character_type} with {appearance} wearing {clothing}"),
            "scene": ("场景描述模板",
                      "A {setting} with {objects} in {lighting} lighting"),
            "action": ("动作描述模板",
                       "A {subject} {action} in a {environment}"),
            "emotion": ("情感描述模板",
                        "A {subject} with {emotion} expression in {context}"),
        }
        self.templates = {
            name: {
                "description": description,
                "template": template,
                "variables": re.findall(r'\{(\w+)\}', template),
            }
            for name, (description, template) in specs.items()
        }

    def fill_template(self, template_name: str, variables: Dict[str, str]) -> str:
        """Render *template_name* using *variables*.

        Raises ValueError for an unknown template name or a missing
        template variable.
        """
        if template_name not in self.templates:
            raise ValueError(f"未知模板: {template_name}")

        fmt = self.templates[template_name]["template"]
        try:
            return fmt.format(**variables)
        except KeyError as e:
            raise ValueError(f"缺少变量: {e}")

    def get_template_info(self, template_name: str) -> Dict[str, Any]:
        """Return the description/template/variables record for *template_name*.

        Raises ValueError if the template is unknown.
        """
        if template_name not in self.templates:
            raise ValueError(f"未知模板: {template_name}")
        return self.templates[template_name]

    def list_templates(self) -> List[str]:
        """Return the names of all registered templates."""
        return [*self.templates]


if __name__ == "__main__":

    def _demo() -> None:
        """Smoke-test each major TextProcessor / PromptTemplate feature once."""
        processor = TextProcessor()
        template_manager = PromptTemplate()

        sample = "A beautiful cat is playing in the garden with colorful flowers"

        # Keyword extraction
        print(f"关键词: {processor.extract_keywords(sample)}")

        # Sentiment analysis
        print(f"情感分析: {processor.analyze_sentiment(sample)}")

        # Prompt optimization
        print(f"优化后的提示词: {processor.optimize_prompt(sample, style='cartoon')}")

        # Prompt validation
        print(f"验证结果: {processor.validate_prompt(sample)}")

        # Template rendering
        character_prompt = template_manager.fill_template("character", {
            "age": "young",
            "gender": "female",
            "character_type": "wizard",
            "appearance": "long silver hair and blue eyes",
            "clothing": "flowing purple robes",
        })
        print(f"角色模板: {character_prompt}")

    _demo()