import torch
import numpy as np
from PIL import Image
import io
import base64
import requests
import json
import folder_paths
import comfy.model_management
import re

class GeminiPoseAnalyzer:
    """
    Detailed Pose Description Generator for OpenPose Enhancement.

    ComfyUI node that sends an input image to the Google Gemini API
    (``gemini-1.5-flash`` via the ``generateContent`` REST endpoint) and
    returns two strings: a detailed per-person pose description in English
    (intended for Stable Diffusion / OpenPose prompting) and a translation
    of that description in one of 22 supported display languages.
    """

    def __init__(self):
        # Stateless node: all configuration arrives per-call via analyze_pose().
        pass

    @classmethod
    def INPUT_TYPES(cls):
        """Declare the ComfyUI input sockets and widgets for this node."""
        return {
            "required": {
                "image": ("IMAGE",),
                "api_key": ("STRING", {
                    "multiline": False,
                    "default": "YOUR_GEMINI_API_KEY_HERE"
                }),
                "analysis_depth": (["ultra_detailed", "detailed", "standard"], {
                    "default": "detailed"
                }),
                "focus_areas": (["full_body", "upper_body", "hands_focus", "face_focus"], {
                    "default": "full_body"
                }),
                "pose_style": (["natural", "dynamic", "static", "artistic"], {
                    "default": "natural"
                }),
                "language": ([
                    "japanese", "english", "chinese", "korean", "spanish", "french", 
                    "german", "russian", "italian", "portuguese", "dutch", "swedish",
                    "norwegian", "danish", "finnish", "polish", "czech", "hungarian",
                    "turkish", "arabic", "hindi", "thai"
                ], {
                    "default": "japanese"
                }),
            },
            "optional": {
                "existing_prompt": ("STRING", {
                    "multiline": True,
                    "default": "",
                    "forceInput": True
                }),
            }
        }

    RETURN_TYPES = ("STRING", "STRING")
    RETURN_NAMES = ("english_prompt", "trans_text")
    FUNCTION = "analyze_pose"
    CATEGORY = "image/analysis"

    def image_to_base64(self, image_tensor):
        """Convert a ComfyUI image tensor to a base64-encoded JPEG string.

        Args:
            image_tensor: float tensor in [0, 1], shape (B, H, W, C) or
                (H, W, C) — the usual ComfyUI IMAGE layout.

        Returns:
            str: base64-encoded JPEG bytes (no data-URI prefix).
        """
        # Drop the batch dimension if present; only the first image is used.
        if len(image_tensor.shape) == 4:
            image_tensor = image_tensor.squeeze(0)

        # Clamp before scaling: out-of-range floats would otherwise wrap
        # around when cast to uint8 and corrupt the encoded image.
        image_np = (np.clip(image_tensor.cpu().numpy(), 0.0, 1.0) * 255).astype(np.uint8)
        pil_image = Image.fromarray(image_np)

        buffer = io.BytesIO()
        # High JPEG quality keeps fine pose details visible to the model.
        pil_image.save(buffer, format='JPEG', quality=98)
        image_base64 = base64.b64encode(buffer.getvalue()).decode('utf-8')

        return image_base64

    def get_translation_keyword(self, language):
        """Return the marker string that separates the English description
        from its translation in the model output (e.g. ``[日本語翻訳]``).

        Unknown languages (including "english") fall back to "[Translation]".
        """
        keywords = {
            "japanese": "[日本語翻訳]",
            "chinese": "[中文翻译]", 
            "korean": "[한국어번역]",
            "spanish": "[Traducción española]",
            "french": "[Traduction française]",
            "german": "[Deutsche Übersetzung]",
            "russian": "[Русский перевод]",
            "italian": "[Traduzione italiana]",
            "portuguese": "[Tradução portuguesa]",
            "dutch": "[Nederlandse vertaling]",
            "swedish": "[Svensk översättning]",
            "norwegian": "[Norsk oversettelse]",
            "danish": "[Dansk oversættelse]",
            "finnish": "[Suomenkielinen käännös]",
            "polish": "[Polskie tłumaczenie]",
            "czech": "[Český překlad]",
            "hungarian": "[Magyar fordítás]",
            "turkish": "[Türkçe çeviri]",
            "arabic": "[الترجمة العربية]",
            "hindi": "[हिंदी अनुवाद]",
            "thai": "[การแปลภาษาไทย]"
        }
        return keywords.get(language, "[Translation]")

    def get_language_name(self, language):
        """Return the native (endonym) name of *language*; title-cased
        input is the fallback for unknown languages."""
        names = {
            "japanese": "日本語",
            "english": "English", 
            "chinese": "中文",
            "korean": "한국어",
            "spanish": "Español",
            "french": "Français",
            "german": "Deutsch",
            "russian": "Русский",
            "italian": "Italiano",
            "portuguese": "Português",
            "dutch": "Nederlands",
            "swedish": "Svenska",
            "norwegian": "Norsk",
            "danish": "Dansk",
            "finnish": "Suomi",
            "polish": "Polski",
            "czech": "Čeština",
            "hungarian": "Magyar",
            "turkish": "Türkçe",
            "arabic": "العربية",
            "hindi": "हिंदी",
            "thai": "ไทย"
        }
        return names.get(language, language.title())

    def create_detailed_pose_prompt(self, analysis_depth, focus_areas, pose_style, language):
        """Build the instruction prompt sent to Gemini.

        NOTE(review): ``analysis_depth``, ``focus_areas`` and ``pose_style``
        are currently accepted but not woven into the prompt text — the
        same base instruction is returned regardless. Kept as-is to avoid
        changing the prompt the API receives; TODO: incorporate them.
        """
        translation_keyword = self.get_translation_keyword(language)
        lang_name = self.get_language_name(language)

        # Base universal instruction in English (Gemini understands English best)
        base_instruction = f"""Create detailed pose descriptions for ALL people in this image. This is for OpenPose enhancement, describing each person's body orientation, weight distribution, limb placement, and gaze direction accurately.

🔥 **ABSOLUTE RULES** 🔥
1. Count ALL people accurately (single or multiple)
2. Clearly describe positional relationships between people
3. Detail each body part (head, torso, arms, hands, legs, feet) precisely
4. No introductory text - only pure pose descriptions
5. Use {translation_keyword} as keyword to separate English and {lang_name} versions

📋 **Required Description Elements**

【Overall Composition】
- Total number of people and their relative positions
- Basic orientation and posture of each person

【Individual Details (per person)】
- Body direction: front facing, three-quarter view, side profile, back view
- Weight distribution: weight on left/right leg, balanced stance
- Head: head position, gaze direction, head tilt
- Torso: torso angle, chest direction, spine curve
- Right arm: shoulder position, elbow bend, hand placement
- Left arm: shoulder position, elbow bend, hand placement
- Right leg: leg position, knee bend, foot placement
- Left leg: leg position, knee bend, foot placement
- Hand states: finger position, hand gestures, touching points

===OUTPUT FORMAT===

Single person:
[Detailed English pose description sentence]
{translation_keyword}[Detailed {lang_name} pose description sentence]

Multiple people:
[Number] people positioned [positional relationship], 
left position: [detailed English pose description], 
center position: [detailed English pose description], 
right position: [detailed English pose description]
{translation_keyword}[Number] people positioned [positional relationship in {lang_name}], left position: [detailed {lang_name} description], center position: [detailed {lang_name} description], right position: [detailed {lang_name} description]

**Important**: 
- Use "left position", "center position", "right position" for positioning
- Distinguish left/right clearly: "right arm", "left arm", "right leg", "left leg"
- Include specific angles and distances (slightly, moderately, fully, etc.)
- Do NOT use "===" markers
- No explanatory text - only pure descriptive sentences
- NEVER use words like figure, mannequin, doll that suggest artificial forms
- Translate naturally and accurately to {lang_name}"""

        return base_instruction

    def call_gemini_api(self, api_key, image_base64, prompt):
        """POST the prompt + image to the Gemini ``generateContent`` endpoint.

        Returns the first candidate's text on success, or a short
        human-readable error string (never raises to the caller).
        """
        url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key={api_key}"

        payload = {
            "contents": [
                {
                    "parts": [
                        {
                            "text": prompt
                        },
                        {
                            "inline_data": {
                                "mime_type": "image/jpeg",
                                "data": image_base64
                            }
                        }
                    ]
                }
            ],
            "generationConfig": {
                "temperature": 0.1,       # Low for consistent descriptions
                "topK": 40,               # Allow more vocabulary for descriptions
                "topP": 0.8,              # Good balance for detailed text
                "maxOutputTokens": 2000,  # More tokens for detailed descriptions
            },
            # Relaxed thresholds so body-pose wording isn't over-filtered.
            "safetySettings": [
                {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_ONLY_HIGH"},
                {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_ONLY_HIGH"},
                {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"},
                {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_ONLY_HIGH"}
            ]
        }

        headers = {"Content-Type": "application/json"}

        try:
            response = requests.post(url, json=payload, headers=headers, timeout=60)
            response.raise_for_status()

            result = response.json()

            if 'candidates' in result and len(result['candidates']) > 0:
                content = result['candidates'][0]['content']['parts'][0]['text']
                return content.strip()
            else:
                # Empty candidate list — typically a safety block or quota issue.
                return "pose analysis failed - no candidates"

        except requests.exceptions.RequestException as e:
            print(f"🚨 Gemini API Error: {str(e)}")
            return "API connection error"
        except Exception as e:
            # Covers malformed JSON / missing keys in an unexpected response.
            print(f"🚨 Error processing Gemini response: {str(e)}")
            return "processing error"

    def process_pose_description(self, raw_output):
        """Clean the raw model output: strip introductory boilerplate and
        surrounding whitespace. Logs before/after for debugging."""
        print(f"🔍 Raw API output: {raw_output}")

        # Remove any introductory text
        cleaned = self.remove_introductions(raw_output)
        print(f"🧹 After intro removal: {cleaned}")

        return cleaned.strip()

    def remove_introductions(self, text):
        """Strip common lead-in lines ("This image shows...", headings,
        etc.) that the model emits despite being told not to."""
        intro_patterns = [
            r'^.*?(?:この画像|This image|Looking at|Based on|Analyzing|I can see|The image shows).*?\n',
            r'^.*?(?:分析|analysis|shows|contains|depicts|features).*?\n',
            r'^\*\*.*?\*\*\s*\n',
            r'^#{1,6}\s.*?\n',
            r'^.*?(?:人物|people|person|figures?|mannequins?).*?(?:います|present|visible|shown).*?\n',
            r'^.*?(?:Here|これは|以下).*?\n',
        ]

        for pattern in intro_patterns:
            text = re.sub(pattern, '', text, flags=re.IGNORECASE | re.MULTILINE)

        return text.strip()

    def extract_english_description(self, pose_description):
        """Return only the English half of the model output.

        Prefers splitting on a known Japanese-translation marker; otherwise
        keeps every line that contains no Japanese script.
        """
        if '[日本語翻訳]' in pose_description:
            english_part = pose_description.split('[日本語翻訳]')[0].strip()
        elif '[Japanese]' in pose_description:
            english_part = pose_description.split('[Japanese]')[0].strip()
        else:
            # If no Japanese marker, keep only the lines free of Japanese text
            lines = pose_description.split('\n')
            english_lines = []
            for line in lines:
                line = line.strip()
                if line and not self.contains_japanese(line):
                    english_lines.append(line)
            english_part = ' '.join(english_lines)

        return english_part

    def contains_japanese(self, text):
        """Return True if *text* contains Japanese script.

        Matches hiragana (U+3040-309F), katakana (U+30A0-30FF) and CJK
        unified ideographs (U+4E00-9FFF). Bug fix: the previous pattern
        ``[ひらがなカタカナ漢字]`` was a class of nine literal characters,
        not script ranges, so most Japanese text went undetected.
        """
        return bool(re.search(r'[\u3040-\u30ff\u4e00-\u9fff]', text))

    def extract_translated_description(self, pose_description, language):
        """Return the translated half of the model output for viewer display.

        Splits on the language's translation marker when present; otherwise
        falls back to collecting lines that look like the target language.
        """
        translation_keyword = self.get_translation_keyword(language)

        if translation_keyword in pose_description:
            # maxsplit=1 keeps the whole remainder even if the marker
            # accidentally appears more than once in the output.
            translated_part = pose_description.split(translation_keyword, 1)[1].strip()
        else:
            # Fallback: try to find text in the target language
            lines = pose_description.split('\n')
            translated_lines = []
            for line in lines:
                line = line.strip()
                if line and self.contains_target_language(line, language):
                    translated_lines.append(line)
            translated_part = '\n'.join(translated_lines) if translated_lines else f"翻訳テキストが見つかりません / Translation not found"

        return translated_part

    def contains_target_language(self, text, language):
        """Heuristic script check: does *text* contain characters from the
        target language's script? Latin-script languages cannot be told
        apart from English, so any non-empty text passes for them."""
        if language == "japanese":
            # Same fix as contains_japanese(): use real script ranges.
            return self.contains_japanese(text)
        elif language == "chinese":
            return bool(re.search(r'[\u4e00-\u9fff]', text))
        elif language == "korean":
            return bool(re.search(r'[\uac00-\ud7af]', text))
        elif language == "arabic":
            return bool(re.search(r'[\u0600-\u06ff]', text))
        elif language == "hindi":
            return bool(re.search(r'[\u0900-\u097f]', text))
        elif language == "thai":
            return bool(re.search(r'[\u0e00-\u0e7f]', text))
        elif language == "russian":
            return bool(re.search(r'[\u0400-\u04ff]', text))
        else:
            # For Latin-based languages, check for non-English words
            return len(text) > 0

    def analyze_pose(self, image, api_key, analysis_depth, focus_areas, pose_style, language, existing_prompt=""):
        """Main entry point (ComfyUI FUNCTION).

        Encodes the image, queries Gemini for a detailed pose description,
        and returns ``(english_prompt, trans_text)``. Errors are returned
        as strings rather than raised, so the graph keeps running.
        """
        if not api_key or api_key == "YOUR_GEMINI_API_KEY_HERE":
            lang_name = self.get_language_name(language)
            return ("Error: Please set your Gemini API key", f"APIキーを設定してください / Please set API key ({lang_name})")

        try:
            print(f"🚀 Starting detailed pose analysis...")

            # Convert image to base64
            image_base64 = self.image_to_base64(image)

            # Create detailed analysis prompt
            analysis_prompt = self.create_detailed_pose_prompt(analysis_depth, focus_areas, pose_style, language)
            print(f"📝 Detailed prompt created")

            # Call Gemini API
            raw_output = self.call_gemini_api(api_key, image_base64, analysis_prompt)
            print(f"🤖 API response received")

            # Process the detailed description
            pose_description = self.process_pose_description(raw_output)

            # Extract English description for output
            english_description = self.extract_english_description(pose_description)

            # Extract translated description for viewer
            translated_description = self.extract_translated_description(pose_description, language)

            # Combine with existing prompt
            if existing_prompt.strip():
                final_prompt = f"{existing_prompt.strip()}, {english_description}"
            else:
                final_prompt = english_description

            print(f"🎯 English Description: {english_description}")
            print(f"🎯 Translated Description ({language}): {translated_description}")
            print(f"🎯 Final Prompt: {final_prompt}")

            translation_keyword = self.get_translation_keyword(language)
            print(f"🌍 Language: {language}")
            print(f"🔍 Translation keyword: {translation_keyword}")
            print(f"🔍 Contains translation keyword? {translation_keyword in pose_description}")
            print(f"📝 Raw Pose Description: {pose_description}")

            # Return both: English goes to the node output socket,
            # the translated text is for viewer display.
            return (final_prompt, translated_description)

        except Exception as e:
            error_msg = f"🚨 Error in pose analysis: {str(e)}"
            print(error_msg)
            return (error_msg, f"エラー: {error_msg}")

# No JavaScript required — implemented purely in Python

# Node mappings
# Maps ComfyUI's internal node identifier to the implementing class.
NODE_CLASS_MAPPINGS = {
    "GeminiPoseAnalyzer": GeminiPoseAnalyzer
}

# Human-readable title shown in the ComfyUI node picker for each identifier.
NODE_DISPLAY_NAME_MAPPINGS = {
    "GeminiPoseAnalyzer": "🌍 Gemini Pose Analyzer [22言語対応]"
}