import json
import requests
import time
import random
from typing import Dict, List, Any, Tuple

class LMStudioTextGenNode:
    """ComfyUI node that generates text through an LM Studio server's
    OpenAI-compatible ``/v1/chat/completions`` endpoint.

    Conversation history is grouped into 5-second time windows (see
    ``get_conversation_key``): executions within the same window share
    context; a later execution starts a fresh conversation.
    """

    @classmethod
    def INPUT_TYPES(cls):
        # NOTE(review): "model_key", "auto_unload" and "unload_delay" are
        # declared here but never read by generate_text — presumably reserved
        # for model load/unload management; confirm before removing them.
        return {
            "required": {
                "prompt": ("STRING", {
                    "multiline": True,
                    "default": ""
                }),
                "model_key": ("STRING", {
                    "default": "qwen/qwen-4b-2507"
                }),
                "auto_unload": ("BOOLEAN", {
                    "default": True
                }),
                "unload_delay": ("INT", {
                    "default": 0,
                    "min": 0,
                    "max": 3600,
                    "step": 1
                }),
                "seed": ("INT", {
                    "default": 1039310740569363,
                    "min": 0,
                    "max": 0xffffffffffffffff
                }),
                "max_tokens": ("INT", {
                    "default": 4096,
                    "min": 1,
                    "max": 8192,
                    "step": 1
                }),
                "temperature": ("FLOAT", {
                    "default": 0.7,
                    "min": 0.0,
                    "max": 2.0,
                    "step": 0.01
                }),
                "debug": ("BOOLEAN", {
                    "default": False
                }),
                "timeout_seconds": ("INT", {
                    "default": 300,
                    "min": 1,
                    "max": 3600,
                    "step": 1
                }),
                "model": ("STRING", {
                    "default": "qwen/qwen-4b-2507"
                }),
                "ip_address": ("STRING", {
                    "default": "192.168.0.120"
                }),
                "port": ("INT", {
                    "default": 1234,
                    "min": 1,
                    "max": 65535,
                    "step": 1
                })
            },
            "optional": {
                "system_prompt": ("STRING", {
                    "multiline": True,
                    "default": "Qwen-Image About Qwen\n\nQwen is a series of large language models developed by Alibaba's DAMO Academy. It is a large language model that is pre-trained on a large-scale dataset of text and code. Qwen is designed to be a helpful, harmless, and honest AI assistant."
                }),
                # Japanese for "post-generation control"; the name is part of
                # the node's public input interface and must stay as-is.
                "生成後の制御": ("STRING", {
                    "default": "randomize"
                }),
                "reset_conversation": ("BOOLEAN", {
                    "default": False
                })
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("Generated Text",)
    FUNCTION = "generate_text"
    CATEGORY = "LM Studio (Text Gen)"

    def __init__(self):
        # Per-instance history store: {conversation_key: [message dicts]}.
        self.conversations = {}

    def get_conversation_key(self) -> str:
        """Return a key identifying the current 5-second time window."""
        return f"conv_{int(time.time() * 1000) // 5000}"

    def generate_text(self,
                     prompt: str,
                     model_key: str,
                     auto_unload: bool,
                     unload_delay: int,
                     seed: int,
                     max_tokens: int,
                     temperature: float,
                     debug: bool,
                     timeout_seconds: int,
                     model: str,
                     ip_address: str,
                     port: int,
                     system_prompt: str = "Qwen-Image About Qwen\n\nQwen is a series of large language models developed by Alibaba's DAMO Academy. It is a large language model that is pre-trained on a large-scale dataset of text and code. Qwen is designed to be a helpful, harmless, and honest AI assistant.",
                     生成後の制御: str = "randomize",
                     reset_conversation: bool = False) -> Tuple[str]:
        """Send *prompt* to LM Studio and return the generated text.

        On API failure the returned string is ``"Error: ..."`` and the failed
        user turn is removed from the history so it is not silently re-sent
        on the next call in the same time window.
        """
        if debug:
            print(f"LM Studio Text Gen - Debug Info:")
            print(f"Model: {model}")
            print(f"IP: {ip_address}:{port}")
            print(f"Temperature: {temperature}")
            print(f"Max Tokens: {max_tokens}")
            print(f"Seed: {seed}")

        conv_key = self.get_conversation_key()

        if reset_conversation or conv_key not in self.conversations:
            # Starting a new conversation: drop histories from expired time
            # windows as well — their keys can never be produced again, so
            # keeping them (as the original did) only grows memory unbounded.
            self.conversations = {
                conv_key: [{"role": "system", "content": system_prompt}]
            }

        conversation = self.conversations[conv_key]
        conversation.append({"role": "user", "content": prompt})

        try:
            lm_studio_url = f"http://{ip_address}:{port}"
            response_text = self._call_lm_studio(
                lm_studio_url,
                conversation,
                model,
                temperature,
                max_tokens,
                seed,
                timeout_seconds
            )
        except Exception as e:
            # Roll back the failed user turn (see docstring).
            conversation.pop()
            response_text = f"Error: {str(e)}"
            if debug:
                print(f"LM Studio API Error: {str(e)}")
        else:
            conversation.append({"role": "assistant", "content": response_text})
            if 生成後の制御 == "randomize":
                # Re-seed the global RNG from system entropy so downstream
                # consumers of `random` see fresh randomness after generation.
                random.seed()

        return (response_text,)

    def _call_lm_studio(self,
                        url: str,
                        messages: List[Dict[str, str]],
                        model_name: str = "",
                        temperature: float = 0.7,
                        max_tokens: int = 1000,
                        seed: int = None,
                        timeout: int = 300) -> str:
        """POST *messages* to the chat-completions endpoint and return the
        first choice's message content.

        Raises Exception with a user-readable message on connection errors,
        timeouts, HTTP errors, malformed JSON, or an unexpected payload shape.
        """
        api_url = f"{url.rstrip('/')}/v1/chat/completions"

        headers = {
            "Content-Type": "application/json"
        }

        data = {
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens,
            "stream": False
        }

        if model_name:
            data["model"] = model_name

        if seed is not None:
            data["seed"] = seed

        try:
            response = requests.post(api_url, json=data, headers=headers, timeout=timeout)
            response.raise_for_status()

            result = response.json()

            if "choices" in result and len(result["choices"]) > 0:
                return result["choices"][0]["message"]["content"]
            raise Exception("Invalid response format from LM Studio")

        except requests.exceptions.ConnectionError:
            raise Exception("Cannot connect to LM Studio. Please ensure LM Studio is running and the server is started.")
        except requests.exceptions.Timeout:
            raise Exception("Request to LM Studio timed out")
        # json.JSONDecodeError must precede RequestException: modern requests
        # raises requests.exceptions.JSONDecodeError, a subclass of BOTH, so
        # the original order masked bad JSON behind "Request failed: ...".
        except json.JSONDecodeError:
            raise Exception("Invalid JSON response from LM Studio")
        except requests.exceptions.RequestException as e:
            raise Exception(f"Request failed: {str(e)}")


class LMStudioConversationNode:
    """ComfyUI node holding a rolling multi-turn conversation with an LM
    Studio server (OpenAI-compatible ``/v1/chat/completions`` API).

    Histories live in a single class-level store so that
    LMStudioConversationClearNode can find and wipe them; the original kept
    a per-instance dict the clear node could never reach. Keys embed the
    instance id, so separate node instances never mix their histories.
    """

    # Shared store: {conversation_key: [message dicts]}.
    conversations: Dict[str, List[Dict[str, str]]] = {}

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "prompt": ("STRING", {
                    "multiline": True,
                    "default": "Hello, how are you today?"
                }),
                "lm_studio_url": ("STRING", {
                    "default": "http://localhost:1234"
                }),
                "max_conversation_length": ("INT", {
                    "default": 10,
                    "min": 1,
                    "max": 50,
                    "step": 1
                }),
            },
            "optional": {
                "model_name": ("STRING", {
                    "default": ""
                }),
                "reset_conversation": ("BOOLEAN", {
                    "default": False
                }),
                "system_prompt": ("STRING", {
                    "multiline": True,
                    "default": "You are a helpful AI assistant."
                }),
                "temperature": ("FLOAT", {
                    "default": 0.7,
                    "min": 0.0,
                    "max": 2.0,
                    "step": 0.1
                }),
                "max_tokens": ("INT", {
                    "default": 1000,
                    "min": 1,
                    "max": 4000,
                    "step": 1
                }),
            }
        }

    RETURN_TYPES = ("STRING", "STRING", "INT")
    RETURN_NAMES = ("response", "conversation_log", "message_count")
    FUNCTION = "generate_response"
    CATEGORY = "LM Studio"

    def __init__(self):
        # All instances alias the class-level store (see class docstring).
        self.conversations = LMStudioConversationNode.conversations

    def get_conversation_key(self) -> str:
        """Return a key combining this instance's id with the current
        5-second time window, namespacing histories per instance."""
        return f"conv_{id(self)}_{int(time.time() * 1000) // 5000}"

    def generate_response(self,
                         prompt: str,
                         lm_studio_url: str,
                         max_conversation_length: int,
                         model_name: str = "",
                         reset_conversation: bool = False,
                         system_prompt: str = "You are a helpful AI assistant.",
                         temperature: float = 0.7,
                         max_tokens: int = 1000) -> Tuple[str, str, int]:
        """Append *prompt* to the current conversation, query LM Studio, and
        return (response, JSON conversation log, message count excluding the
        system message).

        On API failure the response is ``"Error: ..."`` and the failed user
        turn is rolled back so it is not re-sent on the next call.
        """
        store = LMStudioConversationNode.conversations
        conv_key = self.get_conversation_key()

        if reset_conversation or conv_key not in store:
            # Prune this instance's expired windows: those keys can never be
            # produced again, so keeping them only leaks memory.
            prefix = f"conv_{id(self)}_"
            for stale in [k for k in store if k.startswith(prefix)]:
                del store[stale]
            store[conv_key] = [
                {"role": "system", "content": system_prompt}
            ]

        conversation = store[conv_key]
        conversation.append({"role": "user", "content": prompt})

        # Keep at most max_conversation_length exchanges plus the system
        # message. Persist the trimmed list immediately — the original only
        # stored it back on success, so the trim was lost on error.
        if len(conversation) > max_conversation_length * 2 + 1:
            conversation = [conversation[0]] + conversation[-(max_conversation_length * 2):]
            store[conv_key] = conversation

        try:
            response_text = self._call_lm_studio_conversation(
                lm_studio_url,
                conversation,
                model_name,
                temperature,
                max_tokens
            )
        except Exception as e:
            # Roll back the failed user turn (see docstring).
            conversation.pop()
            response_text = f"Error: {str(e)}"
            print(f"LM Studio API Error: {str(e)}")
        else:
            conversation.append({"role": "assistant", "content": response_text})

        conversation_log = json.dumps(conversation, ensure_ascii=False, indent=2)
        message_count = len(conversation) - 1  # exclude the system message

        return (response_text, conversation_log, message_count)

    def _call_lm_studio_conversation(self,
                        url: str,
                        messages: List[Dict[str, str]],
                        model_name: str = "",
                        temperature: float = 0.7,
                        max_tokens: int = 1000,
                        timeout: int = 60) -> str:
        """POST *messages* to the chat-completions endpoint and return the
        first choice's content.

        *timeout* generalizes the previously hard-coded 60s request timeout
        (default preserves the original behavior). Raises Exception with a
        user-readable message on any failure.
        """
        api_url = f"{url.rstrip('/')}/v1/chat/completions"

        headers = {
            "Content-Type": "application/json"
        }

        data = {
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens,
            "stream": False
        }

        if model_name:
            data["model"] = model_name

        try:
            response = requests.post(api_url, json=data, headers=headers, timeout=timeout)
            response.raise_for_status()

            result = response.json()

            if "choices" in result and len(result["choices"]) > 0:
                return result["choices"][0]["message"]["content"]
            raise Exception("Invalid response format from LM Studio")

        except requests.exceptions.ConnectionError:
            raise Exception("Cannot connect to LM Studio. Please ensure LM Studio is running and the server is started.")
        except requests.exceptions.Timeout:
            raise Exception("Request to LM Studio timed out")
        # json.JSONDecodeError must precede RequestException: modern requests
        # raises requests.exceptions.JSONDecodeError, a subclass of BOTH, so
        # the original order masked bad JSON behind "Request failed: ...".
        except json.JSONDecodeError:
            raise Exception("Invalid JSON response from LM Studio")
        except requests.exceptions.RequestException as e:
            raise Exception(f"Request failed: {str(e)}")


class LMStudioConversationClearNode:
    """Utility node that wipes LMStudioConversationNode's stored histories."""

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "clear_all": ("BOOLEAN", {
                    "default": True
                }),
            }
        }

    RETURN_TYPES = ("STRING",)
    RETURN_NAMES = ("status",)
    FUNCTION = "clear_conversations"
    CATEGORY = "LM Studio"

    def clear_conversations(self, clear_all: bool = True) -> Tuple[str]:
        """Clear all stored conversations and return a status string.

        Bug fix: the original tested hasattr(LMStudioConversationNode,
        'conversations') — but when that attribute only exists per instance
        (set in __init__) the check is always False, so nothing was ever
        cleared. We look the store up defensively and clear it IN PLACE, so
        every object holding a reference to the dict sees the wipe (the
        original rebound the class attribute, which would leave existing
        holders untouched).
        """
        store = getattr(LMStudioConversationNode, 'conversations', None)
        if clear_all and store:
            store.clear()
            return ("All conversations cleared successfully",)

        return ("No conversations to clear",)


# Registration table read by ComfyUI: internal node id -> implementing class.
NODE_CLASS_MAPPINGS = {
    "LMStudioTextGen": LMStudioTextGenNode,
    "LMStudioConversation": LMStudioConversationNode,
    "LMStudioConversationClear": LMStudioConversationClearNode
}

# Internal node id -> human-readable name shown in the ComfyUI node menu.
NODE_DISPLAY_NAME_MAPPINGS = {
    "LMStudioTextGen": "LM Studio (Text Gen)",
    "LMStudioConversation": "LM Studio Conversation",
    "LMStudioConversationClear": "LM Studio Clear Conversations"
}