import { create } from 'zustand';
import { persist } from 'zustand/middleware';
import { v4 as uuidv4 } from 'uuid';
import { ModelType, ModelParameters } from '@/types/models';
import { PromptTemplate } from '@/types/prompt';
import { generationService, GenerationRequest } from '@/services/generation.service';
import { useModelConfigStore } from './model-config.store';
// import apiKeyService from '@/services/api-key.service'; // 暂未使用

/** A single chat message belonging to a conversation. */
export interface Message {
  id: string;
  role: 'user' | 'assistant' | 'system';
  content: string;
  timestamp: Date;
  // 'streaming' marks an assistant reply that is still being received.
  status?: 'sending' | 'sent' | 'error' | 'streaming';
  metadata?: {
    model?: string;  // model that produced the reply
    tokens?: number;  // total token count reported by the backend
    editedAt?: Date;  // set whenever the content is updated
    parameters?: ModelParameters;
    promptId?: string;  // ID of the prompt template in effect for this message
    promptVersion?: number;  // version of that prompt template
  };
}

/** A conversation: its message history plus list-display metadata. */
export interface Conversation {
  id: string;
  title: string;
  preview: string;
  messages: Message[];
  createdAt: Date;
  updatedAt: Date;
  promptId?: string;  // ID of the prompt template currently in use
  promptTemplate?: PromptTemplate;  // the prompt template itself
  hotTopicContext?: HotTopicContext; // attached hot-topic context, if any
}

/** Context captured from a trending ("hot") topic used to seed a conversation. */
export interface HotTopicContext {
  platform: string;
  topicId: string;
  title: string;
  excerpt: string;
  comments: any[];      // NOTE(review): element shape not visible here — consider a typed model
  perspectives: any[];  // NOTE(review): element shape not visible here — consider a typed model
  generatedPrompt?: string;
}

/** Per-user chat settings: model choice, sampling limits, and UI rendering toggles. */
export interface ConversationSettings {
  model: string;
  temperature: number;
  maxTokens: number;
  streamResponse: boolean;
  showTimestamp: boolean;
  enableMarkdown: boolean;
  enableCodeHighlight: boolean;
}

/**
 * Shape of the conversation store: state plus actions covering
 * conversation CRUD, message generation, model selection, prompt
 * templates, and hot-topic context.
 */
interface ConversationState {
  conversations: Conversation[];
  currentConversationId: string | null;
  messages: Message[];  // messages of the currently selected conversation
  isGenerating: boolean;
  settings: ConversationSettings;
  
  // New model-related state
  selectedModel: ModelType;
  modelParameters: ModelParameters;
  parameterPreset: string | null;  // named preset key, or null for custom
  comparisonModels: ModelType[];   // up to 3 models selected for comparison
  
  // Prompt-related state
  currentPromptId: string | null;
  currentPromptTemplate: PromptTemplate | null;
  promptVariables: Record<string, any>;
  // Rolling log of prompt usage (capped at 100 entries by logPromptUsage).
  promptUsageLog: Array<{
    promptId: string;
    timestamp: Date;
    messageId: string;
    success: boolean;
  }>;
  
  // Hot topic context state
  hotTopicContext: HotTopicContext | null;
  
  // Actions
  createNewConversation: () => void;
  selectConversation: (id: string) => void;
  deleteConversation: (id: string) => void;
  sendMessage: (content: string) => Promise<void>;
  addMessage: (message: Message) => void;
  updateMessage: (id: string, content: string) => void;
  deleteMessage: (id: string) => void;
  regenerateMessage: (id: string) => void;
  clearMessages: () => void;
  updateSettings: (settings: Partial<ConversationSettings>) => void;
  setIsGenerating: (isGenerating: boolean) => void;
  
  // New model-related actions
  setModel: (model: ModelType) => void;
  setParameters: (params: Partial<ModelParameters>) => void;
  setPreset: (preset: string | null) => void;
  toggleComparisonModel: (model: ModelType) => void;
  
  // Prompt-related actions
  setPromptTemplate: (template: PromptTemplate | null) => void;
  setPromptVariables: (variables: Record<string, any>) => void;
  switchPromptWithoutInterrupt: (template: PromptTemplate) => void;
  logPromptUsage: (promptId: string, messageId: string, success: boolean) => void;
  
  // Hot topic actions
  setHotTopicContext: (context: HotTopicContext | null) => void;
  startNewConversation: (initialPrompt?: string) => Promise<void>;
  clearHotTopicContext: () => void;
}

// Baseline chat settings used before the user customizes anything.
const defaultSettings: ConversationSettings = {
  model: 'gpt-3.5-turbo',
  temperature: 0.7,
  maxTokens: 2000,
  streamResponse: true,
  showTimestamp: true,
  enableMarkdown: true,
  enableCodeHighlight: true,
};

// Default sampling parameters (the 'balanced' preset's initial values).
const defaultParameters: ModelParameters = {
  temperature: 0.7,
  topP: 0.85,
  maxTokens: 1500,
  presencePenalty: 0,
  frequencyPenalty: 0,
};

// Global conversation store. Wrapped in `persist` so durable state survives
// reloads (see `partialize` in the persist options for what is saved).
export const useConversationStore = create<ConversationState>()(
  persist(
    (set, get) => ({
      // Core conversation state
      conversations: [],
      currentConversationId: null,
      messages: [],  // mirror of the active conversation's messages
      isGenerating: false,
      settings: defaultSettings,
      
      // Initialize model-related state
      selectedModel: ModelType.GPT35,
      modelParameters: defaultParameters,
      parameterPreset: 'balanced',
      comparisonModels: [],
      
      // Initialize prompt-related state
      currentPromptId: null,
      currentPromptTemplate: null,
      promptVariables: {},
      promptUsageLog: [],
      
      // Initialize hot topic state
      hotTopicContext: null,

      createNewConversation: () => {
        // Build an empty conversation, prepend it to the list, and make it active.
        const conversation: Conversation = {
          id: uuidv4(),
          title: '新对话',
          preview: '点击开始对话',
          messages: [],
          createdAt: new Date(),
          updatedAt: new Date(),
        };

        set((state) => ({
          conversations: [conversation, ...state.conversations],
          currentConversationId: conversation.id,
          messages: [],
        }));
      },

      selectConversation: (id: string) => {
        // Activate the conversation with the given id and load its history.
        const target = get().conversations.find((c) => c.id === id);
        if (!target) return;  // unknown id: no-op
        set({
          currentConversationId: id,
          messages: target.messages,
        });
      },

      deleteConversation: (id: string) => {
        // Remove a conversation. If it was the active one, fall back to the
        // first remaining conversation (or none) AND load that conversation's
        // messages — the previous code compared `newCurrentId === id`, which
        // could never be true (the deleted id was just filtered out), so the
        // deleted conversation's messages stayed on screen.
        set((state) => {
          const newConversations = state.conversations.filter((c) => c.id !== id);
          const wasCurrent = state.currentConversationId === id;
          const nextConversation = wasCurrent ? newConversations[0] ?? null : null;
          
          return {
            conversations: newConversations,
            currentConversationId: wasCurrent
              ? nextConversation?.id ?? null
              : state.currentConversationId,
            messages: wasCurrent
              ? nextConversation?.messages ?? []
              : state.messages,
          };
        });
      },

      sendMessage: async (content: string) => {
        // Append the user's message, then request an assistant reply from the
        // generation service — streaming when enabled (with a non-streaming
        // fallback on stream failure). On completion the parent conversation's
        // title/preview/messages are refreshed. Errors are surfaced as an
        // error-status assistant message; this method does not throw.
        //
        // SECURITY FIX: the previous version logged the full API key (and
        // 20-character key samples) to the console for debugging. All logging
        // below reports only key presence/length, never key material.
        const userMessage: Message = {
          id: uuidv4(),
          role: 'user',
          content,
          timestamp: new Date(),
          status: 'sent',
        };

        // Add user message
        get().addMessage(userMessage);
        
        // Set generating state
        set({ isGenerating: true });

        // Placeholder assistant message; defined outside try/catch so the
        // error path can still update it.
        const assistantMessage: Message = {
          id: uuidv4(),
          role: 'assistant',
          content: '',
          timestamp: new Date(),
          status: 'streaming',
        };

        try {
          get().addMessage(assistantMessage);

          // Snapshot state AFTER both messages are added so the request
          // context includes the new user message.
          const state = get();
          
          // Resolve the concrete model configuration for the selected model.
          let modelConfig: any = null;
          const selectedModelId = state.settings.model;
          
          // Pull configured models from the model-config store.
          const modelConfigStore = useModelConfigStore.getState();
          console.log('[CONVERSATION] Available configs:', modelConfigStore.configs.length, 'configs');
          console.log('[CONVERSATION] Looking for config with ID:', selectedModelId);
          
          // First try to resolve by config ID.
          let selectedConfig = modelConfigStore.configs.find(c => c.id === selectedModelId);
          
          if (!selectedConfig) {
            console.log('[CONVERSATION] Config not found by ID, available IDs:', 
              modelConfigStore.configs.map(c => c.id));
          }
          
          // The stored value may be a model name rather than a config ID;
          // fall back to matching an active config by model name.
          if (!selectedConfig && selectedModelId) {
            selectedConfig = modelConfigStore.configs.find(c => 
              c.modelName === selectedModelId && c.isActive === true
            );
            console.log('[CONVERSATION] Trying to find config by model name:', selectedModelId, 'Found:', !!selectedConfig);
          }
          
          if (selectedConfig) {
            // Use the user's configured model/provider/credentials.
            modelConfig = {
              model: selectedConfig.modelName,  // actual model name, e.g. 'deepseek-reasoner'
              provider: selectedConfig.providerId,
              api_key: selectedConfig.apiKey,
              api_base: selectedConfig.baseUrl,
              temperature: selectedConfig.temperature || state.settings.temperature,
              max_tokens: selectedConfig.maxTokens || state.settings.maxTokens
            };
            // Log only non-sensitive config facts — never key material.
            console.log('[CONVERSATION] Using model config:', {
              id: selectedConfig.id,
              modelName: selectedConfig.modelName,
              provider: selectedConfig.providerId,
              hasApiKey: !!selectedConfig.apiKey,
              hasBaseUrl: !!selectedConfig.baseUrl,
              apiKeyLength: selectedConfig.apiKey?.length || 0
            });
          } else {
            // No config found; the request falls back to default parameters.
            console.warn('[CONVERSATION] No model config found for:', selectedModelId);
          }
          
          const generationRequest: GenerationRequest = {
            model: modelConfig?.model || state.selectedModel,
            // Exclude the empty streaming placeholder from the request context.
            messages: state.messages
              .filter(msg => msg.role !== 'assistant' || msg.id !== assistantMessage.id)
              .map(msg => ({
                role: msg.role,
                content: msg.content
              })),
            parameters: modelConfig || state.modelParameters,
            stream: state.settings.streamResponse,
            promptTemplate: state.currentPromptTemplate || undefined,
            promptVariables: state.promptVariables,
            // Per-user provider credentials, when a config was resolved.
            ...(modelConfig && {
              provider: modelConfig.provider,
              api_key: modelConfig.api_key,
              api_base: modelConfig.api_base
            })
          };
          
          console.log('[CONVERSATION] Generation request:', {
            model: generationRequest.model,
            hasProvider: !!generationRequest.provider,
            hasApiKey: !!generationRequest.api_key,
            hasApiBase: !!generationRequest.api_base,
            stream: generationRequest.stream,
            messagesCount: generationRequest.messages.length
          });

          if (state.settings.streamResponse) {
            // Streaming path: append chunks into the placeholder as they arrive.
            try {
              let fullContent = '';
              for await (const chunk of generationService.streamGenerate(generationRequest)) {
                fullContent += chunk;
                get().updateMessage(assistantMessage.id, fullContent);
              }
              
              // Stream finished cleanly; mark the reply as sent.
              set((state) => ({
                messages: state.messages.map((msg) =>
                  msg.id === assistantMessage.id
                    ? { ...msg, status: 'sent' }
                    : msg
                ),
              }));
            } catch (streamError) {
              console.error('Streaming error:', streamError);
              // Fall back to a single non-streaming request.
              const response = await generationService.generate({
                ...generationRequest,
                stream: false
              });
              
              if (response.error) {
                throw new Error(response.error);
              }
              
              get().updateMessage(assistantMessage.id, response.content);
              set((state) => ({
                messages: state.messages.map((msg) =>
                  msg.id === assistantMessage.id
                    ? { 
                        ...msg, 
                        status: 'sent',
                        metadata: {
                          ...msg.metadata,
                          model: response.model,
                          tokens: response.usage?.totalTokens,
                          promptId: state.currentPromptId || undefined,
                          promptVersion: state.currentPromptTemplate?.version
                        }
                      }
                    : msg
                ),
              }));
            }
          } else {
            // Non-streaming path: single request, then fill in the placeholder.
            const response = await generationService.generate(generationRequest);
            
            if (response.error) {
              throw new Error(response.error);
            }
            
            get().updateMessage(assistantMessage.id, response.content);
            set((state) => ({
              messages: state.messages.map((msg) =>
                msg.id === assistantMessage.id
                  ? { 
                      ...msg, 
                      status: 'sent',
                      metadata: {
                        ...msg.metadata,
                        model: response.model,
                        tokens: response.usage?.totalTokens,
                        promptId: state.currentPromptId || undefined,
                        promptVersion: state.currentPromptTemplate?.version
                      }
                    }
                  : msg
              ),
            }));
          }

          // Log prompt usage if a template was active for this exchange.
          if (state.currentPromptTemplate) {
            get().logPromptUsage(state.currentPromptTemplate.id, assistantMessage.id, true);
          }
        } catch (error) {
          console.error('Error sending message:', error);
          
          // Surface the failure in the placeholder assistant message.
          set((state) => ({
            messages: state.messages.map((msg) =>
              msg.id === assistantMessage.id
                ? { 
                    ...msg, 
                    content: `抱歉，生成响应时出现错误：${error instanceof Error ? error.message : '未知错误'}`,
                    status: 'error'
                  }
                : msg
            ),
          }));
        } finally {
          set({ isGenerating: false });
        }

        // Sync the exchange back into the parent conversation record.
        // NOTE(review): title/preview are overwritten by every send — confirm
        // the title should track the latest message rather than the first.
        const currentConversation = get().conversations.find(
          (c) => c.id === get().currentConversationId
        );
        
        if (currentConversation) {
          const updatedConversation: Conversation = {
            ...currentConversation,
            title: content.slice(0, 30) || currentConversation.title,
            preview: content.slice(0, 50) || currentConversation.preview,
            messages: get().messages,
            updatedAt: new Date(),
          };

          set((state) => ({
            conversations: state.conversations.map((c) =>
              c.id === currentConversation.id ? updatedConversation : c
            ),
          }));
        }
      },

      addMessage: (message: Message) => {
        // Append a message to the active conversation's list.
        set((state) => ({ messages: state.messages.concat(message) }));
      },

      updateMessage: (id: string, content: string) => {
        // Replace a message's content and stamp metadata.editedAt.
        // NOTE(review): this is also called once per streaming chunk, so
        // editedAt is refreshed on every chunk — confirm that is intended.
        set((state) => ({
          messages: state.messages.map((msg) => {
            if (msg.id !== id) return msg;
            return {
              ...msg,
              content,
              metadata: { ...msg.metadata, editedAt: new Date() },
            };
          }),
        }));
      },

      deleteMessage: (id: string) => {
        // Remove a single message from the active list by id.
        set(({ messages }) => ({
          messages: messages.filter((m) => m.id !== id),
        }));
      },

      regenerateMessage: (id: string) => {
        // Re-run generation for an assistant reply by resending the user
        // message that preceded it.
        //
        // FIX: sendMessage always appends a NEW user message, so the previous
        // code (which only deleted the assistant reply) left the original user
        // message in place and produced a duplicate. Delete both the assistant
        // reply and its triggering user message before resending.
        const message = get().messages.find((msg) => msg.id === id);
        if (message && message.role === 'assistant') {
          // Find the user message immediately preceding this reply.
          const messageIndex = get().messages.findIndex((msg) => msg.id === id);
          if (messageIndex > 0) {
            const previousMessage = get().messages[messageIndex - 1];
            if (previousMessage.role === 'user') {
              // Remove the stale exchange, then resend its content.
              get().deleteMessage(id);
              get().deleteMessage(previousMessage.id);
              get().sendMessage(previousMessage.content);
            }
          }
        }
      },

      clearMessages: () => {
        // Empty the active message list...
        set({ messages: [] });
        
        // ...and mirror the wipe into the current conversation record.
        const conversationId = get().currentConversationId;
        const exists = get().conversations.some((c) => c.id === conversationId);
        if (!exists) return;
        
        set((state) => ({
          conversations: state.conversations.map((c) =>
            c.id === conversationId
              ? { ...c, messages: [], updatedAt: new Date() }
              : c
          ),
        }));
      },

      updateSettings: (newSettings: Partial<ConversationSettings>) => {
        // Shallow-merge the provided overrides into the current settings.
        set((state) => {
          const settings = { ...state.settings, ...newSettings };
          return { settings };
        });
      },

      setIsGenerating: (isGenerating: boolean) => {
        // Toggle the global "response in flight" flag.
        set(() => ({ isGenerating }));
      },
      
      // Model-related actions implementation
      setModel: (model: ModelType) => {
        // Record the selection and mirror it into the legacy settings.model field.
        set((state) => ({
          selectedModel: model,
          settings: { ...state.settings, model },
        }));
      },
      
      setParameters: (params: Partial<ModelParameters>) => {
        // Merge parameter overrides; temperature and maxTokens are also
        // mirrored into the legacy settings object.
        set((state) => {
          const { temperature, maxTokens } = params;
          return {
            modelParameters: { ...state.modelParameters, ...params },
            settings: {
              ...state.settings,
              temperature: temperature ?? state.settings.temperature,
              maxTokens: maxTokens ?? state.settings.maxTokens,
            },
          };
        });
      },
      
      setPreset: (preset: string | null) => {
        // Remember which named parameter preset is active (null = custom).
        set(() => ({ parameterPreset: preset }));
      },
      
      toggleComparisonModel: (model: ModelType) => {
        // Add or remove a model from the comparison set, capped at 3 entries.
        set((state) => {
          const selected = state.comparisonModels;
          if (selected.includes(model)) {
            // Already selected: remove it.
            return { comparisonModels: selected.filter((m) => m !== model) };
          }
          if (selected.length >= 3) {
            // At capacity: leave the selection unchanged (fresh array, as before).
            return { comparisonModels: [...selected] };
          }
          return { comparisonModels: [...selected, model] };
        });
      },
      
      // Prompt-related actions implementation
      setPromptTemplate: (template: PromptTemplate | null) => {
        // Make `template` the active prompt (pass null to clear it).
        set({ 
          currentPromptTemplate: template,
          currentPromptId: template?.id || null
        });
        
        if (!template) return;
        
        // Also record the prompt on the active conversation, if one exists.
        const activeId = get().currentConversationId;
        const active = get().conversations.find((c) => c.id === activeId);
        if (!active) return;
        
        set((state) => ({
          conversations: state.conversations.map((c) =>
            c.id === active.id
              ? { ...c, promptId: template.id, promptTemplate: template }
              : c
          ),
        }));
      },
      
      setPromptVariables: (variables: Record<string, any>) => {
        // Replace the variable bindings used to render the active prompt.
        set(() => ({ promptVariables: variables }));
      },
      
      switchPromptWithoutInterrupt: (template: PromptTemplate) => {
        // Switch the active prompt template mid-conversation without clearing
        // history: activate the template, drop a visible system marker into
        // the message list, and record the switch in the usage log.
        // (Removed two unused locals — `currentMessages` and
        // `currentConversationId` — that were captured but never read.)
        get().setPromptTemplate(template);
        
        // System marker keeps the switch visible to the user while the
        // conversation continues uninterrupted.
        const systemMessage: Message = {
          id: uuidv4(),
          role: 'system',
          content: `[已切换到提示词: ${template.name}]`,
          timestamp: new Date(),
          status: 'sent',
          metadata: {
            promptId: template.id,
            promptVersion: template.version
          }
        };
        
        get().addMessage(systemMessage);
        
        // Log the prompt switch as a successful usage.
        get().logPromptUsage(template.id, systemMessage.id, true);
      },
      
      logPromptUsage: (promptId: string, messageId: string, success: boolean) => {
        // Append a usage record, keeping only the 100 most recent entries.
        set((state) => {
          const entry = { promptId, timestamp: new Date(), messageId, success };
          const log = [...state.promptUsageLog, entry];
          return { promptUsageLog: log.slice(-100) };
        });
      },
      
      // Hot topic actions implementation
      setHotTopicContext: (context: HotTopicContext | null) => {
        // Store the hot-topic context globally and, when non-null, attach it
        // to the active conversation record as well.
        set({ hotTopicContext: context });
        
        if (!context) return;
        
        const activeId = get().currentConversationId;
        const active = get().conversations.find((c) => c.id === activeId);
        if (!active) return;
        
        set((state) => ({
          conversations: state.conversations.map((c) =>
            c.id === active.id ? { ...c, hotTopicContext: context } : c
          ),
        }));
      },
      
      startNewConversation: async (initialPrompt?: string) => {
        // Open a fresh conversation; optionally fire off a first message.
        get().createNewConversation();
        if (!initialPrompt) return;
        await get().sendMessage(initialPrompt);
      },
      
      clearHotTopicContext: () => {
        // Drop the hot-topic context globally and from the active conversation.
        set({ hotTopicContext: null });
        
        const activeId = get().currentConversationId;
        const active = get().conversations.find((c) => c.id === activeId);
        if (!active) return;
        
        set((state) => ({
          conversations: state.conversations.map((c) =>
            c.id === active.id ? { ...c, hotTopicContext: null } : c
          ),
        }));
      },
    }),
    {
      name: 'conversation-storage',
      // Persist only durable state; transient fields (messages mirror,
      // isGenerating, prompt/hot-topic context) are rebuilt at runtime.
      // NOTE(review): Date fields inside conversations are serialized to
      // strings by JSON storage and are not revived on rehydration — confirm
      // readers tolerate string timestamps.
      partialize: (state) => ({
        conversations: state.conversations,
        currentConversationId: state.currentConversationId,
        settings: state.settings,
        selectedModel: state.selectedModel,
        modelParameters: state.modelParameters,
        parameterPreset: state.parameterPreset,
      }),
    }
  )
);