import { create } from 'zustand';

/**
 * One model entry as managed by this store — covers both remote providers
 * and locally hosted Ollama models.
 */
export interface Model {
  id: string;                      // unique id; for Ollama models this is the raw model name (incl. ':tag')
  name: string;                    // display name (Ollama ':tag' suffix stripped)
  provider: string;                // e.g. 'Ollama'
  type: 'embedding' | 'llm' | 'search';
  description: string;
  isActive: boolean;               // the store keeps at most one active model per type (llm / embedding)
  isFavorite: boolean;             // persisted to localStorage by saveActiveModelsToStorage
  apiKey?: string;
  parameters: Record<string, any>; // free-form generation params (e.g. temperature, topP)
  maxTokens?: number;
  supportedLanguages: string[];
  lastUsed?: string;
  costPerRequest?: number;
  tags: string[];
  
  // Additional properties used in ModelSwitchPanel
  isNew?: boolean;
  modelSize?: string;              // human-readable size, e.g. '7B' or '412MB'
  contextLength?: number | string;
  multimodal?: boolean;
  dimensions?: number;
  maxLength?: number;
}

// Shape of one entry returned by Ollama's `GET /api/tags` endpoint.
interface OllamaModelInfo {
  name: string;        // full model name including tag, e.g. 'llama3:8b'
  model: string;
  modified_at: string;
  size: number;        // on-disk size in bytes (converted to MB for display below)
  digest: string;
  details?: {
    format: string;
    family: string;
    families?: string[];          // used as a fallback signal for type detection
    parameter_size: string;       // e.g. '7B'
    quantization_level?: string;
  };
}

// State shape and actions exposed by useModelsStore.
interface ModelsState {
  models: Model[];           // all known models (LLM + embedding + search)
  activeModel: Model | null; // NOTE(review): never written by the actions below — confirm it is still needed
  loading: boolean;
  error: string | null;
  
  // Derived properties (implemented as getters on the store object)
  llmModels: Model[];             // models with type === 'llm'
  embeddingModels: Model[];       // models with type === 'embedding'
  activeLlm: string | null;       // id of the active LLM model, if any
  activeEmbedding: string | null; // id of the active embedding model, if any
  
  // Methods
  fetchModels: () => Promise<void>;
  fetchOllamaModels: () => Promise<Model[]>;
  setActiveModel: (modelId: string) => void;
  setActiveLlm: (modelId: string) => void;
  setActiveEmbedding: (modelId: string) => void;
  toggleFavorite: (modelId: string, modelType?: string) => void;
  updateModelConfig: (modelId: string, config: Partial<Model>) => Promise<void>;
  refreshModels: () => Promise<void>;
  saveActiveModelsToStorage: () => void;
  loadActiveModelsFromStorage: () => void;
}

// localStorage keys used to persist/restore model selection state
const STORAGE_KEY_ACTIVE_LLM = 'ai_assistant_active_llm';
const STORAGE_KEY_ACTIVE_EMBEDDING = 'ai_assistant_active_embedding';
const STORAGE_KEY_FAVORITE_MODELS = 'ai_assistant_favorite_models';

// Keywords that suggest a model is an embedding model (substring match on the
// lowercased model name; used by the type-detection logic below)
const EMBEDDING_MODEL_KEYWORDS = [
  'embed', 
  'embedding', 
  'embeddings', 
  'text-embedding', 
  'text-embeddings', 
  'ada-002',
  'bge',
  'e5',
  'instructor',
  'instructor-xl',
  'all-minilm',
  'gte',
  'sentence',
  'nomic',
  'jina',
  'm3',
  'simcse',
  'sgpt',
  'vector',
  'vectorize',
  'contriever',
  'cohere-embed'
];

// Keywords that clearly indicate an LLM, used to disambiguate model types
const LLM_MODEL_KEYWORDS = [
  'gpt', 'llama', 'mistral', 'llm', 'vicuna', 'mixtral', 'falcon', 'mpt',
  'language-model', 'chat', 'completion', 'claude', 'gemma', 'yi', 'qwen', 'phi',
  'stable-lm', 'bloom', 'baichuan', 'chatglm', 'zephyr', 'nous', 'hermes', 'openchat'
];

export const useModelsStore = create<ModelsState>((set, get) => ({
  models: [],
  activeModel: null,
  loading: false,
  error: null,
  
  // Derived properties with getters.
  // NOTE(review): object getters on a zustand store are recomputed on each
  // property access but do not notify subscribers by themselves — verify that
  // components re-render off `models` changes rather than these getters.
  
  // All models classified as LLMs.
  get llmModels() {
    const llms = get().models.filter(model => model.type === 'llm');
    console.log('LLM模型列表 (getter):', llms.map(m => `${m.name} (${m.provider})`));
    return llms;
  },
  
  // All models classified as embedding models.
  get embeddingModels() {
    const embeddings = get().models.filter(model => model.type === 'embedding');
    console.log('嵌入模型列表 (getter):', embeddings.map(m => `${m.name} (${m.provider})`));
    return embeddings;
  },
  
  // Id of the currently active LLM model, or null if none is active.
  get activeLlm() {
    const activeLlm = get().models.find(model => model.type === 'llm' && model.isActive);
    return activeLlm ? activeLlm.id : null;
  },
  
  // Id of the currently active embedding model, or null if none is active.
  get activeEmbedding() {
    const activeEmbedding = get().models.find(model => model.type === 'embedding' && model.isActive);
    return activeEmbedding ? activeEmbedding.id : null;
  },
  
  // Persist the currently active LLM/embedding ids and the favorite set
  // to localStorage so they survive a page reload.
  saveActiveModelsToStorage: () => {
    try {
      const state = get();

      if (state.activeLlm) {
        localStorage.setItem(STORAGE_KEY_ACTIVE_LLM, state.activeLlm);
        console.log('保存激活的LLM模型:', state.activeLlm);
      }

      if (state.activeEmbedding) {
        localStorage.setItem(STORAGE_KEY_ACTIVE_EMBEDDING, state.activeEmbedding);
        console.log('保存激活的Embedding模型:', state.activeEmbedding);
      }

      // Collect the ids of all favorited models and store them as a JSON array.
      const favoriteModels = state.models.flatMap(m => (m.isFavorite ? [m.id] : []));

      localStorage.setItem(STORAGE_KEY_FAVORITE_MODELS, JSON.stringify(favoriteModels));
      console.log('保存收藏的模型:', favoriteModels);
    } catch (error) {
      console.error('保存模型状态失败:', error);
    }
  },
  
  // Restore the active LLM/embedding selection and the favorite set from
  // localStorage. Must be called after `models` has been populated, since it
  // matches persisted ids against the current model list.
  loadActiveModelsFromStorage: () => {
    try {
      const savedActiveLlm = localStorage.getItem(STORAGE_KEY_ACTIVE_LLM);
      const savedActiveEmbedding = localStorage.getItem(STORAGE_KEY_ACTIVE_EMBEDDING);
      const savedFavoriteModels = localStorage.getItem(STORAGE_KEY_FAVORITE_MODELS);
      const currentModels = get().models;
      
      console.log('从localStorage加载模型状态...');
      console.log('  - 已保存的LLM:', savedActiveLlm);
      console.log('  - 已保存的Embedding:', savedActiveEmbedding);
      
      if (savedActiveLlm && currentModels.length > 0) {
        // Find the matching LLM model and activate it
        const llmModel = currentModels.find(model => model.id === savedActiveLlm && model.type === 'llm');
        if (llmModel) {
          get().setActiveLlm(llmModel.id);
          console.log('已加载并激活LLM模型:', llmModel.name, llmModel.id);
        } else {
          console.log('未找到保存的LLM模型:', savedActiveLlm);
        }
      }
      
      if (savedActiveEmbedding && currentModels.length > 0) {
        // Find the matching embedding model and activate it
        const embeddingModel = currentModels.find(model => model.id === savedActiveEmbedding && model.type === 'embedding');
        if (embeddingModel) {
          get().setActiveEmbedding(embeddingModel.id);
          console.log('已加载并激活Embedding模型:', embeddingModel.name, embeddingModel.id);
        } else {
          console.log('未找到保存的Embedding模型:', savedActiveEmbedding);
        }
      }
      
      // Restore favorite flags
      if (savedFavoriteModels) {
        try {
          const favoriteIds = JSON.parse(savedFavoriteModels) as string[];
          
          // BUG FIX: re-read the models from the store here instead of reusing
          // the `currentModels` snapshot taken above. setActiveLlm /
          // setActiveEmbedding replace the models array via set(), so mapping
          // over the stale snapshot would silently discard the activation that
          // was just restored.
          const updatedModels = get().models.map(model => ({
            ...model,
            isFavorite: favoriteIds.includes(model.id)
          }));
          
          set({ models: updatedModels });
          console.log('已加载收藏的模型:', favoriteIds);
        } catch (e) {
          console.error('解析收藏模型数据失败:', e);
        }
      }
    } catch (error) {
      console.error('加载模型状态失败:', error);
    }
  },
  
  // Fetch the installed model list from the local Ollama HTTP API and convert
  // each entry into this store's Model shape. Returns [] on any failure
  // (network error, non-OK response, or malformed payload).
  fetchOllamaModels: async () => {
    try {
      const ollamaServer = localStorage.getItem('ollamaServer') || 'http://localhost:11434';
      console.log('正在从Ollama服务器获取模型列表:', ollamaServer);
      
      // Query Ollama's tag-listing endpoint (bypassing the HTTP cache)
      const response = await fetch(`${ollamaServer}/api/tags`, {
        method: 'GET',
        headers: {
          'Accept': 'application/json',
        },
        cache: 'no-store'
      });
      
      console.log('Ollama API响应状态:', response.status, response.statusText);
      
      if (!response.ok) {
        throw new Error(`获取Ollama模型失败: ${response.status} ${response.statusText}`);
      }
      
      const data = await response.json();
      console.log('Ollama API响应数据:', data);
      
      if (!data.models || !Array.isArray(data.models)) {
        console.warn('Ollama API未返回有效的模型列表', data);
        return [];
      }
      
      // Log the raw model payloads for debugging
      data.models.forEach((model: OllamaModelInfo, index: number) => {
        console.log(`原始Ollama模型 ${index + 1}:`, model);
      });
      
      // Convert Ollama entries into the app's Model format
      const ollamaModels: Model[] = data.models.map((model: OllamaModelInfo, index: number) => {
        // Multi-stage model-type detection
        let computedType: 'embedding' | 'llm' = 'llm'; // default to LLM
        const modelNameLower = model.name.toLowerCase();
        
        // 1. Does the name contain an embedding-model keyword?
        const hasEmbedKeyword = EMBEDDING_MODEL_KEYWORDS.some(keyword => 
          modelNameLower.includes(keyword.toLowerCase())
        );
        
        // 2. Does the name contain an explicit LLM keyword?
        const hasLlmKeyword = LLM_MODEL_KEYWORDS.some(keyword =>
          modelNameLower.includes(keyword.toLowerCase())
        );
        
        // 3. Embedding keyword only -> classify as an embedding model
        if (hasEmbedKeyword && !hasLlmKeyword) {
          computedType = 'embedding';
          console.log(`模型 ${model.name} 包含嵌入关键词，判定为嵌入模型`);
        } 
        // 4. LLM keyword only -> classify as an LLM
        else if (!hasEmbedKeyword && hasLlmKeyword) {
          computedType = 'llm';
          console.log(`模型 ${model.name} 包含LLM关键词，判定为LLM模型`);
        }
        // 5. Both or neither keyword matched -> fall back to other signals
        else {
          // Inspect the model-family metadata when available
          if (model.details?.families) {
            const families = model.details.families.map(f => f.toLowerCase());
            if (families.includes('clip') || families.includes('bert') || 
                families.includes('embedding') || families.includes('e5')) {
              computedType = 'embedding';
              console.log(`模型 ${model.name} 通过family特征判定为嵌入模型`);
            } else if (families.includes('llama') || families.includes('mistral') || 
                     families.includes('gpt') || families.includes('transformer')) {
              computedType = 'llm';
              console.log(`模型 ${model.name} 通过family特征判定为LLM模型`);
            }
          }
          // Still ambiguous: default to LLM
          else {
            computedType = 'llm';
            console.log(`模型 ${model.name} 无法明确判断类型，默认设置为LLM模型`);
          }
        }
        
        console.log(`最终确定模型 ${index+1}: ${model.name} - 类型为 ${computedType}`);
        
        // Derive a human-readable size: parameter size if known, else file size in MB
        const modelSize = model.details?.parameter_size || 
                      (model.size ? `${Math.round(model.size / 1024 / 1024)}MB` : '未知');
        
        // Simplify the display name (strip the ':tag' suffix) to avoid display issues
        let modelName = model.name;
        if (modelName.includes(':')) {
          modelName = modelName.split(':')[0];
        }
        
        // Guard against an empty display name
        if (!modelName || modelName.trim() === '') {
          modelName = `Ollama模型 ${model.digest ? model.digest.substring(0, 8) : '未知'}`;
        }
        
        // Infer language support from the model name
        const hasMultilingualKeyword = model.name.toLowerCase().includes('multilingual') || 
                                     model.name.toLowerCase().includes('多语言');
        const hasChinese = model.name.toLowerCase().includes('chinese') || 
                         model.name.toLowerCase().includes('zh');
                         
        const supportedLanguages = hasMultilingualKeyword 
                                ? ['多语言'] 
                                : hasChinese 
                                  ? ['英语', '中文'] 
                                  : ['英语'];
        
        // Heuristic: treat as multimodal if the name mentions vision/llava/multimodal
        const isMultimodal = model.name.toLowerCase().includes('vision') || 
                           model.name.toLowerCase().includes('llava') ||
                           model.name.toLowerCase().includes('multimodal');
                                  
        const modelData: Model = {
          id: model.name,
          name: modelName,
          provider: 'Ollama',
          type: computedType, // use the detected model type
          description: `本地Ollama模型${model.details?.family ? ` (${model.details.family})` : ''}`,
          isActive: false,
          isFavorite: false,
          parameters: {
            temperature: 0.7,
            topP: 0.9,
          },
          maxTokens: 4096, // NOTE(review): hard-coded default — confirm per model
          supportedLanguages,
          lastUsed: undefined,
          costPerRequest: 0,
          tags: ['开源', computedType === 'embedding' ? 'Embedding' : 'LLM', 'Ollama', '本地部署'],
          modelSize,
          contextLength: '8K', // NOTE(review): hard-coded context length — confirm per model
          multimodal: isMultimodal
        };
        
        console.log(`[Ollama模型${index+1}] ${modelName} (${computedType}) - ID: ${model.name}`);
        return modelData;
      });
      
      console.log(`从Ollama获取了${ollamaModels.length}个模型:`, 
        ollamaModels.map(m => `${m.name} (${m.type})`));
      
      // Log classification statistics
      const llmCount = ollamaModels.filter(m => m.type === 'llm').length;
      const embeddingCount = ollamaModels.filter(m => m.type === 'embedding').length;
      console.log(`Ollama模型分类: LLM模型: ${llmCount}, 嵌入模型: ${embeddingCount}`);
      
      // Warn when the type distribution looks suspicious
      if (llmCount === 0 && ollamaModels.length > 0) {
        console.warn('警告: 所有Ollama模型都被归类为嵌入模型！这可能是错误的分类。');
      }
      if (embeddingCount === 0 && ollamaModels.length > 0) {
        console.warn('警告: 所有Ollama模型都被归类为LLM模型！如果您有嵌入模型，可能是错误的分类。');
      }
      
      // Default-activate the first model of each type (the caller in
      // fetchModels may override this with persisted state)
      if (ollamaModels.length > 0) {
        // Activate the first LLM model
        const firstLlm = ollamaModels.find(m => m.type === 'llm');
        if (firstLlm) {
          firstLlm.isActive = true;
          console.log('默认激活Ollama LLM模型:', firstLlm.name);
        }
        
        // Activate the first embedding model
        const firstEmbedding = ollamaModels.find(m => m.type === 'embedding');
        if (firstEmbedding) {
          firstEmbedding.isActive = true;
          console.log('默认激活Ollama嵌入模型:', firstEmbedding.name);
        }
      }
      
      return ollamaModels;
    } catch (error) {
      console.error('获取Ollama模型失败:', error);
      return [];
    }
  },
  
  // Rebuild the model list: pull Ollama models (if a server is configured),
  // merge them while preserving favorite/active state, repair type
  // misclassifications, and guarantee at least one active model per type.
  fetchModels: async () => {
    set({ loading: true, error: null });
    
    try {
      // Preserve the previous favorite and active state
      const currentModels = get().models;
      const favoriteIds = currentModels
        .filter(model => model.isFavorite)
        .map(model => model.id);
      const currentActiveLlm = currentModels.find(model => model.type === 'llm' && model.isActive)?.id;
      const currentActiveEmbedding = currentModels.find(model => model.type === 'embedding' && model.isActive)?.id;
      
      console.log('当前激活的模型: LLM:', currentActiveLlm, 'Embedding:', currentActiveEmbedding);
      
      // Fetch locally deployed Ollama models
      let ollamaModels: Model[] = [];
      const ollamaServer = localStorage.getItem('ollamaServer');
      
      if (ollamaServer) {
        console.log('尝试从Ollama服务器获取模型:', ollamaServer);
        try {
          // Probe server connectivity first
          const versionResponse = await fetch(`${ollamaServer}/api/version`, {
            method: 'GET',
            headers: {
              'Accept': 'application/json',
            },
            cache: 'no-store'
          });
          
          if (versionResponse.ok) {
            const versionData = await versionResponse.json();
            console.log('Ollama服务器连接成功，版本:', versionData.version);
            
            // Then fetch the model list
            ollamaModels = await get().fetchOllamaModels();
            console.log(`成功获取到${ollamaModels.length}个Ollama模型`);
            
            // Log every Ollama model name for debugging
            if (ollamaModels.length > 0) {
              console.log('Ollama模型详情:');
              ollamaModels.forEach((model, index) => {
                console.log(`[${index + 1}] ${model.name} (${model.type}): ID=${model.id}`);
              });
            }
          } else {
            console.warn('Ollama服务器连接测试失败:', 
              versionResponse.status, versionResponse.statusText);
          }
        } catch (error) {
          console.warn('获取Ollama模型失败:', error);
        }
      } else {
        console.log('未配置Ollama服务器，跳过获取Ollama模型');
      }
      
      // Merge model lists, deduplicating by id
      const modelMap = new Map<string, Model>();
      
      // Process Ollama models with reinforced type detection
      ollamaModels.forEach(model => {
        // Multi-stage type validation (only runs when type is missing/invalid)
        if (!model.type || (model.type !== 'llm' && model.type !== 'embedding')) {
          // Try to infer the type from the model name
          const modelNameLower = model.name.toLowerCase();
          
          // 1. Does the name contain an embedding-model keyword?
          const hasEmbedKeyword = EMBEDDING_MODEL_KEYWORDS.some(keyword => 
            modelNameLower.includes(keyword.toLowerCase())
          );
          
          // 2. Does the name contain an LLM keyword?
          const hasLlmKeyword = LLM_MODEL_KEYWORDS.some(keyword =>
            modelNameLower.includes(keyword.toLowerCase())
          );
          
          // Combine the signals
          let computedType: 'embedding' | 'llm';
          if (hasEmbedKeyword && !hasLlmKeyword) {
            computedType = 'embedding';
          } else if (!hasEmbedKeyword && hasLlmKeyword) {
            computedType = 'llm';
          } else {
            // Default to LLM unless the name explicitly contains 'embed'
            computedType = modelNameLower.includes('embed') ? 'embedding' : 'llm';
          }
          
          model.type = computedType;
          console.log(`模型类型修正: ${model.name} => ${model.type} (嵌入关键词: ${hasEmbedKeyword}, LLM关键词: ${hasLlmKeyword})`);
        }
        
        // Re-apply preserved favorite/active state per model
        modelMap.set(model.id, {
          ...model, 
          isFavorite: favoriteIds.includes(model.id),
          isActive: (model.type === 'llm' && model.id === currentActiveLlm) || 
                   (model.type === 'embedding' && model.id === currentActiveEmbedding)
        });
      });
      
      // Back to an array
      let allModels = Array.from(modelMap.values());
      
      // Reinforced type repair — same multi-stage check as above, applied to
      // the merged list
      allModels = allModels.map(model => {
        if (!model.type || (model.type !== 'llm' && model.type !== 'embedding')) {
          const modelNameLower = model.name.toLowerCase();
          
          // Multi-stage check
          const hasEmbedKeyword = EMBEDDING_MODEL_KEYWORDS.some(keyword => 
            modelNameLower.includes(keyword.toLowerCase())
          );
          
          const hasLlmKeyword = LLM_MODEL_KEYWORDS.some(keyword =>
            modelNameLower.includes(keyword.toLowerCase())
          );
          
          let computedType: 'embedding' | 'llm';
          
          if (hasEmbedKeyword && !hasLlmKeyword) {
            computedType = 'embedding';
          } else if (!hasEmbedKeyword && hasLlmKeyword) {
            computedType = 'llm';
          } else {
            // Ambiguous: decide by whether the name contains 'embed'
            computedType = modelNameLower.includes('embed') ? 'embedding' : 'llm';
          }
          
          console.log(`强制设置模型类型: ${model.name} => ${computedType}`);
          return {...model, type: computedType};
        }
        return model;
      });
      
      // Make sure each type has at least one active model
      let hasActiveLlm = allModels.some(m => m.type === 'llm' && m.isActive);
      let hasActiveEmbedding = allModels.some(m => m.type === 'embedding' && m.isActive);
      
      // No active LLM: activate the first one
      if (!hasActiveLlm && allModels.some(m => m.type === 'llm')) {
        const firstLlm = allModels.find(m => m.type === 'llm');
        if (firstLlm) {
          firstLlm.isActive = true;
          console.log('没有活跃的LLM模型，设置第一个LLM模型为活跃:', firstLlm.name);
          hasActiveLlm = true;
        }
      }
      
      // No active embedding model: activate the first one
      if (!hasActiveEmbedding && allModels.some(m => m.type === 'embedding')) {
        const firstEmbedding = allModels.find(m => m.type === 'embedding');
        if (firstEmbedding) {
          firstEmbedding.isActive = true;
          console.log('没有活跃的Embedding模型，设置第一个Embedding模型为活跃:', firstEmbedding.name);
          hasActiveEmbedding = true;
        }
      }
      
      // Anomaly repair: if there is no LLM-typed model at all, reclassify the
      // first Ollama model as an LLM
      if (allModels.filter(m => m.type === 'llm').length === 0 && ollamaModels.length > 0) {
        console.log('自动修复: 未检测到任何LLM模型，尝试将Ollama模型分配为LLM...');
        // Grab the first Ollama-provided model
        const firstOllamaModel = allModels.find(m => m.provider === 'Ollama');
        if (firstOllamaModel) {
          firstOllamaModel.type = 'llm';
          firstOllamaModel.isActive = true;
          console.log(`自动修复: 强制将模型 ${firstOllamaModel.name} 设置为LLM类型`);
        }
      }
      
      // Report the type distribution
      const llmCount = allModels.filter(m => m.type === 'llm').length;
      const embeddingCount = allModels.filter(m => m.type === 'embedding').length;
      console.log(`模型类型分布: 总计${allModels.length}个模型, LLM: ${llmCount}个, Embedding: ${embeddingCount}个`);
      
      // Detect abnormal distributions
      if (llmCount === 0) {
        console.warn('严重警告: 所有模型都被归类为嵌入模型，没有LLM类型的模型！');
        
        // Forced repair: make the first model an LLM
        if (allModels.length > 0) {
          const firstModel = allModels[0];
          console.log(`强制将模型 ${firstModel.name} 设为LLM类型，确保至少有一个LLM模型`);
          firstModel.type = 'llm';
          firstModel.isActive = true;
          
          // With a single model, also create an embedding copy of it
          if (allModels.length === 1) {
            const embeddingCopy: Model = { 
              ...firstModel,
              id: `${firstModel.id}-embedding`,
              name: `${firstModel.name} (Embedding)`,
              type: 'embedding',
              isActive: true,
              tags: [...firstModel.tags.filter(t => t !== 'LLM'), 'Embedding']
            };
            console.log(`创建嵌入模型副本: ${embeddingCopy.name}`);
            allModels.push(embeddingCopy);
          }
        }
      } else if (embeddingCount === 0) {
        console.warn('警告: 所有模型都被归类为LLM模型，没有嵌入模型！');
        
        // Forced repair: reclassify one model as embedding, or clone one
        if (allModels.length > 0) {
          // Prefer a model whose name hints at embeddings
          const embeddingCandidate = allModels.find(m => 
            m.name.toLowerCase().includes('embed') || 
            m.name.toLowerCase().includes('text') ||
            m.name.toLowerCase().includes('vector')
          );
          
          if (embeddingCandidate) {
            console.log(`强制将模型 ${embeddingCandidate.name} 设为嵌入类型`);
            embeddingCandidate.type = 'embedding';
            embeddingCandidate.isActive = true;
          } else {
            // No suitable candidate: clone the first LLM as an embedding model
            const firstLlm = allModels.find(m => m.type === 'llm');
            if (firstLlm) {
              const embeddingCopy: Model = {
                ...firstLlm,
                id: `${firstLlm.id}-embedding`,
                name: `${firstLlm.name} (Embedding)`,
                type: 'embedding',
                isActive: true,
                tags: [...firstLlm.tags.filter(t => t !== 'LLM'), 'Embedding']
              };
              console.log(`创建嵌入模型副本: ${embeddingCopy.name}`);
              allModels.push(embeddingCopy);
            }
          }
        }
      }
      
      // Verify the distribution again after repair
      const finalLlmCount = allModels.filter(m => m.type === 'llm').length;
      const finalEmbeddingCount = allModels.filter(m => m.type === 'embedding').length;
      console.log(`调整后模型分布: LLM: ${finalLlmCount}个, Embedding: ${finalEmbeddingCount}个`);
      
      // Re-check that at least one model of each type is active
      const hasActiveLlmAfterFix = allModels.some(m => m.type === 'llm' && m.isActive);
      if (!hasActiveLlmAfterFix && finalLlmCount > 0) {
        const firstLlm = allModels.find(m => m.type === 'llm');
        if (firstLlm) {
          firstLlm.isActive = true;
          console.log('激活第一个LLM模型:', firstLlm.name);
        }
      }
      
      const hasActiveEmbeddingAfterFix = allModels.some(m => m.type === 'embedding' && m.isActive);
      if (!hasActiveEmbeddingAfterFix && finalEmbeddingCount > 0) {
        const firstEmbedding = allModels.find(m => m.type === 'embedding');
        if (firstEmbedding) {
          firstEmbedding.isActive = true;
          console.log('激活第一个嵌入模型:', firstEmbedding.name);
        }
      }
      
      // Emit final debug info
      const finalActiveLlm = allModels.find(m => m.type === 'llm' && m.isActive);
      const finalActiveEmbedding = allModels.find(m => m.type === 'embedding' && m.isActive);
      
      console.log('最终激活的模型:',
        'LLM:', finalActiveLlm ? `${finalActiveLlm.name} (${finalActiveLlm.id})` : '无',
        'Embedding:', finalActiveEmbedding ? `${finalActiveEmbedding.name} (${finalActiveEmbedding.id})` : '无'
      );
      
      set({ models: allModels, loading: false });
      
      // Notify listeners that model types have been updated
      document.dispatchEvent(new CustomEvent('models-type-updated', { 
        detail: { 
          totalCount: allModels.length,
          llmCount,
          embeddingCount,
          ollamaCount: ollamaModels.length
        } 
      }));
      
    } catch (error) {
      // NOTE(review): the real error detail is only logged; state.error carries
      // a generic message — consider surfacing error.message to the UI
      set({ loading: false, error: 'Failed to fetch models' });
      console.error('获取模型列表失败:', error);
    }
  },
  
  // Full refresh: snapshot favorite/active state, clear the list, re-run
  // fetchModels, restore the snapshot, repair the type distribution, and emit
  // refresh lifecycle events ('models-refresh-start'/'models-refresh-complete').
  refreshModels: async () => {
    document.dispatchEvent(new CustomEvent('models-refresh-start'));
    set({ loading: true });
    
    try {
      // Snapshot current favorite and active state
      const currentModels = get().models;
      const favoriteIds = currentModels
        .filter(model => model.isFavorite)
        .map(model => model.id);
      const currentActiveLlm = currentModels.find(model => model.type === 'llm' && model.isActive)?.id;
      const currentActiveEmbedding = currentModels.find(model => model.type === 'embedding' && model.isActive)?.id;
      
      console.log('刷新前的状态: 收藏模型', favoriteIds, '激活LLM', currentActiveLlm, '激活Embedding', currentActiveEmbedding);
      
      // Clear the list first so the UI is forced to update
      set({ models: [], loading: true });
      
      // Re-fetch everything
      await get().fetchModels();
      
      // Grab the refreshed model list
      const refreshedModels = get().models;
      
      // Restore favorite state
      const updatedModels = refreshedModels.map(model => ({
        ...model,
        isFavorite: favoriteIds.includes(model.id) || model.isFavorite,
        // Restore active state
        isActive: (model.type === 'llm' && model.id === currentActiveLlm) || 
                 (model.type === 'embedding' && model.id === currentActiveEmbedding) ||
                 model.isActive
      }));
      
      console.log('刷新后的模型数量:', updatedModels.length);
      console.log('LLM模型:', updatedModels.filter(m => m.type === 'llm').map(m => m.name));
      console.log('嵌入模型:', updatedModels.filter(m => m.type === 'embedding').map(m => m.name));
      
      // Make sure at least one LLM and one embedding model are active
      let hasActiveLlm = updatedModels.some(m => m.type === 'llm' && m.isActive);
      let hasActiveEmbedding = updatedModels.some(m => m.type === 'embedding' && m.isActive);
      
      const finalModels = [...updatedModels];
      
      // Auto-repair: if one type is empty, check for a misclassified model
      const llmModelsCount = finalModels.filter(m => m.type === 'llm').length;
      const embeddingModelsCount = finalModels.filter(m => m.type === 'embedding').length;
      
      // Only one type present: try to reclassify
      if (llmModelsCount === 0 && embeddingModelsCount > 0) {
        console.warn('自动修复: 没有LLM模型，尝试将第一个Ollama模型重新分类为LLM');
        // Reclassify the first Ollama model as an LLM
        const ollamaModel = finalModels.find(m => m.provider === 'Ollama');
        if (ollamaModel) {
          ollamaModel.type = 'llm';
          ollamaModel.isActive = true;
          console.log(`自动修复: 将模型 ${ollamaModel.name} 重新分类为LLM类型`);
          hasActiveLlm = true;
        }
      } else if (embeddingModelsCount === 0 && llmModelsCount > 0) {
        console.warn('自动修复: 没有嵌入模型，尝试检测一个可能的嵌入模型');
        // Look for a likely embedding model by name
        const possibleEmbedding = finalModels.find(m => 
          m.name.toLowerCase().includes('embed') || 
          m.name.toLowerCase().includes('text-') ||
          m.name.toLowerCase().includes('ada')
        );
        
        if (possibleEmbedding) {
          possibleEmbedding.type = 'embedding';
          possibleEmbedding.isActive = true;
          console.log(`自动修复: 将模型 ${possibleEmbedding.name} 重新分类为嵌入模型`);
          hasActiveEmbedding = true;
        }
      }

      // Forced repair: ensure each type has at least one model
      // Re-check the distribution after the repair above
      let llmCount = finalModels.filter(m => m.type === 'llm').length;
      let embedCount = finalModels.filter(m => m.type === 'embedding').length;
      
      // Still no LLM model: force-create one
      if (llmCount === 0) {
        console.warn('强制修复: 没有LLM模型，尝试创建一个');
        
        if (finalModels.length > 0) {
          // Prefer converting an Ollama model
          const ollamaModel = finalModels.find(m => m.provider === 'Ollama');
          if (ollamaModel) {
            ollamaModel.type = 'llm';
            ollamaModel.isActive = true;
            console.log(`强制将模型 ${ollamaModel.name} 设为LLM类型`);
            hasActiveLlm = true;
          } else {
            // No Ollama model: fall back to the first model
            const firstModel = finalModels[0];
            // Clone it as an LLM to avoid mutating the original entry
            const llmCopy: Model = { 
              ...firstModel,
              id: `${firstModel.id}-llm`,
              name: `${firstModel.name} (LLM)`,
              type: 'llm',
              isActive: true,
              tags: [...firstModel.tags.filter(t => t !== 'Embedding'), 'LLM']
            };
            console.log(`创建LLM模型: ${llmCopy.name}`);
            finalModels.push(llmCopy);
            hasActiveLlm = true;
          }
          // Refresh the count
          llmCount = finalModels.filter(m => m.type === 'llm').length;
        }
      }
      
      // Still no embedding model: force-create one
      if (embedCount === 0) {
        console.warn('强制修复: 没有嵌入模型，尝试创建一个');
        
        // First look for a convertible candidate by name
        const embeddingCandidate = finalModels.find(m => 
          m.name.toLowerCase().includes('embed') || 
          m.name.toLowerCase().includes('text') ||
          m.name.toLowerCase().includes('vector')
        );
        
        // NOTE(review): the `type !== 'llm'` guard means an LLM-typed candidate
        // is never converted here, whereas the equivalent repair in fetchModels
        // converts the candidate unconditionally — confirm which is intended.
        if (embeddingCandidate && embeddingCandidate.type !== 'llm') {
          embeddingCandidate.type = 'embedding';
          embeddingCandidate.isActive = true;
          console.log(`强制将模型 ${embeddingCandidate.name} 设为嵌入类型`);
          hasActiveEmbedding = true;
        } else if (finalModels.length > 0) {
          // Otherwise clone the first LLM model as an embedding model
          const llmModel = finalModels.find(m => m.type === 'llm');
          if (llmModel) {
            const embeddingCopy: Model = {
              ...llmModel,
              id: `${llmModel.id}-embedding`,
              name: `${llmModel.name} (Embedding)`,
              type: 'embedding',
              isActive: true,
              tags: [...llmModel.tags.filter(t => t !== 'LLM'), 'Embedding']
            };
            console.log(`创建嵌入模型: ${embeddingCopy.name}`);
            finalModels.push(embeddingCopy);
            hasActiveEmbedding = true;
          }
        }
        // Refresh the count
        embedCount = finalModels.filter(m => m.type === 'embedding').length;
      }
      
      // Log the final distribution
      console.log(`最终模型分布: 总计${finalModels.length}个模型, LLM: ${llmCount}个, Embedding: ${embedCount}个`);
      
      if (!hasActiveLlm && finalModels.some(m => m.type === 'llm')) {
        const firstLlm = finalModels.find(m => m.type === 'llm');
        if (firstLlm) {
          firstLlm.isActive = true;
          console.log('设置默认激活LLM:', firstLlm.name);
        }
      }
      
      if (!hasActiveEmbedding && finalModels.some(m => m.type === 'embedding')) {
        const firstEmbedding = finalModels.find(m => m.type === 'embedding');
        if (firstEmbedding) {
          firstEmbedding.isActive = true;
          console.log('设置默认激活Embedding:', firstEmbedding.name);
        }
      }
      
      // Commit the final list
      set({ models: finalModels, loading: false });
      
      // Delay the completion event so the UI has time to update
      setTimeout(() => {
        document.dispatchEvent(new CustomEvent('models-refresh-complete', {
          detail: {
            modelCount: finalModels.length,
            llmModels: finalModels.filter(m => m.type === 'llm'),
            embeddingModels: finalModels.filter(m => m.type === 'embedding'),
            activeLlm: finalModels.find(m => m.type === 'llm' && m.isActive)?.id,
            activeEmbedding: finalModels.find(m => m.type === 'embedding' && m.isActive)?.id
          }
        }));
      }, 200);
    } catch (error) {
      set({ loading: false, error: 'Failed to refresh models' });
      console.error('刷新模型列表失败:', error);
    }
  },
  
  // Activate a single LLM model by id. Every other LLM model is deactivated;
  // embedding/search models are untouched. No-op (with an error log) when the
  // id is unknown or the model is not an LLM.
  setActiveLlm: (modelId: string) => {
    const target = get().models.find(candidate => candidate.id === modelId);

    if (!target || target.type !== 'llm') {
      console.error('无法激活LLM模型，ID无效或类型不匹配:', modelId);
      return;
    }

    console.log('正在激活LLM模型:', target.name, target.id);

    // Flip isActive across LLM models so exactly the chosen one is active.
    set({
      models: get().models.map(m =>
        m.type === 'llm' ? { ...m, isActive: m.id === modelId } : m
      )
    });

    // Persist the selection so it survives a reload.
    localStorage.setItem(STORAGE_KEY_ACTIVE_LLM, modelId);
    console.log('LLM模型已激活并保存到本地存储:', target.name);

    // Let other components know the LLM selection changed.
    document.dispatchEvent(new CustomEvent('llm-model-changed', { 
      detail: { modelId, modelName: target.name } 
    }));
  },
  
  // Activate a single embedding model by id. Every other embedding model is
  // deactivated; LLM/search models are untouched. No-op (with an error log)
  // when the id is unknown or the model is not an embedding model.
  setActiveEmbedding: (modelId: string) => {
    const target = get().models.find(candidate => candidate.id === modelId);

    if (!target || target.type !== 'embedding') {
      console.error('无法激活Embedding模型，ID无效或类型不匹配:', modelId);
      return;
    }

    console.log('正在激活Embedding模型:', target.name, target.id);

    // Flip isActive across embedding models so exactly the chosen one is active.
    set({
      models: get().models.map(m =>
        m.type === 'embedding' ? { ...m, isActive: m.id === modelId } : m
      )
    });

    // Persist the selection so it survives a reload.
    localStorage.setItem(STORAGE_KEY_ACTIVE_EMBEDDING, modelId);
    console.log('Embedding模型已激活并保存到本地存储:', target.name);

    // Let other components know the embedding selection changed.
    document.dispatchEvent(new CustomEvent('embedding-model-changed', { 
      detail: { modelId, modelName: target.name } 
    }));
  },
  
  // Route activation to the type-specific setter. Unknown ids are ignored.
  setActiveModel: (modelId: string) => {
    const target = get().models.find(m => m.id === modelId);
    if (!target) return;

    switch (target.type) {
      case 'llm':
        get().setActiveLlm(modelId);
        break;
      case 'embedding':
        get().setActiveEmbedding(modelId);
        break;
      default:
        // 'search' models have no activation path here.
        break;
    }
  },
  
  // Flip the favorite flag on one model, then persist favorites shortly after.
  // `modelType` is accepted for interface compatibility but is not used here.
  toggleFavorite: (modelId: string, modelType?: string) => {
    const nextModels = get().models.map(model =>
      model.id === modelId ? { ...model, isFavorite: !model.isFavorite } : model
    );

    set({ models: nextModels });

    // Defer persistence so it runs after the state update has settled.
    setTimeout(() => {
      get().saveActiveModelsToStorage();
    }, 100);
  },
  
  // Merge a partial config into the matching model. Resolves immediately;
  // kept async to satisfy the ModelsState contract.
  updateModelConfig: async (modelId: string, config: Partial<Model>) => {
    const patched = get().models.map(model =>
      model.id === modelId ? { ...model, ...config } : model
    );

    set({ models: patched });
  }
})); 
