Page({
  // Page state consumed by the WXML template. Option arrays (inferPrecisions,
  // trainModes, ...) feed <picker> components; each matching singular field
  // holds the currently selected option object.
  data: {
    currentTab: 'vram', // default tab: the VRAM calculator page
    // Scene selection
    scene: 'inference', // 'inference' or 'training'
    
    // Model parameters
    modelSize: '7', // model parameter count, in billions (B)
    
    // Inference parameters; `value` is bytes per parameter,
    // `quantRatio` is an extra quantization multiplier (currently always 1.0)
    inferPrecisions: [
      { label: 'FP32 (32-bit)', value: 4, quantRatio: 1.0 },
      { label: 'FP16 (16-bit)', value: 2, quantRatio: 1.0 },
      { label: 'INT8 (8-bit)', value: 1, quantRatio: 1.0 },
      { label: 'INT4 (4-bit)', value: 0.5, quantRatio: 1.0 }
    ],
    inferPrecision: { label: 'FP16 (16-bit)', value: 2, quantRatio: 1.0 }, // current selection
    inferContextLen: '2048',
    inferBatchSize: '1',
    
    // Training parameters
    trainModes: [
      { label: '全量微调（Full Fine-tuning）', value: 'full' },
      { label: 'LoRA（低秩适应，省显存）', value: 'lora' },
      { label: 'QLoRA（LoRA+量化，更省显存）', value: 'qlora' }
    ],
    trainMode:  { label: 'LoRA（低秩适应，省显存）', value: 'lora' },
    
    trainPrecisions: [
      { label: 'FP32 (32-bit)', value: 4, quantRatio: 1.0 },
      { label: 'FP16 (16-bit)', value: 2, quantRatio: 1.0 },
      { label: 'BF16 (16-bit)', value: 2, quantRatio: 1.0 }
    ],
    trainPrecision: { label: 'FP16 (16-bit)', value: 2, quantRatio: 1.0 },
    
    // `value` strings are matched in calculateVRAM's optimizer switch.
    // NOTE(review): `factor` appears unused in this file — confirm before removing.
    optimizers: [
      { label: 'Adam/AdamW', value: 'adam', factor: 8 },
      { label: 'SGD', value: 'sgd', factor: 4 },
      { label: 'Adam8bit', value: 'adam8', factor: 4 },
      { label: 'Lion', value: 'lion', factor: 4 }
    ],
    optimizerType: { label: 'Adam/AdamW', value: 'adam', factor: 8 },
    
    trainContextLen: '2048',
    trainBatchSize: '4',
    trainEpoch: '3',
    
    // LoRA parameters
    loraRanks: [
      { label: '8', value: 8 },
      { label: '16', value: 16 },
      { label: '32', value: 32 },
      { label: '64', value: 64 }
    ],
    loraRank: { label: '8', value: 8 },
    
    loraLayerRatios: [
      { label: '25%', value: 0.25 },
      { label: '50%', value: 0.5 },
      { label: '75%', value: 0.75 },
      { label: '100%', value: 1.0 }
    ],
    loraLayerRatio: { label: '100%', value: 1.0 },
    
    // QLoRA parameters; `value` is bytes per parameter after quantization
    qloraQuants: [
      { label: '4-bit', value: 0.5 },
      { label: '3-bit', value: 0.375 },
      { label: '2-bit', value: 0.25 }
    ],
    qloraQuant: { label: '4-bit', value: 0.5 },
    
    // Advanced-settings panel visibility
    showAdvanced: false,
    
    // True while a calculation is in progress (drives the busy indicator)
    calculating: false,
    
    // Result fields (formatted GB strings)
    totalVRAM: '0',
    vramSeverityClass: '',
    
    // VRAM breakdown
    staticVRAM: '0',      // static VRAM (model weights)
    activationVRAM: '0',  // activations
    kvCacheVRAM: '0',     // KV cache
    optimizerVRAM: '0',   // optimizer states
    gradientVRAM: '0',    // gradients
    reserveVRAM: '0',     // system reserve
    
    // Breakdown percentages (0-100)
    staticProgress: 0,
    activationProgress: 0,
    kvCacheProgress: 0,
    optimizerProgress: 0,
    gradientProgress: 0,
    reserveProgress: 0,
    
    // GPU recommendations
    gpuRecommendations: [],
    messages: [], // AI-news chat messages
    inputValue: '', // input box value
    loading: false, // loading state
    scrollTop: 0 // scroll position
  },

  onNewsInput: function(e) {
    this.setData({
      inputValue: e.detail.value
    });
  },
  // 添加导航切换方法
  onNavTabTap: function(e) {
    const tab = e.currentTarget.dataset.tab;
    if (tab === 'news') {
      // 点击AI资讯tab时直接跳转到news页面
      wx.navigateTo({
        url: '/pages/news/news'
      });
    } else {
      // 显存计算页面，正常切换tab
      this.setData({
        currentTab: tab
      });
    }
  },
  // 场景切换处理 - 增加结果重置逻辑
  onSceneTap(e) {
    const scene = e.currentTarget.dataset.scene;
    // 只有当场景确实改变时才重置结果
    if (scene !== this.data.scene) {
      this.setData({
        scene: scene,
        // 重置所有计算结果相关数据
        totalVRAM: '0',
        vramSeverityClass: '',
        staticVRAM: '0',
        activationVRAM: '0',
        kvCacheVRAM: '0',
        optimizerVRAM: '0',
        gradientVRAM: '0',
        reserveVRAM: '0',
        staticProgress: 0,
        activationProgress: 0,
        kvCacheProgress: 0,
        optimizerProgress: 0,
        gradientProgress: 0,
        reserveProgress: 0,
        gpuRecommendations: []
      });
    }
  },

  // 切换高级设置显示
  toggleAdvanced() {
    const showAdvanced = !this.data.showAdvanced;
    this.setData({
      showAdvanced: showAdvanced
    });
  },

  // 获取模型架构参数（根据模型大小）
  getModelArchitecture(modelSize) {
    let hiddenDim, numHeads;
    if (modelSize < 10) { // 小模型（如7B）
      hiddenDim = 4096;
      numHeads = 32;
    } else if (modelSize < 100) { // 中模型（如30B-70B）
      hiddenDim = 6144;
      numHeads = 48;
    } else { // 大模型（如100B以上）
      hiddenDim = 8192;
      numHeads = 64;
    }
    return { hiddenDim, numHeads, headDim: hiddenDim / numHeads };
  },

  // 计算系统开销
  calculateSystemOverhead(modelSize, isTraining) {
    if (isTraining) {
      return modelSize < 10 ? 2 : modelSize < 100 ? 4 : 8;
    } else {
      return modelSize < 10 ? 1 : modelSize < 100 ? 2 : 4;
    }
  },

  // 计算显存需求
  calculateVRAM() {
    // 设置计算状态
    this.setData({
      calculating: true
    });
    
    // 使用 setTimeout 模拟异步计算过程
    setTimeout(() => {
      // 获取输入参数
      const modelSize = parseFloat(this.data.modelSize) || 0;
      const contextLen = parseInt(this.data.scene === 'inference' ? this.data.inferContextLen : this.data.trainContextLen) || 0;
      const batchSize = parseInt(this.data.scene === 'inference' ? this.data.inferBatchSize : this.data.trainBatchSize) || 0;
      
      // 初始化变量
      let totalVRAM = 0;
      let staticVRAM = 0;
      let activationVRAM = 0;
      let kvCacheVRAM = 0;
      let optimizerVRAM = 0;
      let gradientVRAM = 0;
      let reserveVRAM = 0;
      
      // 获取模型架构参数
      const { hiddenDim, numHeads, headDim } = this.getModelArchitecture(modelSize);
      
      if (this.data.scene === 'inference') {
        // 推理场景计算（完全保留原逻辑）
        const precision = this.data.inferPrecision;
        const precisionBytes = precision.value;
        const quantRatio = precision.quantRatio;
        
        // 1. 静态显存：模型参数（考虑参数密度和量化）
        let paramDensity;
        if (modelSize < 10) { // 小模型（7B）
          paramDensity = 1.05;
        } else if (modelSize < 100) { // 中模型（32B）
          paramDensity = 1.1;
        } else { // 大模型（120B+）
          paramDensity = 1.2;
        }
        staticVRAM = modelSize * precisionBytes * paramDensity * quantRatio;
        
        // 2. 激活值显存
        activationVRAM = (hiddenDim * contextLen * batchSize * 4 * precisionBytes * quantRatio) / (1024 ** 3);
        
        // 3. KV Cache：注意力机制缓存
        kvCacheVRAM = (2 * numHeads * headDim * contextLen * batchSize * precisionBytes * quantRatio) / (1024 ** 3);
        
        // 4. 系统预留显存
        reserveVRAM = this.calculateSystemOverhead(modelSize, false);
        
        // 总显存
        totalVRAM = staticVRAM + activationVRAM + kvCacheVRAM + reserveVRAM;
      } else {
        // 训练场景计算（优化后）
        const trainMode = this.data.trainMode.value;
        const precision = this.data.trainPrecision;
        const precisionBytes = precision.value; // 模型参数和梯度的精度字节
        const optimizerType = this.data.optimizerType.value;
        
        // 估算模型层数（根据模型大小）
        let numLayers;
        if (modelSize < 10) {
          numLayers = 32; // 7B模型通常约32层
        } else if (modelSize < 100) {
          numLayers = 40; // 30-70B模型通常约40层
        } else {
          numLayers = 80; // 100B+模型通常约80层
        }
        
        // 根据训练模式计算有效模型大小
        let effectiveModelSize = modelSize;
        if (trainMode === 'lora') {
          // LoRA场景：仅更新部分参数
          const loraRank = this.data.loraRank.value;
          const layerRatio = this.data.loraLayerRatio.value;
          // LoRA参数约为 (2 * rank * hidden_dim * 4) * 适配层数
          const loraParamFactor = (2 * loraRank * hiddenDim * 4 * layerRatio) / (modelSize * 1e9);
          effectiveModelSize = modelSize * loraParamFactor;
        } else if (trainMode === 'qlora') {
          // QLoRA场景：量化+LoRA
          const loraRank = this.data.loraRank.value;
          const layerRatio = this.data.loraLayerRatio.value;
          const quantRatio = this.data.qloraQuant.value;
          
          // QLoRA参数约为 LoRA参数 * 量化比例
          const loraParamFactor = (2 * loraRank * hiddenDim * 4 * layerRatio) / (modelSize * 1e9);
          effectiveModelSize = modelSize * loraParamFactor * quantRatio;
        }
        // 全量微调时 effectiveModelSize 保持为 modelSize
        
        // 1. 基础模型显存（静态）- 使用模型精度
        staticVRAM = modelSize * precisionBytes;
        
        // 2. 优化器状态显存 - 关键修正：使用FP32精度(4字节)存储优化器状态
        // 参考官方实现：Adam/AdamW需要存储2组状态，其他优化器根据类型调整
        let optimizerStateCount = 0;
        switch(optimizerType) {
          case 'adam':
          case 'adamw':
            optimizerStateCount = 2; // 一阶矩和二阶矩
            break;
          case 'sgd':
            optimizerStateCount = 1; // 仅动量
            break;
          case 'adam8bit':
            optimizerStateCount = 2; // 仍为2组状态，但使用8bit存储
            break;
          case 'lion':
            optimizerStateCount = 1; // 仅一组状态
            break;
        }
        
        // 计算优化器显存 - 官方推荐AdamW使用FP32存储优化器状态
        const optimizerPrecisionBytes = optimizerType === 'adam8bit' ? 1 : 4;
        optimizerVRAM = effectiveModelSize * optimizerStateCount * optimizerPrecisionBytes;
        
        // 3. 梯度显存 - 与模型参数同精度
        gradientVRAM = effectiveModelSize * precisionBytes;
        
        // 4. 激活值显存 - 优化计算，更接近官方值
        // 考虑：层数、激活函数、残差连接等因素
        const activationFactor = trainMode === 'full' ? 3.0 : 1.8; // 全量微调激活值更高
        activationVRAM = (hiddenDim * contextLen * batchSize * numLayers * 
                        precisionBytes * activationFactor) / (1024 **3);
        
        // 5. KV Cache显存
        kvCacheVRAM = (2 * numHeads * headDim * contextLen * batchSize * precisionBytes) / (1024** 3);
        
        // 6. 系统预留显存 - 增加框架和CUDA开销
        reserveVRAM = this.calculateSystemOverhead(modelSize, true) + 2; // 额外2GB框架开销
        
        // 总显存
        totalVRAM = staticVRAM + optimizerVRAM + gradientVRAM + 
                   activationVRAM + kvCacheVRAM + reserveVRAM;
      }
      
      // 确保最小值为0
      totalVRAM = Math.max(0, totalVRAM);
      staticVRAM = Math.max(0, staticVRAM);
      activationVRAM = Math.max(0, activationVRAM);
      kvCacheVRAM = Math.max(0, kvCacheVRAM);
      optimizerVRAM = Math.max(0, optimizerVRAM);
      gradientVRAM = Math.max(0, gradientVRAM);
      reserveVRAM = Math.max(0, reserveVRAM);
      
      // 计算各部分占比
      const total = totalVRAM || 1; // 避免除以零
      const staticProgress = Math.round((staticVRAM / total) * 100);
      const activationProgress = Math.round((activationVRAM / total) * 100);
      const kvCacheProgress = Math.round((kvCacheVRAM / total) * 100);
      const optimizerProgress = Math.round((optimizerVRAM / total) * 100);
      const gradientProgress = Math.round((gradientVRAM / total) * 100);
      const reserveProgress = Math.round((reserveVRAM / total) * 100);
      
      // 根据显存大小设置严重程度类
      let vramSeverityClass = '';
      if (totalVRAM < 20) {
        vramSeverityClass = 'low';
      } else if (totalVRAM < 60) {
        vramSeverityClass = 'medium';
      } else {
        vramSeverityClass = 'high';
      }
      
      // GPU推荐
      const gpuOptions = [
        { model: 'RTX 4090', memory: 24, type: 'consumer' },
        { model: 'RTX 4080', memory: 16, type: 'consumer' },
        { model: 'RTX 3090/3090Ti', memory: 24, type: 'consumer' },
        { model: 'RTX A6000', memory: 48, type: 'workstation' },
        { model: 'A100 40GB', memory: 40, type: 'datacenter' },
        { model: 'A100 80GB', memory: 80, type: 'datacenter' },
        { model: 'H100 80GB', memory: 80, type: 'datacenter' },
        { model: 'H100 160GB', memory: 160, type: 'datacenter' },
        { model: 'L40S', memory: 48, type: 'datacenter' }
      ];
      
      // 根据场景和显存需求排序推荐
      const gpuRecommendations = gpuOptions
        .map(gpu => ({
          ...gpu,
          count: Math.ceil(totalVRAM / gpu.memory)
        }))
        .sort((a, b) => {
          if (a.count === 1 && b.count > 1) return -1;
          if (a.count > 1 && b.count === 1) return 1;
          return (a.memory * a.count - totalVRAM) - (b.memory * b.count - totalVRAM);
        })
        .slice(0, 5);
      
      // 更新数据
      this.setData({
        totalVRAM: totalVRAM.toFixed(2),
        vramSeverityClass: vramSeverityClass,
        staticVRAM: staticVRAM.toFixed(2),
        activationVRAM: activationVRAM.toFixed(2),
        kvCacheVRAM: kvCacheVRAM.toFixed(2),
        optimizerVRAM: optimizerVRAM.toFixed(2),
        gradientVRAM: gradientVRAM.toFixed(2),
        reserveVRAM: reserveVRAM.toFixed(2),
        staticProgress: staticProgress,
        activationProgress: activationProgress,
        kvCacheProgress: kvCacheProgress,
        optimizerProgress: optimizerProgress,
        gradientProgress: gradientProgress,
        reserveProgress: reserveProgress,
        gpuRecommendations: gpuRecommendations,
        calculating: false
      });
    }, 300); // 模拟计算延迟
  },

  // 输入处理函数（保持不变）
  onModelSizeChange(e) {
    this.setData({
      modelSize: e.detail.value
    });
  },

  onInferPrecisionChange(e) {
    this.setData({
      inferPrecision: this.data.inferPrecisions[e.detail.value]
    });
  },

  onInferContextLenChange(e) {
    this.setData({
      inferContextLen: e.detail.value
    });
  },

  onInferBatchSizeChange(e) {
    this.setData({
      inferBatchSize: e.detail.value
    });
  },

  onTrainModeChange(e) {
    this.setData({
      trainMode: this.data.trainModes[e.detail.value]
    });
  },

  onTrainPrecisionChange(e) {
    this.setData({
      trainPrecision: this.data.trainPrecisions[e.detail.value]
    });
  },

  onOptimizerTypeChange(e) {
    this.setData({
      optimizerType: this.data.optimizers[e.detail.value]
    });
  },

  onTrainContextLenChange(e) {
    this.setData({
      trainContextLen: e.detail.value
    });
  },

  onTrainBatchSizeChange(e) {
    this.setData({
      trainBatchSize: e.detail.value
    });
  },

  onLoraRankChange(e) {
    this.setData({
      loraRank: this.data.loraRanks[e.detail.value]
    });
  },

  onLoraLayerRatioChange(e) {
    this.setData({
      loraLayerRatio: this.data.loraLayerRatios[e.detail.value]
    });
  },

  onQloraQuantChange(e) {
    this.setData({
      qloraQuant: this.data.qloraQuants[e.detail.value]
    });
  },

  onTrainEpochChange(e) {
    this.setData({
      trainEpoch: e.detail.value
    });
  },

  // 分享功能（保持不变）
  onShareAppMessage: function(res) {
    return {
      title: '大模型显存计算器',
      path: '/pages/index/index',
      imageUrl: '/images/share-image.png'
    }
  },

  onShareTimeline: function() {
    return {
      title: '大模型显存计算器',
      query: '',
      imageUrl: '/images/moment-image.png'
    }
  }
});
