import type { Tensor, InferenceSession } from 'onnxruntime-web';

// Result of one model prediction, consumed by the UI layer.
export interface PredictionResult {
  label: string;      // class name, taken from LABELS
  confidence: number; // softmax probability of the winning class, in [0, 1]
  emoji: string;      // display emoji at the same index in EMOJIS
}

// Class labels and their display emojis; the two arrays are index-aligned
// with the model's 4 output logits.
const LABELS = ['bird', 'boat', 'fish', 'other'];
const EMOJIS = ['🐦', '🚢', '🐟', '❓'];

// Module-level lazy-initialization state: the dynamically imported ORT
// namespace and the cached inference session (populated by initializeONNX).
let isONNXInitialized = false;
let ORT: any | null = null;
let modelSession: InferenceSession | null = null;

/**
 * Lazily initialize ONNX Runtime Web and create the inference session.
 *
 * Obtains the ORT namespace by trying, in order:
 *   1. the explicit wasm backend entry `onnxruntime-web/wasm`,
 *   2. the package main entry `onnxruntime-web`,
 *   3. the UMD bundle from the jsDelivr CDN (read back off `globalThis.ort`).
 * It then configures the wasm environment, loads the model from
 * `<BASE_URL>/model/model.onnx`, and caches the session in `modelSession`.
 *
 * Subsequent calls return the cached session. On failure, the cached state
 * is reset so a later call retries from scratch, and the error is rethrown
 * with a descriptive message.
 *
 * NOTE(review): two concurrent first calls can race and each create a
 * session — consider caching the in-flight promise if that matters here.
 */
async function initializeONNX() {
  // Fast path: already initialized.
  if (isONNXInitialized && modelSession) {
    return modelSession;
  }
  
  try {
    console.log('开始初始化ONNX Runtime Web...');
    // Dynamic import avoids bundler ESM/UMD export-interop differences that
    // can leave the namespace undefined.
    if (!ORT) {
      // Prefer the explicit wasm backend entry point.
      let ortModule: any;
      try {
        ortModule = await import('onnxruntime-web/wasm');
      } catch (e) {
        console.warn('按 onnxruntime-web/wasm 导入失败，回退到主入口:', e);
        try {
          ortModule = await import('onnxruntime-web');
        } catch (e2) {
          // Both ESM entries failed; ortModule stays undefined and the UMD
          // fallback below takes over.
          console.warn('按 onnxruntime-web 主入口导入失败，回退到 UMD:', e2);
        }
      }
      ORT = ortModule?.default ?? ortModule;

      // If the ESM import produced nothing usable, try the UMD global.
      if (!ORT || Object.keys(ORT).length === 0) {
        try {
          const umdUrl = 'https://cdn.jsdelivr.net/npm/onnxruntime-web@1.22.0/dist/ort.js';
          // Pull the UMD build from the CDN to sidestep type checking and
          // path-resolution problems.
          // @ts-ignore
          await import(/* @vite-ignore */ umdUrl);
          const globalOrt = (globalThis as any)?.ort;
          if (globalOrt) {
            ORT = globalOrt;
          }
        } catch (e3) {
          console.warn('按 UMD 方式导入失败:', e3);
        }
      }
    }
    
    // Configure the WASM environment.
    // Point at explicit .wasm asset paths so some bundled deployments can
    // still locate the .wasm files.
    if ((ORT as any)?.env?.wasm) {
      // CDN keeps asset deployment simple; change to '/' for local hosting.
      ORT.env.wasm.wasmPaths = 'https://cdn.jsdelivr.net/npm/onnxruntime-web@1.22.0/dist/';
      ORT.env.wasm.numThreads = 1;
      ORT.env.wasm.simd = false; // could enable after a SIMD feature check
      ORT.env.wasm.proxy = false;
    } else {
      console.warn('ort.env.wasm 不可用，将继续尝试默认配置');
    }
    
    // Create the inference session.
    console.log('正在加载模型...');
    // Respect Vite's base path when deployed under a sub-path.
    const modelUrl = `${import.meta.env.BASE_URL || '/'}model/model.onnx`;
    const InferenceSessionCtor = ORT?.InferenceSession;
    if (!InferenceSessionCtor) {
      console.error('ORT keys:', ORT ? Object.keys(ORT) : []);
      throw new Error('InferenceSession 未定义，onnxruntime-web 导入失败');
    }
    if (typeof InferenceSessionCtor.create === 'function') {
      modelSession = await InferenceSessionCtor.create(modelUrl, {
        executionProviders: ['wasm'],
        graphOptimizationLevel: 'all'
      });
    } else {
      // Fallback: some versions instantiate first, then call loadModel.
      const session = new InferenceSessionCtor({
        executionProviders: ['wasm'],
        graphOptimizationLevel: 'all'
      });
      if (typeof session.loadModel !== 'function') {
        throw new Error('当前 ORT 版本不支持 create 亦不支持 loadModel');
      }
      await session.loadModel(modelUrl);
      modelSession = session;
    }
    
    isONNXInitialized = true;
    console.log('ONNX Runtime Web初始化完成');
    console.log('模型输入信息:', modelSession?.inputNames ?? []);
    console.log('模型输出信息:', modelSession?.outputNames ?? []);
    
    return modelSession;
  } catch (error) {
    console.error('ONNX Runtime Web初始化失败:', error);
    // Reset state so a later call retries from scratch.
    isONNXInitialized = false;
    modelSession = null;
    throw new Error(`模型初始化失败: ${error instanceof Error ? error.message : '未知错误'}`);
  }
}

/**
 * Convert a drawing (image data URL) into the model's input tensor.
 *
 * Pipeline: load image -> letterbox onto a white 224x224 canvas -> greyscale
 * -> Otsu binarization (dark strokes -> white, light background -> black)
 * -> normalize to [-1, 1] -> single-channel NCHW float32 tensor
 * [1, 1, 224, 224].
 *
 * Fix: this exported function dereferenced the module-level `ORT` namespace
 * (`new ORT.Tensor`) which is null until initializeONNX() has run; calling
 * it standalone crashed with an opaque TypeError. It now fails fast with a
 * descriptive error instead.
 *
 * @param imageDataUrl a `data:image/...` URL of the drawing
 * @returns a float32 Tensor of shape [1, 1, 224, 224]
 * @throws if the runtime is uninitialized, the URL is invalid, or the image
 *         cannot be loaded/drawn
 */
export async function preprocessImage(imageDataUrl: string): Promise<Tensor> {
  try {
    console.log('开始预处理图像...');

    // Fail fast with a clear message if the ORT runtime has not been loaded
    // yet (`ORT` is only assigned inside initializeONNX).
    if (!ORT?.Tensor) {
      throw new Error('ONNX Runtime 未初始化，请先调用 initializeONNX');
    }

    // Validate input: must be an image data URL.
    if (!imageDataUrl || !imageDataUrl.startsWith('data:image/')) {
      throw new Error('无效的图像数据URL');
    }

    // Decode the image.
    const img = new Image();
    img.crossOrigin = 'anonymous';
    await new Promise<void>((resolve, reject) => {
      img.onload = () => {
        console.log(`图像加载成功: ${img.width}x${img.height}`);
        resolve();
      };
      img.onerror = (error) => {
        console.error('图像加载失败:', error);
        reject(new Error('图像加载失败'));
      };
      img.src = imageDataUrl;
    });

    // Draw onto a fixed-size canvas; alpha disabled since we composite on a
    // solid white background anyway.
    const canvas = document.createElement('canvas');
    const ctx = canvas.getContext('2d', { alpha: false });
    if (!ctx) throw new Error('无法获取Canvas上下文');

    const targetSize = 224;
    canvas.width = targetSize;
    canvas.height = targetSize;

    // White backdrop matters for transparent source images.
    ctx.fillStyle = 'white';
    ctx.fillRect(0, 0, targetSize, targetSize);

    // Letterbox: scale preserving aspect ratio, then center.
    const scale = Math.min(targetSize / img.width, targetSize / img.height);
    const scaledWidth = img.width * scale;
    const scaledHeight = img.height * scale;
    const offsetX = (targetSize - scaledWidth) / 2;
    const offsetY = (targetSize - scaledHeight) / 2;
    ctx.drawImage(img, offsetX, offsetY, scaledWidth, scaledHeight);

    const imageData = ctx.getImageData(0, 0, targetSize, targetSize);
    const data = imageData.data;

    console.log('开始转换图像数据...');

    // Greyscale via Rec. 601 luma weights; alpha ignored (white backdrop).
    const grayArray = new Uint8ClampedArray(targetSize * targetSize);
    let p = 0;
    for (let y = 0; y < targetSize; y++) {
      for (let x = 0; x < targetSize; x++) {
        const idx = (y * targetSize + x) * 4; // RGBA stride
        const r = data[idx];
        const g = data[idx + 1];
        const b = data[idx + 2];
        grayArray[p++] = Math.round(0.299 * r + 0.587 * g + 0.114 * b);
      }
    }

    // Otsu threshold separates dark strokes from the light background.
    const threshold = computeOtsuThreshold(grayArray);

    // Binarize and normalize to [-1, 1] in single-channel CHW layout:
    // stroke (dark pixel) -> 1 -> +1, background (light pixel) -> 0 -> -1.
    const chData = new Float32Array(1 * targetSize * targetSize);
    for (let i = 0; i < grayArray.length; i++) {
      const isStroke = grayArray[i] < threshold;
      chData[i] = (isStroke ? 1.0 : 0.0) * 2 - 1;
    }

    console.log(`图像预处理完成，二值阈值: ${threshold}，数据长度: ${chData.length}`);

    // NCHW tensor of shape [1, 1, 224, 224].
    return new ORT.Tensor('float32', chData, [1, 1, targetSize, targetSize]);
  } catch (error) {
    console.error('图像预处理失败:', error);
    throw new Error(`图像预处理失败: ${error instanceof Error ? error.message : '未知错误'}`);
  }
}

/**
 * Run the full recognition pipeline for one drawing: lazily initialize the
 * model, preprocess the image, run inference, and postprocess the logits
 * into a labeled prediction with a softmax confidence.
 *
 * @param imageDataUrl a `data:image/...` URL of the drawing
 * @returns the winning label, its probability, and a display emoji
 * @throws on empty input, failed initialization, or inference errors
 */
export async function predictImage(imageDataUrl: string): Promise<PredictionResult> {
  try {
    console.log('开始AI图像识别...');

    // Guard: reject empty input up front.
    if (!imageDataUrl) {
      throw new Error('图像数据为空');
    }

    // Lazily initialize the runtime and obtain the cached session.
    const session = await initializeONNX();
    if (!session) {
      throw new Error('模型会话初始化失败');
    }

    console.log('开始预处理图像...');
    const tensor = await preprocessImage(imageDataUrl);

    console.log('图像预处理完成，开始模型推理...');
    console.log('输入张量形状:', tensor.dims);

    // Feed the tensor under the model's declared input name (fallback 'input').
    const feedName = session.inputNames[0] || 'input';

    const t0 = performance.now();
    const outputs = await session.run({ [feedName]: tensor });
    const t1 = performance.now();
    console.log(`推理耗时: ${(t1 - t0).toFixed(2)}ms`);

    // Read the first declared output (fallback 'output').
    const outName = session.outputNames[0] || 'output';
    const logits = outputs[outName]?.data as Float32Array;
    if (!logits || logits.length === 0) {
      throw new Error('模型输出为空');
    }
    console.log('原始输出:', Array.from(logits));

    // Turn raw logits into a probability distribution.
    const probs = applySoftmax(Array.from(logits));
    console.log('Softmax输出:', probs);

    // Arg-max over the probabilities (first maximum wins on ties).
    let best = 0;
    for (let i = 1; i < probs.length; i++) {
      if (probs[i] > probs[best]) {
        best = i;
      }
    }
    const confidence = probs[best];

    console.log('预测结果:', LABELS[best], '置信度:', confidence.toFixed(4));

    // Sanity-check the winning index against the label table.
    if (best < 0 || best >= LABELS.length) {
      throw new Error('预测结果索引超出范围');
    }

    return {
      label: LABELS[best],
      confidence: confidence,
      emoji: EMOJIS[best]
    };
  } catch (error) {
    console.error('模型推理失败:', error);
    throw new Error(`AI识别失败: ${error instanceof Error ? error.message : '未知错误'}`);
  }
}

// Numerically stable softmax: shift by the max logit before exponentiating,
// then normalize so the outputs sum to 1.
function applySoftmax(logits: number[]): number[] {
  let peak = -Infinity;
  for (const value of logits) {
    if (value > peak) {
      peak = value;
    }
  }

  const exps: number[] = [];
  let total = 0;
  for (const value of logits) {
    const e = Math.exp(value - peak);
    exps.push(e);
    total += e;
  }

  return exps.map((e) => e / total);
}

// Otsu's method: pick the grey level that maximizes the between-class
// variance of the background/foreground split. Returns 127 when the input
// is empty or no split is possible.
function computeOtsuThreshold(gray: Uint8ClampedArray): number {
  // Build the 256-bin intensity histogram.
  const hist = new Uint32Array(256);
  for (const value of gray) {
    hist[value]++;
  }

  const pixelCount = gray.length;
  let weightedTotal = 0;
  for (let level = 0; level < 256; level++) {
    weightedTotal += level * hist[level];
  }

  let backgroundSum = 0;   // running weighted sum of the background class
  let backgroundWeight = 0; // running pixel count of the background class
  let bestVariance = -1;
  let bestLevel = 127;

  for (let level = 0; level < 256; level++) {
    backgroundWeight += hist[level];
    if (backgroundWeight === 0) continue; // no background pixels yet
    const foregroundWeight = pixelCount - backgroundWeight;
    if (foregroundWeight === 0) break;    // foreground exhausted

    backgroundSum += level * hist[level];
    const meanBg = backgroundSum / backgroundWeight;
    const meanFg = (weightedTotal - backgroundSum) / foregroundWeight;
    const meanDiff = meanBg - meanFg;
    const variance = backgroundWeight * foregroundWeight * meanDiff * meanDiff;
    // Strict '>' keeps the first level on ties, matching a stable choice.
    if (variance > bestVariance) {
      bestVariance = variance;
      bestLevel = level;
    }
  }
  return bestLevel;
}
