import { clsx, type ClassValue } from "clsx"
import { twMerge } from "tailwind-merge"
import { invoke } from '@tauri-apps/api/core'

// Combine conditional class values via clsx, then let tailwind-merge
// resolve conflicting Tailwind utility classes (last one wins).
export function cn(...inputs: ClassValue[]) {
  const combined = clsx(inputs)
  return twMerge(combined)
}

// 加载模型配置
// Load persisted model configs and per-model enabled status from localStorage.
// Either field is null when the key is missing OR when the stored value is
// corrupted JSON — a bad blob in localStorage must not crash every caller.
export const loadModelConfigs = () => {
  // Parse a raw localStorage value, treating unreadable JSON as absent.
  const safeParse = (raw: string | null): any => {
    if (!raw) return null;
    try {
      return JSON.parse(raw);
    } catch (error) {
      console.error('模型配置解析失败，已忽略损坏的数据:', error);
      return null;
    }
  };

  return {
    configs: safeParse(localStorage.getItem('modelConfigs')),
    status: safeParse(localStorage.getItem('modelStatus'))
  };
};

// 获取当前激活的模型
// Resolve the currently active model: the first model id whose status flag
// is truthy, paired with its saved config. Returns null when nothing has
// been persisted yet or no model is enabled.
const getActiveModel = () => {
  const { configs, status } = loadModelConfigs();
  if (!configs || !status) return null;

  // Walk the status map in insertion order and take the first enabled entry.
  for (const [modelId, enabled] of Object.entries(status)) {
    if (enabled) {
      return {
        id: modelId,
        config: configs[modelId]
      };
    }
  }
  return null;
};

// 将图片转换为Base64
// Read a File as a data URL. Resolves with the full
// "data:<mime>;base64,..." string; rejects if the read fails.
const imageToBase64 = (file: File): Promise<string> =>
  new Promise<string>((resolve, reject) => {
    const reader = new FileReader();
    reader.onerror = reject;
    reader.onloadend = () => resolve(reader.result as string);
    reader.readAsDataURL(file);
  });

// 调用VLM模型API
// Call a vision-language model (VLM) API to extract text from an image.
//
// - imageFile: image to analyze (sent as base64)
// - modelId:   optional explicit model id; when omitted, the first enabled
//              model from settings is used
// - prompt:    instruction sent alongside the image (defaults to a Chinese
//              "recognize and extract the text in this image" prompt)
//
// Returns the model's text output, or a "no text recognized" placeholder.
// Throws when no model is configured/enabled, the model id is unsupported,
// or the provider responds with a non-2xx status.
export const callVlmModel = async (imageFile: File, modelId?: string, prompt: string = '识别图片中的文字内容并提取'): Promise<string> => {
  let activeModel;

  if (modelId) {
    // Use the explicitly requested model.
    const { configs } = loadModelConfigs();
    if (!configs || !configs[modelId]) {
      throw new Error(`未找到指定的模型: ${modelId}`);
    }
    activeModel = {
      id: modelId,
      config: configs[modelId]
    };
  } else {
    // Fall back to the first enabled model from settings.
    activeModel = getActiveModel();
    if (!activeModel) {
      throw new Error('未找到激活的模型，请在设置中启用并配置模型');
    }
  }

  const { id, config } = activeModel;
  const imageBase64 = await imageToBase64(imageFile);
  const base64Data = imageBase64.split(',')[1]; // strip the "data:...;base64," prefix

  try {
    // Each case is braced so its const declarations don't leak into the
    // shared switch scope. Response arrays are accessed with optional
    // chaining: a provider error payload may omit choices/candidates, and
    // indexing it directly would raise a confusing TypeError instead of the
    // placeholder/text result.
    switch (id) {
      case 'ollama': {
        // Ollama /api/generate with inline base64 images, non-streaming.
        const response = await fetch(`${config.baseUrl}/api/generate`, {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify({
            model: config.model,
            prompt: prompt,
            images: [base64Data],
            stream: false
          })
        });

        if (!response.ok) throw new Error(`Ollama API调用失败 (HTTP ${response.status})`);
        const data = await response.json();
        return data.response || '未识别到文本';
      }

      case 'openai': {
        // OpenAI chat completions with an image_url content part.
        const response = await fetch(`${config.baseUrl}/chat/completions`, {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${config.apiKey}`
          },
          body: JSON.stringify({
            model: config.model,
            messages: [{
              role: 'user',
              content: [
                { type: 'text', text: prompt },
                { type: 'image_url', image_url: { url: `data:image/jpeg;base64,${base64Data}` } }
              ]
            }]
          })
        });

        if (!response.ok) throw new Error(`OpenAI API调用失败 (HTTP ${response.status})`);
        const data = await response.json();
        return data.choices?.[0]?.message?.content || '未识别到文本';
      }

      case 'gemini': {
        // Gemini generateContent with an inline_data image part;
        // the API key travels as a query parameter.
        const response = await fetch(
          `${config.baseUrl}/v1/models/${config.model}:generateContent?key=${config.apiKey}`,
          {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({
              contents: [{
                parts: [
                  { text: prompt },
                  { inline_data: { mime_type: 'image/jpeg', data: base64Data } }
                ]
              }]
            })
          }
        );

        if (!response.ok) throw new Error(`Gemini API调用失败 (HTTP ${response.status})`);
        const data = await response.json();
        return data.candidates?.[0]?.content?.parts?.[0]?.text || '未识别到文本';
      }

      case 'lmstudio':
      case 'custom': {
        // LM Studio and custom endpoints speak the OpenAI-compatible
        // chat/completions format; Authorization header is optional.
        const response = await fetch(`${config.baseUrl}/v1/chat/completions`, {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
            ...(config.apiKey && { 'Authorization': `Bearer ${config.apiKey}` })
          },
          body: JSON.stringify({
            model: config.model,
            messages: [{
              role: 'user',
              content: [
                { type: 'text', text: prompt },
                { type: 'image_url', image_url: { url: `data:image/jpeg;base64,${base64Data}` } }
              ]
            }]
          })
        });

        if (!response.ok) throw new Error(`自定义模型API调用失败 (HTTP ${response.status})`);
        const data = await response.json();
        return data.choices?.[0]?.message?.content || '未识别到文本';
      }

      default:
        throw new Error(`不支持的模型类型: ${id}`);
    }
  } catch (error) {
    console.error('VLM模型调用错误:', error);
    throw error;
  }
};

// 调用PaddleOCR进行PDF文字识别（离线调用）
// Run PaddleOCR text recognition on a PDF via the local Tauri backend
// (fully offline). Requires a 'paddleOcrConfig' entry in localStorage;
// throws a user-facing error when the config is missing or the native
// command fails.
export const callPaddleOCRForPDF = async (pdfFile: File): Promise<string> => {
  const paddleOcrConfigStr = localStorage.getItem('paddleOcrConfig');
  if (!paddleOcrConfigStr) {
    throw new Error('未找到PaddleOCR配置，请在设置中配置PaddleOCR');
  }

  const config = JSON.parse(paddleOcrConfigStr);

  try {
    // Ship the raw PDF bytes to the Rust side as a plain number array
    // (Tauri's IPC serializes it as JSON).
    const arrayBuffer = await pdfFile.arrayBuffer();
    const uint8Array = new Uint8Array(arrayBuffer);

    const result = await invoke('ocr_pdf_with_paddle', {
      pdfData: Array.from(uint8Array),
      pdfName: pdfFile.name,
      options: {
        model_path: config.modelPath || './models/paddleocr',
        language: config.language || 'ch',
        enable_angle_cls: config.enableAngleCls !== false,
        use_gpu: config.useGpu || false,
        // Numeric defaults use `??`, not `||`: an explicit 0 from the user
        // (e.g. gpu_id 0 or a 0.0 threshold) must not be clobbered.
        gpu_id: config.gpuId ?? 0,
        max_side_len: config.maxSideLen ?? 960,
        det_db_thresh: config.detDbThresh ?? 0.3,
        det_db_box_thresh: config.detDbBoxThresh ?? 0.6,
        det_db_unclip_ratio: config.detDbUnclipRatio ?? 1.5,
        rec_batch_num: config.recBatchNum ?? 6,
        rec_img_h: config.recImgH ?? 48
      }
    });

    // The backend may return either a structured { text, boxes, confidence }
    // object or a bare string; normalize both to a non-empty string.
    if (result && typeof result === 'object' && 'text' in result) {
      const ocrResult = result as { text: string; boxes?: any[]; confidence?: number[] };
      return ocrResult.text || '未识别到文本';
    }

    if (typeof result === 'string') {
      return result || '未识别到文本';
    }

    return '未识别到文本';

  } catch (error) {
    console.error('PaddleOCR PDF离线调用错误:', error);
    throw new Error('PaddleOCR PDF离线调用失败，请确保已安装PaddleOCR本地环境');
  }
};

// 调用PaddleOCR进行文字识别（离线调用）
// Run PaddleOCR text recognition on an image, preferring the local Tauri
// backend (offline). If the native command fails, falls back to a local
// HTTP service at http://localhost:8000/paddleocr with the same options.
// Requires a 'paddleOcrConfig' entry in localStorage.
export const callPaddleOCR = async (imageFile: File): Promise<string> => {
  const paddleOcrConfigStr = localStorage.getItem('paddleOcrConfig');
  if (!paddleOcrConfigStr) {
    throw new Error('未找到PaddleOCR配置，请在设置中配置PaddleOCR');
  }

  const config = JSON.parse(paddleOcrConfigStr);

  // Build the option set once; it is shared verbatim by the Tauri command
  // and the HTTP fallback. Numeric defaults use `??`, not `||`, so an
  // explicit 0 from the user (gpu_id 0, a 0.0 threshold) is preserved.
  const ocrOptions = {
    model_path: config.modelPath || './models/paddleocr',
    language: config.language || 'ch',
    enable_angle_cls: config.enableAngleCls !== false,
    use_gpu: config.useGpu || false,
    gpu_id: config.gpuId ?? 0,
    max_side_len: config.maxSideLen ?? 960,
    det_db_thresh: config.detDbThresh ?? 0.3,
    det_db_box_thresh: config.detDbBoxThresh ?? 0.6,
    det_db_unclip_ratio: config.detDbUnclipRatio ?? 1.5,
    rec_batch_num: config.recBatchNum ?? 6,
    rec_img_h: config.recImgH ?? 48
  };

  try {
    // Ship the raw image bytes to the Rust side as a plain number array
    // (Tauri's IPC serializes it as JSON).
    const arrayBuffer = await imageFile.arrayBuffer();
    const uint8Array = new Uint8Array(arrayBuffer);

    const result = await invoke('ocr_with_paddle', {
      imageData: Array.from(uint8Array),
      imageName: imageFile.name,
      options: ocrOptions
    });

    // The backend may return either a structured { text, boxes, confidence }
    // object or a bare string; normalize both to a non-empty string.
    if (result && typeof result === 'object' && 'text' in result) {
      const ocrResult = result as { text: string; boxes?: any[]; confidence?: number[] };
      return ocrResult.text || '未识别到文本';
    }

    if (typeof result === 'string') {
      return result || '未识别到文本';
    }

    return '未识别到文本';

  } catch (error) {
    console.error('PaddleOCR离线调用错误:', error);

    // Tauri command unavailable or failed — fall back to the local HTTP API.
    try {
      const imageBase64 = await imageToBase64(imageFile);
      const base64Data = imageBase64.split(',')[1]; // strip the data URL prefix

      const response = await fetch('http://localhost:8000/paddleocr', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          image: base64Data,
          ...ocrOptions
        })
      });

      if (!response.ok) {
        throw new Error(`PaddleOCR调用失败: ${response.statusText}`);
      }

      const data = await response.json();

      if (data.error) {
        throw new Error(data.error);
      }

      // Per-line results: join non-empty text fragments with newlines.
      if (data.results && Array.isArray(data.results)) {
        const texts = data.results.map((item: any) => item.text || '').filter((text: string) => text.trim());
        return texts.join('\n') || '未识别到文本';
      }

      return data.text || data.result || '未识别到文本';

    } catch (fallbackError) {
      console.error('PaddleOCR回退调用错误:', fallbackError);
      throw new Error('PaddleOCR离线调用失败，请确保已安装PaddleOCR本地环境');
    }
  }
};
