// Prevents additional console window on Windows in release, DO NOT REMOVE!!
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]

use serde::{Deserialize, Serialize};
use reqwest::Client;

// A single chat turn: `role` is "user"/"assistant"/"system" as used by the
// OpenAI-style chat APIs; `content` is the message text.
#[derive(Debug, Serialize, Deserialize)]
struct ChatMessage {
    role: String,
    content: String,
}

// Chat request payload parsed from the frontend's `request_options` JSON
// string (see `call_ai_api` / `call_openai_api`).
#[derive(Debug, Serialize, Deserialize)]
struct ChatRequest {
    model: String,              // model id, also used to pick the provider
    messages: Vec<ChatMessage>, // full conversation history
    temperature: f32,           // sampling temperature forwarded verbatim
    max_tokens: u32,            // completion length cap forwarded verbatim
}

// Top-level response returned by the OpenAI chat-completions API.
// Note: `id` and `choices` are required for deserialization to succeed;
// API error bodies do not carry them.
#[derive(Debug, Serialize, Deserialize)]
struct OpenAIResponse {
    id: String,
    choices: Vec<Choice>,
    // Populated only when the API reports an error alongside a 2xx body.
    #[serde(skip_serializing_if = "Option::is_none")]
    error: Option<OpenAIError>,
}

// One completion candidate inside `OpenAIResponse.choices`.
#[derive(Debug, Serialize, Deserialize)]
struct Choice {
    message: Message,
    // e.g. "stop" / "length"; absent while streaming — TODO confirm against API
    #[serde(skip_serializing_if = "Option::is_none")]
    finish_reason: Option<String>,
    index: u32, // position of this choice in the returned list
}

// The assistant message inside a `Choice`; only the text content is kept.
#[derive(Debug, Serialize, Deserialize)]
struct Message {
    content: String,
}

// Structured error object as reported by the OpenAI API.
#[derive(Debug, Serialize, Deserialize)]
struct OpenAIError {
    message: String,
    // Error category (raw identifier because `type` is a Rust keyword).
    #[serde(skip_serializing_if = "Option::is_none")]
    r#type: Option<String>,
}

// Generic success/error envelope.
// NOTE(review): not referenced anywhere in this file — presumably mirrors the
// JSON shape the frontend expects; confirm before removing.
#[derive(Debug, Serialize, Deserialize)]
struct APIResponse {
    success: bool,
    content: Option<String>,
    error: Option<String>,
}

/// Sends a single user prompt to the OpenAI chat-completions API and returns
/// the assistant's reply text.
///
/// # Arguments
/// * `prompt`  - The user's message content.
/// * `api_key` - OpenAI API key, sent as a Bearer token.
///
/// # Errors
/// Returns a human-readable error string when the request fails, the API
/// responds with a non-success HTTP status, or the response cannot be parsed.
#[tauri::command]
async fn chat_with_ai(prompt: String, api_key: String) -> Result<String, String> {
    let client = Client::new();
    let url = "https://api.openai.com/v1/chat/completions";

    let payload = serde_json::json!({
        "model": "gpt-4o", // fixed default model for this simple helper
        "messages": [
            {
                "role": "user",
                "content": prompt
            }
        ],
        "temperature": 0.7
    });

    let response = client
        .post(url)
        .header("Content-Type", "application/json")
        .header("Authorization", format!("Bearer {}", api_key))
        .json(&payload)
        .send()
        .await
        .map_err(|e| format!("Request failed: {}", e))?;

    // Surface HTTP-level failures (401, 429, 5xx, ...) explicitly instead of
    // letting them show up as an opaque JSON-parse error: error bodies do not
    // contain the `id`/`choices` fields that `OpenAIResponse` requires.
    let status = response.status();
    if !status.is_success() {
        let body = response.text().await.unwrap_or_default();
        return Err(format!("API returned {}: {}", status, body));
    }

    match response.json::<OpenAIResponse>().await {
        Ok(data) => {
            // Prefer the structured error field when the API populated it.
            if let Some(err) = data.error {
                return Err(format!("API error: {}", err.message));
            }
            if let Some(choice) = data.choices.first() {
                Ok(choice.message.content.clone())
            } else {
                Err("No response from AI".into())
            }
        }
        Err(e) => Err(format!("Failed to parse response: {}", e)),
    }
}

/// Routes a chat request to the appropriate AI provider (OpenAI, DeepSeek,
/// Gemini, Anthropic) based on the model name.
///
/// # Arguments
/// * `api_key`         - Provider API key; the auth scheme varies per provider.
/// * `request_options` - JSON-encoded `ChatRequest`.
///
/// # Returns
/// `Ok` with `{ "success": true, "content": ... }` on success, or
/// `{ "success": false, "error": ..., "details": ..., ... }` when the API
/// reports an error; `Err` for request-option, transport and parse failures.
#[tauri::command]
async fn call_ai_api(api_key: String, request_options: String) -> Result<serde_json::Value, String> {
    // Parse the JSON-encoded request options.
    let chat_request: ChatRequest = match serde_json::from_str(&request_options) {
        Ok(parsed) => parsed,
        Err(e) => {
            eprintln!("Error parsing request_options: {}", e);
            return Err(format!("Error parsing request_options: {}", e));
        }
    };

    let model = chat_request.model.as_str();
    println!("Processing request for model: {}", model);

    // Development mode (TAURI_DEV=true) short-circuits to a mock response so
    // the UI can be exercised without real API calls.
    let dev_mode = std::env::var("TAURI_DEV").unwrap_or_else(|_| "false".to_string()) == "true";
    if dev_mode {
        println!("DEV MODE: Using mock response for model: {}", model);
        return mock_api_response(model, &chat_request);
    }

    // Infer the provider from the model name; default to OpenAI.
    let provider = if model.contains("gpt") {
        "openai"
    } else if model.contains("deepseek") {
        "deepseek"
    } else if model.contains("gemini") {
        "gemini"
    } else if model.contains("claude") {
        "anthropic"
    } else {
        "openai" // default
    };

    println!("Detected provider: {} for model: {}", provider, model);

    // Choose the endpoint and headers per provider.
    let (api_endpoint, headers) = match provider {
        "deepseek" => (
            "https://api.deepseek.com/v1/chat/completions".to_string(),
            vec![
                ("Content-Type".to_string(), "application/json".to_string()),
                ("Authorization".to_string(), format!("Bearer {}", api_key)),
            ],
        ),
        "gemini" => {
            // The Generative Language API authenticates via the `key` query
            // parameter (not a Bearer token) and takes the full model id in
            // the URL path.
            let endpoint = format!(
                "https://generativelanguage.googleapis.com/v1/models/{}:generateContent?key={}",
                model, api_key
            );
            (
                endpoint,
                vec![("Content-Type".to_string(), "application/json".to_string())],
            )
        }
        "anthropic" => (
            "https://api.anthropic.com/v1/messages".to_string(),
            vec![
                ("Content-Type".to_string(), "application/json".to_string()),
                ("x-api-key".to_string(), api_key),
                // Required by the Anthropic Messages API on every request.
                ("anthropic-version".to_string(), "2023-06-01".to_string()),
            ],
        ),
        // "openai" and any unrecognized provider fall through to OpenAI.
        _ => (
            "https://api.openai.com/v1/chat/completions".to_string(),
            vec![
                ("Content-Type".to_string(), "application/json".to_string()),
                ("Authorization".to_string(), format!("Bearer {}", api_key)),
            ],
        ),
    };

    println!("Using API endpoint: {}", api_endpoint);

    // Build the HTTP request.
    let client = reqwest::Client::new();
    let mut request_builder = client.post(&api_endpoint);
    for (key, value) in headers {
        request_builder = request_builder.header(key, value);
    }

    // Build the request body in the format each provider expects.
    let request_body = match provider {
        "gemini" => {
            // Gemini uses `contents` with `parts`, and the role names
            // "user"/"model"; there is no system role in `contents`, so any
            // other role is mapped to "user".
            let gemini_contents = chat_request
                .messages
                .iter()
                .map(|msg| {
                    let role = match msg.role.as_str() {
                        "assistant" => "model",
                        _ => "user",
                    };
                    serde_json::json!({
                        "role": role,
                        "parts": [{"text": msg.content}]
                    })
                })
                .collect::<Vec<_>>();

            serde_json::json!({
                "contents": gemini_contents,
                "generationConfig": {
                    "temperature": chat_request.temperature,
                    "maxOutputTokens": chat_request.max_tokens
                }
            })
        }
        // OpenAI, DeepSeek and Anthropic all accept this shape
        // (model / messages / temperature / max_tokens).
        _ => serde_json::json!({
            "model": model,
            "messages": chat_request.messages,
            "temperature": chat_request.temperature,
            "max_tokens": chat_request.max_tokens
        }),
    };

    println!("Sending request with model: {}", model);

    // Send the request.
    let response = match request_builder.json(&request_body).send().await {
        Ok(response) => response,
        Err(e) => {
            eprintln!("API request error: {}", e);
            return Err(format!("API request error: {}", e));
        }
    };

    // Handle the API response.
    if response.status().is_success() {
        match response.json::<serde_json::Value>().await {
            Ok(json_response) => {
                // Pull the reply text out of the provider-specific shape.
                let content = extract_content_from_response(provider, &json_response);

                Ok(serde_json::json!({
                    "success": true,
                    "content": content
                }))
            }
            Err(e) => {
                eprintln!("Failed to parse API response: {}", e);
                Err(format!("Failed to parse API response: {}", e))
            }
        }
    } else {
        // Extract as much detail as possible from the error response.
        let status = response.status();
        let error_text = match response.text().await {
            Ok(text) => text,
            Err(_) => "无法获取错误详情".to_string(),
        };

        eprintln!("API error response: {} {}", status, error_text);

        // Try to parse the error body as JSON.
        let error_json: Result<serde_json::Value, _> = serde_json::from_str(&error_text);

        // Extract a human-readable message from the provider-specific shape.
        let error_message = if let Ok(details) = &error_json {
            if let Some(message) = details.get("error").and_then(|e| e.get("message")).and_then(|m| m.as_str()) {
                // OpenAI / DeepSeek error format.
                message.to_string()
            } else if let Some(message) = details.get("error").and_then(|e| e.as_str()) {
                // Plain string error.
                message.to_string()
            } else if let Some(message) = details.get("details").and_then(|d| d.as_str()) {
                // Gemini error format.
                message.to_string()
            } else if let Some(code) = details.get("error_code").and_then(|e| e.as_u64()) {
                // Claude error format.
                format!("Error code: {}", code)
            } else {
                // Generic fallback.
                format!("{} - {}", status, error_text)
            }
        } else {
            // Body was not JSON; fall back to the raw text.
            format!("{} - {}", status, error_text)
        };

        let error_details = error_json.unwrap_or_else(|_| {
            serde_json::json!({
                "message": error_text,
                "status": status.as_u16()
            })
        });

        // Return the detailed error information to the frontend.
        Ok(serde_json::json!({
            "success": false,
            "error": error_message,
            "details": error_details,
            "status": status.as_u16(),
            "model": model,
            "provider": provider
        }))
    }
}

/// Extracts the assistant's reply text from a provider-specific JSON
/// response.
///
/// Known layouts:
/// * OpenAI / DeepSeek: `choices[0].message.content`
/// * Gemini:            `candidates[0].content.parts[0].text`
/// * Anthropic:         `content[0].text`
///
/// Unknown providers try the OpenAI layout, then the Gemini layout, and
/// finally fall back to pretty-printing the whole response.
fn extract_content_from_response(provider: &str, response: &serde_json::Value) -> String {
    // Each extractor returns None when any step of the path is missing.
    let openai_text = |v: &serde_json::Value| -> Option<String> {
        Some(
            v.get("choices")?
                .get(0)?
                .get("message")?
                .get("content")?
                .as_str()?
                .to_string(),
        )
    };
    let gemini_text = |v: &serde_json::Value| -> Option<String> {
        Some(
            v.get("candidates")?
                .get(0)?
                .get("content")?
                .get("parts")?
                .get(0)?
                .get("text")?
                .as_str()?
                .to_string(),
        )
    };
    let claude_text = |v: &serde_json::Value| -> Option<String> {
        Some(v.get("content")?.get(0)?.get("text")?.as_str()?.to_string())
    };

    match provider {
        "openai" | "deepseek" => openai_text(response).unwrap_or_default(),
        "gemini" => gemini_text(response).unwrap_or_default(),
        "anthropic" => claude_text(response).unwrap_or_default(),
        _ => openai_text(response)
            .or_else(|| gemini_text(response))
            .unwrap_or_else(|| {
                // Nothing matched: return the raw JSON so it is at least visible.
                serde_json::to_string_pretty(&response)
                    .unwrap_or_else(|_| "无法解析响应".to_string())
            }),
    }
}

/// Builds a mock API response for development mode, echoing the user's most
/// recent message back with a model-specific prefix.
///
/// Returns the same `{ "success": true, "content": ... }` shape the real
/// provider calls produce, so the frontend handles both identically.
fn mock_api_response(model: &str, request: &ChatRequest) -> Result<serde_json::Value, String> {
    // Find the most recent *user* message anywhere in the history.
    // (Previously only the very last message was considered, so the prompt
    // was silently dropped whenever the conversation ended with an
    // assistant turn.)
    let last_message = request
        .messages
        .iter()
        .rev()
        .find(|msg| msg.role == "user")
        .map(|msg| msg.content.as_str())
        .unwrap_or("");

    // Pick a reply prefix identifying which model is being mocked.
    let model_prefix = match model {
        m if m.contains("gpt-3.5") => "作为GPT-3.5 Turbo模型，我的回复是：",
        m if m.contains("gpt-4") => "作为GPT-4模型，我的回复是：",
        m if m.contains("deepseek-chat") => "作为DeepSeek Chat模型，我的回答是：",
        m if m.contains("deepseek-reasoner") => "作为DeepSeek Reasoner模型，我的推理过程如下：\n\n首先分析问题...\n然后考虑可能的解决方案...\n最终得出结论...",
        m if m.contains("gemini") => "Gemini AI 回复：",
        m if m.contains("claude") => "Claude的思考过程：\n\n分析完你的问题后，我认为...",
        _ => "AI回复：",
    };

    // Compose the canned reply around the echoed user input.
    let mock_response = format!(
        "{}这是模拟响应，用于测试。\n\n你的输入是：{}\n\n该应用程序处于开发模式，正在使用模拟API响应。",
        model_prefix, last_message
    );

    Ok(serde_json::json!({
        "success": true,
        "content": mock_response
    }))
}

/// Legacy command kept for backward compatibility with older frontends;
/// routes the request like `call_ai_api` but reports transport and parse
/// failures through the `{ "success": false, ... }` shape instead of `Err`.
///
/// # Arguments
/// * `api_key`         - Provider API key; the auth scheme varies per provider.
/// * `request_options` - JSON-encoded `ChatRequest`.
#[tauri::command]
async fn call_openai_api(api_key: String, request_options: String) -> Result<serde_json::Value, String> {
    println!("调用call_openai_api, 参数: api_key长度={}, request_options长度={}", api_key.len(), request_options.len());

    // Parse the JSON-encoded request options.
    let chat_request: ChatRequest = match serde_json::from_str(&request_options) {
        Ok(parsed) => parsed,
        Err(e) => {
            eprintln!("解析请求选项失败: {}", e);
            return Err(format!("解析请求选项失败: {}", e));
        }
    };

    let model = chat_request.model.as_str();
    println!("处理模型请求: {}", model);

    // Development mode (TAURI_DEV=true) short-circuits to a mock response.
    let dev_mode = std::env::var("TAURI_DEV").unwrap_or_else(|_| "false".to_string()) == "true";
    if dev_mode {
        println!("开发模式: 为模型 {} 使用模拟响应", model);
        return mock_api_response(model, &chat_request);
    }

    // Infer the provider from the model name; default to OpenAI.
    let provider = if model.contains("gpt") {
        "openai"
    } else if model.contains("deepseek") {
        "deepseek"
    } else if model.contains("gemini") {
        "gemini"
    } else if model.contains("claude") {
        "anthropic"
    } else {
        "openai" // default
    };

    println!("检测到提供商: {} 用于模型: {}", provider, model);

    // Choose the endpoint and headers per provider.
    let (api_endpoint, headers) = match provider {
        "deepseek" => (
            "https://api.deepseek.com/v1/chat/completions".to_string(),
            vec![
                ("Content-Type".to_string(), "application/json".to_string()),
                ("Authorization".to_string(), format!("Bearer {}", api_key)),
            ],
        ),
        "gemini" => {
            // The URL path takes the full model id (e.g. "gemini-1.5-pro");
            // the previous `model.replace("gemini-", "")` produced an invalid
            // path like "1.5-pro:generateContent".
            let endpoint = format!(
                "https://generativelanguage.googleapis.com/v1/models/{}:generateContent?key={}",
                model, api_key
            );
            (
                endpoint,
                vec![("Content-Type".to_string(), "application/json".to_string())],
            )
        }
        "anthropic" => (
            "https://api.anthropic.com/v1/messages".to_string(),
            vec![
                ("Content-Type".to_string(), "application/json".to_string()),
                ("x-api-key".to_string(), api_key),
                // Required by the Anthropic Messages API on every request.
                ("anthropic-version".to_string(), "2023-06-01".to_string()),
            ],
        ),
        // "openai" and any unrecognized provider fall through to OpenAI.
        _ => (
            "https://api.openai.com/v1/chat/completions".to_string(),
            vec![
                ("Content-Type".to_string(), "application/json".to_string()),
                ("Authorization".to_string(), format!("Bearer {}", api_key)),
            ],
        ),
    };

    println!("使用API端点: {}", api_endpoint);

    // Build the HTTP request.
    let client = reqwest::Client::new();
    let mut request_builder = client.post(&api_endpoint);
    for (key, value) in headers {
        request_builder = request_builder.header(key, value);
    }

    // Build the request body in the format each provider expects.
    let request_body = match provider {
        "gemini" => {
            // Gemini expects the lowercase role names "user"/"model"; there is
            // no system role in `contents`, so any other role maps to "user".
            // (The previous uppercase "USER"/"MODEL"/"SYSTEM" values do not
            // match the documented API format.)
            let gemini_contents = chat_request
                .messages
                .iter()
                .map(|msg| {
                    let role = match msg.role.as_str() {
                        "assistant" => "model",
                        _ => "user",
                    };
                    serde_json::json!({
                        "role": role,
                        "parts": [{"text": msg.content}]
                    })
                })
                .collect::<Vec<_>>();

            serde_json::json!({
                "contents": gemini_contents,
                "generationConfig": {
                    "temperature": chat_request.temperature,
                    "maxOutputTokens": chat_request.max_tokens
                }
            })
        }
        "anthropic" => {
            // Claude Messages API format. (A dead special-case substituting
            // claude-3-opus for models containing "gork" was removed: such
            // models never route to the "anthropic" branch above.)
            serde_json::json!({
                "model": model,
                "messages": chat_request.messages,
                "max_tokens": chat_request.max_tokens,
                "temperature": chat_request.temperature
            })
        }
        _ => {
            // OpenAI and DeepSeek use the same format.
            serde_json::json!({
                "model": model,
                "messages": chat_request.messages,
                "temperature": chat_request.temperature,
                "max_tokens": chat_request.max_tokens
            })
        }
    };

    println!("发送请求，模型: {}", model);

    // Send the request; transport errors are reported as success:false.
    let response = match request_builder.json(&request_body).send().await {
        Ok(response) => response,
        Err(e) => {
            eprintln!("API请求错误: {}", e);
            return Ok(serde_json::json!({
                "success": false,
                "error": format!("API请求错误: {}", e)
            }));
        }
    };

    // Handle the API response.
    if response.status().is_success() {
        match response.json::<serde_json::Value>().await {
            Ok(json_response) => {
                // Pull the reply text out of the provider-specific shape.
                let content = extract_content_from_response(provider, &json_response);

                Ok(serde_json::json!({
                    "success": true,
                    "content": content
                }))
            }
            Err(e) => {
                eprintln!("解析API响应失败: {}", e);
                Ok(serde_json::json!({
                    "success": false,
                    "error": format!("解析API响应失败: {}", e)
                }))
            }
        }
    } else {
        // Extract as much detail as possible from the error response.
        let status = response.status();
        let error_text = match response.text().await {
            Ok(text) => text,
            Err(_) => "无法获取错误详情".to_string(),
        };

        eprintln!("API错误响应: {} {}", status, error_text);

        // Try to parse the error body as JSON.
        let error_json: Result<serde_json::Value, _> = serde_json::from_str(&error_text);

        // Extract a human-readable message from the provider-specific shape.
        let error_message = if let Ok(details) = &error_json {
            if let Some(message) = details.get("error").and_then(|e| e.get("message")).and_then(|m| m.as_str()) {
                // OpenAI / DeepSeek error format.
                message.to_string()
            } else if let Some(message) = details.get("error").and_then(|e| e.as_str()) {
                // Plain string error.
                message.to_string()
            } else if let Some(message) = details.get("details").and_then(|d| d.as_str()) {
                // Gemini error format.
                message.to_string()
            } else if let Some(code) = details.get("error_code").and_then(|e| e.as_u64()) {
                // Claude error format.
                format!("Error code: {}", code)
            } else {
                // Generic fallback.
                format!("{} - {}", status, error_text)
            }
        } else {
            // Body was not JSON; fall back to the raw text.
            format!("{} - {}", status, error_text)
        };

        let error_details = error_json.unwrap_or_else(|_| {
            serde_json::json!({
                "message": error_text,
                "status": status.as_u16()
            })
        });

        // Return the detailed error information to the frontend.
        Ok(serde_json::json!({
            "success": false,
            "error": error_message,
            "details": error_details,
            "status": status.as_u16(),
            "model": model,
            "provider": provider
        }))
    }
}

fn main() {
    // Build and run the Tauri application: register the persistent key-value
    // store and logging plugins, then expose the three AI chat commands to
    // the frontend via the invoke handler.
    tauri::Builder::default()
        .plugin(tauri_plugin_store::Builder::default().build())
        .plugin(tauri_plugin_log::Builder::default().build())
        .invoke_handler(tauri::generate_handler![
            chat_with_ai,
            call_openai_api,   // legacy command kept for backward compatibility
            call_ai_api
        ])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}
