use super::property;
use crate::{util, Document};
use axum::{
    body::{Body, Bytes},
    extract::{Request, State},
    http::{self, HeaderValue},
    middleware::Next,
    response::{IntoResponse, Response},
    routing::{get, post},
    Json, Router,
};
use color_eyre::owo_colors::OwoColorize;
use log::{debug, info, warn};
use reqwest::StatusCode;
use rig::{
    client::{completion::CompletionClientDyn, EmbeddingsClient},
    providers::{self, ollama::Client},
    vector_store::{VectorSearchRequest, VectorStoreIndex},
};
use rig::{completion::Chat, vector_store::VectorStoreIndexDyn};
use rig_sqlite::SqliteVectorStore;
use rusqlite::ffi::sqlite3_auto_extension;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use sqlite_vec::sqlite3_vec_init;
use std::{collections::HashMap, convert::Infallible, path::Path, sync::Arc, time::Instant};
use tokio_rusqlite::Connection;

/// Request body for Ollama's `/api/generate` endpoint.
///
/// Used both to deserialize the incoming client request and to serialize
/// the (prompt-augmented) request forwarded upstream, so the fields mirror
/// the upstream schema.
#[derive(Serialize, Deserialize, Debug, Clone)]
struct OllamaCompletionReq {
    /// Model name requested by the client (may be overridden by config).
    model: String,
    /// Raw completion prompt; the proxy prepends retrieved reference code.
    prompt: String,
    /// Optional base64-encoded images for multimodal models.
    images: Option<String>,
    /// Pass-through model options (temperature, num_ctx, ...), kept opaque.
    options: serde_json::Value,
    /// System prompt; config value takes precedence when forwarding.
    system: Option<String>,
    template: Option<String>,
    // NOTE(review): Ollama documents `context` as an array of ints (see the
    // i32 vec in OllamaCompletionResp) — a String here may fail to
    // deserialize clients that send one; confirm against real traffic.
    context: Option<String>,
    format: Option<String>,
    keep_alive: Option<u32>,
    raw: Option<bool>,
    /// Whether the upstream response should be streamed.
    stream: Option<bool>,
}

/// Response body of Ollama's `/api/generate` endpoint.
///
/// NOTE(review): not referenced in this file — the handlers stream the
/// upstream body through unparsed. Presumably kept for future use or used
/// elsewhere; confirm before removing.
#[derive(Serialize, Deserialize, Debug, Clone)]
struct OllamaCompletionResp {
    model: String,
    created_at: Option<String>,
    /// Generated completion text.
    response: String,
    /// True on the final chunk of a streamed response.
    done: bool,
    done_reason: Option<String>,
    /// Conversation context tokens to feed back into a follow-up request.
    context: Option<Vec<i32>>,
    prompt_eval_duration: Option<u64>,
    eval_count: Option<u64>,
    eval_duration: Option<u64>,
}

/// Request body for an OpenAI-compatible legacy `/completions` endpoint
/// (prompt-based, not chat `messages`-based).
///
/// Deserialized from the client and re-serialized upstream; unknown fields
/// survive the round trip via the flattened `extra` map.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct OpenAiChatCompletionReq {
    pub model: String,
    // NOTE(review): no skip_serializing_if here, so a missing prompt is
    // forwarded as `"prompt": null` — confirm the upstream accepts that.
    pub prompt: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stop: Option<Vec<String>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub presence_penalty: Option<f32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub frequency_penalty: Option<f32>,
    /// Catch-all for any other fields the client sent, passed through as-is.
    #[serde(flatten)]
    pub extra: HashMap<String, serde_json::Value>,
}

/// Shared state handed to every axum handler via `State<Arc<ServerState>>`.
struct ServerState<'a> {
    /// LLM agent that summarizes what the code is about to implement
    /// (used as the vector-search query).
    agent: rig::agent::Agent<rig::client::completion::CompletionModelHandle<'a>>,
    /// Sqlite-backed vector index over embedded code `Document`s.
    vector_index:
        rig_sqlite::SqliteVectorIndex<providers::ollama::EmbeddingModel<reqwest::Client>, Document>,
}

// axum创建serve服务
pub async fn serve(vector_db_path: &String) -> Result<(), anyhow::Error> {
    unsafe {
        sqlite3_auto_extension(Some(std::mem::transmute(sqlite3_vec_init as *const ())));
    }

    let env = property::Env::init();

    // 检查文件是否存在
    let has_vector_db: bool = Path::new(vector_db_path).exists();
    if has_vector_db {
        println!("find vector db: {}", vector_db_path);
    }

    // 打开存储库
    let env = property::Env::init();
    let client = Client::builder().base_url(&env.embedding_endpoint).build();
    let model = client.embedding_model_with_ndims(&env.embedding_model, 1024);
    let conn = Connection::open(vector_db_path).await?;
    let vector_store: SqliteVectorStore<
        providers::ollama::EmbeddingModel<reqwest::Client>,
        Document,
    > = SqliteVectorStore::new(conn, &model).await?;
    let vector_index = vector_store.index(model);

    // llm解析函数
    let client = providers::openrouter::Client::builder(&env.code_llm_api_key)
        .base_url(&env.code_llm_endpoint)
        .build();
    let agent: rig::agent::Agent<rig::client::completion::CompletionModelHandle> = client
        .agent(&env.code_llm_model)
        .temperature(1.0)
        .preamble("你是代码补全助手，根据代码，补全接下来实现的功能。只描述功能，说明如何实现，不要输出无关的文本。输出内容不要超过60字")
        .build();

    // axum参数
    let serverState = ServerState {
        agent,
        vector_index,
    };

    let app = Router::new()
        .route("/", get(hi_handler))
        .route("/api/generate", post(ollama_completion_handler))
        .route("/api/show", post(api_show))
        .route("/api/completions", post(openai_completion_handler))
        .layer(axum::middleware::from_fn(middleware_name))
        .with_state(Arc::new(serverState));
    let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", env.code_completion_port))
        .await
        .unwrap();
    axum::serve(listener, app.into_make_service())
        .await
        .unwrap();

    Ok(())
}

// Middleware adapting tabbyml-style clients: requests that omit a
// Content-Type header are treated as JSON so the `Json` extractors accept
// them. Requests that already carry a Content-Type pass through untouched.
async fn middleware_name(
    mut req: axum::http::Request<axum::body::Body>,
    next: Next,
) -> Result<Response, StatusCode> {
    debug!("url: {}", req.uri());
    // Insert the default only when the header is absent.
    req.headers_mut()
        .entry(http::header::CONTENT_TYPE)
        .or_insert_with(|| HeaderValue::from_static("application/json"));

    Ok(next.run(req).await)
}

// Liveness probe for the root path: responds with a fixed greeting.
async fn hi_handler() -> &'static str {
    "hi, world!"
}

// Ollama-style code completion handler.
//
// Pipeline: (1) ask the agent what the code is about to implement,
// (2) vector-search the code store with that description, (3) rerank and
// keep the best hits, (4) prepend them to the prompt as `//` comments,
// (5) proxy the augmented request to the upstream `/api/generate` and
// stream its response back. Returns 503 if the upstream call fails.
async fn ollama_completion_handler<'a>(
    State(config): State<Arc<ServerState<'a>>>,
    Json(payload): Json<OllamaCompletionReq>,
) -> Response {
    debug!("Received payload: {:?}", payload.prompt.green());
    let mut prompt_formated = payload.prompt.clone();

    let env = property::Env::init();
    let agent = &config.agent;
    let vector_index = &config.vector_index;
    // Human-readable per-stage timings, logged once at the end.
    let mut duration_list: Vec<String> = Vec::new();

    // Stage 1: predict the functionality about to be implemented.
    let desc_code_start = Instant::now();
    let resp: Result<String, rig::completion::PromptError> =
        agent.chat(prompt_formated.clone(), vec![]).await;
    duration_list.push(format!("desc_code {:?}", desc_code_start.elapsed()));

    // Stages 2-4 run only if the agent produced a description; otherwise
    // the original prompt is forwarded unchanged.
    if let Ok(implementing) = resp.as_ref() {
        debug!("desc code {}", implementing.green());

        let code_number = 5;
        // Stage 2: vector search — over-fetch 10x candidates so the
        // reranker has material to choose from.
        let query_req = VectorSearchRequest::builder()
            .samples(code_number as u64 * 10)
            .query(implementing)
            .build()
            .unwrap();
        let search_code_start = Instant::now();
        let results: Vec<(f64, String, Value)> =
            vector_index.top_n(query_req).await.unwrap_or(vec![]);
        duration_list.push(format!("search_code {:?}", search_code_start.elapsed()));

        // Stage 3: rerank candidates by their "desc" field against the
        // predicted description, then keep the top `code_number`.
        let rerank_start = Instant::now();
        let desc_list: Vec<String> = results
            .iter()
            .map(|doc| {
                let v: &serde_json::Value = &doc.2;
                v["desc"].as_str().unwrap_or("").to_string().clone()
            })
            .collect();
        let reranker = util::rerank::Reranker {
            end_point: env.rerank_endpoint.clone(),
        };
        let results: Vec<(f64, String, serde_json::Value)> = reranker
            .rerank(implementing.clone(), desc_list.clone(), results.clone())
            .await;
        let results: Vec<(f64, String, serde_json::Value)> =
            results.into_iter().take(code_number).collect();
        duration_list.push(format!("rerank {:?}", rerank_start.elapsed()));

        // Log the snippets that survived reranking.
        for result in &results {
            let doc = &result.2;
            debug!(
                "{}\n{}\n",
                doc["desc"].as_str().unwrap_or("").green(),
                doc["content"].as_str().unwrap_or(""),
            );
        }

        // Stage 4: fold the reference code into the prompt, with every
        // line turned into a `//` comment so it reads as context.
        let mut commented_code = Vec::<String>::new();
        for result in &results {
            let doc = &result.2;
            let code_content = doc["content"].as_str().unwrap_or("");
            let code_content = code_content.replace("\n", "\n//");
            commented_code.push(format!("//{}", code_content));
        }

        prompt_formated = format!("{}\n{}", commented_code.join("\n\n"), prompt_formated);
    } else {
        warn!("{:?}", resp);
    }

    // Emit the collected stage timings.
    info!("durations: {}", duration_list.join(","));

    // Stage 5: proxy the augmented request to the ollama backend.
    let client = reqwest::Client::new();
    let req = OllamaCompletionReq {
        // Configured model (if any) overrides the client's choice.
        model: env.code_completion_model.unwrap_or(payload.model.clone()),
        prompt: prompt_formated,
        images: payload.images.clone(),
        options: payload.options.clone(),
        // Configured system prompt (if any) overrides the client's.
        system: env
            .code_completion_system
            .or_else(|| payload.system.clone()),
        template: payload.template.clone(),
        context: payload.context.clone(),
        format: payload.format.clone(),
        keep_alive: payload.keep_alive.clone(),
        raw: payload.raw.clone(),
        stream: payload.stream.clone(),
    };
    let resp_result: Result<reqwest::Response, reqwest::Error> = client
        .post(format!("{}/api/generate", env.code_completion_endpoint))
        .json(&req)
        .send()
        .await;
    if let Ok(resp) = resp_result {
        let status = resp.status();
        let headers = resp.headers().clone();
        let body_stream = resp.bytes_stream();

        // Mirror upstream status/headers and stream the body through.
        let mut response_builder = Response::builder().status(status);
        for (name, value) in headers {
            // NOTE(review): iterating an owned HeaderMap yields `None` for
            // the name of repeated values of a multi-valued header, so this
            // unwrap can panic — confirm the upstream never sends those.
            response_builder = response_builder.header(name.unwrap(), value);
        }
        return response_builder
            .body(Body::from_stream(body_stream))
            .unwrap();
    } else {
        warn!("{:?}", resp_result);
    }

    // Fallback when the upstream request itself failed.
    Response::builder().status(503).body(Body::empty()).unwrap()
}

// OpenAI-compatible code completion handler.
//
// Same RAG pipeline as the ollama handler — describe, search, rerank,
// prepend reference code — but speaks the legacy OpenAI `/completions`
// (prompt-based) schema and forwards to `{endpoint}/completions`.
// Returns 503 if the upstream call fails.
async fn openai_completion_handler<'a>(
    State(config): State<Arc<ServerState<'a>>>,
    Json(payload): Json<OpenAiChatCompletionReq>,
) -> Response {
    // Missing prompt degrades to the empty string rather than erroring.
    let mut prompt_formated = payload.prompt.unwrap_or("".to_string());
    debug!("received content: {:?}", prompt_formated.green());

    let env = property::Env::init();
    let agent = &config.agent;
    let vector_index = &config.vector_index;
    // Human-readable per-stage timings, logged once at the end.
    let mut duration_list: Vec<String> = Vec::new();

    // Stage 1: predict the functionality about to be implemented.
    let desc_code_start = Instant::now();
    let resp: Result<String, rig::completion::PromptError> =
        agent.chat(prompt_formated.clone(), vec![]).await;
    duration_list.push(format!("desc_code {:?}", desc_code_start.elapsed()));

    // Stages 2-4 run only if the agent produced a description; otherwise
    // the original prompt is forwarded unchanged.
    if let Ok(implementing) = resp.as_ref() {
        debug!("desc code {}", implementing.green());

        let code_number = 5;
        // Stage 2: vector search — over-fetch 10x candidates so the
        // reranker has material to choose from.
        let query_req = VectorSearchRequest::builder()
            .samples(code_number as u64 * 10)
            .query(implementing)
            .build()
            .unwrap();
        let search_code_start = Instant::now();
        let results: Vec<(f64, String, Value)> =
            vector_index.top_n(query_req).await.unwrap_or(vec![]);
        duration_list.push(format!("search_code {:?}", search_code_start.elapsed()));

        // Stage 3: rerank candidates by their "desc" field against the
        // predicted description, then keep the top `code_number`.
        let rerank_start = Instant::now();
        let desc_list: Vec<String> = results
            .iter()
            .map(|doc| {
                let v: &serde_json::Value = &doc.2;
                v["desc"].as_str().unwrap_or("").to_string().clone()
            })
            .collect();
        let reranker = util::rerank::Reranker {
            end_point: env.rerank_endpoint.clone(),
        };
        let results: Vec<(f64, String, serde_json::Value)> = reranker
            .rerank(implementing.clone(), desc_list.clone(), results.clone())
            .await;
        let results: Vec<(f64, String, serde_json::Value)> =
            results.into_iter().take(code_number).collect();
        duration_list.push(format!("rerank {:?}", rerank_start.elapsed()));

        // Log the snippets that survived reranking.
        for result in &results {
            let doc = &result.2;
            debug!(
                "{}\n{}\n",
                doc["desc"].as_str().unwrap_or("").green(),
                doc["content"].as_str().unwrap_or(""),
            );
        }

        // Stage 4: fold the reference code into the prompt, with every
        // line turned into a `//` comment so it reads as context.
        let mut commented_code = Vec::<String>::new();
        for result in &results {
            let doc = &result.2;
            let code_content = doc["content"].as_str().unwrap_or("");
            let code_content = code_content.replace("\n", "\n//");
            commented_code.push(format!("//{}", code_content));
        }

        prompt_formated = format!("{}\n{}", commented_code.join("\n\n"), prompt_formated);
    } else {
        warn!("{:?}", resp);
    }

    // Emit the collected stage timings.
    info!("durations: {}", duration_list.join(","));

    // Stage 5: proxy the augmented request to the completion backend.
    let client = reqwest::Client::new();

    let req = OpenAiChatCompletionReq {
        // Configured model (if any) overrides the client's choice.
        model: env.code_completion_model.unwrap_or(payload.model.clone()),
        // The legacy completions API has no system field, so the configured
        // system prompt is prepended to the prompt text itself.
        // NOTE(review): with no configured system prompt this still inserts
        // a leading newline — confirm that is acceptable upstream.
        prompt: Some(format!(
            "{}\n{}",
            env.code_completion_system.unwrap_or("".to_string()),
            prompt_formated
        )),
        temperature: payload.temperature.clone(),
        max_tokens: payload.max_tokens.clone(),
        stream: payload.stream.clone(),
        top_p: payload.top_p.clone(),
        stop: payload.stop.clone(),
        presence_penalty: payload.presence_penalty.clone(),
        frequency_penalty: payload.frequency_penalty.clone(),
        extra: payload.extra.clone(),
    };

    let resp_result: Result<reqwest::Response, reqwest::Error> = client
        .post(format!("{}/completions", env.code_completion_endpoint))
        .json(&req)
        .send()
        .await;
    if let Ok(resp) = resp_result {
        let status = resp.status();
        let headers = resp.headers().clone();
        let body_stream = resp.bytes_stream();

        // Mirror upstream status/headers and stream the body through.
        let mut response_builder = Response::builder().status(status);
        for (name, value) in headers {
            // NOTE(review): iterating an owned HeaderMap yields `None` for
            // the name of repeated values of a multi-valued header, so this
            // unwrap can panic — confirm the upstream never sends those.
            response_builder = response_builder.header(name.unwrap(), value);
        }
        return response_builder
            .body(Body::from_stream(body_stream))
            .unwrap();
    } else {
        warn!("{:?}", resp_result);
    }

    // Fallback when the upstream request itself failed.
    Response::builder().status(503).body(Body::empty()).unwrap()
}

// Transparent proxy for `/api/show`: forwards the raw request body to the
// backing completion server and streams its reply (status, headers, body)
// back to the caller. Responds 503 when the upstream call fails.
async fn api_show(req_body: String) -> Response {
    let env = property::Env::init();
    debug!("api_show body:{}", req_body);

    // Forward the body unchanged to the upstream endpoint.
    let upstream = reqwest::Client::new()
        .post(format!("{}/api/show", env.code_completion_endpoint))
        .body(req_body)
        .send()
        .await;

    match upstream {
        Ok(resp) => {
            let status = resp.status();
            let header_map = resp.headers().clone();
            let stream = resp.bytes_stream();

            // Mirror upstream status/headers onto a streaming response.
            let mut builder = Response::builder().status(status);
            for (name, value) in header_map {
                debug!("name:{:?}, value:{:?}", name, value);
                builder = builder.header(name.unwrap(), value);
            }
            builder.body(Body::from_stream(stream)).unwrap()
        }
        Err(_) => Response::builder().status(503).body(Body::empty()).unwrap(),
    }
}
