// Learn more about Tauri commands at https://tauri.app/v1/guides/features/command
use crate::*;
use anyhow::anyhow;
use chrono::Local;
use reqwest::header::HeaderMap;
use serde::de;
use std::collections::HashMap;
use std::path::{self, Path};
use tracing::{debug, error, info};
use anyhow::{bail};

/// Payload of a chat request sent from the frontend via a Tauri command.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ChatParam {
      // The user's question text forwarded to the GLM API.
      pub question: String,      
}


/// Request body for the GLM chat-completion endpoint
/// (see `chat_with_glm`, which builds and posts this).
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct GlmRequestParam {
    /// Model identifier, e.g. "glm-4-flash".
    pub model: String,
    /// Messages sent to the model (this code sends a single "user" message).
    pub messages: Vec<GlmMessage>,
    /// Sampling temperature passed through to the API.
    pub temperature: f32,
}

/// Top-level response payload deserialized from the GLM
/// chat-completion endpoint.
#[derive(Debug, Deserialize, Serialize, Clone, Default)]
pub struct GlmResponse {
    /// Identifier assigned by the API.
    pub id: String,
    /// Creation time value returned by the API
    /// (presumably a Unix timestamp — confirm against the API docs).
    pub created: i32,
    /// Name of the model that produced the completion.
    pub model: String,
    /// Generated completion choices; callers read the first one.
    pub choices: Vec<GlmChoices>,
    /// Token accounting for this request.
    pub usage: GlmUsage,
}

/// One completion choice inside a [`GlmResponse`].
#[derive(Debug, Deserialize, Serialize, Clone, Default)]
pub struct GlmChoices{
    // Reason the generation stopped, as reported by the API.
    pub finish_reason: String,
    // Position of this choice within the response's choice list.
    pub index: i32,
    // The message generated for this choice.
    pub message: GlmMessage,
}

/// A single chat message, used both in requests (role "user")
/// and in responses (the assistant's reply).
#[derive(Debug, Deserialize, Serialize, Clone, Default)]
pub struct GlmMessage{
    // The message text.
    pub content: String,
    // The speaker role; this code sets "user" on outgoing messages.
    pub role: String,
}

/// Token usage statistics reported by the GLM API.
#[derive(Debug, Deserialize, Serialize, Clone, Default)]
pub struct GlmUsage{
    // Tokens consumed by the generated completion.
    pub completion_tokens: i32,
    // NOTE(review): the API also reports prompt tokens; the field below was
    // disabled (and is misspelled "promp_tokens") — restore as `prompt_tokens`
    // if prompt accounting is needed.
    // pub promp_tokens: i32,
    pub total_tokens: i32,
}

/// A stored question/answer pair for the local chat history.
/// All fields fall back to their `Default` when missing from the
/// serialized form (`#[serde(default)]`).
#[derive(Default, Serialize, Deserialize, Debug, Clone)]
pub struct ChatHistory {  
    // Record identifier (0 when absent).
    #[serde(default)]
    pub id: i32,
    // The question the user asked.
    #[serde(default)]
    pub question: String,    
    // When the entry was created; format is set by whoever writes it —
    // TODO confirm against the code that populates this struct.
    #[serde(default)]    
    pub timestamp: String,
    // The model's response text.
    #[serde(default)]
    pub response: String,    
}

/// Sends `msg` to the GLM API via [`chat_with_glm`] and extracts the
/// assistant's reply text from the first choice.
///
/// Returns an empty string when the response contains no choices (the
/// condition is logged as an error rather than failing the caller). When
/// more than one choice is returned, only the first is used and a warning
/// is logged.
///
/// # Errors
/// Propagates any error from [`chat_with_glm`] (empty input, HTTP failure,
/// or deserialization failure).
pub async fn chat_with_glm_response(msg: &str) -> anyhow::Result<String> {
    let rs = chat_with_glm(msg).await?;

    if rs.choices.len() > 1 {
        error!("response msg is more than 1, but only use the first one now!");
    }

    // Take ownership of the first choice instead of cloning the whole
    // GlmChoices just to move one field out of it.
    match rs.choices.into_iter().next() {
        Some(choice) => Ok(choice.message.content),
        None => {
            error!("response msg is empty, TODO");
            Ok(String::new())
        }
    }
}
  
  /**
   * 简单调用GLM开方接口，采用glm-4-flash模型，进行对话。
   */
  pub async fn chat_with_glm(msg: &str) -> anyhow::Result<GlmResponse> {
      if msg.is_empty(){
        bail!("msg is empty");
      }
      let client = reqwest::Client::new();
      let api = "https://open.bigmodel.cn/api/paas/v4/chat/completions";
  
      let mut headers = HeaderMap::new();
      headers.insert(
          "Authorization",
          "Bearer efee28671779b38ba107b5a8f89491cc.V2R8xEzrqo5FG8RT".parse()?,
      );
      headers.insert("Content-Type", "application/json; charset=utf-8".parse()?);
  
      let messages = vec![GlmMessage {
          role: "user".to_string(),
          content: msg.to_string(),
      }];
  
      // logging::log!("{:#?}", messages);
  
      let askmsg: GlmRequestParam = GlmRequestParam {
          model: "glm-4-flash".to_string(),
          messages,
          temperature: 0.9,
      };
  
      let r: GlmResponse = client
          .post(api)
          .headers(headers)
          .json(&askmsg)
          .send()
          .await?
          .json()
          .await?;
      // logging::log!("{:#?}", r);
      return Ok(r);
  }
  