use chrono::prelude::*;
use model_graph_types::{
    assistant::{
        chat::{ChatMessage, ChatOptions, ChatProvider},
        shell::{ShellRequestMessage, ShellRequestOptions, ShellResponseMessage},
    },
    channel::{CliRequest, CommandResult, HttpRequest, HttpResponse},
    peer::{get_peer_id, is_self},
};
use std::{
    collections::HashMap,
    process::{Command, Stdio},
};
use std::{thread::sleep, time::Duration};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Process entry point: start the peer service, log in, then run a
    // SadTalker inference script through the local shell assistant.
    // Returns an error if option construction or the shell call fails.

    // Initialize global structured logging at DEBUG level.
    tracing_subscriber::fmt()
        .with_max_level(tracing::Level::DEBUG)
        .init();
    tracing::info!("RRAI 启动...");

    // SECURITY: a JWT was hard-coded here. Prefer the RRAI_TOKEN environment
    // variable; the embedded literal remains only as a backward-compatible
    // fallback and should be rotated and removed from source control.
    let token = std::env::var("RRAI_TOKEN").unwrap_or_else(|_| {
        String::from("eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyaWQiOiI1OTEyMDg4ZTQ4YmM0MGFjODQ2MjJiMzMwZTg0MTc1ZiIsInVuaW9uaWQiOiJvbXVSV3c4N2w1Q3ZacVhaZmFTbmVzRnVXZVBvIiwib3BlbmlkIjoib2VDTEo1a0FkM0d5RnN2bDhRWWF0WTZIS3RzRSIsImV4cCI6MTcxMDgxNDU5MTMwN30.eXKKXqklU0W9vKH1xuHqNu37clEbeqpebfF17jg1zZY")
    });

    // Best-effort peer startup; errors are deliberately ignored here.
    // TODO(review): confirm whether a failed peer_start should abort main.
    let _ = model_graph_peer::peer_start().await;

    // NOTE(review): previously this binding was named `result` and silently
    // shadowed below, so a failed login was never observable. The discard is
    // now explicit; consider propagating/logging it once the return type's
    // contract is confirmed.
    let _login_result = model_graph_peer::peer_login(&token).await;

    // One-shot shell request: run SadTalker inference inside the `linly`
    // conda env (Windows-style paths; `d:` switches the active drive first).
    let request = ShellRequestMessage {
        env: HashMap::new(),
        current: Some(String::from("D:/Linly-Talker/SadTalker")),
        before: Some(vec![]),
        script: vec![vec![String::from(
            r#"d: && cd D:/Linly-Talker/SadTalker && conda activate D:/conda/envs/linly &&  python D:/Linly-Talker/SadTalker/inference.py --driven_audio  "E:/dib/output2.wav" --source_image  "E:/dib/test.jpg"  --preprocess full --enhancer gfpgan &&  conda deactivate  "#,
        )]],
        after: Some(vec![]),
    };

    // Execute the script through the local shell assistant; `?` propagates
    // both option-construction and execution errors to the caller.
    let result =
        model_graph_peer::assistant::assistant_local_shell(request, ShellRequestOptions::new()?)
            .await?;

    tracing::debug!("{:?}", result);
    Ok(())
}
