mod args;
mod config;
mod datasource;
mod endpoint;
mod task;

use crate::endpoint::Message;
use args::CmdLineArgs;
use clap::Parser;
use config::IndexerConfig;
use log::{error, info};
use simplelog::*;
use std::collections::HashMap;
use std::fs::OpenOptions;
use std::io::{stdin, Write};
use std::process;
use std::process::{Command, Stdio};
use std::{env, thread};
use sysinfo::{PidExt, ProcessExt, SystemExt};
use time::UtcOffset;
use ureq::Error;

/// indexer entry
fn main() {
    //read arguments from command line
    let args: CmdLineArgs = CmdLineArgs::parse();

    //to kill process already started
    if args.is_stop() {
        kill_self();
        return;
    }

    //init logger
    init_logger(&args);
    //read configurations

    match IndexerConfig::parse(args.config()) {
        Ok(config) => {
            //check every parts of configurations (datasource, endpoint and tasks)
            if args.is_test() {
                config.test();
                return;
            }

            //execute data clean
            if args.is_clean() {
                clean(&config);
                return;
            }

            //execute initial procedure (such as create trigger, table, full data put , etc.)
            if args.is_init() {
                init(&config);
                return;
            }

            if args.is_start() {
                //start daemon
                let p_args: Vec<String> = env::args().collect();
                let program = &p_args[0];
                Command::new(program)
                    .args(args.start_args())
                    .stdin(Stdio::piped())
                    .stdout(Stdio::piped())
                    .spawn()
                    .unwrap();
            } else {
                info!("started.");
                start(&config);
            }
        }
        Err(e) => {
            error!("failed to read {:?}: {}", args.config(), e);
        }
    }
}

/// initialize database and triggers
///
/// # Arguments
/// * `config` global configurations from `indexer.yml`
///
fn init(config: &IndexerConfig) {
    let mut error = false;
    if confirm("Create tasks table and triggers (y/N)? ") {
        //get all dedup datasources used in tasks
        let mut ds_names: Vec<String> =
            config.tasks().iter().map(|t| t.1.datasource.clone()).collect();
        ds_names.dedup();
        //init the datasource
        for ds_name in ds_names {
            if let Some(datasource) = config.datasource(&ds_name) {
                //get tasks that use this datasource
                let mut tasks = HashMap::new();
                tasks.extend(config.tasks().iter().filter(|t| t.1.datasource == ds_name));
                if let Ok(ids) = datasource.inst() {
                    if let Err(e) = ids.init(&tasks) {
                        error!("failed to init datasource, reason: {}", e);
                        error = true;
                    }
                }
            } else {
                error!("datasource [\"{}\"] undefined.", ds_name);
            }
        }
        if !error {
            info!("indexer initialized.");
        }
    }
    if !error && confirm("Begin to execute full synchronization (y/N)? ") {
        match begin_sync(&config) {
            Ok(_) => info!("indexer full synchronized."),
            Err(Error::Status(code, resp)) => {
                if let Ok(msg) = resp.into_json::<Message>() {
                    error!("sync failed with code {}, reason: {:?}", msg.error, msg.message);
                } else {
                    error!("sync failed with code {}", code);
                }
            }
            Err(Error::Transport(t)) => {
                if let Some(msg) = t.message() {
                    error!("sync failed, reason: {}", msg);
                }
            }
        }
    }
}

/// clean tables, functions and triggers that the indexer created.
/// when the user is no longer using the indexer, this function purges that data
///
/// # Arguments
/// * `config` global configurations from `indexer.yml`
///
fn clean(config: &IndexerConfig) {
    if !confirm("Are you sure clear relevant data created by the indexer? (y/N)? ") {
        return;
    }
    //collect the distinct datasource names used by the tasks;
    //Vec::dedup only removes *consecutive* duplicates, so sort first
    let mut ds_names: Vec<String> = config.tasks().iter().map(|t| t.1.datasource.clone()).collect();
    ds_names.sort();
    ds_names.dedup();
    //clean each datasource
    for ds_name in ds_names {
        if let Some(datasource) = config.datasource(&ds_name) {
            //get tasks that use this datasource
            let mut tasks = HashMap::new();
            tasks.extend(config.tasks().iter().filter(|t| t.1.datasource == ds_name));
            if let Ok(ids) = datasource.inst() {
                if let Err(e) = ids.clean(&tasks) {
                    error!("failed to clean datasource, reason: {}", e);
                } else {
                    info!("datasource [\"{}\"] cleaned.", ds_name);
                }
            }
        } else {
            error!("datasource [\"{}\"] undefined.", ds_name);
        }
    }
}

/// synchronize all tasks by paging through every datasource and pushing
/// each page of records to the endpoint
///
/// # Arguments
/// * `config` global configurations from `indexer.yml`
///
/// # Errors
/// returns the endpoint error when acquiring the auth token fails; per-task
/// failures are logged and skip to the next task instead of aborting
fn begin_sync(config: &IndexerConfig) -> Result<(), Error> {
    //page size for each batch read from a datasource
    const LIMIT: u32 = 1000;
    let token = config.endpoint().token()?;
    for task in config.tasks().iter() {
        if let Some(datasource) = config.datasource(&task.1.datasource) {
            if let Some(index) = config.index(&task.1.index) {
                if let Ok(ids) = datasource.inst() {
                    //offset must advance each round, otherwise the first page
                    //would be re-read (and re-pushed) forever
                    let mut offset: u32 = 0;
                    loop {
                        //read the next page of records to sync
                        match ids.records(task.1, LIMIT, offset) {
                            Ok(records) => {
                                if !records.is_empty() {
                                    //push records to endpoint
                                    if let Err(e) =
                                        config.endpoint().inserts(&token, &records, index)
                                    {
                                        error!("{}: push records failed, reason: {:?}", task.0, e);
                                        break;
                                    } else {
                                        info!(
                                            "{}: {} records pushed to indexea [{},{}]",
                                            task.0,
                                            records.len(),
                                            index.app,
                                            index.index,
                                        );
                                    }
                                }
                                //a short page means we reached the end of the data
                                if records.len() < LIMIT as usize {
                                    break;
                                }
                                offset += LIMIT;
                            }
                            Err(e) => {
                                error!("{}: failed to list records, reason: {:?}", task.0, e);
                                break;
                            }
                        }
                    }
                } else {
                    error!("{}: failed getting datasource instance", task.0);
                }
            } else {
                error!("{}: task index [\"{}\"] undefined.", task.0, task.1.index);
            }
        } else {
            error!("{}: task datasource [\"{}\"] undefined.", task.0, task.1.datasource);
        }
    }
    Ok(())
}

/// spawn one worker thread per configured task (watching `indexea_tasks`
/// for increment data) and block until every worker finishes
///
/// # Arguments
/// * `config` global configurations from `indexer.yml`
///
fn start(config: &IndexerConfig) {
    //tasks whose datasource is undefined are silently skipped
    let workers: Vec<_> = config
        .tasks()
        .iter()
        .filter_map(|task| {
            config.datasource(&task.1.datasource).map(|datasource| {
                //clone everything the worker thread must own
                let name = task.0.clone();
                let ds = datasource.clone();
                let endpoint = config.endpoint().clone();
                let worker_task = task.1.clone();
                thread::spawn(move || worker_task.start(&name, &ds, &endpoint))
            })
        })
        .collect();
    //wait for every worker thread; per-thread results are ignored
    for worker in workers {
        let _ = worker.join();
    }
}

/// init logger with file and terminal logs
///
/// # Arguments
///
/// * `args` - command line arguments (provides the log file path)
///
/// # Panics
/// panics if the log file cannot be opened or the logger is initialized twice
fn init_logger(args: &CmdLineArgs) {
    //determining the local UTC offset can fail (e.g. on multi-threaded Linux);
    //fall back to UTC timestamps instead of panicking with an empty message
    let time_offset = UtcOffset::current_local_offset().unwrap_or(UtcOffset::UTC);
    let config = ConfigBuilder::new()
        .set_time_format_custom(format_description!(
            "[year]-[month]-[day] [hour]:[minute]:[second]"
        ))
        .set_time_offset(time_offset)
        .build();
    //the file logger uses the same format configuration
    let config_file = config.clone();

    CombinedLogger::init(vec![
        TermLogger::new(LevelFilter::Info, config, TerminalMode::Mixed, ColorChoice::Auto),
        WriteLogger::new(
            LevelFilter::Info,
            config_file,
            OpenOptions::new()
                .append(true)
                .create(true)
                .open(args.log())
                .expect("failed to open log file"),
        ),
    ])
    .unwrap();
}

/// prompt the user with `msg` and read one line of confirmation from stdin
///
/// # Arguments
/// * `msg` prompt text, printed without a trailing newline
///
/// returns `true` only when the trimmed answer is exactly "y";
/// any read failure counts as "not confirmed"
fn confirm(msg: &str) -> bool {
    print!("{}", msg);
    std::io::stdout().flush().unwrap();
    let mut buffer = String::new();
    stdin().read_line(&mut buffer).is_ok() && buffer.trim() == "y"
}

/// kill every other running "indexer" process (never the current one)
fn kill_self() {
    let mut system = sysinfo::System::new_all();
    system.refresh_processes();
    //exclude ourselves so the killer survives long enough to report
    let self_pid = process::id();
    for proc_entry in system
        .processes_by_name("indexer")
        .filter(|p| p.pid().as_u32() != self_pid)
    {
        proc_entry.kill();
        println!("{}({}) exited.", proc_entry.exe().display(), proc_entry.pid().as_u32());
    }
}
