use anyhow::Result;
use http_client::h1::H1Client as Client;
use sqlx::{MySql, MySqlPool};
use std::collections::HashMap;

use crate::api::out::{CommitWorkerJob, JobCount, JobOverview, PreWorkerJob, WorkerJobOutput};
use crate::api::table::{SealTask, Worker, WorkerConfig};
use crate::node;
use crate::node::out::Job;
use crate::node::types::TaskType;
use crate::state::StoreMinerSealer;

/// Per-worker tally of sealing jobs, bucketed by task type.
///
/// Counts are fed from two sources in `worker_sealing_jobs`: database
/// `seal_task` rows (via `JobCount::add_db`) and live jobs reported by the
/// node API (via `JobCount::add_miner`).
#[derive(Default, Debug)]
struct WorkerJobCount {
    ap: JobCount,       // AddPiece      ("AP"  / TaskType::AddPiece)
    pc_one: JobCount,   // PreCommit1    ("PC1" / TaskType::PreCommit1)
    pc_two: JobCount,   // PreCommit2    ("PC2" / TaskType::PreCommit2)
    c_one: JobCount,    // Commit1       ("C1"  / TaskType::Commit1)
    c_two: JobCount,    // Commit2       ("C2"  / TaskType::Commit2)
    finalize: JobCount, // Finalize      ("FIN" / TaskType::Finalize)
    fetch: JobCount,    // Fetch         ("GET" / TaskType::Fetch)
}

/// Per-worker task limits, one per task type, loaded from `worker_config`
/// rows (`task_type` / `task_limit` columns).
///
/// In `worker_sealing_jobs` a positive `c_two` limit classifies the worker
/// as a commit worker; otherwise it is treated as a pre-commit worker.
#[derive(Default, Debug)]
struct WorkerJobLimit {
    ap: u32,       // limit for "AP"  (AddPiece)
    pc_one: u32,   // limit for "PC1" (PreCommit1)
    pc_two: u32,   // limit for "PC2" (PreCommit2)
    c_one: u32,    // limit for "C1"  (Commit1)
    c_two: u32,    // limit for "C2"  (Commit2)
    finalize: u32, // limit for "FIN" (Finalize)
    fetch: u32,    // limit for "GET" (Fetch)
}

pub async fn worker_sealing_jobs(
    db: &MySqlPool,
    client: &Client,
    node: &StoreMinerSealer,
    key: &str,
) -> Result<WorkerJobOutput> {
    let jobs: HashMap<String, Vec<Job>> =
        node::api::sealing_jobs(client, node.node_api.as_str(), node.node_token.as_str()).await?;
    let worker = "select worker_id, worker_host, miner_name, worker_enable, accept_new, update_time from worker where miner_name = ?";
    let workers = sqlx::query_as::<MySql, Worker>(worker)
        .bind(key)
        .fetch_all(db)
        .await?;
    let conf = "select wc.worker_host, wc.task_type, wc.task_limit from worker w, worker_config wc where w.worker_host = wc.worker_host and w.miner_name = ?";
    let array: Vec<WorkerConfig> = sqlx::query_as::<MySql, WorkerConfig>(conf)
        .bind(key)
        .fetch_all(db)
        .await?;
    let run = r#"
    select t.task_id, t.task_type, t.sector_number, t.miner_actor, t.worker_host, t.assign_time, t.receive_time, t.complete_time, t.task_status
    from seal_task t, worker w
    where t.worker_host = w.worker_host
    and w.miner_name = ?
    and t.task_status < 2
    "#;
    let task = sqlx::query_as::<MySql, SealTask>(run)
        .bind(key)
        .fetch_all(db)
        .await?;

    let mut out = WorkerJobOutput::new();
    if workers.is_empty() {
        return Ok(out);
    }
    // log::info!("jobs {:#?}", jobs);

    let f = |t: &str, s: u32, c: &mut WorkerJobCount| match t {
        "AP" => c.ap.add_db(s),
        "PC1" => c.pc_one.add_db(s),
        "PC2" => c.pc_two.add_db(s),
        "C1" => c.c_one.add_db(s),
        "C2" => c.c_two.add_db(s),
        "FIN" => c.finalize.add_db(s),
        "GET" => c.fetch.add_db(s),
        _ => (),
    };
    let mut db_job_map = HashMap::<String, WorkerJobCount>::new();
    for t in task {
        let o = db_job_map.get_mut(&t.worker_host);
        match o {
            None => {
                let mut c = WorkerJobCount::default();
                f(t.task_type.as_str(), t.task_status as u32, &mut c);
                db_job_map.insert(t.worker_host, c);
            }
            Some(c) => {
                f(t.task_type.as_str(), t.task_status as u32, c);
            }
        }
    }
    let f = |t: TaskType, s: i32, c: &mut WorkerJobCount| match t {
        TaskType::AddPiece => c.ap.add_miner(s),
        TaskType::PreCommit1 => c.pc_one.add_miner(s),
        TaskType::PreCommit2 => c.pc_two.add_miner(s),
        TaskType::Commit1 => c.c_one.add_miner(s),
        TaskType::Commit2 => c.c_two.add_miner(s),
        TaskType::Finalize => c.finalize.add_miner(s),
        TaskType::Fetch => c.fetch.add_miner(s),
        _ => (),
    };
    let mut miner_job_map = HashMap::<String, WorkerJobCount>::new();
    for (id, job) in jobs {
        for j in job {
            let o = miner_job_map.get_mut(&id);
            // log::info!("id {}, job {:#?}, o {:#?}", id, j, o);
            match o {
                None => {
                    let mut c = WorkerJobCount::default();
                    f(j.task, j.run_wait, &mut c);
                    miner_job_map.insert(id.clone(), c);
                }
                Some(c) => {
                    f(j.task, j.run_wait, c);
                }
            }
        }
    }
    // log::info!("{:#?}", miner_job_map);
    let f = |t: &str, l: u32, c: &mut WorkerJobLimit| match t {
        "AP" => c.ap = l,
        "PC1" => c.pc_one = l,
        "PC2" => c.pc_two = l,
        "C1" => c.c_one = l,
        "C2" => c.c_two = l,
        "FIN" => c.finalize = l,
        "GET" => c.fetch = l,
        _ => (),
    };
    let mut job_limit_map = HashMap::<String, WorkerJobLimit>::new();
    for limit in array {
        let o = job_limit_map.get_mut(&limit.worker_host);
        match o {
            None => {
                let mut l = WorkerJobLimit::default();
                f(limit.task_type.as_str(), limit.task_limit as u32, &mut l);
                job_limit_map.insert(limit.worker_host, l);
            }
            Some(l) => {
                f(limit.task_type.as_str(), limit.task_limit as u32, l);
            }
        }
    }
    let f = |input: Option<&WorkerJobCount>| {
        match input {
            None => (
                JobCount::default(),
                JobCount::default(),
                JobCount::default(),
                JobCount::default(),
                JobCount::default(),
                JobCount::default(),
            ),
            Some(o) => (
                o.ap.clone(),
                o.pc_one.clone(),
                o.pc_two.clone(),
                o.c_one.clone(),
                o.finalize.clone(),
                o.fetch.clone(),
            ),
        }
    };
    for w in workers {
        let enable = w.worker_enable == 1;
        let limit = job_limit_map.get(&w.worker_host);
        if let Some(l) = limit {
            if l.c_two > 0 {
                let db = db_job_map.get(&w.worker_host);
                let (db_commit, db_finalize, db_fetch) = match db {
                    None => (
                        JobCount::default(),
                        JobCount::default(),
                        JobCount::default(),
                    ),
                    Some(o) => (o.c_two.clone(), o.finalize.clone(), o.fetch.clone()),
                };
                let miner = miner_job_map.get(&w.worker_id);
                let (r_commit, r_finalize, r_fetch) = match miner {
                    None => (
                        JobCount::default(),
                        JobCount::default(),
                        JobCount::default(),
                    ),
                    Some(o) => (o.c_two.clone(), o.finalize.clone(), o.fetch.clone()),
                };
                let job = CommitWorkerJob::new(
                    w.worker_id.clone(),
                    w.worker_host.clone(),
                    enable,
                    JobOverview::with_data(l.c_two, db_commit, r_commit),
                    JobOverview::with_data(l.finalize, db_finalize, r_finalize),
                    JobOverview::with_data(l.fetch, db_fetch, r_fetch),
                );
                out.commit.push(job);
            } else {
                let (db_ap, db_p_o, db_p_t, db_c_o, db_finalize, db_fetch) = f(db_job_map.get(&w.worker_host));
                let (r_ap, r_p_o, r_p_t, r_c_o, r_finalize, r_fetch) = f(miner_job_map.get(&w.worker_id));

                let job = PreWorkerJob::new(
                    w.worker_id.clone(),
                    w.worker_host.clone(),
                    enable,
                    JobOverview::with_data(l.ap, db_ap, r_ap),
                    JobOverview::with_data(l.pc_one, db_p_o, r_p_o),
                    JobOverview::with_data(l.pc_two, db_p_t, r_p_t),
                    JobOverview::with_data(l.c_one, db_c_o, r_c_o),
                    JobOverview::with_data(l.finalize, db_finalize, r_finalize),
                    JobOverview::with_data(l.fetch, db_fetch, r_fetch),
                );
                out.pre.push(job);
            }
        } else {
            log::warn!("worker config for {} not found.", w.worker_host);
        }
    }
    Ok(out)
}
