use anyhow::Result;
use candid::Decode;
use ic_nns_governance_api::ProposalStatus;
use ic_nns_test_utils::governance::wait_for_final_state;
use ic_registry_nns_data_provider::registry::RegistryCanister;
use ic_registry_subnet_type::SubnetType;
use ic_types::{Cycles, RegistryVersion};
use ic_universal_canister::{management, wasm};
use registry_canister::mutations::do_create_subnet::CanisterCyclesCostSchedule;
use std::{collections::HashSet, time::Duration};

use ic_system_test_driver::{
    driver::{
        group::SystemTestGroup,
        ic::{InternetComputer, Subnet},
        test_env::TestEnv,
        test_env_api::{
            HasPublicApiUrl, HasTopologySnapshot, IcNodeContainer, NnsInstallationBuilder,
            SshSession,
        },
    },
    nns::{
        self, get_software_version_from_snapshot, get_subnet_list_from_registry,
        submit_create_application_subnet_proposal, vote_on_proposal,
    },
    systest,
    types::CreateCanisterResult,
    util::{UniversalCanister, assert_create_agent, block_on, runtime_from_url},
};
use slog::info;

/// Test-driver entry point: registers `setup` and `test` with the system
/// test group and hands control to the driver's CLI runner.
fn main() -> Result<()> {
    let group = SystemTestGroup::new()
        .with_setup(setup)
        .add_test(systest!(test));
    group.execute_from_args()?;
    Ok(())
}

/// Starts the testnet under test: one single-node System subnet (which will
/// host the NNS) plus one unassigned node, then blocks until every assigned
/// node reports healthy and the unassigned node is reachable over SSH.
pub fn setup(env: TestEnv) {
    InternetComputer::new()
        .add_subnet(Subnet::fast(SubnetType::System, 1))
        .with_unassigned_nodes(1)
        .setup_and_start(&env)
        .expect("failed to setup IC under test");
    // Wait for every node on every subnet to become healthy.
    for subnet in env.topology_snapshot().subnets() {
        for node in subnet.nodes() {
            node.await_status_is_healthy().unwrap();
        }
    }
    // Unassigned nodes expose no public API yet; SSH reachability is the
    // readiness signal for them.
    for node in env.topology_snapshot().unassigned_nodes() {
        node.await_can_login_as_admin_via_ssh().unwrap();
    }
}

/// Main test body, in three phases:
/// 1. install the NNS canisters on the root subnet;
/// 2. submit, vote on, and await a create-subnet proposal (with
///    `CanisterCyclesCostSchedule::Free`) that turns the unassigned node into
///    a new application subnet, then verify the registry reflects it;
/// 3. on the new subnet, create a canister with zero cycles and verify it can
///    execute updates and persist data to stable memory.
pub fn test(env: TestEnv) {
    let log = &env.logger();

    info!(log, "[Phase I] Prepare NNS");
    // The System subnet created in `setup` is the root (NNS) subnet; take its
    // single node to install the NNS canisters on.
    let nns_node = env
        .topology_snapshot()
        .root_subnet()
        .nodes()
        .next()
        .expect("there is no NNS node");
    NnsInstallationBuilder::new()
        .install(&nns_node, &env)
        .expect("NNS canisters not installed");
    info!(&env.logger(), "NNS canisters installed");

    let topology_snapshot = &env.topology_snapshot();
    let subnet = topology_snapshot.root_subnet();
    let endpoint = subnet.nodes().next().unwrap();

    // get IDs of all unassigned nodes (consumed below via `by_ref().take(1)`)
    let mut unassigned_nodes = topology_snapshot
        .unassigned_nodes()
        .map(|node| node.node_id);

    info!(log, "[Phase II] Execute and validate the testnet changes");

    // Registry client pointed at the NNS node; queries time out after 10s.
    let client = RegistryCanister::new_with_query_timeout(
        vec![endpoint.get_public_url()],
        Duration::from_secs(10),
    );

    let (subnet_ids, topology_snapshot) = block_on(async move {
        let original_subnets = get_subnet_list_from_registry(&client).await;
        assert!(!original_subnets.is_empty(), "registry contains no subnets");
        info!(log, "original subnets: {:?}", original_subnets);

        // get current replica version and Governance canister
        let version = get_software_version_from_snapshot(&endpoint)
            .await
            .expect("could not obtain replica software version");
        let nns = runtime_from_url(endpoint.get_public_url(), endpoint.effective_canister_id());
        let governance = nns::get_governance_canister(&nns);

        // Submit and adopt the configured number of create subnet proposals.
        // The new subnet gets the `Free` cost schedule, so canisters on it do
        // not need cycles (exercised in Phase III).
        let nodes = unassigned_nodes.by_ref().take(1).collect();
        info!(
            log,
            "Submitting proposal to create subnet with nodes: {nodes:?}"
        );
        let proposal_id = submit_create_application_subnet_proposal(
            &governance,
            nodes,
            version.clone(),
            Some(CanisterCyclesCostSchedule::Free),
        )
        .await;
        info!(log, "Voting on proposal {proposal_id}");
        vote_on_proposal(&governance, proposal_id).await;

        // Wait until all proposals are executed
        info!(log, "Waiting on proposal {proposal_id}");
        let proposal_info = wait_for_final_state(&governance, proposal_id).await;
        assert_eq!(
            proposal_info.status,
            ProposalStatus::Executed as i32,
            "proposal {proposal_id} did not execute: {proposal_info:?}"
        );

        // Refresh the topology once the registry has advanced past the
        // version that contains the create-subnet mutation.
        let new_topology_snapshot = topology_snapshot
            .block_for_min_registry_version(RegistryVersion::new(2))
            .await
            .expect("Could not obtain updated registry.");

        // Check that the registry indeed contains the data
        let final_subnets = get_subnet_list_from_registry(&client).await;
        info!(log, "final subnets: {:?}", final_subnets);

        let original_subnet_set = set(&original_subnets);
        let final_subnet_set = set(&final_subnets);
        // check that exactly one subnet was added
        assert_eq!(
            original_subnet_set.len() + 1,
            final_subnet_set.len(),
            "final number of subnets should be 1 above number of original subnets"
        );
        assert!(
            original_subnet_set.is_subset(&final_subnet_set),
            "final number of subnets should be a superset of the set of original subnets"
        );

        // Return the full post-proposal subnet list plus the refreshed
        // topology snapshot for Phase III.
        (final_subnets, new_topology_snapshot)
    });

    info!(log, "[Phase III] install a canister without cycles");
    // Verify every subnet is operational; the zero-cycles checks run only on
    // the newly created (non-NNS) subnet.
    for subnet_id in subnet_ids {
        info!(log, "Asserting healthy status of subnet {subnet_id}");
        let subnet = topology_snapshot
            .subnets()
            .find(|subnet| subnet.subnet_id == subnet_id)
            .expect("Could not find newly created subnet.");
        subnet
            .nodes()
            .for_each(|node| node.await_status_is_healthy().unwrap());
        let endpoint = subnet
            .nodes()
            .next()
            .expect("Could not find any node in newly created subnet.");
        // skip nns subnet, only test new subnet which has free cycles schedule
        if subnet_id == nns_node.subnet_id().unwrap() {
            continue;
        }
        block_on(async move {
            let agent = assert_create_agent(endpoint.get_public_url().as_str()).await;

            // this one is created with the provisional API
            let universal_canister =
                UniversalCanister::new(&agent, endpoint.effective_canister_id()).await;
            // this universal canister is created with the normal API, but without cycles.
            let CreateCanisterResult {
                canister_id: new_canister_id,
            } = universal_canister
                .update(wasm().call(management::create_canister(Cycles::new(0))))
                .await
                .map(|res| Decode!(&res, CreateCanisterResult).unwrap())
                .unwrap();
            let new_uni_can = UniversalCanister::new(&agent, new_canister_id.into()).await;
            // this universal canister has no cycles, but can execute this message, and use memory
            new_uni_can
                .update(wasm().stable_grow(1).reply())
                .await
                .unwrap();
            const UPDATE_MSG_1: &[u8] =
                b"This beautiful prose should be persisted for future generations";

            // Persist data to stable memory, then read it back to prove the
            // zero-cycles canister is fully functional.
            new_uni_can.store_to_stable(0, UPDATE_MSG_1).await;
            info!(log, "successfully saved message in the universal canister");

            assert_eq!(
                new_uni_can
                    .try_read_stable(0, UPDATE_MSG_1.len() as u32)
                    .await,
                UPDATE_MSG_1.to_vec(),
                "could not validate that subnet is healthy: universal canister is broken"
            );
        });
    }
}

/// Collects a slice into a `HashSet`, cloning each element (duplicates collapse).
fn set<H: Clone + std::cmp::Eq + std::hash::Hash>(data: &[H]) -> HashSet<H> {
    data.iter().cloned().collect()
}
