# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=line-too-long

import pytest

from helpers.cluster import ClickHouseCluster

# Module-wide cluster: a single instance with two remote_servers configs.
# `another_remote_servers.xml` is edited live by test_remove_replica (sed),
# hence stay_alive=True so the container survives config reloads.
cluster = ClickHouseCluster(__file__)
node = cluster.add_instance(
    "node",
    main_configs=["configs/remote_servers.xml", "configs/another_remote_servers.xml"],
    stay_alive=True,
)

# Reusable parametrization: run each test against both the cluster with
# internal replication and the one without (affects shard dir naming,
# see get_dist_path).
cluster_param = pytest.mark.parametrize(
    "cluster",
    [
        ("test_cluster_internal_replication"),
        ("test_cluster_no_internal_replication"),
    ],
)


def get_dist_path(cluster, node, table, dist_format):
    """Return the on-disk directory holding pending Distributed inserts.

    The directory name depends on the part-name format:
    - dist_format == 0: old (non-compact) naming — full replica address.
    - dist_format != 0: compact naming, which differs between clusters with
      and without internal replication.
    """
    base = node.query(
        f"SELECT arrayElement(data_paths, 1) FROM system.tables WHERE database='test' AND name='{table}'"
    ).strip()

    if dist_format == 0:
        suffix = "default@not_existing:9000"
    elif cluster == "test_cluster_internal_replication":
        suffix = "shard1_all_replicas"
    else:
        suffix = "shard1_replica1"
    return f"{base}/{suffix}"


@pytest.fixture(scope="module")
def started_cluster():
    """Module-scoped fixture: start the cluster once, create the `test`
    database, and guarantee shutdown even if startup or a test fails."""
    try:
        cluster.start()
        node.query("create database test")
        yield cluster

    finally:
        cluster.shutdown()


@cluster_param
def test_single_file(started_cluster, cluster):
    """Insert through a Distributed table with compact part names, then read
    the single resulting .bin file back via clickhouse-local — once through
    the file() table function and once through a File engine table."""
    node.query("drop table if exists test.distr_1 sync")

    node.query(
        f"create table test.distr_1 (x UInt64, s String) engine = Distributed('{cluster}', database, table)"
    )
    node.query(
        "insert into test.distr_1 values (1, 'a'), (2, 'bb'), (3, 'ccc')",
        settings={"use_compact_format_in_distributed_parts_names": "1"},
    )

    path = get_dist_path(cluster, node, "distr_1", 1)
    expected = "1\ta\n2\tbb\n3\tccc\n"

    # Same data read two ways: table function, then File engine.
    queries = (
        f"select * from file('{path}/1.bin', 'Distributed')",
        f"""
    create table t (x UInt64, s String) engine = File('Distributed', '{path}/1.bin');
    select * from t;
    """,
    )
    for sql in queries:
        result = node.exec_in_container(
            ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", sql]
        )
        assert result == expected

    node.query("drop table test.distr_1 sync")


@cluster_param
def test_two_files(started_cluster, cluster):
    """Two separate inserts produce multiple .bin files; verify clickhouse-local
    can read them together with a brace glob, via file() and a File engine."""
    node.query("drop table if exists test.distr_2 sync")
    node.query(
        f"create table test.distr_2 (x UInt64, s String) engine = Distributed('{cluster}', database, table)"
    )
    # Two inserts -> two pending files on disk.
    for values in ("(0, '_'), (1, 'a')", "(2, 'bb'), (3, 'ccc')"):
        node.query(
            f"insert into test.distr_2 values {values}",
            settings={
                "use_compact_format_in_distributed_parts_names": "1",
            },
        )

    path = get_dist_path(cluster, node, "distr_2", 1)
    expected = "0\t_\n1\ta\n2\tbb\n3\tccc\n"

    queries = (
        f"select * from file('{path}/{{1,2,3,4}}.bin', 'Distributed') order by x",
        f"""
    create table t (x UInt64, s String) engine = File('Distributed', '{path}/{{1,2,3,4}}.bin');
    select * from t order by x;
    """,
    )
    for sql in queries:
        result = node.exec_in_container(
            ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", sql]
        )
        assert result == expected

    node.query("drop table test.distr_2 sync")


@cluster_param
def test_single_file_old(started_cluster, cluster):
    """Same as test_single_file, but with the old (non-compact) distributed
    part naming (use_compact_format_in_distributed_parts_names=0), which
    stores pending files under a full-address directory name."""
    node.query("drop table if exists test.distr_3 sync")
    node.query("drop table if exists t sync")
    node.query(
        "create table test.distr_3 (x UInt64, s String) engine = Distributed('{}', database, table)".format(
            cluster
        )
    )
    node.query(
        "insert into test.distr_3 values (1, 'a'), (2, 'bb'), (3, 'ccc')",
        settings={
            "use_compact_format_in_distributed_parts_names": "0",
        },
    )

    # dist_format=0 -> old naming: <data_path>/default@not_existing:9000
    path = get_dist_path(cluster, node, "distr_3", 0)
    query = f"select * from file('{path}/1.bin', 'Distributed')"
    out = node.exec_in_container(
        ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query]
    )

    assert out == "1\ta\n2\tbb\n3\tccc\n"

    query = f"""
    create table t (x UInt64, s String) engine = File('Distributed', '{path}/1.bin');
    select * from t;
    """
    out = node.exec_in_container(
        ["/usr/bin/clickhouse", "local", "--stacktrace", "-q", query]
    )

    assert out == "1\ta\n2\tbb\n3\tccc\n"

    # `sync` for consistency with the other tests: wait for the drop to
    # complete so leftover state can't leak into subsequent parametrized runs.
    node.query("drop table test.distr_3 sync")


def test_remove_replica(started_cluster):
    """Detach a Distributed table, rewrite the cluster definition on disk so a
    replica disappears after SYSTEM RELOAD CONFIG, then re-attach and flush.
    The server must stay responsive; errors from attach/flush are tolerated."""
    config_path = "/etc/clickhouse-server/config.d/another_remote_servers.xml"

    def rename_cluster(old, new):
        # In-place rename of a cluster name inside the extra config file.
        node.exec_in_container(["sed", "-i", f"s/{old}/{new}/g", config_path])

    node.query("drop table if exists test.local_4 sync")
    node.query("drop table if exists test.distr_4 sync")
    node.query(
        "create table test.local_4 (x UInt64, s String) engine = MergeTree order by x"
    )
    node.query(
        "create table test.distr_4 (x UInt64, s String) engine = Distributed('test_cluster_remove_replica1', test, local_4)"
    )
    node.query(
        "insert into test.distr_4 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd')"
    )
    node.query("detach table test.distr_4")

    # Swap cluster definitions: replica1's definition is parked under a tmp
    # name and replica2's definition takes over the replica1 name.
    rename_cluster("test_cluster_remove_replica1", "test_cluster_remove_replica_tmp")
    rename_cluster("test_cluster_remove_replica2", "test_cluster_remove_replica1")

    node.query("SYSTEM RELOAD CONFIG")
    node.query("attach table test.distr_4", ignore_error=True)
    node.query("SYSTEM FLUSH DISTRIBUTED test.distr_4", ignore_error=True)
    # The real assertion: the server is still alive and answering queries.
    assert node.query("select 1") == "1\n"

    node.query("drop table test.local_4 sync")
    node.query("drop table test.distr_4 sync")

    # revert back the configs for the subsequent runs
    rename_cluster("test_cluster_remove_replica1", "test_cluster_remove_replica2")
    rename_cluster("test_cluster_remove_replica_tmp", "test_cluster_remove_replica1")

def test_invalid_shard_directory_format(started_cluster):
    """
    Test that ClickHouse doesn't crash when it encounters
    a malformed directory name like 'shard1_all_replicas_bkp'
    during distributed table initialization.
    """
    node.query("drop table if exists test.dist_invalid sync")
    node.query("drop table if exists test.local_invalid sync")
    node.query(
        "create table test.local_invalid (x UInt64, s String) engine = MergeTree order by x"
    )
    node.query(
        "create table test.dist_invalid (x UInt64, s String) "
        "engine = Distributed('test_cluster_internal_replication', test, local_invalid)"
    )

    node.query(
        "insert into test.dist_invalid values (1, 'a'), (2, 'bb')",
        settings={"use_compact_format_in_distributed_parts_names": "1"},
    )

    data_path = node.query(
        "SELECT arrayElement(data_paths, 1) FROM system.tables "
        "WHERE database='test' AND name='dist_invalid'"
    ).strip()

    # Plant several malformed shard directories, each non-empty (a dummy file
    # keeps them from being skipped as empty), to reproduce the crash scenario.
    malformed_names = (
        "shard1_all_replicas_bkp",
        "shard1_all_replicas_backup",
        "shard1_all_replicas_old",
        "shard2_all_replicas_tmp",
    )
    for name in malformed_names:
        bad_dir = f"{data_path}/{name}"
        node.exec_in_container(["mkdir", "-p", bad_dir])
        node.exec_in_container(["touch", f"{bad_dir}/dummy.txt"])

    # Reproduce server restart with detach and attach
    node.query("detach table test.dist_invalid")
    node.query("attach table test.dist_invalid")

    node.query("SYSTEM FLUSH LOGS system.text_log")

    error_logs = node.query(
        """
        SELECT count()
        FROM system.text_log
        WHERE level = 'Error'
          AND message LIKE '%Invalid replica_index%'
          AND message LIKE '%shard1_all_replicas%'
        """
    ).strip()

    # We should have at least one error log for each malformed directory
    # But we don't strictly require this in case logging is disabled
    # The important thing is that the server didn't crash
    print(f"Found {error_logs} error log entries for invalid directories")

    # Clean up
    node.query("drop table test.dist_invalid sync")
    node.query("drop table test.local_invalid sync")
