import json
import logging
from collections.abc import Sequence
from datetime import datetime
from typing import TYPE_CHECKING, Any, Optional

import requests
from sqlalchemy import func
from sqlalchemy.exc import NoResultFound

import rucio.core.did
from rucio.core.distance import get_distances
import rucio.core.lock
import rucio.core.replica
from rucio.common.config import config_get_int
from rucio.common.constants import RseAttr
from rucio.common.exception import InsufficientTargetRSEs
from rucio.common.types import InternalScope
from rucio.core import account_counter, rse_counter
from rucio.core import request as request_core
from rucio.core.rse import get_rse, get_rse_attribute, get_rse_name, list_rses
from rucio.core.rse_selector import RSESelector
from rucio.core.rule_grouping import create_lock_and_replica, create_transfer_dict
from rucio.db.sqla import models
from rucio.db.sqla.constants import (
    OBSOLETE,
    DIDType,
    LockState,
    ReplicaState,
    RequestType,
    RuleGrouping,
)
from rucio.db.sqla.session import transactional_session

if TYPE_CHECKING:
    from sqlalchemy.orm import Session

# Default scheduler gateway endpoint.
# NOTE(review): not referenced anywhere in this module — the endpoint is
# resolved from etc/schedulers.json instead; confirm before removing.
SCHEDULER_GATEWAY = "http://172.17.0.1:9999/"


def __create_replica(
    rse_id, scope, name, bytes_, state, md5, adler32, logger=logging.log
):
    """
    Build and return a new, not-yet-persisted replica ORM object.

    The replica is created with no tombstone and a lock count of zero;
    persisting it (session.add / flush) is left to the caller.

    :param rse_id:   RSE id of the replica.
    :param scope:    Scope of the replica.
    :param name:     Name of the replica.
    :param bytes_:   Size of the replica in bytes.
    :param state:    Initial state of the replica.
    :param md5:      MD5 checksum of the replica.
    :param adler32:  ADLER32 checksum of the replica.
    :param logger:   Optional decorated logger that can be passed from the calling daemons or servers.
    :returns:        An unsaved models.RSEFileAssociation instance.
    """
    replica = models.RSEFileAssociation(
        rse_id=rse_id,
        scope=scope,
        name=name,
        bytes=bytes_,
        md5=md5,
        adler32=adler32,
        tombstone=None,
        state=state,
        lock_cnt=0,
    )
    logger(
        logging.DEBUG, "Creating %s replica for %s:%s on %s", state, scope, name, rse_id
    )
    return replica


@transactional_session
def apply_rule_with_scheduler(
    did,
    rule,
    rses,
    source_rses,
    scheduler_name=None,
    *,
    session: "Session",
    logger=logging.log
):
    """
    Apply a replication rule to one did by delegating replica placement to
    an external scheduler service.

    The did (file, dataset or container) is flattened into a list of
    datasets; each dataset is expanded into its files, annotated with their
    current source replicas. The resulting request — files, rule copies and
    grouping, candidate source/destination RSEs, and the full RSE distance
    topology — is POSTed to the scheduler selected by ``scheduler_name``.
    For every transfer in the scheduler's plan, a COPYING replica is created
    on the destination RSE and a transfer request is queued.

    :param did:             the did object
    :param rule:            the rule object
    :param rses:            target rse ids (candidate destinations)
    :param source_rses:     source rse ids (candidate sources)
    :param scheduler_name:  key into etc/schedulers.json selecting the scheduler endpoint
    :param session:         the database session in use
    :param logger:          Optional decorated logger that can be passed from the calling daemons or servers.
    :raises NameError:      if ``scheduler_name`` is not configured in etc/schedulers.json
    """
    # Flatten the did into a list of (scope, name) dataset identifiers.
    # A CONTAINER is handled as the set of its child datasets; a FILE did
    # is treated like a single-entry dataset (list_files below presumably
    # yields the file itself in that case — TODO confirm).
    datasets = []  # [(scope, name)]
    if did.did_type == DIDType.DATASET:
        datasets.append((did.scope, did.name))
    elif did.did_type == DIDType.CONTAINER:
        for child_dataset in rucio.core.did.list_child_datasets(
            scope=did.scope, name=did.name, session=session
        ):
            # a dataset may be attached to several sub-containers: de-duplicate
            newds = (child_dataset["scope"], child_dataset["name"])
            if newds not in datasets:
                datasets.append(newds)
    elif did.did_type == DIDType.FILE:
        datasets.append((did.scope, did.name))

    # Sort alphabetically for deterministic processing order; if the scope
    # objects are not orderable, keep the original order.
    try:
        datasets = sorted(datasets)
    except TypeError:
        pass

    # Expand every dataset into its files. Each file dict is annotated with
    # its current source replicas, and the scope is converted from
    # InternalScope to its external string form for JSON serialization.
    dss = []  # [[file_dict]] — one list per dataset
    for ds_scope, ds_name in datasets:
        # existence/metadata lookup; the result itself is not used
        rucio.core.did.get_did(
            scope=ds_scope,
            name=ds_name,
            dynamic_depth=DIDType.FILE,
            session=session,
        )
        files_in_dataset = []
        for file in rucio.core.did.list_files(ds_scope, ds_name, session=session):
            file["rses"] = rucio.core.replica.get_source_replicas(
                file["scope"], file["name"], session=session
            )
            file["scope"] = file["scope"].external  # transform to string type
            files_in_dataset.append(file)
        dss.append(files_in_dataset)

    # Build the scheduling request, including the complete RSE/distance
    # topology so the scheduler can make informed placement decisions.
    all_rses = list_rses(session=session)
    distances = get_distances(session=session)
    req = {
        "datasets": dss,
        "copies": rule.copies,
        "candidate_dst_rses": rses,
        "candidate_src_rses": source_rses,
        "topology": {
            "rses": [rse["id"] for rse in all_rses],
            "distances": [
                {k: dist[k] for k in ["src_rse_id", "dest_rse_id", "distance"]}
                for dist in distances
            ],
        },
        "grouping": rule.grouping.value,
    }
    logger(logging.DEBUG, ">>> request to scheduler %s", json.dumps(req))

    # Resolve the scheduler endpoint from the on-disk registry.
    with open("etc/schedulers.json") as f:
        schedulers = json.load(f)
    if scheduler_name not in schedulers:
        raise NameError(scheduler_name)
    scheduler_endpoint = schedulers[scheduler_name]

    # Ask the scheduler for a transfer plan; expected shape:
    # [{'file': {...}, 'src_rse_id': ..., 'dst_rse_id': ...}, ...]
    resp = requests.post(scheduler_endpoint + "/schedule", json=req).json()
    logger(logging.DEBUG, ">>> response from scheduler %s", json.dumps(resp))

    # Turn the plan into transfer requests and COPYING replicas.
    transfers = []
    replicas = []
    for trans in resp:
        transfer_dict = create_transfer_dict(
            dest_rse_id=trans["dst_rse_id"],
            request_type=RequestType.TRANSFER,
            scope=InternalScope(trans["file"]["scope"]),
            name=trans["file"]["name"],
            rule=rule,
            bytes_=trans["file"]["bytes"],
            session=session,
        )
        transfer_dict["source_rse_id"] = trans["src_rse_id"]
        transfers.append(transfer_dict)

        replicas.append(
            __create_replica(
                trans["dst_rse_id"],
                InternalScope(trans["file"]["scope"]),
                trans["file"]["name"],
                bytes_=trans["file"]["bytes"],
                state=ReplicaState.COPYING,
                md5=0,  # TODO: propagate the file's real md5 checksum
                adler32=trans["file"]["adler32"],
            )
        )

    # Flush the new replicas and queue the transfer requests.
    session.add_all(replicas)
    request_core.queue_requests(requests=transfers, session=session)
    session.flush()


# apply_rule_with_scheduler(1, 1, 1, 1)
