from abc import ABCMeta, abstractmethod
from sqlalchemy.orm import Session
from docker import DockerClient
from app.controller.model.storage import ScaleStorageRes
from app.dao.storage import get_storage, get_storage_count
from app.docker import Client


class StorageScaler(metaclass=ABCMeta):
    """Abstract interface for scaling a service's storage tier."""

    @abstractmethod
    def scale(self) -> ScaleStorageRes:
        """Perform the scaling operation and return its result record."""
        ...


class StorageScalerImpl(StorageScaler):
    """Scales the storage tier of a Docker Swarm service.

    Services whose name starts with ``'HDFS'`` are scaled by creating or
    removing per-datanode services named ``'<service>_dn<N>'``; every other
    service is scaled via Docker's native replica scaling.
    """

    db: Session              # SQLAlchemy session (unused here; kept for interface parity)
    service_id: str          # id of the Docker service to scale
    client: DockerClient     # Docker API client
    storage_scale: int       # desired number of storage replicas / datanodes

    def __init__(self, db: Session, service_id: str, storage_scale: int, client: DockerClient) -> None:
        super().__init__()
        self.db = db
        self.service_id = service_id
        self.client = client
        self.storage_scale = storage_scale

    def _get_detail_by_service_id(self, service_id):
        """Return the Docker service with the given id, or None if absent."""
        services = self.client.services.list(filters={'id': service_id})
        return services[0] if services else None

    def get_scale_before(self, servicename):
        """Return the current datanode count for `servicename`.

        Counts services whose names strictly extend `servicename`
        (e.g. the '<servicename>_dn<N>' datanode services).
        """
        prefix_len = len(servicename)
        return sum(
            1
            for service in self.client.services.list()
            if service.name != servicename
            and service.name[:prefix_len] == servicename
        )

    def delstorage_by_servicename(self, service_name):
        """Remove the service named exactly `service_name`, if it exists."""
        for service in self.client.services.list():
            if service.name == service_name:
                service.remove()
                return

    def deal_with_HDFS(self, service):
        """Scale an HDFS service by creating or removing datanode services."""
        scale_before = self.get_scale_before(service.name)
        if self.storage_scale > scale_before:
            # Scale up: add datanodes dn<scale_before> .. dn<storage_scale - 1>.
            for tar_index in range(scale_before, self.storage_scale):
                self.client.services.create(
                    image='test_hadoop:1.8',
                    command=["/bin/bash", "datanode.sh"],
                    networks=[service.name],
                    name=f'{service.name}_dn{tar_index}',
                )
        elif self.storage_scale < scale_before:
            # Scale down: remove the highest-indexed datanodes first.
            for tar_index in range(scale_before - 1, self.storage_scale - 1, -1):
                self.delstorage_by_servicename(f'{service.name}_dn{tar_index}')
        # Equal counts: nothing to do.

    def deal_with_other(self, service):
        """Scale a non-HDFS service via Docker's replica scaling."""
        service.scale(self.storage_scale)
        return None

    def scale(self) -> ScaleStorageRes:
        """Scale the target service's storage and return the result record.

        Raises:
            ValueError: if no service with ``self.service_id`` exists
                (previously this crashed with an opaque AttributeError).
        """
        service = self._get_detail_by_service_id(self.service_id)
        if service is None:
            raise ValueError(f'service not found: {self.service_id}')
        if service.name.startswith('HDFS'):
            self.deal_with_HDFS(service)
        else:
            self.deal_with_other(service)

        return ScaleStorageRes(service_id=self.service_id)


def get_storage_scaler(db: Session, service_id: str, storage_scale: int) -> StorageScaler:
    """Build a StorageScaler wired to the application's Docker client."""
    docker_client = Client().client
    return StorageScalerImpl(
        db=db,
        service_id=service_id,
        storage_scale=storage_scale,
        client=docker_client,
    )
