from distutils.command.config import config
from random import shuffle
from time import sleep
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
import torch
from torchsummary import summary
from data.dataset import build_dataset
import os
import cv2
import numpy as np
from data.transform import Pipeline
from .task import ClassTask
from data.collate import collate_function


# AVAIL_GPUS = min(1,torch.cuda.device_count())


class ClassWorker():
    """Driver for the image-classification task.

    Wraps a ``ClassTask`` Lightning module with a pre-configured
    ``Trainer`` and exposes train / validate / inference entry points.

    Args:
        config: project config object; must expose ``data``, ``device``,
            ``schedule`` and ``class_names`` attributes (schema defined
            elsewhere in the project — not visible from this file).
    """

    def __init__(self, config) -> None:
        self.config = config

        self.model = ClassTask(self.config).cuda()
        # Keep only the best checkpoint according to validation loss.
        checkpoint_callback = ModelCheckpoint(
            dirpath='save_module',
            filename='{epoch:02d}-{val_loss:.2f}',
            monitor='val_loss'
        )
        # logger = TensorBoardLogger('save_logs', name=self.config.model.arch.backbone.name)
        self.trainer = Trainer(callbacks=checkpoint_callback,
                               gpus=self.config.device.gpu_ids,
                               max_epochs=self.config.schedule.total_epochs,
                               # logger=logger,
                               log_every_n_steps=5,
                               progress_bar_refresh_rate=1,
                               default_root_dir='save_module')

    def summary(self):
        """Print a per-layer summary of the model (runs on CPU).

        NOTE(review): this moves the model to CPU in place; call before
        training, or move it back to GPU afterwards.
        """
        w, h = self.config.data.train.input_size
        batch_size = self.config.device.batchsize_per_gpu
        summary(self.model.to(torch.device('cpu')), input_size=(
                3, w, h), batch_size=batch_size, device='cpu')

    def train(self, ckpt_path=None):
        """Build train/val datasets and loaders, then fit the model.

        Args:
            ckpt_path: optional checkpoint to resume training from,
                forwarded to ``Trainer.fit``.
        """
        # Build the datasets.
        self.train_data = build_dataset(self.config.data.train,
                                        class_name=self.config.class_names,
                                        mode='train')
        self.val_data = build_dataset(self.config.data.val,
                                      class_name=self.config.class_names,
                                      mode='val')
        # Build the dataloaders.
        # train_sampler = torch.utils.data.distributed.DistributedSampler(self.train_data)
        train_loader = torch.utils.data.DataLoader(
            self.train_data,
            batch_size=self.config.device.batchsize_per_gpu,
            num_workers=self.config.device.workers_per_gpu,
            collate_fn=collate_function,
            pin_memory=True,
            shuffle=True,
            drop_last=True)
        # val_sampler = torch.utils.data.distributed.DistributedSampler(self.val_data)
        val_loader = torch.utils.data.DataLoader(
            self.val_data,
            batch_size=self.config.device.batchsize_per_gpu,
            num_workers=self.config.device.workers_per_gpu,
            collate_fn=collate_function,
            pin_memory=True,
            drop_last=True)

        self.trainer.fit(self.model, train_loader,
                         val_loader, ckpt_path=ckpt_path)

    def validate(self, ckpt_path=None):
        """Run a standalone validation pass.

        Args:
            ckpt_path: optional checkpoint to evaluate, forwarded to
                ``Trainer.validate``.
        """
        self.val_data = build_dataset(self.config.data.val,
                                      class_name=self.config.class_names,
                                      mode='val')
        # val_sampler = torch.utils.data.distributed.DistributedSampler(self.val_data)
        # collate_fn added for consistency with the val loader in train().
        val_loader = torch.utils.data.DataLoader(
            self.val_data,
            batch_size=self.config.device.batchsize_per_gpu,
            num_workers=self.config.device.workers_per_gpu,
            collate_fn=collate_function,
            pin_memory=True,
            drop_last=True)
        self.trainer.validate(self.model, val_loader, ckpt_path=ckpt_path)

    def inference(self, image, ckpt_path=None):
        """Classify a single image and print the predicted class.

        Args:
            image: either a path to an image file, or an already-decoded
                RGB image array (H, W, C).
            ckpt_path: optional checkpoint to load weights from; if None,
                the model's current weights are used.
        """
        device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
        self.pipeline = Pipeline(
            self.config.data.val.pipeline, self.config.data.val.keep_ratio)
        img_info = {}
        if isinstance(image, str):
            # Fix: basename of the path (`image`), not the not-yet-bound `img`.
            img_info['file_name'] = os.path.basename(image)
            # imdecode+fromfile handles non-ASCII paths; [..., ::-1] = BGR->RGB.
            img = cv2.imdecode(np.fromfile(
                image, dtype=np.uint8), -1)[..., ::-1]
        else:
            # Fix: the array input was never bound to `img` (NameError).
            img_info['file_name'] = None
            img = image

        height, width = img.shape[:2]
        img_info['height'] = height
        img_info['width'] = width
        meta = dict(img_info=img_info,
                    raw_img=img,
                    img=img)
        # Fix: the attribute is self.config, not self.cfg.
        meta = self.pipeline(meta, self.config.data.val.input_size)
        # ascontiguousarray: [..., ::-1] above yields a negative-stride view,
        # which torch.from_numpy rejects. Fix: use local `device`
        # (self.device was never set).
        meta['img'] = torch.from_numpy(np.ascontiguousarray(
            meta['img'].transpose(2, 0, 1))).unsqueeze(0).to(device)
        # Fix: guard against the default ckpt_path=None (torch.load(None) crashes).
        if ckpt_path is not None:
            self.model.load_state_dict(torch.load(
                ckpt_path, map_location=device), strict=False)
        self.model.to(device)
        # Fix: switch off dropout / use running BN stats for inference.
        self.model.eval()
        with torch.no_grad():
            output = self.model(meta['img'])
            output = torch.squeeze(output).cpu()
            predict_cls = torch.argmax(output).numpy()
            # Fix: use the {filename} placeholder — the kwarg was passed
            # but the format string never referenced it.
            print("{filename}: 预测为 {predict_cls},概率为 {output}".format(
                filename=img_info['file_name'],
                predict_cls=self.config.class_names[predict_cls],
                output=output[predict_cls].numpy()
            ))


