import json
import logging
import os.path
import platform
import shutil
import sys

import paddle
import psutil

from Analysis.utils import *
from Entitys.tasks import BatchTask, UpdateModel, TrainTask
from FormatConver.conver import batch_convert_niigz, niigz2raw
from GenerateModelsFromLabels.update_model import update
from PredictTools.fileMove import moveFilesToUserRoot
from PredictTools.runScirpt import excuteCommand
from Tools.redisTools import RedisTools
from TrainModel.runtTrain import do_train
from mqTools import MqTools

# Path to the RabbitMQ connection/queue configuration shipped with PredictTools.
mq_config_path = os.path.join(os.path.dirname(__file__), 'PredictTools', 'configs', 'mqConfig.yml')
logging.getLogger().setLevel(logging.INFO)

# Unique identifier of this worker device, published to Redis on startup.
# Every deployed device must be given its own id before going live.
DEVICE_ID = 21445


class PredReasoningBackend:
    """MQ-driven worker for segmentation prediction, model update, training
    and image-format conversion.

    On construction it registers this device's hardware info in Redis (best
    effort) and opens the MQ channels declared in the YAML config.  The
    callbacks are serialized through the ``is_runing`` flag: while one task
    runs, every other delivery is pushed back onto the queue with
    ``basic_recover(requeue=True)``.
    """

    def __init__(self):
        # Gather device facts first so they can be both logged and pushed
        # to Redis for the scheduler.
        self.device_inf = get_device_inf()
        logging.info(f'this device inf {self.device_inf}')
        try:
            self.r = RedisTools()
            self.r.set(f'device_{DEVICE_ID}', self.device_inf)
            logging.info(f'ReasoningBackend init redis success')
        except Exception as e:
            # Redis is optional: log and keep going so the worker can still
            # consume MQ tasks.  NOTE(review): self.r stays unset on failure;
            # nothing below reads it today, but future uses must guard this.
            logging.error(f'ReasoningBackend init redis error {e}')

        logging.info('ReasoningBackend start init')
        self.mq = MqTools(mq_config_path)
        logging.info(f'ReasoningBackend mq config {self.mq.ymls}')
        self.hasGpu = paddle.is_compiled_with_cuda()
        logging.info(f'ReasoningBackend hasGpu {self.hasGpu}')
        self.mq.make_channel(self.mq.queues_names)
        logging.info('ReasoningBackend init success')

        if self.device_inf['system'] == 'Windows':
            logging.warning('if the system is windows, the nnunet will not be able to run normally')

        # True while any callback is processing a task (single-task worker).
        self.is_runing = False

    def update_model(self, ch, method, propreties, body):
        """Rebuild a user's model from labelled nii data and report the
        outcome ('success'/'fail') on the ``model_update_result`` queue."""
        if self.is_runing is True:
            # Busy: push the delivery back so it can be retried later.
            ch.basic_recover(requeue=True)
            return

        self.is_runing = True

        up = UpdateModel()
        up.load_data(body)
        # Ack immediately: the outcome is reported via MQ below either way.
        ch.basic_ack(delivery_tag=method.delivery_tag)
        try:
            update(up.niiPath, up.savePath)
            self.mq.send(self.mq.queues_names['model_update_result'],
                         json.dumps({'userId': up.userId, 'status': 'success'}))
        except Exception as e:
            logging.error(f'update model error {e}')
            self.mq.send(self.mq.queues_names['model_update_result'],
                         json.dumps({'userId': up.userId, 'status': 'fail'}))
        finally:
            self.is_runing = False

    def batch_task_callback(self, ch, method, properties, body):
        """Run one batch prediction task end to end.

        Pipeline: stage the input images into a ``decathlon`` folder,
        convert them to .nii.gz, run inference (MedicalSeg or nnUNet),
        generate 3D objects from the labels, convert predictions back to
        the original format, run data analysis, move everything into the
        user's root and publish the result on ``batch_task_result``.
        """
        if self.is_runing is True:
            # Busy: push the delivery back so it can be retried later.
            ch.basic_recover(requeue=True)
            return

        self.is_runing = True
        batch_task = BatchTask()
        batch_task.load_data(body)

        # NOTE(review): this delivery is never acked (basic_ack was
        # commented out) — confirm the queue settings make that safe.
        logging.info(f'get a batch task {batch_task}')
        try:
            # A MedicalSeg export always ships a deploy.yaml; anything else
            # is treated as an nnUNet model directory.
            task_frame = 'MedicalSeg' if os.path.exists(os.path.join(batch_task.modelPath, 'deploy.yaml')) else 'nnUNet'
            logging.info(f'using {task_frame} to predict')
            logging.info(f'convert data to decathlon format')
            if not os.path.exists(os.path.join(batch_task.savePath, 'decathlon')):
                os.makedirs(os.path.join(batch_task.savePath, 'decathlon'))

            logging.info(f'copy data to decathlon format')
            if task_frame == 'nnUNet':
                # nnUNet expects the modality suffix '_0000' on every image.
                for file_name in batch_task.imgPaths:
                    base_name = os.path.basename(file_name)
                    base_name = base_name.split('.')
                    base_name[0] += '_0000'
                    base_name = '.'.join(base_name)
                    shutil.copy(file_name, os.path.join(batch_task.savePath, 'decathlon', base_name))
            else:
                for file_name in batch_task.imgPaths:
                    shutil.copy(file_name, os.path.join(batch_task.savePath, 'decathlon'))

            # From here on imgPaths is the staging *directory*, not a list.
            batch_task.imgPaths = os.path.join(batch_task.savePath, 'decathlon')

            convert_tmp_path = os.path.join(batch_task.savePath, 'convert_tmp')
            if not os.path.exists(convert_tmp_path):
                os.makedirs(convert_tmp_path)

            error_files = batch_convert_niigz(batch_task.imgPaths, convert_tmp_path)
            if len(error_files) > 0:
                logging.info(f'found not support file {error_files}, we will skip it')

                if not os.path.exists(os.path.join(batch_task.savePath, 'error')):
                    os.makedirs(os.path.join(batch_task.savePath, 'error'))

                # Quarantine unconvertible files; moving them out of the
                # staging dir is enough to exclude them from inference.
                # (Bug fix: the old code also called
                # batch_task.savePath.pop(error_file), which always raised
                # AttributeError because savePath is a str.)
                for error_file in error_files:
                    shutil.move(error_file, os.path.join(batch_task.savePath, 'error'))

            pred_log_out = None
            generate_log_out = None

            logging.info('start predict')
            if task_frame == 'MedicalSeg':
                # A MedicalSeg model upload must be the full exported bundle
                # (deploy.yaml plus the inference model files).
                infer_scipt_path = os.path.join(os.path.dirname(__file__), 'PredictTools', 'MedicalSeg', 'infer.py')
                model_config_path = os.path.join(batch_task.modelPath, 'deploy.yaml')
                image_path = convert_tmp_path
                batch_size = 1
                save_dir = os.path.join(batch_task.savePath, 'pred')
                device = 'gpu' if self.hasGpu else 'cpu'

                infer_cmd = f'python {infer_scipt_path} --config {model_config_path} --image_path {image_path} --batch_size {batch_size} --save_dir {save_dir} --device {device}'

                pred_log_out = excuteCommand(infer_cmd)

            elif task_frame == 'nnUNet':
                # nnUNet cannot run on Windows and requires a GPU, so refuse
                # when either requirement is not met.
                # (Bug fix: was `not self.hasGpu and ... == 'Windows'`, which
                # let a GPU-equipped Windows machine fall through.)
                if not self.hasGpu or self.device_inf['system'] == 'Windows':
                    logging.error('NNUnet requires a GPU and does not support windows')
                    raise Exception('NNUnet requires a GPU and does not support windows')
                infer_scipt_path = os.path.join(os.path.dirname(__file__), 'PredictTools', 'MedicalSeg', 'nnunet',
                                                'predict.py')
                image_folder = convert_tmp_path
                output_folder = os.path.join(batch_task.savePath, 'pred')

                # Hard-coded for simplicity/debuggability: the uploaded model
                # dir must contain nnUNetPlansv2.1_plans_3D.pkl and
                # model.pdparams (postprocessing.json optional) and the model
                # must be cascade_lowres.  TTA and postprocessing are
                # disabled for inference speed.
                plan_path = os.path.join(batch_task.modelPath, 'nnUNetPlansv2.1_plans_3D.pkl')
                model_path = os.path.join(batch_task.modelPath, 'model.pdparams')
                postprocessing_json_path = os.path.join(batch_task.modelPath, 'postprocessing.json')

                infer_cmd = f'python {infer_scipt_path} --image_folder {image_folder} --output_folder {output_folder} --model_type cascade_lowres --plan_path {plan_path} --model_path {model_path} --postprocessing_json_path {postprocessing_json_path} --disable_tta --disable_postprocessing'
                pred_log_out = excuteCommand(infer_cmd)
                # The predictor drops a copy of the plan file next to the
                # outputs; remove it so only predictions remain.
                if os.path.exists(os.path.join(batch_task.savePath, 'pred', 'nnUNetPlansv2.1_plans_3D.pkl')):
                    os.remove(os.path.join(batch_task.savePath, 'pred', 'nnUNetPlansv2.1_plans_3D.pkl'))
            else:
                logging.error(f'not support task frame {task_frame}')
                return

            # Generate 3D mesh objects from the predicted label volumes.
            TheerDobj_path = os.path.join(batch_task.savePath, 'TheerDobj')
            generate_cmd = f'python {os.path.join(os.path.dirname(__file__), "GenerateModelsFromLabels", "main_tool.py")} --file_path {os.path.join(batch_task.savePath, "pred")} --save_root {TheerDobj_path}'
            generate_log_out = excuteCommand(generate_cmd)

            # Convert predictions back into the caller's original format.
            orginal_format = os.path.join(batch_task.savePath, 'orginal_format')
            niigz2raw(os.path.join(batch_task.savePath, 'pred'), batch_task.imgPaths, orginal_format)

            # Data analysis over the converted inputs and predictions.
            ana_path(convert_tmp_path, os.path.join(batch_task.savePath, 'pred'),
                     os.path.join(batch_task.savePath, 'analysis'))

            # Move all artefacts into the user's root directory.
            user_viewlabel_path, user_orglabel_path, user_obj_path, user_ana_path = moveFilesToUserRoot(
                batch_task, TheerDobj_path, os.path.join(batch_task.savePath, 'pred'),
                orginal_format, os.path.join(batch_task.savePath, 'analysis'),
                batch_task.savePath
            )

            # Publish the result for the frontend/scheduler.
            result = {'taskId': batch_task.taskId,  # task id
                      'userId': batch_task.userId,  # user id
                      'predLogOut': pred_log_out,  # inference log
                      'generateLogOut': generate_log_out,  # mesh-generation log
                      'ErrorFiles': error_files,  # unsupported files
                      'theerDobjPath': user_obj_path,  # generated 3D model path
                      'orginalFormat': user_orglabel_path,  # predictions in original format
                      'niigzFormatPred': user_viewlabel_path,  # nii.gz predictions for frontend display
                      'analysisPath': user_ana_path,  # data-analysis json path
                      'labelsetId': batch_task.labelSetId}
            self.mq.send('batch_task_result', json.dumps(result))
        except Exception as e:
            logging.error(e)
            result = {'batch_task_id': batch_task.taskId,
                      'user_id': batch_task.userId,
                      'error': str(e)}
            result = json.dumps(result)
            self.mq.send('batch_task_result', result)
        finally:
            self.is_runing = False

    def train(self, ch, method, properties, body):
        """Run one training task and publish the log (or error) on the
        ``train_task_result`` queue."""
        if self.is_runing is True:
            # Busy: push the delivery back so it can be retried later.
            ch.basic_recover(requeue=True)
            return
        self.is_runing = True

        task = TrainTask()
        task.load_data(body)
        logging.info(f'start train task {task.taskId}')
        try:
            scr_path = os.path.join(os.path.dirname(__file__), 'PredictTools', 'MedicalSeg', 'train.py')

            # Only nnunet lowres training is supported.
            log = do_train(task, scr_path)

            result = {'task_id': task.taskId,  # task id
                      'user_id': task.userId,  # user id
                      'log': log}  # training log
            result = json.dumps(result)
            self.mq.send('train_task_result', result)

        except Exception as e:
            logging.error(e)
            result = {'task_id': task.taskId,
                      'user_id': task.userId,
                      'error': str(e)}
            result = json.dumps(result)
            self.mq.send('train_task_result', result)
        finally:
            self.is_runing = False

    def convaert(self, ch, method, properties, body):
        """Convert a batch of images to nii.gz (best effort, no result MQ).

        NOTE(review): name keeps the original spelling because it is part of
        the public interface.  ``dict(body)`` assumes body is already a
        key/value iterable — it raises on raw bytes/str payloads; confirm
        the producer's message format.
        """
        if self.is_runing is True:
            # Busy: push the delivery back so it can be retried later.
            ch.basic_recover(requeue=True)
            return
        self.is_runing = True

        task = dict(body)
        try:
            batch_convert_niigz(task['org_img'], task['view_img'])

        except Exception as e:
            logging.error(e)
        finally:
            self.is_runing = False


def get_device_inf():
    """Collect hardware/system information about this device.

    Each device must have its DEVICE_ID changed before deployment.

    Returns:
        dict: device id/name, cpu count, OS name and version, whether paddle
        was compiled with CUDA, GPU count, total disk and memory in GiB, and
        one ``gpu_<i>_memory`` entry (GiB) per visible GPU.
    """
    gib = 1024.0 * 1024.0 * 1024.0
    device_inf = {'device_id': DEVICE_ID, 'device_name': 'test_device', 'cpu_count': psutil.cpu_count(),
                  'system': platform.system(), 'system_version': platform.version(),
                  'gpu': paddle.is_compiled_with_cuda(), 'gpu_count': paddle.device.cuda.device_count(),
                  'disk': round(shutil.disk_usage('/').total / gib),
                  'memory': round(psutil.virtual_memory().total / gib)}
    for i in range(device_inf['gpu_count']):
        # Bug fix: query device i (the old code always queried device 0) and
        # store a plain number (a stray trailing comma used to wrap the
        # value in a 1-tuple).
        device_inf[f'gpu_{i}_memory'] = round(
            paddle.device.cuda.get_device_properties(i).total_memory / gib)

    return device_inf


if __name__ == '__main__':
    # Build the backend (registers device in Redis, opens MQ channels) and
    # block forever consuming tasks.  NOTE(review): only the batch, model
    # update and train callbacks are registered — convaert is never wired up.
    pred_backend = PredReasoningBackend()
    logging.info('ReasoningBackend start consuming')
    pred_backend.mq.start_consuming(pred_backend.batch_task_callback, pred_backend.update_model, pred_backend.train)
