from torch import optim
import torch
import tqdm
from torch.utils.tensorboard import SummaryWriter
import datetime
import os
from models.model import SegmentModel
from utils.cal_dice_iou import Meter
from dataset.data_factory import provider
from utils.set_seed import seed_torch
from models.solver import Solver
from utils.loss import MultiLoss
from utils.optim import get_optim
from utils.choose_thre_area import ChooseThresholdMinArea
from omegaconf import OmegaConf
# from utils.funcs import Logger
from utils.oss_file import Logger, upload_local_file, aliyun_prefix
import uuid
from utils.mysql_crud import create_connection, set_log, update_status, update_data, select_data
from utils.redis_crud import get_redis_client, is_pause, del_pause


class TrainVal:
    """Training/validation driver for a binary-mask segmentation model.

    Wires together the model, multi-term loss, optimizer, optional cosine LR
    scheduler, TensorBoard logging, MySQL task bookkeeping and Redis pause
    signalling.  A run interrupted mid-training can be resumed from the
    checkpoint folder recorded in the ``train_task`` table.
    """

    def __init__(self, config, device):
        """Build model/solver/optimizer and prepare (or resume) the run.

        Args:
            config: OmegaConf node with ``model``, ``loss``, ``optim``,
                ``scheduler``, ``lr``, ``weight_decay``, ``epoch``,
                ``save_path``, ``seed`` and ``train_task_id`` fields.
            device: torch device the model is moved to.
        """
        self.config = config
        self.device = device
        # Connections used for task-status bookkeeping and pause signalling.
        self.connection = create_connection()
        self.redis_client = get_redis_client()

        self.model = SegmentModel(config.model.name, config.model.pretrained, class_num=config.model.class_num,
                                  **config.model.parameters).to(device)

        self.solver = Solver(self.model, device)
        self.criterion = MultiLoss(config.loss.names, config.loss.parameters, config.loss.weights)

        self.optimizer = get_optim(config.optim)(params=self.model.parameters(), lr=config.lr,
                                                 weight_decay=config.weight_decay)
        if config.scheduler:
            # T_max = epoch + 10 keeps the LR from annealing all the way to 0
            # by the final training epoch.
            self.lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, self.config.epoch + 10)
        else:
            self.lr_scheduler = None
        self.start_epoch = 1
        self.logger = Logger()
        if not self.resume_train():
            # Fresh run: create a unique save directory and register it in the DB.
            save_path = f'{config.save_path}/segment/{config.model.name}/{datetime.datetime.now().strftime("%Y-%m-%d")}-{uuid.uuid4()}'
            self.save_path = save_path
            # exist_ok=True avoids the check-then-create race of the
            # original exists()/makedirs() pair.
            os.makedirs(save_path, exist_ok=True)
            update_query = "UPDATE train_task SET is_pause=%s,checkpoint_folder=%s,log=%s WHERE id=%s"
            update_data(self.connection, update_query, (1, save_path, self.logger.link, self.config.train_task_id))
            self.logger.log(OmegaConf.to_yaml(config))
        self.writer = SummaryWriter(log_dir=self.save_path)
        self.max_dice_valid = 0
        seed_torch(config.seed)

    def train(self, train_loader, valid_loader):
        """Run the full training loop, validating after every epoch.

        After the last epoch, grid-searches the post-processing threshold /
        min-area on the validation set and uploads the final artifacts.

        Args:
            train_loader: iterable of (images, masks) training batches.
            valid_loader: iterable of (images, masks) validation batches.
        """
        global_step = 0
        for epoch in range(self.start_epoch, self.config.epoch + 1):
            epoch_loss = 0
            self.model.train(True)

            tbar = tqdm.tqdm(train_loader)
            for i, samples in enumerate(tbar):
                # Skip empty batches emitted by the dataset.
                if len(samples) == 0:
                    continue
                images, masks = samples[0], samples[1]
                # Forward + backward; the loss applies sigmoid internally, so
                # raw logits are passed through.
                masks_predict = self.solver.forward(images)
                loss = self.solver.cal_loss(masks, masks_predict, self.criterion)
                epoch_loss += loss.item()
                self.solver.backword(self.optimizer, loss)

                # One scalar per optimization step for TensorBoard.
                self.writer.add_scalar('train_loss', loss.item(), global_step + i)

                # External pause request: release resources and stop.
                if is_pause(self.redis_client, self.config.train_task_id):
                    # Fix: close the writer so buffered scalars are flushed
                    # before the early return.
                    self.writer.close()
                    self.connection.close()
                    self.redis_client.close()
                    return

            # Step the LR schedule once per completed epoch.
            if self.lr_scheduler is not None:
                self.lr_scheduler.step()
            global_step += len(train_loader)

            # Print the log info; max(..., 1) guards an empty loader
            # (ZeroDivisionError in the original).
            self.logger.log(
                'Finish Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.config.epoch, epoch_loss / max(len(tbar), 1)))

            # Validate and track the best dice seen so far.
            loss_valid, dice_valid, iou_valid = self.validation(valid_loader)
            if dice_valid > self.max_dice_valid:
                is_best = True
                self.max_dice_valid = dice_valid
            else:
                is_best = False

            # Everything needed to resume: weights, optimizer, scheduler,
            # epoch counter and the logger's upload offset.
            state = {
                'epoch': epoch,
                'state_dict': self.model.state_dict(),
                'max_dice_valid': self.max_dice_valid,
                'optimizer_state_dict': self.optimizer.state_dict(),
                'lr_scheduler_state_dict': self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None,
                'logger_next_position': self.logger.next_position
            }

            self.solver.save_checkpoint(os.path.join(self.save_path, 'net.pth'), state, is_best, self.logger)
            self.writer.add_scalar('valid_loss', loss_valid, epoch)
            self.writer.add_scalar('valid_dice', dice_valid, epoch)

            # Report fractional progress (0..1) back to the task table.
            update_status(self.connection, self.config.train_task_id, round(epoch / self.config.epoch, 3))

        self.chooseThresholdMinArea(valid_loader)
        self.final_crud()

    def validation(self, valid_loader):
        """Evaluate the model on the validation set.

        Args:
            valid_loader: iterable of (images, masks) batches.

        Returns:
            Tuple ``(mean_loss, dice, iou)`` over the validation set.
        """
        self.model.eval()
        meter = Meter()
        tbar = tqdm.tqdm(valid_loader)
        loss_sum = 0

        with torch.no_grad():
            for i, samples in enumerate(tbar):
                if len(samples) == 0:
                    continue
                images, masks = samples[0], samples[1]
                # Forward pass only; gradients disabled above.
                masks_predict = self.solver.forward(images)
                loss = self.solver.cal_loss(masks, masks_predict, self.criterion)
                loss_sum += loss.item()
                # NOTE: both the loss and meter.update apply sigmoid, so raw
                # logits are handed over here.
                # masks_predict_binary = torch.sigmoid(masks_predict) > 0.5
                meter.update(masks, masks_predict.detach().cpu())
        # Fix: guard against an empty loader (ZeroDivisionError).
        loss_mean = loss_sum / max(len(tbar), 1)

        dices, iou = meter.get_metrics()
        dice, dice_neg, dice_pos = dices
        self.logger.log(
            "IoU: %0.4f | dice: %0.4f | dice_neg: %0.4f | dice_pos: %0.4f" % (iou, dice, dice_neg, dice_pos))
        return loss_mean, dice, iou

    def chooseThresholdMinArea(self, valid_loader):
        """Grid-search the binarization threshold and min connected area.

        Loads the best checkpoint, runs the search on the validation set and
        stores the result in ``self.threshold_minarea``.
        """
        self.solver.load_checkpoint(os.path.join(self.save_path, 'net_best.pth'))
        tool = ChooseThresholdMinArea(self.solver, self.config.model.name, valid_loader, self.save_path,
                                      self.model.class_num, self.device)
        best_thresholds, best_minareas, max_dices = tool.choose_threshold_minarea()
        self.threshold_minarea = {'best_thresholds': best_thresholds, 'best_minareas': best_minareas,
                                  'max_dices': max_dices}
        self.logger.log(f'have get threshold and minarea\n{str(self.threshold_minarea)}')

    def resume_train(self):
        """Resume training from a checkpoint registered in the database.

        Checks whether the ``train_task`` row has a checkpoint_folder and a
        status > 0; if so, restores model, optimizer, scheduler, start epoch
        and logger state.

        Returns:
            True if a previous run was resumed, False for a fresh run.
        """
        # Fix: int() cast prevents SQL injection through a non-numeric task
        # id (the other queries in this class are parameterized with %s).
        select_query = f"SELECT checkpoint_folder,status,log FROM train_task WHERE id = {int(self.config.train_task_id)} limit 1"
        save_path, status, log = select_data(self.connection, select_query)[0]
        if save_path is not None and save_path != '' and status > 0:
            self.save_path = save_path
            checkpoint_path = os.path.join(save_path, 'net.pth')
            # Fix: map_location lets a checkpoint trained on another device
            # (e.g. GPU) be restored on the current one.
            checkpoint = torch.load(checkpoint_path, map_location=self.device)
            self.solver.load_checkpoint(checkpoint_path)  # restore model weights
            # Restore optimizer, LR scheduler and the epoch to resume from.
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            if self.lr_scheduler is not None:
                self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
            self.start_epoch = checkpoint['epoch'] + 1
            # Restore the logger's remote link and upload offset.
            self.logger.link = log
            self.logger.next_position = checkpoint['logger_next_position']
            self.logger.upload_path = log.replace(aliyun_prefix + '/', '')
            self.logger.log('Resume training from breakpoint......')
            return True
        return False

    def final_crud(self):
        """Finalize the run.

        1. Upload the best weights to OSS.
        2. Upload the grid-search plot.
        3. Update the database row (checkpoint link, pause flag, results),
           clear the Redis pause key and close all handles.
        """
        self.logger.log('uploading best model to oss ......')
        best_net_path = os.path.join(self.save_path, 'net_best.pth')
        link = upload_local_file(best_net_path)
        wangge_search_link = upload_local_file(f'{self.save_path}/{self.config.model.name}.png')
        self.threshold_minarea['wangge_search_link'] = wangge_search_link
        update_query = "UPDATE train_task SET checkpoint_link=%s,is_pause=%s,other_res=%s WHERE id=%s"
        update_data(self.connection, update_query,
                    (link, 1, str(self.threshold_minarea), self.config.train_task_id))
        del_pause(self.redis_client, self.config.train_task_id)

        # Fix: flush and close the TensorBoard writer alongside the other handles.
        self.writer.close()
        self.connection.close()
        self.redis_client.close()
        self.logger.log('mysql and redis connection closed.')
