from torch import optim
import torch
import tqdm
import uuid
from models.solver import Solver
from torch.utils.tensorboard import SummaryWriter
import datetime
import os
from models.model import ClassifyModel
from utils.loss import MultiLoss
from utils.cal_classify_accuracy import Meter
from utils.set_seed import seed_torch
from utils.optim import get_optim
from utils.oss_file import Logger, upload_local_file, aliyun_prefix
from omegaconf import OmegaConf
from utils.mysql_crud import create_connection, update_status, update_data, select_data
from utils.redis_crud import get_redis_client, is_pause, del_pause


class TrainVal:
    """Train/validate driver for the classification task.

    Wires together the model, multi-loss, optimizer and LR scheduler with
    MySQL task bookkeeping, Redis pause signaling, OSS log upload and
    TensorBoard, and supports resuming from a checkpoint recorded in the
    `train_task` table.
    """

    def __init__(self, config, device):
        """
        Args:
            config: OmegaConf config with `model`, `loss`, `optim`, `scheduler`
                sections plus `save_path`, `lr`, `weight_decay`, `epoch`,
                `seed` and `train_task_id`.
            device: torch device the model and solver run on.
        """
        self.config = config
        self.device = device
        # Seed BEFORE building the model so weight initialization is
        # reproducible (seeding after construction leaves randomly
        # initialized layers unseeded).
        seed_torch(config.seed)

        # Connections used for task bookkeeping and pause signaling.
        self.connection = create_connection()
        self.redis_client = get_redis_client()

        self.model = ClassifyModel(config.model.name, config.model.pretrained, class_num=config.model.class_num,
                                   **config.model.parameters).to(device)

        self.solver = Solver(self.model, device)
        self.criterion = MultiLoss(config.loss.names, config.loss.parameters, config.loss.weights)
        self.optimizer = get_optim(config.optim)(params=self.model.parameters(), lr=config.lr,
                                                 weight_decay=config.weight_decay)
        # Anneal over slightly more than the training horizon so the LR does
        # not bottom out exactly at the final epoch.
        if config.scheduler:
            self.lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, self.config.epoch + 10)
        else:
            self.lr_scheduler = None
        self.start_epoch = 1

        self.logger = Logger()

        if not self.resume_train():
            # Fresh run: create a unique checkpoint folder and register it
            # (plus the remote log link) in the task table.
            save_path = f'{config.save_path}/classify/{config.model.name}/{datetime.datetime.now().strftime("%Y-%m-%d")}-{uuid.uuid4()}'
            self.save_path = save_path
            os.makedirs(save_path, exist_ok=True)  # exist_ok avoids the exists()/makedirs() race
            update_query = "UPDATE train_task SET is_pause=%s,checkpoint_folder=%s,log=%s WHERE id=%s"
            update_data(self.connection, update_query, (1, save_path, self.logger.link, self.config.train_task_id))
            self.logger.log(OmegaConf.to_yaml(config))
        self.writer = SummaryWriter(log_dir=self.save_path)
        self.max_accuracy_valid = 0

    def train(self, train_loader, valid_loader):
        """Run the full train/validate loop, checkpointing every epoch.

        Returns early (without the final OSS upload) as soon as a pause flag
        for this task shows up in Redis.

        Args:
            train_loader: iterable of (images, labels) training batches.
            valid_loader: iterable of (images, labels) validation batches.
        """
        global_step = 0
        for epoch in range(self.start_epoch, self.config.epoch + 1):
            epoch_loss = 0
            self.model.train(True)

            tbar = tqdm.tqdm(train_loader)
            for i, (images, labels) in enumerate(tbar):
                labels_predict = self.solver.forward(images)
                loss = self.solver.cal_loss(labels, labels_predict, self.criterion)
                epoch_loss += loss.item()
                self.solver.backword(self.optimizer, loss)
                self.writer.add_scalar('train_loss', loss.item(), global_step + i)
                # Stop immediately if the task was paused from the outside.
                if is_pause(self.redis_client, self.config.train_task_id):
                    print('训练终止')
                    # Flush TensorBoard events and release connections before
                    # bailing out mid-epoch.
                    self.writer.close()
                    self.connection.close()
                    self.redis_client.close()
                    return

            if self.lr_scheduler is not None:
                self.lr_scheduler.step()
            global_step += len(train_loader)

            # Print the log info (guard against an empty loader).
            self.logger.log(
                'Finish Epoch [%d/%d], Average Loss: %.7f' % (epoch, self.config.epoch,
                                                              epoch_loss / max(len(tbar), 1)))

            # Evaluate on the validation set.
            class_neg_accuracy, class_pos_accuracy, class_accuracy, neg_accuracy, pos_accuracy, accuracy, loss_valid = \
                self.validation(valid_loader)

            if accuracy > self.max_accuracy_valid:
                is_best = True
                self.max_accuracy_valid = accuracy
            else:
                is_best = False

            # Everything needed by resume_train() to continue from this epoch.
            state = {
                'epoch': epoch,
                'state_dict': self.model.state_dict(),
                'max_accuracy_valid': self.max_accuracy_valid,
                'optimizer_state_dict': self.optimizer.state_dict(),
                'lr_scheduler_state_dict': self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None,
                'logger_next_position': self.logger.next_position
            }

            self.solver.save_checkpoint(
                os.path.join(self.save_path, 'net.pth'), state, is_best, self.logger)
            self.writer.add_scalar('valid_loss', loss_valid, epoch)
            self.writer.add_scalar('valid_accuracy', accuracy, epoch)
            for c in range(self.config.model.class_num):
                self.writer.add_scalar(f'valid_class_{c}_accuracy', class_accuracy[c], epoch)

            # Report fractional progress (0..1) to the task table.
            update_status(self.connection, self.config.train_task_id, round(epoch / self.config.epoch, 3))

        self.final_crud()

    def validation(self, valid_loader):
        """Evaluate the model on the validation set.

        Args:
            valid_loader: iterable of (images, labels) validation batches.

        Returns:
            Tuple of (class_neg_accuracy, class_pos_accuracy, class_accuracy,
            neg_accuracy, pos_accuracy, accuracy, mean_loss) — the first six
            as produced by Meter.get_metrics().
        """
        self.model.eval()
        meter = Meter()
        tbar = tqdm.tqdm(valid_loader)
        loss_sum = 0

        with torch.no_grad():
            for i, (images, labels) in enumerate(tbar):
                # Forward pass only; gradients disabled for evaluation.
                labels_predict = self.solver.forward(images)
                loss = self.solver.cal_loss(labels, labels_predict, self.criterion)
                loss_sum += loss.item()
                meter.update(labels, labels_predict.cpu())
        loss_mean = loss_sum / max(len(tbar), 1)

        class_neg_accuracy, class_pos_accuracy, class_accuracy, neg_accuracy, pos_accuracy, accuracy = meter.get_metrics()
        # Build one entry per class instead of hard-coding four classes, so
        # the report stays correct for any config.model.class_num.
        per_class = " | ".join(
            "Class_%d_accuracy: %0.4f" % (c, class_accuracy[c]) for c in range(len(class_accuracy)))
        self.logger.log(
            "%s | Negative accuracy: %0.4f | positive accuracy: %0.4f | accuracy: %0.4f" %
            (per_class, neg_accuracy, pos_accuracy, accuracy))
        return class_neg_accuracy, class_pos_accuracy, class_accuracy, neg_accuracy, pos_accuracy, accuracy, loss_mean

    def resume_train(self):
        """Resume from the checkpoint recorded in the task table, if any.

        Looks up checkpoint_folder/status/log for this task; when a non-empty
        checkpoint folder exists and status > 0, restores model weights,
        optimizer, LR scheduler, the next epoch index and the remote logger.

        Returns:
            True when a previous run was found and restored, False otherwise.
        """
        # Cast to int so the value interpolated into the query cannot carry
        # SQL (select_data takes a plain query string, no parameter binding).
        task_id = int(self.config.train_task_id)
        select_query = f"SELECT checkpoint_folder,status,log FROM train_task WHERE id = {task_id} limit 1"
        save_path, status, log = select_data(self.connection, select_query)[0]
        if save_path is not None and save_path != '' and status > 0:
            self.save_path = save_path
            checkpoint_path = os.path.join(save_path, 'net.pth')
            # map_location keeps the load working when the checkpoint was
            # written on a different device (e.g. saved on GPU, resumed on CPU).
            checkpoint = torch.load(checkpoint_path, map_location=self.device)
            self.solver.load_checkpoint(checkpoint_path)  # load model weights
            # Restore optimizer, LR scheduler and the next epoch to run.
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            if self.lr_scheduler is not None:
                self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
            self.start_epoch = checkpoint['epoch'] + 1
            # Restore the remote logger so appending continues where it stopped.
            self.logger.link = log
            self.logger.next_position = checkpoint['logger_next_position']
            self.logger.upload_path = log.replace(aliyun_prefix + '/', '')
            self.logger.log('Resume training from breakpoint......')
            return True
        return False

    def final_crud(self):
        """Upload the best weights to OSS and finalize task bookkeeping.

        Writes the OSS link back to the task row, clears the Redis pause flag
        and releases the TensorBoard writer plus DB/Redis connections.
        """
        self.logger.log('uploading best model to oss ......')
        best_net_path = os.path.join(self.save_path, 'net_best.pth')
        link = upload_local_file(best_net_path)

        update_query = "UPDATE train_task SET checkpoint_link=%s,is_pause=%s WHERE id=%s"
        update_data(self.connection, update_query, (link, 1, self.config.train_task_id))
        del_pause(self.redis_client, self.config.train_task_id)

        self.writer.close()  # flush any buffered TensorBoard events
        self.connection.close()
        self.redis_client.close()
        self.logger.log('mysql and redis connection closed.')
