import os.path

import torch.nn
from torch.utils.data import DataLoader
from tqdm import tqdm

from MySAM2 import SAM2
from MyData import MyDataset
from tensorboardX import SummaryWriter
from torch.optim import lr_scheduler
from EvaluatorModule import Evaluator
from torchvision.transforms import functional as F
import random
from PIL import Image, ImageDraw
import numpy as np


def train(model, dataset, losser, optimizer, scheduler, summary_writer, evaluator, epoch_time):
    """Run one training epoch over ``dataset`` and log averaged metrics.

    Only the SAM2 prompt encoder and mask decoder are fine-tuned; they are
    switched to train mode here. Loss and the FR/TP evaluation metrics are
    accumulated per sample and their running averages are shown on the
    progress bar and written to TensorBoard at the end of the epoch.

    Args:
        model: SAM2 wrapper exposing ``sam2_model`` and ``forward(image=...)``.
        dataset: iterable of dicts with 'img' and 'mask' tensors; must
            support ``len()`` and ``shuffle()``.
        losser: loss callable, e.g. ``torch.nn.BCELoss()``.
        optimizer: optimizer over the trainable parameters.
        scheduler: LR scheduler, stepped once per epoch.
        summary_writer: TensorBoard ``SummaryWriter`` for training curves.
        evaluator: provides ``evaluateSingleFR`` / ``evaluateSingleTP``.
        epoch_time: current epoch index (used as the global step).
    """
    # Put the fine-tuned sub-modules in train mode.
    model.sam2_model.sam_prompt_encoder.train()
    model.sam2_model.sam_mask_decoder.train()
    dataset.shuffle()
    status = {'loss': 0, 'f1': 0, 'rec': 0, 'pre': 0, 'qua': 0, 'com': 0, 'cor': 0}
    # Initialize so the post-loop logging does not raise NameError on an empty dataset.
    info = {}
    with tqdm(total=len(dataset), desc='{:<5} {:<5} {:<3}'.format('train', 'epoch', epoch_time)) as pbar:
        for idx, data in enumerate(dataset):
            # Forward pass.
            out = model.forward(image=data['img'])[0]
            # Loss computation.
            loss = losser(out, data['mask'])
            # Accumulate loss.
            status['loss'] += loss.item()
            # Detach before thresholding so metric computation does not
            # retain the autograd graph.
            pred = torch.where(out.detach() >= 0.5, 1.0, 0)
            # Compute evaluation metrics.
            pre, rec, f1 = evaluator.evaluateSingleFR(pred, data['mask'])
            cor, com, qua = evaluator.evaluateSingleTP(pred, data['mask'])
            # Accumulate evaluation metrics.
            status['pre'] += pre
            status['rec'] += rec
            status['f1'] += f1
            status['com'] += com
            status['cor'] += cor
            status['qua'] += qua
            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Running averages over the samples seen so far.
            info = {'Loss': status['loss'] / (idx + 1),
                    'Pre': status['pre'] / (idx + 1),
                    'Rec': status['rec'] / (idx + 1),
                    'F1': status['f1'] / (idx + 1),
                    'Com': status['com'] / (idx + 1),
                    'Cor': status['cor'] / (idx + 1),
                    'Qua': status['qua'] / (idx + 1)
                    }
            # Show progress info.
            pbar.set_postfix(info)
            pbar.update()
    # Step the learning-rate schedule once per epoch.
    scheduler.step()
    # Log the epoch's averaged metrics to TensorBoard.
    for key, value in info.items():
        summary_writer.add_scalar(key, value, epoch_time)


def test(model, dataset, summary_writer, evaluator, epoch_time, losser=None):
    """Evaluate ``model`` on ``dataset`` without gradient tracking.

    Mirrors :func:`train` but in eval mode and under ``torch.no_grad()``.
    Running averages are shown on the progress bar and written to
    TensorBoard at the end of the epoch.

    Args:
        model: SAM2 wrapper exposing ``sam2_model`` and ``forward(image=...)``.
        dataset: iterable of dicts with 'img' and 'mask' tensors.
        summary_writer: TensorBoard ``SummaryWriter`` for test curves.
        evaluator: provides ``evaluateSingleFR`` / ``evaluateSingleTP``.
        epoch_time: current epoch index (used as the global step).
        losser: loss callable; defaults to ``torch.nn.BCELoss()``. Previously
            this function read a module-level global that only existed when
            the file ran as a script, so importing and calling it failed.

    Returns:
        Tuple ``(f1_sum, qua_sum)``: metric totals accumulated over the
        dataset (not averaged; comparable across epochs of equal length).
    """
    if losser is None:
        losser = torch.nn.BCELoss()
    with torch.no_grad():
        # Put the fine-tuned sub-modules in eval mode.
        model.sam2_model.sam_prompt_encoder.eval()
        model.sam2_model.sam_mask_decoder.eval()
        status = {'loss': 0, 'f1': 0, 'rec': 0, 'pre': 0, 'qua': 0, 'com': 0, 'cor': 0}
        # Initialize so the post-loop logging does not raise NameError on an empty dataset.
        info = {}
        with tqdm(total=len(dataset), desc='{:<5} {:<5} {:<3}'.format('test', 'epoch', epoch_time)) as pbar:
            for idx, data in enumerate(dataset):
                # Forward pass.
                out = model.forward(image=data['img'])[0]
                # Loss computation.
                loss = losser(out, data['mask'])
                # Accumulate loss.
                status['loss'] += loss.item()
                # Threshold probabilities at 0.5 and compute evaluation metrics.
                pred = torch.where(out >= 0.5, 1.0, 0)
                pre, rec, f1 = evaluator.evaluateSingleFR(pred, data['mask'])
                cor, com, qua = evaluator.evaluateSingleTP(pred, data['mask'])
                # Accumulate evaluation metrics.
                status['pre'] += pre
                status['rec'] += rec
                status['f1'] += f1
                status['com'] += com
                status['cor'] += cor
                status['qua'] += qua
                # Running averages over the samples seen so far.
                info = {'Loss': status['loss'] / (idx + 1),
                        'Pre': status['pre'] / (idx + 1),
                        'Rec': status['rec'] / (idx + 1),
                        'F1': status['f1'] / (idx + 1),
                        'Com': status['com'] / (idx + 1),
                        'Cor': status['cor'] / (idx + 1),
                        'Qua': status['qua'] / (idx + 1)
                        }
                # Show progress info.
                pbar.set_postfix(info)
                pbar.update()
        # Log the epoch's averaged metrics to TensorBoard.
        for key, value in info.items():
            summary_writer.add_scalar(key, value, epoch_time)
        return status['f1'], status['qua']


if __name__ == '__main__':
    # Datasets: train/test splits of the crack dataset, loaded on the GPU.
    dataset = {
        'train': MyDataset(r'datasets/Crack200', 'train', 'cuda'),
        'test': MyDataset(r'datasets/Crack200', 'test', 'cuda'),
    }
    # Model: SAM2 built from config + pretrained checkpoint.
    model = SAM2(r'configs/sam2.1_hiera_t.yaml', r'weights/sam2.1_hiera_tiny.pt')
    # Loss: binary cross-entropy on predicted mask probabilities.
    losser = torch.nn.BCELoss()
    # Optimizer: update only the parameters left trainable in the model.
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=5e-5, weight_decay=4e-5)
    # Learning-rate schedule: cosine annealing with a 201-epoch period.
    scheduler = lr_scheduler.CosineAnnealingLR(optimizer=optimizer, T_max=201)
    # Separate TensorBoard writers for train and test curves.
    summary_writer = {
        'train': SummaryWriter('runs/SAM2/train'),
        'test': SummaryWriter('runs/SAM2/test')
    }
    # Evaluator for crack-segmentation metrics.
    # NOTE(review): dataset is Crack200 but the evaluator benchmark is
    # 'Crack500' — confirm this mismatch is intentional.
    evaluator = Evaluator(benchmark='Crack500')
    # Ensure the checkpoint directory exists (exist_ok avoids the
    # race-prone exists()-then-makedirs pattern).
    os.makedirs('checkpoints', exist_ok=True)
    # Best metrics seen so far; used to decide when to checkpoint.
    bestQua = bestF = 0
    # Training loop: train one epoch, evaluate, checkpoint on improvement.
    for epoch_time in range(1000):
        train(model, dataset['train'], losser, optimizer, scheduler, summary_writer['train'], evaluator, epoch_time)
        f1, qua = test(model, dataset['test'], summary_writer['test'], evaluator, epoch_time)
        # Save best-F1 / best-quality checkpoints, plus always the newest weights.
        if f1 > bestF:
            torch.save(model.state_dict(), './checkpoints/sam_finetuned_bestF.pth')
            bestF = f1
        if qua > bestQua:
            torch.save(model.state_dict(), './checkpoints/sam_finetuned_bestQua.pth')
            bestQua = qua
        torch.save(model.state_dict(), './checkpoints/sam_finetuned_newest.pth')
