import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.autograd import Variable
from PIL import Image
from torch.nn import functional as F
import cv2
import numpy as np
from PIL import Image
import sys
from tqdm import tqdm
import math
import datetime

from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts
from dataset.dataloader import get_test_loader,get_train_loader

def prec_rec(y_true, y_pred, beta2):
    """Return eps-smoothed (precision, recall) for binary mask tensors.

    y_true / y_pred: float tensors of 0/1 values with matching shapes.
    beta2: accepted for call-site compatibility but not used here; the
           F-measure weighting is applied by the callers.
    Machine epsilon is added to numerator and denominator so that an
    all-zero prediction or ground truth never divides by zero.
    """
    eps = sys.float_info.epsilon

    true_positives = torch.sum(y_true * y_pred)
    predicted_positives = torch.sum(y_pred)
    actual_positives = torch.sum(y_true)

    precision = (true_positives + eps) / (predicted_positives + eps)
    recall = (true_positives + eps) / (actual_positives + eps)
    return precision, recall

class solver:
    """Trainer/evaluator for a two-output saliency model.

    The model's forward() is expected to return two tensors; only the first
    is supervised and evaluated — TODO confirm the second head is
    intentionally unused (its loss was commented out in the original).
    """

    def __init__(self, model, device, batchsize=1, root="./data/", epoch=40, lr=5e-5, wd=0.005):
        """Build optimizer, loss, and data loaders; move the model to device.

        model:     network to train (forward() must return a 2-tuple).
        device:    torch device for model and batches.
        batchsize: training batch size (test loader uses its own default).
        root:      dataset root containing train_pair.lst / test_pair.lst.
        epoch:     number of training epochs.
        lr:        Adam learning rate.
        wd:        NOTE(review): stored but never passed to the optimizer, so
                   no weight decay is actually applied. Wiring it in would
                   change training behavior, so it is left as-is — confirm
                   intent with the author.
        """
        self.model = model
        self.device = device
        self.epoch = epoch
        self.lr = lr
        self.wd = wd
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
        self.loss = torch.nn.BCEWithLogitsLoss()
        self.train_loader = get_train_loader(root, root + "train_pair.lst", batch_size=batchsize)
        self.test_loader = get_test_loader(root, root + "test_pair.lst")
        self.model = self.model.to(self.device)

    def train(self, path):
        """Run the full training loop, optionally checkpointing every epoch.

        path: filename prefix for per-epoch checkpoints ("<path><epoch>.pth"),
              or None to skip saving.
        """
        for epoch in range(self.epoch):
            total_loss = 0
            self.model.train()
            for step, data in enumerate(self.train_loader):
                img, gt = data
                # Variable() has been a no-op since PyTorch 0.4; a plain
                # .to(device) is equivalent.
                img = img.to(self.device)
                gt = gt.to(self.device)

                self.optimizer.zero_grad()
                # Only the first head is supervised; the second is discarded.
                out1, _ = self.model(img)

                loss = self.loss(out1, gt)
                total_loss += loss.item()

                loss.backward()
                self.optimizer.step()

            if path is not None:
                save_path = path + str(epoch) + ".pth"
                torch.save(self.model.state_dict(), save_path)

            # FIX: dropped the second print of total_loss1/total_loss2 — their
            # accumulation was commented out, so it always printed 0.0 0.0.
            print('Epoch:{}    loss:{}'.format(epoch, total_loss / len(self.train_loader)))

    def test(self, path=None):
        """Evaluate on the test set; optionally save sigmoid maps as images.

        path: directory/prefix for writing predicted maps (named by the input
              file's basename), or None to skip writing.
        Prints mean MAE and the F-measure (beta^2 = 0.3) over the test set.
        """
        total_mae = 0
        total_prec = 0
        total_rec = 0
        # FIX: switch to eval mode so dropout/batchnorm layers behave
        # deterministically during evaluation.
        self.model.eval()
        with torch.no_grad():
            for step, data in enumerate(self.test_loader):
                img, gt, data_path = data
                img = img.to(self.device)
                gt = gt.to(self.device)
                out, _ = self.model(img)

                # Upsample logits to ground-truth resolution, then squash
                # into [0, 1] for thresholded evaluation.
                out = F.interpolate(out, size=(gt.shape[2], gt.shape[3]), mode='bilinear', align_corners=True)
                out = out.sigmoid()

                mae, prec, rec = self.evaluate(out, gt)

                total_mae += mae
                total_prec += prec
                total_rec += rec

                if path is not None:
                    gen_path = path + data_path[0].split('/')[-1]
                    out = out.data.cpu().numpy().squeeze()
                    out = out * 255
                    cv2.imwrite(gen_path, out)

            print("gen pic down")

        total_mae = total_mae / len(self.test_loader)
        total_prec = total_prec / len(self.test_loader)
        total_rec = total_rec / len(self.test_loader)

        # F-measure with beta^2 = 0.3, the standard saliency benchmark weight.
        f_measure = (((1 + 0.3) * total_prec * total_rec) / (0.3 * total_prec + total_rec)).item()

        print("mae:{}".format(total_mae))
        print("F_measure:{}".format(f_measure))

    def evaluate(self, out, gt):
        """Return (per-sample MAE array, precision, recall) for one batch.

        Sweeps 256 thresholds over the sigmoid prediction, keeps the one
        that maximizes the F-measure, and reports precision/recall at it.
        Assumes batch size 1 — squeeze() would also merge a batch dimension
        otherwise; TODO confirm the test loader yields single samples.
        """
        mae = torch.mean(torch.abs(out - gt), dim=(1, 2, 3)).cpu().numpy()

        gt_arr = gt.squeeze().cpu()
        pred_arr = out.squeeze().cpu()

        # The binarized ground truth does not depend on the sweep threshold;
        # compute it once instead of 256 times.
        y_true = (gt_arr >= 0.5).float().view(1, -1)

        max_fmeasure = 0
        best_threshold = 0
        for t in range(256):
            threshold = t / 255.0
            y_pred = (pred_arr >= threshold).float().view(1, -1)

            prec, rec = prec_rec(y_true, y_pred, 0.3)
            fmeasure = ((1 + 0.3) * prec * rec) / (0.3 * prec + rec)
            if fmeasure > max_fmeasure:
                max_fmeasure = fmeasure
                best_threshold = threshold

        # Report precision/recall at the best threshold found above.
        y_pred = (pred_arr >= best_threshold).float().view(1, -1)
        prec, rec = prec_rec(y_true, y_pred, 0.3)

        return mae, prec, rec


class solver2:
    """Trainer with cosine-annealing warm restarts, tqdm progress, file
    logging, and periodic evaluation with best-MAE tracking.

    The model's forward() is expected to return five tensors; only the first
    (fused) output is supervised and evaluated — TODO confirm the four side
    outputs are intentionally unused.
    """

    def __init__(self, model, device, batchsize=1, root="./data/", epoch=30, lr=1e-4, wd=0, betas=(0.9, 0.99), test_interval=5):
        """Build optimizer/scheduler/loss and data loaders; move model to device.

        model:         network to train (forward() must return a 5-tuple).
        device:        torch device for model and batches.
        batchsize:     training batch size.
        root:          dataset root containing train_pair.lst / test_pair.lst.
        epoch:         number of training epochs.
        lr:            Adam learning rate.
        wd:            Adam weight decay.
        betas:         Adam momentum coefficients.
        test_interval: run evaluation every this many epochs during train().
        """
        self.model = model
        self.device = device
        self.epoch = epoch
        self.lr = lr
        self.wd = wd
        self.test_interval = test_interval
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.wd, betas=betas)
        self.scheduler = CosineAnnealingWarmRestarts(self.optimizer, T_0=10, T_mult=1, eta_min=1e-7)
        self.loss = torch.nn.BCEWithLogitsLoss()
        self.train_loader = get_train_loader(root, root + "train_pair.lst", batch_size=batchsize)
        self.test_loader = get_test_loader(root, root + "test_pair.lst")
        self.model = self.model.to(self.device)
        # Best-so-far MAE and the epoch it was reached at (updated in test()).
        self.best_mae = math.inf
        self.best_epoch = 0

    def train(self, path, log_pth="./log/"):
        """Run the training loop, checkpointing and logging each epoch.

        path:    filename prefix for per-epoch checkpoints, or None to skip.
        log_pth: directory for the timestamped log file.
        """
        # FIX: str(datetime.now()) contains ':' and spaces, which is an
        # invalid filename on Windows; use a filesystem-safe timestamp.
        log_name = log_pth + "log_{}.txt".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
        with open(log_name, 'w') as log_file:
            for epoch in range(self.epoch):
                total_loss = 0
                self.model.train()
                progress_bar = tqdm(self.train_loader, desc=f'Epoch {epoch + 1}/{self.epoch}', leave=False)
                for step, data in enumerate(progress_bar):
                    img, gt = data
                    # Variable() has been a no-op since PyTorch 0.4; a plain
                    # .to(device) is equivalent.
                    img = img.to(self.device)
                    gt = gt.to(self.device)

                    self.optimizer.zero_grad()
                    # Only the fused output is supervised; side outputs unused.
                    out, out1, out2, out3, out4 = self.model(img)

                    loss = self.loss(out, gt)
                    total_loss += loss.item()

                    loss.backward()
                    self.optimizer.step()

                    current_lr = self.optimizer.param_groups[0]['lr']
                    progress_bar.set_postfix({'loss': total_loss / (step + 1), 'lr': current_lr})

                self.scheduler.step()  # cosine schedule advances once per epoch

                if path is not None:
                    save_path = path + str(epoch) + ".pth"
                    torch.save(self.model.state_dict(), save_path)

                log_file.write(f'Epoch:{epoch + 1}    loss:{total_loss / len(self.train_loader)}\n')
                print(f'Epoch:{epoch + 1}    loss:{total_loss / len(self.train_loader)}')

                if (epoch + 1) % self.test_interval == 0:
                    self.test(None, log_file, epoch)

    def test(self, path=None, log_file=None, epoch=None):
        """Evaluate on the test set; track best MAE; optionally save outputs.

        path:     directory/prefix for predicted maps and best_model.pth, or
                  None to skip writing.
        log_file: open file handle to append metrics to, or None.
        epoch:    current epoch number, recorded when a new best MAE is found.
        NOTE(review): train() always calls this with path=None, so the
        best_model.pth checkpoint below is never written during training —
        confirm whether it should receive the training checkpoint path.
        """
        total_mae = 0
        total_prec = 0
        total_rec = 0
        # FIX: switch to eval mode so dropout/batchnorm layers behave
        # deterministically during evaluation.
        self.model.eval()
        with torch.no_grad():
            for step, data in enumerate(self.test_loader):
                img, gt, data_path = data
                img = img.to(self.device)
                gt = gt.to(self.device)
                out, out1, out2, out3, out4 = self.model(img)

                # Upsample logits to ground-truth resolution, then squash
                # into [0, 1] for thresholded evaluation.
                out = F.interpolate(out, size=(gt.shape[2], gt.shape[3]), mode='bilinear', align_corners=True)
                out = out.sigmoid()

                mae, prec, rec = self.evaluate(out, gt)

                total_mae += mae
                total_prec += prec
                total_rec += rec

                if path is not None:
                    gen_path = path + data_path[0].split('/')[-1]
                    out = out.data.cpu().numpy().squeeze()
                    out = out * 255
                    cv2.imwrite(gen_path, out)

        total_mae = total_mae / len(self.test_loader)
        total_prec = total_prec / len(self.test_loader)
        total_rec = total_rec / len(self.test_loader)

        # F-measure with beta^2 = 0.3, the standard saliency benchmark weight.
        f_measure = (((1 + 0.3) * total_prec * total_rec) / (0.3 * total_prec + total_rec)).item()

        if total_mae < self.best_mae:
            self.best_mae = total_mae
            self.best_epoch = epoch
            if path is not None:
                best_model_path = path + 'best_model.pth'
                torch.save(self.model.state_dict(), best_model_path)

        if log_file is not None:
            log_file.write(f"mae:{total_mae}\nF_measure:{f_measure}\n")
            log_file.write(f"Best mae:{self.best_mae} at epoch {self.best_epoch + 1}\n")
        print(f"mae:{total_mae}")
        print(f"F_measure:{f_measure}")
        print(f"Best mae:{self.best_mae} at epoch {self.best_epoch + 1}")

    def evaluate(self, out, gt):
        """Return (per-sample MAE array, precision, recall) for one batch.

        Sweeps 256 thresholds over the sigmoid prediction, keeps the one
        that maximizes the F-measure, and reports precision/recall at it.
        Assumes batch size 1 — squeeze() would also merge a batch dimension
        otherwise; TODO confirm the test loader yields single samples.
        """
        mae = torch.mean(torch.abs(out - gt), dim=(1, 2, 3)).cpu().numpy()

        gt_arr = gt.squeeze().cpu()
        pred_arr = out.squeeze().cpu()

        # The binarized ground truth does not depend on the sweep threshold;
        # compute it once instead of 256 times.
        y_true = (gt_arr >= 0.5).float().view(1, -1)

        max_fmeasure = 0
        best_threshold = 0
        for t in range(256):
            threshold = t / 255.0
            y_pred = (pred_arr >= threshold).float().view(1, -1)

            prec, rec = prec_rec(y_true, y_pred, 0.3)
            fmeasure = ((1 + 0.3) * prec * rec) / (0.3 * prec + rec)
            if fmeasure > max_fmeasure:
                max_fmeasure = fmeasure
                best_threshold = threshold

        # Report precision/recall at the best threshold found above.
        y_pred = (pred_arr >= best_threshold).float().view(1, -1)
        prec, rec = prec_rec(y_true, y_pred, 0.3)

        return mae, prec, rec