# coding=utf-8
import ctypes
import time
import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '3,4,5,6,7'
# import torch
# import torch.nn.functional as F
# import torch.nn as nn
from tqdm import tqdm
from PIL import Image
# from torch import optim

# import LearnAttn
# import PSPNet
# # import UTNet.utnet
# import UTNet.swin_unet
# import VFormer
# import myAttention
# import patchNet
import LGFO_PConv
import PCLKA
# from utils.utils import clip_gradient, adjust_lr
# import utils.visualization as visual
import DeenNet
import DlinkNet

from BuildFormer.geoseg.models import newBuildFormer
from seg_utils import data_loader
# from torch.optim import lr_scheduler
# from utils.evaluation import *
from seg_utils.loss import *
# from torchstat import stat
# import torchsummary
# from thop import profile
# from thop import clever_format
#from myMetric import *
#import metric 
#from utils import tools
import numpy as np

import slideFormer
import segNet
# import DlinkNet
import patchNet
# import patchNet2
# import patchNet3
# import NDSNet
# import NLLinkNet.networks.nllinknet_location as NLLinkNet
# import SGCN.models.SGCNNet as SGCNNet
# import SwinUnet.networks.vision_transformer as ViT_seg
import BuildFormer.geoseg.models.BuildFormer as BuildFormer
# import slideFormer
import SwinTransformer.models.swin_transformer as SwinTransformer
# import network.model as CFENet
# #import MSNet
import PSPNet
import UNets
import U_Nets
# import UTNet.utnet
import LiteST_Net
import deepLab
import torchvision
import 配置
import 绘制曲线图

import NWinFormer
import LiteST_Net2
import LGFO_Net
import CoAtt_SNK_PC

# Training hyper-parameters, all read from the project config module (配置).
batch_size =配置.batch_size
n_epoch = 配置.n_epoch
img_size = 配置.img_size
#model_name = 'CLNet'

dirPath=配置.dirPath
modeName=配置.modeName

# Checkpoints go under <dirPath>/<modeName>/, predictions under .../predictResult/.
model_path = dirPath+modeName+"/"
predictPath=model_path+"predictResult/"

op_lr =配置.op_lr       # learning rate
weightDecay=配置.weightDecay      # L2 regularization weight
#op_decay_rate = 0.1     # learning-rate decay ratio
#op_decay_epoch = 60      # learning-rate decay interval (epochs)
noChangeEpoch=配置.noChangeEpoch       # early-stopping patience (epochs without improvement)


# Create output directories on first run.
if not os.path.exists(model_path):
    os.makedirs(model_path)
if not os.path.exists(predictPath):
    os.makedirs(predictPath)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Model selection: exactly one assignment to `net` below is active; the
# commented lines are a record of all architectures tried in experiments.
#net=NDSNet.NDSNet(3,2).to(device)
# net=patchNet3.patchNet(3,2).to(device)
# net=patchNet.PSPNet().to(device)
# net=ViT_seg.SwinUnet(None,512,2).to(device)
# net=slideFormer.slideFormer3().to(device)
# net=PSPNet.BuildFormer_ShiftPoolingPSPNet().to(device)
# net=PSPNet.ResNet50_PSPNet().to(device)
# net=BuildFormer.MSW_LMHSA().to(device)
# net=SwinTransformer.lineTransformer().to(device)
# net=SwinTransformer.simple_SwinTransformer().to(device)
# net=SwinTransformer.multiTransformer().to(device)
# net=SwinTransformer.FastSwinTransformer().to(device)
# net=SwinTransformer.BuildFormer_SwinTransformer().to(device)
#net=SwinTransformer.SwinTransformer().to(device)
# net=NWinFormer.NWinFormer().to(device)
# net=SwinTransformer.MSSTNet().to(device)
# net=SwinTransformer.Yuan_Net().to(device)
# net=SwinTransformer.Yuan_Net2().to(device)
# net=SwinTransformer.Yuan_Net3().to(device)
# net=SwinTransformer.LiteST_Net().to(device)# LiteST_Net architecture, but with the original Swin Transformer
# net=LiteST_Net.LiteST_Net().to(device)# uses Lite Swin Transformer
# net=LiteST_Net2.LiteST_Net2().to(device)

# net=BuildFormer.BuildFormerSegDP().to(device)
# net=newBuildFormer.NewBuildFormer().to(device)
# net=PSPNet.UNet_Fully().to(device)
# net=deepLab.DeepLabV3().to(device)
# net=PSPNet.TVResNet50_ShiftPoolingPSPNet().to(device)
# net=PSPNet.ResNet50_M_PSPNet().to(device)
# net=PSPNet.TVResNet50_M_PSPNet().to(device)
# net=PSPNet.TVResNet50_M_F_PSPNet().to(device)
# net=PSPNet.TVResNet50_LWMSA().to(device)
# net=PSPNet.TVResNet50_LWMSA().to(device)
# net=BuildFormer.NewBuildFormer().to(device)
# net=PSPNet.TVResNet50_ShiftPoolingPSPNet().to(device)
# net=PSPNet.TVResNet50_PSPNet64().to(device)

# net=PSPNet.TVResNet50_PSPNet16().to(device)
# net=PSPNet.TVResNet50_M_LWMSA().to(device)
# net=BuildFormer.BuildFormerSegDP().to(device)
# net=newBuildFormer.NewBuildFormer().to(device)

# net=PSPNet.ResNet152_PSPNet().to(device)
# net=PSPNet.CFENet_PSPNet().to(device)
# net=PSPNet.CFENet_ShiftPoolingPSPNet().to(device)
# net=PSPNet.ResNet50_PSPNet().to(device)
# net=NLLinkNet.NL34_LinkNet(2,3).to(device)
# net=SGCNNet.SGCN_res50(num_classes=2).to(device)
# net = CFENet.CLNet(nclass=2).to(device)
# net = DlinkNet.DinkNet34(num_classes=2).to(device)
# net = DlinkNet.LinkNet34(num_classes=2).to(device)
# net=slideFormer.slideFormer().to(device)
# net=segNet.SegNet(3,2).to(device)
#net=MSNet.net(4).to(device)

# net=LearnAttn.LearnAttn_Net().to(device)
# net=U_Nets.U_Net(3,2).to(device)
# net=LGFO_Net.LGFO_SwinTransformerNet(3,2).to(device)
# net=LGFO_Net.SwinTransformer_UNetDecoder(3,2).to(device)

# net=LGFO_Net.LGFO_Net(3,2).to(device)
# net=LGFO_Net.LGFO_Net_NoConv(3,2).to(device)

# net=U_Nets.R2U_Net().to(device)
# net=U_Nets.AttU_Net().to(device)
# net=U_Nets.mutiScaleAttU_Net().to(device)
# net=U_Nets.LA_UNet().to(device)
# net=UNets.UNet2Plus().to(device)
# net=patchNet.patchNet3(3,2).to(device)
# net=UTNet.utnet.UTNet(3).to(device)
# net=UTNet.swin_unet.SwinUnet().to(device)

# net=U_Nets.NestedUNet(3,2).to(device)
# net=UNets.UNet3Plus().to(device)
# net=myAttention.VFormer().to(device)
# net=VFormer.V_Net().to(device)
# net=VFormer.shapeAware().to(device)
# net=patchNet.MTNet().to(device)
# net=patchNet.Learn_Unet().to(device)
# net=DeenNet.DeenNet().to(device)

# net=LGFO_PConv.LGFO_PConv(3,2).to(device)
net= PCLKA.TMP(3,2).to(device)  # <-- active model (3 input channels, 2 classes)
# net=U_Nets.U_Net(3,2).to(device)
# net=U_Nets.NestedUNet(3,2).to(device)
#net=CoAtt_SNK_PC.TMP1(3,2).to(device)


# Compute FLOPs and parameter counts for the selected model.
# Methods 1 (thop) and 2 (torchstat) are kept below for reference.
# net.to("cpu")
# #1
# input = torch.randn(1, 3, 512, 512)
# flops, params = profile(net, inputs=(input,))
# print("FLOPs=", str(flops/1e9) +'{}'.format("G"))
# print("params=", str(params/1e6)+'{}'.format("M"))
# # #2
# # # stat(net,(3,512,512))
# Method 3: the approach used in the paper (ptflops).
from ptflops import get_model_complexity_info
flops, params = get_model_complexity_info(net, (3, 256, 256), as_strings=True, print_per_layer_stat=True)
print('Flops:  ' + flops)
print('Params: ' + params)




# Data loaders for the train / val / predict splits (test is disabled).
train_loader = data_loader.get_loader(dirPath+"train/" , batch_size , img_size,num_workers=4, mode='train',augmentation_prob=0,shuffle=True, pin_memory=True)
val_loader = data_loader.get_loader(dirPath+"val/" , 1 ,  img_size,num_workers=4, mode='val',augmentation_prob=0,shuffle=False, pin_memory=True)
# test_loader = data_loader.get_loader(dirPath+"test/" , 1 , img_size,num_workers=4, mode='test',augmentation_prob=0,shuffle=False, pin_memory=True)
predict_loader = data_loader.get_loader(dirPath+"predict/" , 1 , img_size,num_workers=4, mode='predict',augmentation_prob=0,shuffle=False, pin_memory=True)

# NOTE(review): `criterion` (BCELoss) appears unused -- the train/eval loops
# construct CrossEntropyLoss themselves. Confirm before removing.
criterion = nn.BCELoss().to(device)
optimizer = torch.optim.Adam(net.parameters(),lr=op_lr,eps=1e-3)
#optimizer =torch.optim.SGD(net.parameters(), lr=op_lr, momentum=0.9,weight_decay=0) 
#scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
# Global epoch counter; set to the correct resume point inside main().
epoch=0

def main():
    """Training driver.

    Resumes from the newest checkpoint in ``model_path`` if one exists,
    then alternates train/eval epochs until the early-stopping criterion
    in stop() fires, and finally runs prediction on the predict set.
    """
    global epoch
    # Determine the next epoch to train (1 when no checkpoint exists).
    epoch = getLastCheckPt(model_path)
    if epoch != 1:  # a checkpoint exists -> restore the latest weights
        # map_location remaps tensors saved on cuda:5 onto cuda:0.
        checkpoint = torch.load(model_path+str(epoch-1)+".pth", map_location={'cuda:5': 'cuda:0'})
        net.load_state_dict(checkpoint)
        print("已恢复第{}个周期的参数".format(epoch-1))

    # BUGFIX: the original set isTrain=False and immediately overwrote it
    # with True (dead assignment); a plain loop-with-break is equivalent.
    while True:
        trainOneEpoch()

        evaluateOneEpoch()

        绘制曲线图.printMaxMIOU()
        if stop():
            print("训练完成")
            # Visual separator in the console (formerly a beeper loop).
            for i in range(10):
                print()
            break

    predict()

def stop():
    """Early-stopping check based on the metrics file written by evaluateOneEpoch().

    Parses every "loss:" and "mIoU:" line from the metrics file, then stops
    when the best validation loss has not improved for more than
    ``noChangeEpoch`` epochs, or the epoch budget ``n_epoch`` is exhausted
    (and the best mIoU epoch is not the very first one).

    Returns:
        bool: True when training should terminate; the best epoch index is
        then appended to the metrics file.
    """
    allValLoss = []
    allValMiou = []
    # 'a+' creates the file when missing and allows reading after a rewind.
    with open(model_path+'评价指标.txt', 'a+') as f:
        # BUGFIX: seek() requires an integer offset; the original passed 0.0
        # (a float), which raises TypeError on Python 3 text files.
        f.seek(0)
        for text in f:
            texts = text.split(":")
            if texts[0] == "loss":
                allValLoss.append(float(texts[1]))
            elif texts[0] == "mIoU":
                allValMiou.append(float(texts[1]))

    # 1-based index of the epoch with the best validation mIoU.
    maxIndex = allValMiou.index(max(allValMiou))+1

    # Termination is driven by validation loss: stop once the best-loss
    # epoch is more than noChangeEpoch epochs old, or the budget ran out.
    minIndex = allValLoss.index(min(allValLoss))+1
    if ((epoch-minIndex) > noChangeEpoch or epoch > n_epoch) and maxIndex != 1:
        with open(model_path+'评价指标.txt', 'a') as f:
            f.write("\nbestEpoch="+str(maxIndex)+"\n")
        return True
    else:
        return False

def getLastCheckPt(model_path):
    """Return the next epoch number to train.

    Scans ``model_path`` for checkpoint files named "<epoch>.pth" and
    returns the highest epoch found plus one (1 when none exist).

    Fixes over the original: no longer shadows the builtin ``max``, and no
    longer crashes on filenames with extra dots or non-numeric stems
    (e.g. "best.pth"), which ``file.split(".")`` + ``int()`` could not handle.
    """
    latest = 0
    for name in os.listdir(model_path):
        stem, ext = os.path.splitext(name)
        if ext != ".pth":
            continue
        try:
            saved_epoch = int(stem)
        except ValueError:  # not an "<int>.pth" checkpoint -- skip it
            continue
        latest = max(latest, saved_epoch)
    return latest + 1  # next epoch index


def trainOneEpoch():
    """Train ``net`` for one full pass over ``train_loader``.

    Per batch: loss = CrossEntropy (mean over samples) + weightDecay *
    sum of squared weights (manual L2 penalty). A confusion matrix is
    accumulated over the whole epoch and epoch-level mIoU / F1 / accuracy /
    mean loss are printed.
    """
    net.train(True)

    print("\n正在训练第{}个周期".format(epoch))

    # Built once and reused every batch. reduction='mean' replaces the
    # deprecated reduce=True flag; both average the per-sample losses.
    ce_loss = torch.nn.CrossEntropyLoss(reduction='mean')

    trainLoss = 0.
    # Epoch-wide confusion-matrix accumulators.
    eTP = 0
    eTN = 0
    eFP = 0
    eFN = 0
    length = 0.  # number of batches
    for i, (inputs, mask) in enumerate(tqdm(train_loader)):  # per batch
        X = inputs.to(device)
        Y = mask.to(device).long()

        output = net(X)  # raw logits; assumes shape (N, 2, H, W) -- TODO confirm
        gt = torch.squeeze(Y, 1)  # drop the channel dim -> (N, H, W) targets
        loss = ce_loss(output, gt)

        # Manual L2 regularization over all parameters (the optimizer's own
        # weight_decay is deliberately not used).
        L2Loss = 0
        for para in net.parameters():
            L2Loss += torch.sum(para*para)
        loss += weightDecay*L2Loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        trainLoss += loss.item()

        # Hard per-pixel prediction; softmax does not change the argmax but
        # mirrors the evaluation code path.
        output = F.softmax(output, dim=1)
        SR_eva = torch.argmax(output, dim=1, keepdim=False)

        TN, FP, FN, TP = BCM(SR_eva, Y)
        eTN += TN
        eFP += FP
        eFN += FN
        eTP += TP

        length += 1

    # Epoch-level metrics; 1e-7 guards against division by zero when a
    # class never occurs.
    eP = eTP/(eTP+eFP+1e-7)
    eR = eTP/(eTP+eFN+1e-7)
    iou0 = eTN/(eFP+eTN+eFN+1e-7)
    iou1 = eTP/(eFP+eTP+eFN+1e-7)

    miou_score = round((iou0+iou1)/2*100, 1)
    f1_score = round(2*eP*eR/(eP+eR+1e-7)*100, 1)
    accuracy_score = round((eTP+eTN)/(eTP+eFP+eFN+eTN+1e-7)*100, 1)

    trainLoss = round(trainLoss/length, 3)

    print(  "\nTrain Miou: %g %%" % (miou_score),
            "\nTrain F1-score: %g %%" % (f1_score),
          
            "\nTrain accuracy: %g %%" % (accuracy_score),
            "\ntrain loss: %g %%" % (trainLoss),

            )

######################每个epoch后开始评估=========================================================================================
def evaluateOneEpoch():
    """Evaluate ``net`` on ``val_loader``, record metrics, and checkpoint.

    Computes the same loss as training (CrossEntropy + manual L2) under
    torch.no_grad(), appends mIoU / F1 / accuracy / loss to the metrics
    file (parsed later by stop()), saves the weights as "<epoch>.pth",
    and advances the global epoch counter.
    """
    print("Start Evaluating!")
    global epoch
    valLoss = 0.
    # Validation-wide confusion-matrix accumulators.
    eTP = 0
    eTN = 0
    eFP = 0
    eFN = 0
    length = 0.  # number of validation batches
    net.eval()
    ce_loss = torch.nn.CrossEntropyLoss()  # built once, reused every batch
    for i, (inputs, mask) in enumerate(tqdm(val_loader)):
        with torch.no_grad():
            X = inputs.to(device)
            Y = mask.to(device).long()

            output = net(X)  # raw logits

            gt = torch.squeeze(Y, 1)
            loss = ce_loss(output, gt)

            # Same manual L2 penalty as training so the losses are comparable.
            L2Loss = 0
            for para in net.parameters():
                L2Loss += torch.sum(para*para)
            loss += weightDecay*L2Loss

            valLoss += loss.item()

            output = F.softmax(output, dim=1)
            SR_eva = torch.argmax(output, dim=1, keepdim=False)

            TN, FP, FN, TP = BCM(SR_eva, Y)
            eTN += TN
            eFP += FP
            eFN += FN
            eTP += TP

            length += 1

    # Metrics over the whole validation set (1e-7 avoids division by zero).
    eP = eTP/(eTP+eFP+1e-7)
    eR = eTP/(eTP+eFN+1e-7)
    iou0 = eTN/(eFP+eTN+eFN+1e-7)
    iou1 = eTP/(eFP+eTP+eFN+1e-7)

    miou_score = round((iou0+iou1+1e-7)/2*100, 1)
    f1_score = round(2*eP*eR/(eP+eR+1e-7)*100, 1)
    accuracy_score = round((eTP+eTN)/(eTP+eFP+eFN+eTN+1e-7)*100, 1)

    valLoss = round(valLoss/length, 3)

    # Append this epoch's metrics; stop() parses the "mIoU:" / "loss:" lines.
    with open(model_path+'评价指标.txt', 'a+') as f:
        f.write("\nepoch="+str(epoch)+"\n")
        f.write("mIoU:"+str(miou_score)+"\n")
        f.write("F1-score:"+str(f1_score)+"\n")
        f.write("accuracy:"+str(accuracy_score)+"\n")
        f.write("loss:"+str(valLoss)+"\n")

    # Save the model weights for this epoch, then advance the counter.
    torch.save(net.state_dict(), model_path+"/"+str(epoch)+".pth")
    epoch = epoch+1

    print(
            "\nVal Miou: %g %%" % (miou_score),
            "\nVal F1-score: %g %%" % (f1_score),
               
            "\nVal accuracy: %g %%" % (accuracy_score),
            "\nVal loss: %g %%" % (valLoss),
           
            )
def test():
    """Evaluate ``net`` on ``test_loader`` and print overall metrics.

    Same computation as evaluateOneEpoch(), but nothing is written to disk
    and the epoch counter is untouched. (Currently unused: the call in
    main() and the test_loader definition are both commented out.)
    """
    print("Start Testing!")
    valLoss = 0.
    eTP = 0
    eTN = 0
    eFP = 0
    eFN = 0
    length = 0.
    net.eval()
    ce_loss = torch.nn.CrossEntropyLoss()  # built once, reused every batch
    for i, (inputs, mask) in enumerate(tqdm(test_loader)):
        with torch.no_grad():
            X = inputs.to(device)
            Y = mask.to(device).long()

            output = net(X)  # raw logits

            gt = torch.squeeze(Y, 1)
            loss = ce_loss(output, gt)

            L2Loss = 0
            for para in net.parameters():
                L2Loss += torch.sum(para*para)
            loss += weightDecay*L2Loss

            valLoss += loss.item()

            output = F.softmax(output, dim=1)
            SR_eva = torch.argmax(output, dim=1, keepdim=False)

            TN, FP, FN, TP = BCM(SR_eva, Y)
            eTN += TN
            eFP += FP
            eFN += FN
            eTP += TP

            length += 1

    # BUGFIX: epsilon guards added for consistency with trainOneEpoch /
    # evaluateOneEpoch; without them an all-negative test set divides by zero.
    eP = eTP/(eTP+eFP+1e-7)
    eR = eTP/(eTP+eFN+1e-7)
    iou0 = eTN/(eFP+eTN+eFN+1e-7)
    iou1 = eTP/(eFP+eTP+eFN+1e-7)

    miou_score = round((iou0+iou1)/2*100, 1)
    f1_score = round(2*eP*eR/(eP+eR+1e-7)*100, 1)
    accuracy_score = round((eTP+eTN)/(eTP+eFP+eFN+eTN+1e-7)*100, 1)

    valLoss = round(valLoss/length, 3)

    # BUGFIX: labels said "Val" although this function reports the test set.
    print(
            "\nTest Miou: %g %%" % (miou_score),
            "\nTest F1-score: %g %%" % (f1_score),
            "\nTest accuracy: %g %%" % (accuracy_score),
            "\nTest loss: %g %%" % (valLoss),
            )
    
def predict():
    """Run inference over ``predict_loader`` and save each predicted mask.

    Each pixel's class is the softmax argmax; foreground pixels become 255
    and background 0 in the PNG written to ``predictPath + <name>.png``.
    """
    print("\nStart Predicting!")
    net.eval()
    for i, (inputs, filename) in enumerate(tqdm(predict_loader)):  # per batch
        X = inputs.to(device)
        # BUGFIX: inference now runs under no_grad() -- the original tracked
        # gradients for every predicted image, wasting memory.
        with torch.no_grad():
            output = net(X)
            # Explicit dim=1: calling softmax without dim is deprecated.
            output = F.softmax(output, dim=1)
        for j in range(output.shape[0]):  # j: image within the batch (no longer shadows i)
            array = torch.squeeze(output[j])
            # dim= is the canonical torch keyword (axis= was a numpy-ism).
            mask_array = torch.argmax(array, dim=0).data.cpu().numpy()
            final_mask = mask_array.astype(np.float32)
            final_mask = final_mask * 255  # class 1 -> 255, class 0 -> 0
            final_mask = final_mask.astype(np.uint8)
            print(predictPath + filename[j] + '.png')
            final_savepath = predictPath + filename[j] + '.png'
            im = Image.fromarray(final_mask)
            im.save(final_savepath)
def BCM(pred, gt):
    """Binary confusion matrix for a pair of 0/1 masks.

    Args:
        pred: predicted mask tensor containing 0s and 1s.
        gt: ground-truth mask tensor of the same shape.

    Returns:
        (TN, FP, FN, TP) as plain floats, counting pixels in each cell.
    """
    pred_pos = (pred == 1)
    pred_neg = (pred == 0)
    gt_pos = (gt == 1)
    gt_neg = (gt == 0)
    TP = float(torch.sum(pred_pos & gt_pos))
    FP = float(torch.sum(pred_pos & gt_neg))
    FN = float(torch.sum(pred_neg & gt_pos))
    TN = float(torch.sum(pred_neg & gt_neg))
    return TN, FP, FN, TP
# Script entry point: run the full train / evaluate / predict pipeline.
if __name__ == '__main__':
    main()