import torch 
from torchsummary import summary
from model.unet_model import UNet
from glob import glob
import torch.nn as nn
from dice_loss import dice_coeff, DiceCoeff
# from hubconf import unet_carvana
# import ssl
# ssl._create_default_https_context = ssl._create_unverified_context
# from predict import predict_img 
from PIL import Image
import os 
import matplotlib.pyplot as plt 
from torch.utils.tensorboard import SummaryWriter
import numpy as np 
from torch.utils.data import DataLoader, Dataset, TensorDataset
import tensorwatch as tw
import scipy.io as scio
## TensorBoard test; launch with: tensorboard --logdir={}
import torch.nn as nn 
from sklearn.decomposition import PCA
from sklearn.svm import SVC

# Prefer the GPU when one is available; all tensors/models below go here.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print("device is " + str(device))

# Number of training epochs used by the script in __main__.
epoches = 30

class FCNet(nn.Module):
    """Small convolutional binary classifier.

    Three strided 3x3 conv blocks halve the spatial size each time, a 1x1
    convolution collapses the channels to one, and a linear head maps the
    flattened features to 2 class logits.
    """

    def __init__(self):
        super(FCNet, self).__init__()
        # Feature extractor: 8 -> 32 -> 64 -> 32 channels, stride-2 convs.
        self.net = nn.Sequential(
            nn.Conv2d(8, 32, 3, stride=(2, 2)),
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.Conv2d(32, 64, 3, stride=(2, 2)),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 32, 3, stride=(2, 2)),
            nn.ReLU(),
        )
        # 1x1 conv collapses the 32 feature maps into a single map.
        self.final_conv = nn.Conv2d(32, 1, 1)
        # NOTE(review): 11 hard-codes the flattened spatial size of the conv
        # output, so only one input resolution is supported — confirm against
        # the data pipeline before changing input shapes.
        self.fc = nn.Linear(11, 2)

    def forward(self, x):
        # A rank-3 input is treated as missing an axis and gets one inserted
        # at dim 1 — assumes callers pass (N, H, W)-like data; TODO confirm,
        # since Conv2d here expects 8 input channels.
        if x.dim() == 3:
            x = x.unsqueeze(1)

        features = self.final_conv(self.net(x))
        flat = features.view(features.shape[0], -1)
        return self.fc(flat)

if __name__ == "__main__":
    import h5py

    # Load pre-extracted features and diagnosis labels.
    # Labels: 1 = normal, 2 = MCI, 3 = AD (per the original notes).
    h5_file = h5py.File("AD_data.h5", "r")
    X = h5_file["X"][()]
    Y = h5_file["Y"][()]

    # Binary task: drop MCI (label 2); remap AD (3) -> 2 and normal -> 1,
    # then shift to {0, 1} below as CrossEntropyLoss expects 0-based ids.
    XX = []
    YY = []
    for x, y in zip(X, Y):
        if int(y) != 2:
            XX.append(x)
            YY.append(2 if int(y) == 3 else 1)

    # Stack into one ndarray first: torch.tensor on a list of ndarrays is
    # very slow (element-by-element copy).
    X = torch.tensor(np.asarray(XX), dtype=torch.float32)
    Y = torch.tensor(YY, dtype=torch.long) - 1  # class ids in {0, 1}
    print(X.shape)
    print(Y.shape)

    dataset = TensorDataset(X, Y)

    # Random 80/20 train/test split.
    train_size = int(0.8 * len(dataset))
    test_size = len(dataset) - train_size
    train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])

    train_dataloader = DataLoader(train_dataset, batch_size=2, shuffle=True)
    test_dataloader = DataLoader(test_dataset, batch_size=2, shuffle=True)

    net = FCNet()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=1e-5)
    loss_func = nn.CrossEntropyLoss()
    net.to(device)

    for epoch in range(epoches):
        # Re-enter train mode each epoch (evaluation below switches to eval).
        net.train()
        epoch_loss = 0
        print("epoch is " + str(epoch))
        for x, y in train_dataloader:
            optimizer.zero_grad()
            x = x.to(device)
            y = y.to(device)

            pred = net(x)
            y = y.view(-1)
            loss = loss_func(pred, y)
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()

        print("epoch loss is " + str(epoch_loss))

        # Evaluate on the held-out split. net.eval() is required here so
        # BatchNorm uses its running statistics instead of per-batch stats
        # (and stops updating them with test data).
        net.eval()
        with torch.no_grad():
            right = 0
            num = len(test_dataset)
            for test_X, test_Y in test_dataloader:
                test_X = test_X.to(device)
                test_Y = test_Y.to(device)
                pred_t = net(test_X).argmax(-1)
                right += (pred_t == test_Y.view(-1)).sum().item()

            acc = right / num
            print("acc is " + str(acc))