import numpy as np
import torch
from torch import nn
from PIL import Image
import torchvision.transforms as transforms
from PIL import ImageFile
from torch.utils.data import Dataset
from torch.optim import lr_scheduler
import matplotlib.pyplot as plt
import matplotlib as mlb
import pandas as pd
import os
from torch.utils.data import TensorDataset, DataLoader
from gdordbfcnl import myFcn
# from gotmd_fcn import gotmdFcn
from db_fcn import dbFcn

# Work around duplicate-OpenMP-runtime aborts (common MKL/conda conflict).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Let PIL load truncated image files instead of raising.
ImageFile.LOAD_TRUNCATED_IMAGES = True

# Merge the two reduced CSV datasets and cap the total row count.
train_dataset1 = pd.read_csv("reduced_data.csv", index_col=None)
train_dataset2 = pd.read_csv("reduced_gotmd.csv", index_col=None)
train_dataset = pd.concat([train_dataset2, train_dataset1], ignore_index=True)
train_dataset = train_dataset.iloc[:276352]

features = train_dataset.drop(['label', 'dborgot'], axis=1)
labels = train_dataset['label']
# Label distinguishing which of the two source datasets each row came from.
dborgotlabels = train_dataset['dborgot']

# DataFrame/Series -> numpy -> float32 tensors in one step each.
features = torch.tensor(np.array(features), dtype=torch.float32)
labels = torch.tensor(np.array(labels), dtype=torch.float32)
dborgotlabels = torch.tensor(np.array(dborgotlabels), dtype=torch.float32)

dataset = TensorDataset(features, labels, dborgotlabels)
train_loader = DataLoader(dataset=dataset, batch_size=128, shuffle=True)
# NOTE(review): test_loader iterates the same (training) dataset — confirm intended.
test_loader = DataLoader(dataset=dataset, batch_size=128, shuffle=True)

# Prefer GPU when available.  (The original hard-coded `device = "cuda"`
# immediately before this line was dead code and has been removed.)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = myFcn().to(device)    # classifier trained on the `dborgot` label
dbmodel = dbFcn().to(device)  # diabetes staging model

# Cross-entropy loss, shared by both models.
Loss_fn = nn.CrossEntropyLoss().to(device)

# Optimizer + LR schedule for the dataset-origin classifier.
Yh_fn = torch.optim.SGD(model.parameters(), lr=5e-4, momentum=0.9)
lrscheduler = lr_scheduler.StepLR(Yh_fn, step_size=1, gamma=0.01)

# Optimizer + LR schedule for the diabetes staging model.
db_Yh_fn = torch.optim.SGD(dbmodel.parameters(), lr=5e-4, momentum=0.9)
db_lr_scheduler = lr_scheduler.StepLR(db_Yh_fn, step_size=1, gamma=0.01)

# Per-predicted-class sample pools filled during mytrain() and consumed below.
all_x_db = []
all_y_db = []
all_x_gotmd = []
all_y_gotmd = []


def mytrain(train_data, model, Loss_fn, Yh_fn):
    """Train the dataset-origin classifier for one pass over `train_data`.

    Each batch yields (x, y, z): x features, y the task label (carried along
    for routing only), z the db-vs-gotmd label the classifier is trained on.
    Samples are routed by *predicted* class into the module-level pools
    all_x_db / all_y_db / all_x_gotmd / all_y_gotmd as a side effect.

    Returns:
        (mean accuracy, mean loss) over the batches; (0.0, 0.0) if the
        loader is empty (the original divided by zero in that case).
    """
    total_loss, total_acc, n = 0.0, 0.0, 0
    for batch, (x, y, z) in enumerate(train_data):
        x, y, z = x.to(device), y.to(device), z.to(device)
        # NOTE(review): per-batch bookkeeping inside myFcn — confirm semantics.
        model.set_val(batch, 1)
        output = model(x)
        Loss = Loss_fn(output, z.long())
        # `dim=` is the torch-native keyword (the original used numpy-style
        # `axis=`); the max values themselves are unused.
        _, Pred_idx = torch.max(output, dim=1)
        cur_acc = torch.sum(z == Pred_idx) / output.shape[0]
        # x/y are already on `device`; no extra transfer needed when slicing.
        all_x_db.append(x[Pred_idx == 0])
        all_y_db.append(y[Pred_idx == 0])
        all_x_gotmd.append(x[Pred_idx == 1])
        all_y_gotmd.append(y[Pred_idx == 1])
        Yh_fn.zero_grad()
        Loss.backward()
        Yh_fn.step()
        total_loss += Loss.item()
        total_acc += cur_acc.item()
        n += 1
    if n == 0:
        return 0.0, 0.0
    return total_acc / n, total_loss / n


def dbtrain(train_data, model, Loss_fn, db_Yh_fn, n_eporch):
    """Train the diabetes staging model for one epoch.

    Args:
        train_data: DataLoader yielding (x, y) batches.
        model: staging network exposing set_val(epoch, batch).
        Loss_fn: criterion applied to (logits, y.long()).
        db_Yh_fn: optimizer over `model`'s parameters.
        n_eporch: current epoch index, forwarded to model.set_val.

    Returns:
        (mean accuracy, mean loss) over the batches; (0.0, 0.0) if the
        loader is empty (the original divided by zero in that case).
    """
    total_loss, total_acc, n = 0.0, 0.0, 0
    for batch, (x, y) in enumerate(train_data):
        x, y = x.to(device), y.to(device)
        model.set_val(n_eporch, batch)
        # BUG FIX: forward through the passed-in `model`, not the global
        # `dbmodel` — the original silently ignored its `model` argument.
        # (The current caller passes `dbmodel`, so results are unchanged.)
        output = model(x)
        Loss = Loss_fn(output, y.long())
        _, Pred_idx = torch.max(output, dim=1)
        cur_acc = torch.sum(y == Pred_idx) / output.shape[0]
        db_Yh_fn.zero_grad()
        Loss.backward()
        db_Yh_fn.step()
        total_loss += Loss.item()
        total_acc += cur_acc.item()
        n += 1
    if n == 0:
        return 0.0, 0.0
    return total_acc / n, total_loss / n


# --- Stage 1: one pass of the dataset-origin classifier; this also fills
# the all_x_db / all_y_db pools with samples predicted as class 0. ---
t_a, t_loss = mytrain(train_loader, model, Loss_fn, Yh_fn)
print('疾病分类预测正确率:' + str(t_a) + '，训练Loss:' + str(t_loss))

# Concatenate everything the classifier routed to the "db" class.
all_x_db_tensor = torch.cat(all_x_db, dim=0)
all_y_db_tensor = torch.cat(all_y_db, dim=0)
x_db_length = all_x_db_tensor.size(0)
# Trim to a whole number of 128-sample batches.  BUG FIX: the original used
# round(), which could round *up* past the tensor length, leaving a ragged
# final batch after slicing; flooring guarantees an exact multiple of 128.
floor_multiple_128 = (x_db_length // 128) * 128
all_x_db_tensor = all_x_db_tensor[:floor_multiple_128]
all_y_db_tensor = all_y_db_tensor[:floor_multiple_128]
# NOTE(review): `db_dataset` is rebound from TensorDataset to DataLoader;
# the name is kept for compatibility with existing code/readers.
db_dataset = TensorDataset(all_x_db_tensor, all_y_db_tensor)
db_dataset = DataLoader(dataset=db_dataset, batch_size=128, shuffle=True)

# --- Stage 2: train the diabetes staging model on the routed samples. ---
db_epoch = 100
for t in range(db_epoch):
    print(f'糖尿病批次{t + 1}训练')
    db_a, db_loss = dbtrain(db_dataset, dbmodel, Loss_fn, db_Yh_fn, t)
    print('糖尿病训练正确率:' + str(db_a) + '，训练Loss:' + str(db_loss))
print('yes')
print(all_x_db_tensor.shape, "ok")