# import numpy as np
# import torch
# from torch import nn
# from PIL import Image
# import torchvision.transforms as transforms
# from PIL import ImageFile
# from torch.utils.data import Dataset
# from torch.optim import lr_scheduler
# import matplotlib.pyplot as plt
# import matplotlib as mlb
# import pandas as pd
# import os
# from torch.utils.data import TensorDataset, DataLoader
# from gdordbfcnl import myFcn
# from gotmd_fcn import gotmdFcn
# from db_fcn import dbFcn
#
# os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# ImageFile.LOAD_TRUNCATED_IMAGES = True
#
# train_dataset1 = pd.read_csv("reduced_data.csv", index_col=None)
# train_dataset2 = pd.read_csv("reduced_gotmd.csv", index_col=None)
# train_dataset = pd.concat([train_dataset2, train_dataset1], ignore_index=True)
# # first_row = train_dataset.iloc[0]
# # print(first_row)
# train_dataset = train_dataset.iloc[:276352]
# features = train_dataset.drop(['label', 'dborgot'], axis=1)
# labels = train_dataset['label']
# dborgotlabels = train_dataset['dborgot']
#
# # 区分两个数据集的标签
#
# features = np.array(features)
# labels = np.array(labels)
# dborgotlabels = np.array(dborgotlabels)
# # print(labels.shape)
# # print(features.shape)
# # exit()
# features = torch.tensor(features, dtype=torch.float32)
# labels = torch.tensor(labels, dtype=torch.float32)
# dborgotlabels = torch.tensor(dborgotlabels, dtype=torch.float32)
# # dataset = TensorDataset(features, labels)
# dataset = TensorDataset(features, labels, dborgotlabels)
# train_loader = DataLoader(dataset=dataset,
#                           batch_size=128,
#                           shuffle=True)
# test_loader = torch.utils.data.DataLoader(dataset=dataset,
#                                           batch_size=128,
#                                           shuffle=True)
# device = "cuda"
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# # cuda
# # 用于诊断糖尿病与痛风的模型
# fenleimodel = myFcn().to(device)
# # 用于痛风分期的模型
# gotmdmodel = gotmdFcn().to(device)
# # 用于糖尿病分期的模型
# dbmodel = dbFcn().to(device)
# # 损失函数，交叉熵
# Loss_fn = nn.CrossEntropyLoss().to(device)
#
# # 分类模型的优化器
# Yh_fn = torch.optim.SGD(fenleimodel.parameters(), lr=5e-4,
#                         momentum=0.9)  # ,weight_decay=1e-2,nesterov=True)#,weight_decay=2,nesterov=True
# lrscheduler = lr_scheduler.StepLR(Yh_fn, step_size=1, gamma=0.01)
#
# # 糖尿病分期模型的优化器
# Yh_db_fn = torch.optim.SGD(dbmodel.parameters(), lr=5e-4,
#                            momentum=0.9)
# lr_db_scheduler = lr_scheduler.StepLR(Yh_db_fn, step_size=1, gamma=0.01)
#
# train_loss_list = []
# train_acc_list = []
# val_loss_list = []
# val_acc_list = []
#
#
# def adjust_tensor_to_multiple_of_8(x_db):
#     # 获取 x_db 的当前大小
#     current_size = x_db.size(0)
#
#     # 计算调整后的大小，向下截断到最近的 8 的倍数
#     adjusted_size = (current_size // 8) * 8
#
#     # 如果 adjusted_size 为 0，则需要特殊处理
#     if adjusted_size == 0:
#         adjusted_size = 8
#
#     if current_size >= 8:
#         # 截断到最近的 8 的倍数
#         x_db = x_db[:adjusted_size]
#     else:
#         # 如果 x_db 的大小小于 8，进行填充
#         repeat_count = (8 - current_size) // current_size + 1
#         x_db = x_db.repeat(repeat_count, 1)[:8]
#
#     return x_db
#
#
# def mytrain(train_data, fenleimodel, dbmodel, Loss_fn, Yh_fn, n_epoch):
#     total_loss, total_acc, n = 0.0, 0.0, 0
#     n = 0
#     # i = 0
#     for batch, (x, y, z) in enumerate(train_data):
#         x, y, z = x.to(device), y.to(device), z.to(device)
#         # print(batch)
#         # 执行
#         fenleimodel.set_val(n_epoch, batch)
#         dbmodel.set_val(n_epoch, batch)
#         output = fenleimodel(x)
#         Loss = Loss_fn(output, z.long())
#         MaxValue, Pred_idx = torch.max(output, axis=1)
#         # x_gotmd = x[Pred_idx == 1]
#         x_db = x[Pred_idx == 0].to(device)
#         # y_gotmd = y[Pred_idx == 1]
#         y_db = y[Pred_idx == 0].to(device)
#         if x_db.size(0) == 0:
#             continue
#         x_db = adjust_tensor_to_multiple_of_8(x_db).to(device)
#         y_db = adjust_tensor_to_multiple_of_8(y_db).to(device)
#
#         print(x_db.shape)
#         print(y_db.shape)
#         print(x_db.device)
#         # exit()
#         # outputdb = (x)
#         print("hello")
#         # exit()
#         outputdb=dbmodel(x_db)
#         dbmaxvalue, db_pred_idx = torch.max(outputdb, axis=1)
#         cur_acc = torch.sum(y_db == db_pred_idx).to(device) / outputdb.shape[0]
#         # exit()
#         # 计算该批次预测值准确率=正确个数/总个数
#         # 总体准确率越高则越好，匹配度越高
#         # cur_acc = torch.sum(z == Pred_idx) / output.shape[0]
#         Yh_fn.zero_grad()  # x.grad=0，x是output,y
#         Loss.backward()  # cur_loss是交叉熵函数，cur_loss求导
#
#         # 更新全部权重及偏置参数
#         Yh_fn.step()
#
#         # 总误差
#         total_loss += Loss.item()
#         total_acc += cur_acc.item()
#
#         n = n + 1
#
#     print('batch:' + str(batch) + ',n=' + str(n))
#     # print("333")
#     # exit()
#     return (total_acc / n), total_loss / n
#
#     # print("训练：总误差:"+str(total_loss)+',平均误差'+str(total_loss/n)+',n='+str(n))
#     # print("训练：总体准确率:"+str(total_acc)+',平均准确率'+str(total_acc/n)+',n='+str(n))
#
#
# # 模型验证
# def valM(train_data, model, loss_fn):
#     model.eval()
#     loss, current, n = 0.0, 0.0, 0
#     with torch.no_grad():
#         for batch, (x, y, z) in enumerate(train_data):
#             x, y, z = x.to(device), y.to(device), z.to(device)
#             output = model(x)
#             cur_loss = loss_fn(output, z.long())
#             # cur_loss = loss_fn(output, y.unsqueeze(1).unsqueeze(2).long())
#             _, pred = torch.max(output, axis=1)
#             cur_acc = torch.sum(z == pred) / output.shape[0]
#             loss += cur_loss.item()
#             current += cur_acc.item()
#             n = n + 1
#
#         return current / n, loss / n
#
#
# epoch = 100  # 训练次数
# min_acc = 0
# for t in range(epoch):
#     print(f'批次{t + 1}训练:')
#     # 训练
#     # print(train_loader.shape)
#     t_a, t_loss = mytrain(train_loader, fenleimodel, dbmodel, Loss_fn, Yh_fn, t)
#     # print("333")
#     # exit()
#     # train_loss_list.append(t_loss)
#     # train_acc_list.append(t_a)
#
#     print('训练正确率:' + str(t_a) + '，训练Loss:' + str(t_loss))
#
#     # 验证
#     v_a, v_loss = valM(test_loader, fenleimodel, dbmodel,Loss_fn)
#     val_loss_list.append(v_loss)
#     val_acc_list.append(v_a)
#     print('验证：历史正确率:' + str(min_acc) + '，最新正确率:' + str(v_a) + '，Loss=' + str(v_loss))
#     # if v_a > min_acc:
#     # 保存模型
#     # print('验证：上次正确率:'+str(min_acc)+'，最新正确率:'+str(a))
#     # min_acc = v_a
#     # torch.save(model.state_dict(), 'best_model_flower.pth')
#     # lr_scheduler.step()
#
# mlb.rcParams['font.family'] = 'SimHei'  # 'STKAITI'——字体
#
# plt.figure(figsize=(10, 10))
# plt.subplot(1, 2, 1)
# print("printing")
# plt.plot(train_acc_list, label='训练准确率')
# plt.plot(val_acc_list, label='验证准确率')
# plt.legend(loc='lower right')
# plt.title('训练、验证准确率')
#
# plt.subplot(1, 2, 2)
# plt.plot(train_loss_list, label='训练误差')
# plt.plot(val_loss_list, label='验证误差')
# plt.legend(loc='lower right')
# plt.title('训练、验证损失')
#
# plt.show()
#
# print('done')

import numpy as np
import torch
from torch import nn
from PIL import Image
import torchvision.transforms as transforms
from PIL import ImageFile
from torch.utils.data import Dataset
from torch.optim import lr_scheduler
import matplotlib.pyplot as plt
import matplotlib as mlb
import pandas as pd
import os
from torch.utils.data import TensorDataset, DataLoader
from gdordbfcnl import myFcn
from gotmd_fcn import gotmdFcn
from db_fcn import dbFcn

# Work around the "duplicate OpenMP runtime" crash seen when MKL and
# PyTorch both ship libiomp (common on Windows).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# Expose only the first GPU to this process.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Let PIL load partially-truncated images instead of raising.
ImageFile.LOAD_TRUNCATED_IMAGES = True

# Load and merge the two CSV datasets: gout rows first, then the reduced data.
train_dataset1 = pd.read_csv("reduced_data.csv", index_col=None)
train_dataset2 = pd.read_csv("reduced_gotmd.csv", index_col=None)
train_dataset = pd.concat([train_dataset2, train_dataset1], ignore_index=True)
# Keep a fixed number of rows.  276352 = 128 * 2159, i.e. a whole number of
# batches at batch_size=128 -- TODO(review): confirm why this exact cutoff.
train_dataset = train_dataset.iloc[:276352]
# 'label' and 'dborgot' are the two targets; every other column is a feature.
# NOTE(review): assumed 'label' = staging target and 'dborgot' = which disease
# (diabetes vs. gout) based on how they are used below -- verify upstream.
features = train_dataset.drop(['label', 'dborgot'], axis=1)
labels = train_dataset['label']
dborgotlabels = train_dataset['dborgot']

features = np.array(features)
labels = np.array(labels)
dborgotlabels = np.array(dborgotlabels)

# float32 tensors; targets are cast to long where the loss is computed.
features = torch.tensor(features, dtype=torch.float32)
labels = torch.tensor(labels, dtype=torch.float32)
dborgotlabels = torch.tensor(dborgotlabels, dtype=torch.float32)

dataset = TensorDataset(features, labels, dborgotlabels)
# NOTE(review): train and "test" loaders iterate the SAME dataset, so the
# validation numbers below measure training-set performance only.
train_loader = DataLoader(dataset=dataset, batch_size=128, shuffle=True)
test_loader = DataLoader(dataset=dataset, batch_size=128, shuffle=True)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Model that classifies diabetes vs. gout.
fenleimodel = myFcn().to(device)
# Model for gout staging.
gotmdmodel = gotmdFcn().to(device)
# Model for diabetes staging.
dbmodel = dbFcn().to(device)
# Loss function: cross entropy.
Loss_fn = nn.CrossEntropyLoss().to(device)

# Optimizer for the classifier model.
Yh_fn = torch.optim.SGD(fenleimodel.parameters(), lr=5e-4, momentum=0.9)
# NOTE(review): gamma=0.01 would shrink the LR 100x per step, but no
# scheduler.step() call is active below -- confirm whether the schedulers
# are intentionally unused.
lrscheduler = lr_scheduler.StepLR(Yh_fn, step_size=1, gamma=0.01)

# Optimizer for the diabetes-staging model.
Yh_db_fn = torch.optim.SGD(dbmodel.parameters(), lr=5e-4, momentum=0.9)
lr_db_scheduler = lr_scheduler.StepLR(Yh_db_fn, step_size=1, gamma=0.01)

# Curve buffers consumed by the plots at the bottom of the script.
train_loss_list = []
train_acc_list = []
val_loss_list = []
val_acc_list = []


def adjust_tensor_to_multiple_of_8(x_db):
    """Resize ``x_db`` along dim 0 so its length is a multiple of 8.

    Tensors with at least 8 rows are truncated down to the nearest multiple
    of 8; shorter (non-empty) tensors are padded up to exactly 8 rows by
    repeating their contents.  The result is moved to the module-level
    ``device``.

    Args:
        x_db: non-empty tensor; any number of dimensions (only dim 0 is
            resized).  Raises ZeroDivisionError if ``x_db`` has 0 rows.

    Returns:
        Tensor on ``device`` whose first dimension is a multiple of 8.
    """
    current_size = x_db.size(0)

    if current_size >= 8:
        # Truncate down to the nearest multiple of 8.
        x_db = x_db[: (current_size // 8) * 8]
    else:
        # Pad up to 8 rows by repetition.  ceil(8 / current_size) copies are
        # always enough; the slice trims the excess.  (The previous formula
        # `(8 - n) // n + 1` under-repeated, e.g. 3 rows became 6, not 8.)
        repeat_count = -(-8 // current_size)
        # Repeat along dim 0 only, so 1-D label tensors work too (the old
        # `.repeat(k, 1)` turned a 1-D input into a 2-D tensor).
        reps = [repeat_count] + [1] * (x_db.dim() - 1)
        x_db = x_db.repeat(*reps)[:8]

    return x_db.to(device)  # ensure the returned tensor lives on `device`


# Global buffers collecting the rows routed to the diabetes-staging model
# across mytrain() calls.  NOTE(review): they are never cleared, so they grow
# without bound across epochs -- confirm whether a per-epoch reset is intended.
all_x_db = []
all_y_db = []


def mytrain(train_data, fenleimodel, dbmodel, Loss_fn, Yh_fn, n_epoch):
    """Run one training pass of the classifier over ``train_data``.

    Args:
        train_data: iterable yielding ``(x, y, z)`` batches, where ``z`` is
            the classifier target and ``y`` the staging target.
        fenleimodel: diabetes-vs-gout classifier; must expose ``set_val``.
        dbmodel: diabetes-staging model; put into train mode and given
            ``set_val``, but its forward pass is currently disabled.
        Loss_fn: loss applied to the classifier output against ``z``.
        Yh_fn: optimizer for ``fenleimodel``.
        n_epoch: current epoch index, forwarded to ``set_val``.

    Returns:
        ``(mean_accuracy, mean_loss)`` over processed batches, or
        ``(0.0, 0.0)`` when no batch was processed.  NOTE: accuracy
        accumulation is currently disabled upstream, so the first element
        is always 0.0.
    """
    total_loss, total_acc, n = 0.0, 0.0, 0
    batch = -1  # stays defined for the final print even if train_data is empty
    fenleimodel.train()
    dbmodel.train()

    for batch, (x, y, z) in enumerate(train_data):
        x, y, z = x.to(device), y.to(device), z.to(device)

        fenleimodel.set_val(n_epoch, batch)
        dbmodel.set_val(n_epoch, batch)
        output = fenleimodel(x)
        Loss = Loss_fn(output, z.long())
        _, Pred_idx = torch.max(output, axis=1)

        # Rows the classifier assigned to class 1 are buffered for the
        # staging model.  NOTE(review): the legacy (commented-out) version
        # selected ``Pred_idx == 0`` -- confirm which class id is intended.
        x_db = x[Pred_idx == 1].to(device)
        y_db = y[Pred_idx == 1].to(device)

        if x_db.size(0) == 0:
            continue

        print(x_db.shape)
        print(y_db.shape)
        print(x_db.device)
        all_x_db.append(x_db)
        all_y_db.append(y_db)

        Yh_fn.zero_grad()
        Loss.backward()
        Yh_fn.step()

        total_loss += Loss.item()
        n += 1
        # TODO(review): debug leftover?  This `break` stops after the first
        # batch that produced class-1 predictions, so each epoch trains on
        # at most one batch.
        break

    if all_x_db:
        new_x_db = torch.cat(all_x_db, dim=0)
        new_y_db = torch.cat(all_y_db, dim=0)
        print('Combined x_db shape:', new_x_db.shape)
        print('Combined y_db shape:', new_y_db.shape)
    print('batch:', batch, ', n=', n)
    # Guard against ZeroDivisionError when every batch was skipped (or the
    # loader was empty).
    if n == 0:
        return 0.0, 0.0
    return (total_acc / n), total_loss / n


def valM(train_data, model, loss_fn):
    """Evaluate ``model`` on ``train_data`` without gradient tracking.

    Args:
        train_data: iterable yielding ``(x, y, z)`` batches; ``z`` is the
            target compared against the argmax prediction (``y`` is unused
            here but part of the dataset tuple).
        model: classifier to evaluate; switched to eval mode.
        loss_fn: loss applied to the model output against ``z``.

    Returns:
        ``(mean_accuracy, mean_loss)`` over all batches, or ``(0.0, 0.0)``
        for an empty loader (avoids ZeroDivisionError).
    """
    model.eval()
    loss, current, n = 0.0, 0.0, 0
    with torch.no_grad():
        for batch, (x, y, z) in enumerate(train_data):
            x, y, z = x.to(device), y.to(device), z.to(device)
            output = model(x)
            cur_loss = loss_fn(output, z.long())
            _, pred = torch.max(output, axis=1)
            # Batch accuracy = correct predictions / batch size.
            cur_acc = torch.sum(z == pred).item() / output.shape[0]
            loss += cur_loss.item()
            current += cur_acc
            n += 1

    if n == 0:
        return 0.0, 0.0
    return current / n, loss / n


epoch = 100  # number of training epochs
min_acc = 0
for t in range(epoch):
    print(f'批次 {t + 1} 训练:')
    t_a, t_loss = mytrain(train_loader, fenleimodel, dbmodel, Loss_fn, Yh_fn, t)
    # Record the training curves so the plots at the bottom of the script
    # have data: previously only the validation lists were populated and the
    # training curves were always empty in the figure.
    train_loss_list.append(t_loss)
    train_acc_list.append(t_a)

    print('训练正确率:', t_a, '，训练Loss:', t_loss)

    # "Validation" runs on the same dataset as training (see loader setup).
    v_a, v_loss = valM(test_loader, fenleimodel, Loss_fn)
    val_loss_list.append(v_loss)
    val_acc_list.append(v_a)

    print('验证：历史正确率:', min_acc, '，最新正确率:', v_a, '，Loss=', v_loss)
    # NOTE(review): min_acc is printed but never updated and no checkpoint is
    # saved -- the best-model bookkeeping from the legacy version is still
    # disabled.  Confirm whether it should be restored.

# Use a CJK-capable font so the Chinese labels render.
mlb.rcParams['font.family'] = 'SimHei'

plt.figure(figsize=(10, 10))

# Two side-by-side panels: accuracy curves on the left, loss curves on the
# right.  Each panel plots its training series, then its validation series.
panels = [
    (1, [(train_acc_list, '训练准确率'), (val_acc_list, '验证准确率')], '训练、验证准确率'),
    (2, [(train_loss_list, '训练误差'), (val_loss_list, '验证误差')], '训练、验证损失'),
]
for position, curves, panel_title in panels:
    plt.subplot(1, 2, position)
    for series, series_label in curves:
        plt.plot(series, label=series_label)
    plt.legend(loc='lower right')
    plt.title(panel_title)

plt.show()

print('done')
