import sys

import numpy as np
from matplotlib import pyplot as plt
import sklearn
from sklearn import metrics
import pandas as pd
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder
# # %%
# class AlexNet(nn.Module):
#     def __init__(self):
#         super(AlexNet, self).__init__()
#         self.conv = nn.Sequential(
#             # in_channels, out_channels, kernel_size, stride
#             nn.Conv2d(3, 96, 11, 4),
#             nn.ReLU(),
#             # kernel_size, stride
#             nn.MaxPool2d(3, 2),
#
#             # 减小卷积窗口，使用步长为2来使得输入与输出的高和宽一致，
#             # 且增大输出通道数
#             nn.Conv2d(96, 256, 5, 1, 2),
#             nn.ReLU(),
#             nn.MaxPool2d(3, 2),
#             # 连续3个卷积层，且使用更小的卷积窗口。除了最后的卷积层外，
#             # 进一步增大了输出通道数。
#             # 前两个卷积层后不使用池化层来减小输入的高和宽
#             nn.Conv2d(256, 384, 3, 1, 1),
#             nn.ReLU(),
#             nn.Conv2d(384, 384, 3, 1, 1),
#             nn.ReLU(),
#             nn.Conv2d(384, 256, 3, 1, 1),
#             nn.ReLU(),
#             nn.MaxPool2d(3, 2)
#         )
#         # 这里全连接层的输出个数比LeNet中的大数倍。使用丢弃层来缓解过拟合
#         self.fc = nn.Sequential(
#             nn.Linear(256 * 5 * 5, 4096),
#             nn.ReLU(),
#             nn.Dropout(0.5),
#             nn.Linear(4096, 4096),
#             nn.ReLU(),
#             nn.Dropout(0.5),
#             # 输出层。由于这里使用Fashion-MNIST
#             # 所以用类别数为10，而非论文中的1000
#             nn.Linear(4096, 2)
#         )
#
#     def forward(self, img):
#         feature = self.conv(img)
#         output = self.fc(feature.view(img.shape[0], -1))
#         return output
#
#
# net = AlexNet()
# net.load_state_dict(torch.load('E:\Class_exprience\Machine_Vision\9_class_design\model16.pt'), True)
#
# # %%加载数据
# # 超参数
# batch_size = 32  # 每批里面的样本数
# resize = 224  # 为什么变为224？？？
#
# transform = transforms.Compose([
#     transforms.Resize((resize, resize)),
#     # transforms.RandomHorizontalFlip(p=0.5),
#     # transforms.RandomRotation(25),
#     # transforms.RandomVerticalFlip(p=0.1),
#     transforms.ToTensor(),
#     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
#
# ])
#
# train_data = ImageFolder(r"E:\Class_exprience\Machine_Vision\9_class_design\training_set\training_set",
#                          transform=transform)
#
# test_data = ImageFolder(r"E:\Class_exprience\Machine_Vision\9_class_design\test_set\test_set", transform=transform)
#
#
#
#
# # sys.platfrom 获取操作系统名称，这里是windows
# # startswith 判断子字符串是否与主字符串一致
# # 可省
# if sys.platform.startswith('win'):
#     # 0表示不用额外的进程来加速读取数据
#     num_workers = 0
# else:
#     num_workers = 4
#
# # torch.utils.data.DataLoader 小批量读取数据
# # batch=235,batch_size=256,examples=60160 约为 60000
# train_iter = torch.utils.data.DataLoader(
#     train_data,  # 数据（特征，标签）
#     batch_size=batch_size,  # 批量样本数
#     shuffle=True,  # 打乱顺序，一般为True
#     num_workers=num_workers)  # 额外进程，一般用于加速加载数据
# test_iter = torch.utils.data.DataLoader(
#     test_data,
#     batch_size=batch_size,
#     shuffle=False,
#     num_workers=num_workers)
#
# # %%
# y_test_pre=[]
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#
# for x, y in test_iter:  # 通过这种方式直接接收参数
#     net.eval()
#     net = net.to(device)
#     y_pre=torch.argmax(net(x.to(device)),dim=1)
#     y_test_pre.extend(y_pre.tolist())
# y_test_pre=np.array(y_test_pre)
#
# np.save(r'E:\Class_exprience\Machine_Vision\9_class_design\Save_Data\y_test_pre.npy',y_test_pre)
#
# y_test_true=np.array(test_iter.dataset.targets)
# np.save(r'E:\Class_exprience\Machine_Vision\9_class_design\Save_Data\y_test_true',y_test_true)
# --- Evaluation of saved predictions --------------------------------------
# Load the predicted labels and ground-truth labels produced by the
# (commented-out) inference pass above.
y_test_pre = np.load(r'E:\Class_exprience\Machine_Vision\9_class_design\Save_Data\y_test_pre.npy')
y_test_true = np.load(r'E:\Class_exprience\Machine_Vision\9_class_design\Save_Data\y_test_true.npy')

# Scalar metrics for the binary (2-class) classification task.
accuracy = sklearn.metrics.accuracy_score(y_test_true, y_test_pre)
precision = sklearn.metrics.precision_score(y_test_true, y_test_pre)
recall = sklearn.metrics.recall_score(y_test_true, y_test_pre)
f1 = sklearn.metrics.f1_score(y_test_true, y_test_pre)
report = sklearn.metrics.classification_report(y_test_true, y_test_pre)

# Confusion-matrix plot.
# NOTE: renamed the local from `confusion_matrix` so it no longer shadows
# sklearn.metrics.confusion_matrix.
cm = sklearn.metrics.confusion_matrix(y_test_true, y_test_pre)
cm_display = sklearn.metrics.ConfusionMatrixDisplay(cm)
cm_display.plot()

# ROC curve.
# BUGFIX: the original passed `y_test_pre1`, which was only assigned *after*
# this line (a NameError at runtime) and, even then, held random noise
# (`np.random.randn(2023)`).  Use the actual predictions instead.
# NOTE(review): hard 0/1 predictions yield only a 3-point ROC; for a smooth
# curve, feed the model's class-1 probabilities/scores here instead.
fpr, tpr, thresholds = sklearn.metrics.roc_curve(y_test_true, y_test_pre)
roc_auc = sklearn.metrics.auc(fpr, tpr)
display = sklearn.metrics.RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc)
display.plot()
plt.show()

# wenjian=pd.DataFrame(data=y_test_pre,columns=['data'])
# wenjian.to_csv(r"E:\Class_exprience\Machine_Vision\9_class_design\App\wenjian.csv")
# wenjian.values[0][0]