#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File          :    demo.py    
@Contact       :    LJL959@QQ.COM
@License       :    (C)Copyright 2020-2021, Liugroup-NLPR-CASIA
@Modify Time   :    2020/12/10 9:16        
@Author        :    LiuJiaolong
@Version       :    1.0
@Description    :   None                
"""
import torch
import numpy as np
import torchvision.models as models
import torch.nn.functional as F
from D2L.MyFirstDemoWithFMRIData.network import Net
# Paths to the training data and the trained model checkpoint.
# Raw strings avoid the invalid escape sequences ("\S", "\d", "\m", ...)
# that non-raw Windows paths trigger (DeprecationWarning today, error later);
# the resulting string values are byte-identical to the originals.
data_dir = r'D:\Study\PycharmProjects\D2L\MyFirstDemoWithFMRIData\data/trainData'
model_file = r'D:\Study\PycharmProjects\D2L\MyFirstDemoWithFMRIData\model\model.pth'
# dataset_dir = r'D:\Study\PycharmProjects\D2L\MyFirstDemoWithFMRIData\data/trainData'
N = 6  # sample count constant (not used by the visible code below)
all_test_data = []  # filled with one (116, 150) tensor per sample by test_demo()
# Packed samples: rows 0-115 hold 116 nodes x 150 features;
# row 116, column 0 holds the class label (see test_demo's slicing).
data_with_label = np.load(data_dir + '/drug_health_data.npy')


def test_demo():
    """Load the trained model and run inference on the packed fMRI samples.

    Iterates over the module-level ``data_with_label`` array, builds a 4-D
    input tensor of shape (1, 1, 116, 150) and feeds it to the network,
    then prints the tensor shape and the softmax output's numpy shape.

    Side effects: appends one tensor per sample to the module-level
    ``all_test_data`` list and prints to stdout. Returns ``None``.
    """
    model = Net()
    # strict=False (was passed positionally) tolerates missing/unexpected
    # keys between the checkpoint and the current Net definition.
    model.load_state_dict(torch.load(model_file), strict=False)
    model.eval()

    new_data = None
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        for batch_idx, data in enumerate(data_with_label):
            # Rows 0-115 are the node-feature matrix; row 116 col 0 is the
            # label (unused here — the original computed a per-sample loss
            # and discarded it, so that dead work is removed).
            single_data = torch.Tensor(data[:116])
            all_test_data.append(single_data)

            # Expand (116, 150) -> (1, 1, 116, 150) for the network.
            # NOTE(review): this always expands all_test_data[0], i.e. the
            # FIRST sample, not the current one — every iteration (and the
            # final prediction) therefore uses sample 0. Preserved as-is;
            # confirm whether all_test_data[-1] was intended.
            temp_data = torch.unsqueeze(all_test_data[0], 0)
            new_data = torch.unsqueeze(temp_data, 0)

        print('all_data', new_data.shape)
        out = model(new_data)
        out = F.softmax(out, dim=1)
        out = out.data.cpu().numpy()
        print(out.shape)
        # for idx in range(6):
        #     print(out[idx, 0], out[idx, 1])


####################################################


####################################################


# Run the inference demo only when executed as a script (not on import).
if __name__ == '__main__':
    test_demo()










