import numpy as np
import torch
import  torch.nn.functional as fn
from torch import tensor, nn, cuda
from torch.nn import functional

# from PyCharmProject.pytorchgpuProjects.ImageGCN.model import MLPClassifier

# Scratch script exploring basic PyTorch tensor ops (rand, reshape, shape).
# Default to CUDA float tensors only when a GPU is actually present;
# the unconditional cuda.FloatTensor default crashes on CPU-only machines.
if cuda.is_available():
    torch.set_default_tensor_type(torch.cuda.FloatTensor)
# t3=torch.Tensor(2,3)  # allocates uninitialized storage (values are arbitrary, not 0)
# t1=torch.tensor([[0, 1, 1, 2],[1, 0, 2, 1]], dtype=torch.long)  # torch.Size([2, 4])
t1 = torch.rand(1, 2, 3)  # uniform random tensor, shape (1, 2, 3) — 6 elements
# print(t1.shape)
# t1=t1.squeeze(0)  # drops dim 0 only because its size is 1
# reshape returns a new tensor (or view); rebinding t1 is what changes its shape.
t1 = t1.reshape(3, 2)  # 6 elements -> (3, 2)
# torch.reshape(t1, (3, 2))  # functional form; result would need to be assigned
print(t1.shape)
# s=fn.sigmoid(t1)
# print(t1)
# print(s)
# s_tanh=fn.tanh(s)
# print(s_tanh)
# t1=t1[:,0]#切片-索引取值
# t1=t1.view(-1,3)#降维至二维
# print(t1.dtype)
# print(t1.logits)# 'Tensor' object has no attribute 'logits'
# print(fn.log_softmax(t1, dim=1))
# print(torch.log(t1))#求函数不改变维度，对每个元素应用函数
# mlp1=MLPClassifier(3,3,1)
# print(mlp1(t1))
# t1=t1.unsqueeze(1)
# print(t1.shape)
# print(torch.sort(t1,dim=1))
# t1_sm=fn.softmax(t1,1)
# print(t1_sm)
# print("最大值一维索引：",torch.argmax(t1))
# print("最大值:",t1.view(-1)[torch.argmax(t1)])#降维-为一列

# t1=torch.rand(2,1)
# # print(t1,t1.exp(),2*t1.exp())#取对数
# print(t1.__dict__)  # note: double underscores; `_dict_` is not an attribute
# x2=[tensor([[ 1.1108,  2.1974,  3.4145, -1.9743, -1.4355, -0.7595],
#         [ 3.4210,  4.9091,  2.0919, -2.5529, -1.9208, -2.2419],
#         [ 3.1305,  5.3193,  3.9172, -2.4626, -2.1018, -2.4932],
#         [ 3.0258,  2.3903, -0.2149, -1.8888, -1.3622, -0.9463],
#         [ 4.7503,  3.3249,  3.2370, -2.4487, -2.1267, -1.1225],
#         [ 2.1766,  1.8763,  2.2034, -1.3738, -1.8671, -1.3329]],
#        device='cuda:0')
#     ,tensor([[ 1.1108,  2.1974,  3.4145, -1.9743, -1.4355, -0.7595],
#         [ 3.4210,  4.9091,  2.0919, -2.5529, -1.9208, -2.2419],
#         [ 3.1305,  5.3193,  3.9172, -2.4626, -2.1018, -2.4932],
#         [ 3.0258,  2.3903, -0.2149, -1.8888, -1.3622, -0.9463],
#         [ 4.7503,  3.3249,  3.2370, -2.4487, -2.1267, -1.1225],
#         [ 2.1766,  1.8763,  2.2034, -1.3738, -1.8671, -1.3329]],
#        device='cuda:0')]
# # x2=x2[0].mm(x2[1].t())#乘法与转置
# print("list-len:",len(x2),type(x2))#list长度,变量类型
# print("tensor.size():",x2[0].size())#查看tensor形状
# # for i in x2:#逐个输出含多个tensor的list
# #     print(i,i.device, i.dtype,i.size(), sep='\n')#查看本身，装置cuda/cpu,数据类型
# # x2=torch.cat((x2[0],x2[1]), dim=0).float().unsqueeze(dim=0)#half()#将list转换为tensor
# # print(x2,x2.device, x2.dtype,x2.size(), sep='\n')#查看本身，装置cuda/cpu,数据类型
# # print(torch.stack((x2,x2),dim=0).shape)#拼接2个x2
# # print(torch.stack((x2),1).shape)#拼接2个x2
# print("---------------------------------------------")
# #
# # x2=x2[0]#取第一个元素
# # print("x2.sum(dim=1)",x2.sum(dim=1))
# # # print(x2,x2.device, x2.dtype,x2.size(), sep='\n')#查看本身，装置cuda/cpu,数据类型
# #
# # line1=nn.Linear(6,6)
# # Q=line1(x2)#Q=XW
# # print(Q,Q.device, Q.dtype,Q.size(), sep='\n')#查看本身，装置cuda/cpu,数据类型
# #
# # t=torch.rand(3,6,6)
# # print(t,t.device, t.dtype,t.size(), sep='\n')#查看本身，装置cuda/cpu,数据类型
#
# #tensor拼接升维-start
# # 假设是时间步T1
# T1 = torch.tensor([[1, 2, 3],
#              [4, 5, 6],
#              [7, 8, 9]])
# # 假设是时间步T2
# T2 = torch.tensor([[10, 20, 30],
#              [40, 50, 60],
#              [70, 80, 90]])
# print(torch.stack((T1,T2),dim=0).shape)
# print(torch.stack((T1,T2),dim=1).shape)
# print(torch.stack((T1,T2),dim=2).shape)
# # print(torch.stack((T1,T2),dim=3).shape)
# print("--------------------------------------------")
# #tensor拼接升维-end
#
# list1=[]
# list1.append(x2[0])
# print(list1)

