
#

#线性回归
# import  numpy as np
# def total_error(b,w,points):
#     error = 0
#     for  i in range(len(points)):
#         x = points[i,0]
#         y = points[i,1]
#         error += y -w*x-b
#     return  error
# def gradient_step (b_current, w_current,points,learningrate):
#     b_gd = 0
#     w_gd = 0
#     for i in range(len(points)):
#         x = points[i, 0]
#         y = points[i, 1]
#         b_gd += 2*(w_current*x+b_current-y)
#         w_gd += 2*x*(w_current*x+b_current-y)
#     b_gd = b_gd/float(len(points))
#     w_gd = w_gd/float(len(points))
#     new_b = b_current - (learningrate*b_gd)
#     new_w = w_current - (learningrate*w_gd)
#     return new_b ,new_w
# def iter_gradient (b,w,points,learningrate,num_iter):
#     for i in range(num_iter):
#         b, w = gradient_step(b, w, points,learningrate)
#     return b, w
# def run():
#     points = np.genfromtxt("data.csv", delimiter=",")
#     learningrate = 0.0001
#     initial_b = 0
#     initial_w = 0
#     print('first b = {0},w= {1},error = {2}'.format(initial_b,initial_w,total_error(initial_b,initial_w,points)))
#     num_iter = 1000
#     b, w = iter_gradient(initial_b,initial_w,points,learningrate,num_iter)
#     print('ater b = {0}, w = {1} ,error = {2}'.format(b,w,total_error(b,w,points)))
# if __name__ == '__main__':
#     run()

# MNIST
# import torch
# from torch import nn
# from torch.nn import  functional as F
# from  torch import  optim
# import  torchvision
# from   utils import plot_image, plot_curve, one_hot
#
# batch_size = 512
# train_loader = torch.utils.data.DataLoader(
#     torchvision.datasets.MNIST('mnist_data', train=True, download=True,
#                                transform=torchvision.transforms.Compose([
#                                    torchvision.transforms.ToTensor(),
#                                    torchvision.transforms.Normalize(
#                                        (0.1307,), (0.3081,))
#                                ])),
#     batch_size=batch_size, shuffle=True)
#
# test_loader = torch.utils.data.DataLoader(
#     torchvision.datasets.MNIST('mnist_data/', train=False, download=True,
#                                transform=torchvision.transforms.Compose([
#                                    torchvision.transforms.ToTensor(),
#                                    torchvision.transforms.Normalize(
#                                        (0.1307,), (0.3081,))
#                                ])),
#     batch_size=batch_size, shuffle=False)
# x,y = next(iter(train_loader))
# print(x.shape,y.shape)
#
# class Net(nn.Module):
#     def __init__(self):
#         super (Net,self).__init__()
#         self.fc1 =nn.Linear(28*28,256)
#         self.fc2 = nn.Linear(256, 64)
#         self.fc3 = nn.Linear(64, 10)
#     def forward(self,x):
#         # x: [b, 1, 28, 28]
#         # h1 = ruel(xw1+b1)
#         x = F.relu(self.fc1(x))
#         # h2 = relu(h1w2+b2)
#         x = F.relu(self.fc2(x))
#         # h3 = h2w3+b3
#         x = self.fc3(x)
#
#         return x
# net = Net()
# optimzer = optim.SGD(net.parameters(),lr=0.01,momentum=0.9)
# train_loss = []
# for epoch in range(3):
#     for batch_index,(x,y) in enumerate(train_loader):
#         x = x.view(x.size(0),28*28)
#         out = net(x) # [b,10]
#         y_onehot = one_hot(y)
#         loss = F.mse_loss(out,y_onehot) #
#         optimzer.zero_grad()   #梯度清零
#         loss.backward()     #求梯度
#         optimzer.step()   #更新迭代
#         train_loss.append(loss.item())
#         if batch_index %10 ==0:
#             print(epoch,batch_index,loss.item())
# plot_curve(train_loss)
# total_correct = 0
# for x,y in test_loader:
#     x = x.view(x.size(0),28*28) #打平
#     out = net(x)
#     pred = out.argmax(dim = 1)
#     correct = pred.eq(y).sum().float().item()
#     total_correct += correct
# acc = total_correct/len(test_loader.dataset) #dataset
# print(acc)
# x, y = next(iter(test_loader))
# out = net(x.view(x.size(0), 28*28))
# pred = out.argmax(dim=1)
# plot_image(x, pred, 'test')

# import torch
# a = torch.tensor(1.3)
# b = torch.randn(2,3)
# print(a.type())
# print(b.shape)
# print(len(a.shape))
# print(a.dim())
# aa = torch.tensor([1.1,2.2,3.])
# bb = list(aa)
# print(bb[1].item())
# c = torch.rand_like(b)
# d = torch.randint(1,10,[3,3])
# print(d)
# e = torch.full([10],1) #10个1
# print(e)
# f = torch.arange(1,0,-0.1)
# print(f)
# g = torch.linspace(0,10,steps=3)
# print(g)
# h = torch.logspace(1,5,5)#10的次方
# print(h)
# i = torch.randn(4,3,28,28)
# j = i.index_select(0,torch.tensor([0,3])).shape
# print(j)
# k = torch.randn(3,4)
# mask = k.ge(0.5)
# print(mask)
# l = torch.masked_select(k,mask)
# print(l)

import  torch
# a = torch.randn(4,3,28,28)
# print(a.squeeze().shape)
# print(a.unsqueeze(0).shape)
# print(a.view(4,3*28*28).shape)
# b = torch.rand(1,3,1,1)
# # print(b)
# # c = b.expand(4,3,28,28)
# # print(c)
# print(a.permute(0,3,1,2).shape)
# print(b.repeat(4,1,28,28).shape)
# print(b.repeat(4,3,2,1).shape)
# print(a.transpose(0,1).contiguous().shape)
# a = torch.rand(4,32,8)
# b = torch.rand(5,32,8)
# c = torch.rand(4,32,8)
# print(torch.cat([a,b],dim=0).shape)
# print(torch.stack([a,c],dim=0).shape)
# aa ,bb = a.split([3,1],dim=0)
# print(aa.shape ,bb.shape)
# cc ,dd = a.chunk(2,dim=1)
# print(cc.shape, dd.shape)

#运算
import  torch
# a = torch.rand(3,4)
# b = torch.rand(4)
# c = torch.rand(4,3)
# print(torch.all(torch.eq(torch.matmul(a,c),a@c)))#eq 是对应位是否相等
#
# print(a+b,'\n',torch.add(a,b))
# a = torch.rand(4,3,28,64)
# b = torch.rand(4,3,64,32)
# c = torch.rand(4,1,64,32)
# print(torch.matmul(a,b).shape,torch.matmul(a,c).shape)
# d = torch.full([2,2],3)
# print(d.pow(2),d**2,d**0.5)
# print(torch.exp(d),torch.log(d))
# e= torch.tensor(3.14)
# print(e.floor(),e.ceil(),e.trunc(),e.frac(),e.round())
# print(a.max(),a.median(),a.mean())
# print(d.clamp(2),d.clamp(0,2))

#统计属性
import torch
# a = torch.rand(4,4)
# b  = torch.rand(4,4)
# print(a.max(dim=1))
# print(a)
# b = a.view(1,16)
# print(b)
# print(a.sum(),a.prod())
# print(a.argmax(),a.argmin())
# print(a.argmax(dim=1))
# print(a .topk(3,dim=1))
# print(a.topk(3,dim=1,largest=False))
# print(a.kthvalue(3))
# print(torch.eq(a,b))
#
# idx = a.topk(3,dim=1)
# idx = idx[1]
# label = torch.arange(10)+100
# print(idx)
# print(idx.long())
# print(torch.gather(label.expand(4,10),dim=1,index=idx))

# def vels(speed,turn):
#     return "currently:\tspeed %s\tturn %s " %(speed,turn)
# if __name__ == '__main__':
#     print(vels(3,4))
#
# #单层感知机
# import  torch
# from  torch.nn import functional as F
# x = torch.rand(1,10)
# # w = torch.rand(1,10,requires_grad=True)
# # out = torch.sigmoid(x@w.t())
# # loss = F.mse_loss(torch.ones(1,1),out)
# # loss.backward()
# # print(w.grad)
# #多输出感知机
# w = torch.rand(3,10)
# w.requires_grad_()
# out = torch.sigmoid(x@w.t())
# loss = F.mse_loss(out,torch.ones(1,3))
# loss.backward()
# print(w.grad)

#2d函数优化例子
import  torch
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from    torch import optim
# def himmelblau(x):
#     return  (x[0]**2 +x[1]-11)**2 + (x[0]+x[1]**2-7)**2
#画图
# x = np.arange(-6,6,0.1)
# y = np.arange(-6,6,0.1)
# x,y = np.meshgrid(x,y)
# print(x.shape )
# print(x)
# print(y.shape)
# print(y)
# z = himmelblau([x,y])
# print(z.shape)
# print(z)
# fig = plt.figure()
# axes = Axes3D(fig)
# axes.plot3D(x,y,z,'gray')
# # ax = fig.gca(projection = '3d')
# # ax.plot_surface(x,y,z)
# plt.show
#优化
# x = torch.tensor([0.,0.],requires_grad=True)
# optimizer = torch.optim.Adam([x],lr=1e-3)
# for step in range(20000):
#     pred = himmelblau(x)
#     optimizer.zero_grad()
#     pred.backward()
#     optimizer.step()
#     if step % 2000 == 0:
#         print('step {0}: x = {1}, f(x) = {2}'
#               .format(step, x.tolist(), pred.item()))
# import  torch
# from torch.nn import  functional as F
# x = torch.rand(1,784)
# w = torch.rand(10,784)
# logit = x@w.t()
# print(logit.shape[1])
# pred = F.softmax(logit,dim=1)
# pred_log = torch.log(pred)
# print(pred_log)
# print(pred_log.size())
# print(F.cross_entropy(logit,torch.tensor([4])))
# print(F.nll_loss(pred_log,torch.tensor([2]))) #等于0 0 1 0 0 0 0 0 0 0
# print(torch.tensor([3]).size())
# import  torch

# from torch import  nn
# m=nn.Linear(3, 4)
# print(m.weight)
# print(m.bias)

#数据集划分 train validation test

# import  numpy as np
# a = np.array([[-4,-2,4],[0,1,2],[1,0,1]])   #逆矩阵
# print(np.linalg.inv(a))
#
# #卷积层网络
#
# import torch
# from  torch import  nn
# from  torch.nn import  functional as F
# layer = nn.Conv2d(1,3,kernel_size=5,stride=2,padding=1)  #kernel:3x1x3x3  0表示没有padding padding = 0/1/2...
# # print(layer.weight)
# x = torch.rand(32,1,28,28)
# out = layer.forward(x)
# out2 = layer(x)  #推荐这个
# # print(layer.weight)
# # print(layer.bias)
# # print(out.shape)
# # print(out2.shape)
# layerpool = nn.MaxPool2d(2,stride=2)   #第一种池化降采样
# layerpool2 = nn.AdaptiveAvgPool2d([1,1])         #第二种池化 设置为自定义的尺寸
#
# out3 = layerpool2(x)
# print(out3.shape)
# out4 = F.interpolate(x,scale_factor=2,mode='nearest') #插值放大
# print(out4.shape)
# layerrelu = nn.ReLU(inplace=True)   #True节省内存空间
# out5 = layerrelu(out3)
# print(out5.shape)


#batchnorm
# x = torch.rand(100,16,784)
# y = torch.rand(100,16,7,7)
# layer = nn.BatchNorm1d(16)
# out = layer(x)
# print(layer.running_mean)
# print(layer.running_var)
# layer2 = nn.BatchNorm2d(16)
# out2 = layer2(y)
# print(out2.shape)
# print(layer2.running_mean)
# print(layer2.running_var)


#resnet
# import torch
# from  torch.utils.data import  DataLoader
# from torchvision import datasets
# from torchvision import  transforms
# def main():
#     bathsz = 32
#     cifar_train = datasets.CIFAR10('cifar',True,transform=transforms.Compose([
#         transforms.Resize((32,32)),
#         transforms.ToTensor()
#     ]),download=True)
#     cifar_train = DataLoader(cifar_train,batch_size=bathsz,shuffle=True)


#
# import  torch
# from  torch import  nn
# from torch.nn import  functional as F
#
# class Lenet5(nn.Module):
#
#     def __init__(self):
#         super(Lenet5, self).__init__()
#         self.conv_unit = nn.Sequential(
#             # x: b 3 32 32
#             nn.Conv2d(3,6,kernel_size=5,stride=1,padding=0),
#             nn.AvgPool2d(kernel_size=2,stride=2,padding=0),
#
#             nn.Conv2d(6,16,kernel_size=5, stride=1,padding=0),
#             nn.AvgPool2d(kernel_size=2,stride=2,padding=0),
#
#         )
#         self.fc = nn.Sequential(
#             nn.Linear(16*5*5,120),
#             nn.ReLU(),
#             nn.Linear(120,84),
#             nn.ReLU(),
#             nn.Linear(84,10)
#
#         )
#         # tmp = torch.randn(2,3,32,32)
#         # out = self.conv_unit(tmp)
#         # print("co:", out.shape)
#         self.criteon = nn.CrossEntropyLoss()
#
#
#     def forward(self,x):
#         batchsz = x.size(0)
#         x = self.conv_unit(x)
#         x = x.view(batchsz,16*5*5)
#         logits = self.fc(x)
#         # pred = self.criteon(logits,y)
#         return  logits
#
#
# def main():
#     net = Lenet5()
#     tmp = torch.randn(2, 3, 32, 32)
#     out = net(tmp)
#
# if __name__ == '__main__':
#     main()


#灰度
# import cv2
# image = cv2.imread('C:\\Users\\dcljkkl\\Desktop\\1.png')
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# cv2.imshow('a', gray)
# cv2.waitKey(0)
# cv2.imwrite('f.png', gray)
# print(gray.shape)
# size = gray.shape[::-1]  #size的转置（逆序为 宽x高）
# print(size)
# Quick sanity-check script: bind a constant and echo it plus two literal tags.
a = 1
print(a)
for tag in ('dcl', 'afsdfs'):
    print(tag)