import torch
import numpy as np

print()

# print("hello pytorch")
# print(torch.__version__)
#
# a = torch.Tensor(2, 3)
# print(a)
# print(a.shape)
# print(a.size())
# print(a.type())
#
# a = torch.LongTensor(2, 3)
# print(a.type())
# b = a.float()
# print(b.type())
#
# def describe(x):
#     print(x.type())
#     print(x.shape)
#     print(x)
#
# a = torch.rand(3, 3)
# b = torch.randn(3, 3)
# describe(a)
# describe(b)
#
# a = torch.zeros(2, 3)
# b = torch.ones(2, 3)
# describe(a)
# describe(b)
#
# a = torch.eye(3, 3)
# describe(a)
#
# a = torch.zeros(3, 3)
# for i in range(a.shape[0]):
#     a[i, i] = 1
#
# describe(a)
#
# a = torch.zeros(3, 3)
# a = torch.fill(a, 100)
# describe(a)
#
# a = torch.zeros(3, 3)
# a.fill_(10)
# describe(a)
#
# a = [1, 2, 3]
# b = torch.tensor(a)
# describe(b)
#
# a = torch.tensor([1, 1, 3])
# describe(a)
#
# a = np.array([1, 2, 3])
# a = torch.tensor(a)
# describe(a)
#
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# a = a.to(device)
#
# a = np.arange(10)
# a = torch.tensor(a, dtype=torch.int)
# describe(a)
#
# a = torch.arange(6).view(3, 2)
# print(a)
# b = torch.ones(3, 2) * 2
# print(b)
#
# # c = a * b
# c = torch.mul(a, b)
#
# print(c)

# x = torch.ones(3)
# y = torch.tensor([1, 2, 3], dtype=torch.float32)
# print(x)
# print(y)
# z = torch.dot(x, y)
# print(z)

# x = torch.arange(10).view(2, 5)
# x = x.to(torch.float32)
# y = torch.ones(5, 3)
# z = torch.mm(x, y)
# print(x)
# print(y)
# print(z)

# x = torch.arange(24).view(2, 3, 4)
# y = torch.arange(16).view(2, 4, 2)
# z = torch.bmm(x, y)
# print(z)
# print(z.shape)

# x = torch.arange(10)
# y = x.view(2, 5)
# y[0, 0] = 100
# print(x)

# x = torch.arange(30).view(2, 3, 5)
# y = torch.transpose(x, 1, 2)
# print(x)
# print(y)

# x = torch.arange(6).view(2, 1, 1, 3)
# print(x)
# y = torch.squeeze(x)
# print(y)

# x = torch.tensor([[1, 2], [3, 4], [5, 6]])
# x = torch.unsqueeze(x, dim=1)
# print(x)
# print(x.shape)

# A = torch.randn(3, 3)
# x = torch.ones(3)
# x = torch.unsqueeze(x, dim=1)
# y = torch.mm(A, x)
# print(y)
# print(y.shape)

# a = torch.randn(4, 4)
# b = a[1:3, 1:3]
# print(a)
# print(b)

# index = torch.tensor([0, 2])
# x = torch.randn(4, 4)
# y = torch.index_select(x, 0, index)
# print(x)
# print(y)

# x = torch.randn(4, 4)
# r = torch.tensor([1, 2])
# c = torch.tensor([2, 3])
# y = x[r, c]
# print(x)
# print(y)

# x = torch.randn(2, 3)
# y = torch.randn(4, 3)
# z = torch.cat((x, y), dim=1)
# print(x)
# print(y)
# print(z)
# B = 32
# img_feature = torch.randn(B, 2048)
# txt_feature = torch.randn(B, 768)
# f = torch.cat([img_feature, txt_feature], dim=1)
# print(f.shape)

# x = torch.randn(2, 3)
# y = torch.stack([x, x, x, x], dim=0)
# print(y.shape)

# x = torch.tensor([2], requires_grad=True, dtype=torch.float32)
# y = torch.tensor([3], requires_grad=True, dtype=torch.float32)
# z = x * y
# # dz/dx = y = 3, dz/dy = x = 2
#
# z.backward()
# print(x.grad)
# print(y.grad)

# Autograd demo: gradient of a mean-reduced polynomial w.r.t. its input.
# For y = (x + 2)(x + 5) + 3 we have dy/dx = 2x + 7, which is 9 at x = 1;
# the mean over the 4 elements scales each entry by 1/4, giving 2.25.
x = torch.ones(2, 2, dtype=torch.float32, requires_grad=True)

poly = (x + 2) * (x + 5) + 3
loss = poly.mean()

loss.backward()
print(x.grad)

# NOTE(review): torch is already imported at the top of the file; this
# re-import is redundant (harmless, but could be removed).
import torch
import time

# Matrix side length for the matmul benchmark below. A 10240x10240
# float32 matrix is ~400 MB, so the mm is heavy enough to time.
N = 10240

def bench(x, y):
    """Return the wall-clock seconds taken by one matmul of x @ y on x's device.

    CUDA kernels launch asynchronously, so on GPU tensors we synchronize
    *before* starting the clock (otherwise pending work, such as the
    host-to-device copies done by the caller, leaks into — or hides from —
    the measurement) and again *after* the matmul, so the timer covers the
    full computation rather than just the kernel launch.
    """
    if x.is_cuda:
        torch.cuda.synchronize()

    # perf_counter is a monotonic high-resolution clock, the right choice
    # for measuring intervals (time.time() can jump with wall-clock changes).
    t0 = time.perf_counter()

    z = torch.mm(x, y)
    if x.is_cuda:
        torch.cuda.synchronize()

    t1 = time.perf_counter()
    return t1 - t0

# Time the same N x N matmul on CPU, then on the best available device.
x = torch.randn(N, N)
y = torch.randn(N, N)
cpu_t = bench(x, y)

# Falls back to CPU when CUDA is absent, in which case both timings
# measure the same device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x, y = x.to(device), y.to(device)
gpu_t = bench(x, y)

print(cpu_t)
print(gpu_t)

























































































