import numpy as np
import pandas as pd
import torch
from torch import nn
from d2l import torch as d2l

# Load the raw data file: tab-separated, reading columns 0 through 5.
# NOTE(review): the original comment claimed only columns 0-2 were read,
# but usecols actually selects six columns (0..5).
data = pd.read_csv(r"AM.dat", usecols=[0, 1, 2, 3, 4, 5], sep="\t")
m, n = data.shape  # m = total number of rows, n = total number of columns
# NOTE(review): head(int(1 * m)) takes ALL rows, although the original
# comment said "first 75%" — the train and test sets therefore overlap.
train_X = data.head(int(1 * m)).values
# train_X = train_X/np.max(train_X)

test_X = data.tail(int(0.25 * m)).values  # last 25% of the rows used as test data

# Row-wise L2 norm of the training data (computed but not used below).
train_norm = np.linalg.norm(train_X, axis=1, keepdims=True)

# Build the label data from the sphere radius.
# radius1 = 6409.1759
radius1 = 6450
crossmos = 2330 / radius1  # second target value — presumably a cross-product magnitude scaled by the radius; TODO confirm
Amo = 15999.135
train_X[:, 3:] = train_X[:, 3:] / radius1  # scale last three columns by the sphere radius
train_X[:, :3] = train_X[:, :3] / Amo  # scale first three columns by Amo
radius = 1  # after scaling, the target radius is 1
train_y = np.ones((np.size(train_X, 0), 2), dtype=np.double) * [radius, crossmos]
test_y = np.ones((np.size(test_X, 0), 2), dtype=np.double) * [radius, crossmos]


def load_array(data_arrays, batch_size, is_train=True):
    """Construct a PyTorch data iterator over the given tensors.

    Args:
        data_arrays: tuple of tensors sharing the same first dimension.
        batch_size: number of samples per yielded batch.
        is_train: shuffle the samples each epoch when True.

    Returns:
        A ``torch.utils.data.DataLoader`` wrapping the tensors.
    """
    # Import locally: the module-level name `data` is the pandas DataFrame
    # loaded above, so the original `data.TensorDataset` resolved to the
    # wrong object (AttributeError on the DataFrame).
    from torch.utils.data import DataLoader, TensorDataset

    dataset = TensorDataset(*data_arrays)
    # Fixed bugs: `Dataloader` was a typo (no such attribute), and the
    # parameter was misspelled `in_train` while the body used `is_train`,
    # which raised NameError on every call.
    return DataLoader(dataset, batch_size, shuffle=is_train)


# NOTE(review): `train_my1` below is dead code, disabled by wrapping it in a
# module-level string literal; `train_my3` is the maintained variant.
# Kept verbatim for reference — consider deleting.
'''
def train_my1(net,train_X,train_y,test_X,test_y,num_epochs,lr,device):
    """用GPU训练模型（在第6章定义）"""

    def init_weights(m):
        if type(m) == nn.Linear or type(m) == nn.Conv2d:
            nn.init.xavier_uniform_(m.weight)

    net.apply(init_weights)
    print('training on', device)
    net.to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    #loss = nn.MSELoss()
    loss = nn.L1Loss(reduction='mean')

    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
                        legend=['train loss', 'train acc', 'test acc'])
    timer, num_batches = d2l.Timer(), len(train_y)

    for epoch in range(num_epochs):
        # 训练损失之和，训练准确率之和，样本数
        metric = d2l.Accumulator(3)
        net.train()
        for i in range(len(train_y)):
            X = torch.tensor(train_X[i,:])
            y = torch.tensor(train_y[i])
            X = X.to(torch.float)
            y = y.to(torch.float)
            timer.start()
            optimizer.zero_grad()
            #X, y = X.to(device), y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)


            l.backward()
            print(net[0].state_dict())
            print(net[1].state_dict())
            optimizer.step()
            with torch.no_grad():
                metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])
            timer.stop()
            train_l = metric[0] / metric[2]
            train_acc = metric[1] / metric[2]
            if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (train_l, train_acc, None))

        print(net[0].state_dict())
        print(net[1].state_dict())
'''


def train_my3(net, train_X, train_y, test_X, test_y, num_epochs, lr, device):
    """Train ``net`` sample-by-sample with SGD and mean L1 loss.

    Adapted from the d2l chapter-6 GPU training loop.  Each row of
    ``train_X``/``train_y`` is fed individually (effective batch size 1),
    with an optimizer step after every sample.

    Args:
        net: model to train (an ``nn.Sequential`` in this script).
        train_X, train_y: training inputs and targets, indexable per row.
        test_X, test_y: accepted for interface symmetry but unused here
            (the evaluation code at the bottom is commented out).
        num_epochs: number of passes over the training data.
        lr: SGD learning rate.
        device: target device. NOTE(review): the ``.to(device)`` transfer of
            X and y is commented out, so computation runs where the data lives.
    """

    def init_weights(m):
        # Xavier-initialize every linear/conv submodule before training.
        if type(m) == nn.Linear or type(m) == nn.Conv2d:
            nn.init.xavier_uniform_(m.weight)

    net.apply(init_weights)
    print('training on', device)
    net.to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    # loss = nn.MSELoss()
    loss = nn.L1Loss(reduction='mean')

    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
                            legend=['train loss', 'train acc', 'test acc'])
    timer, num_batches = d2l.Timer(), len(train_y)

    for epoch in range(num_epochs):
        # Accumulates: sum of training loss, sum of training accuracy, sample count.
        metric = d2l.Accumulator(3)
        net.train()
        for i in range(len(train_y)):
            X = torch.tensor(train_X[i, :])
            y = torch.tensor(train_y[i])
            X = X.to(torch.float)
            y = y.to(torch.float)
            timer.start()
            optimizer.zero_grad()
            # X, y = X.to(device), y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)
            # l = y_hat - y
            # l = y - y_hat
            # Prints "current loss / current epoch / current i"; the loss is
            # scaled by 6409 — presumably back to km via the sphere radius;
            # TODO confirm against radius1 above.
            print('当前损失：', l * 6409, '当前epoch:', epoch, '当前i:', i)

            l.backward()
            # print(net[0].state_dict())
            optimizer.step()
            with torch.no_grad():
                # NOTE(review): d2l.accuracy is a classification metric; its
                # value on this regression target is likely not meaningful.
                metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])
            timer.stop()
            train_l = metric[0] / metric[2]
            train_acc = metric[1] / metric[2]
            if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (train_l, train_acc, None))

        print(net[0].state_dict())
        k = 0
        '''test_acc = evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch + 1, (None, None, test_acc))
    print(f'loss{train_l:.3f},train acc {train_acc:.3f},'
          f'test acc {test_acc:.3f}')
    print(f'{metric[2] * num_epochs / timer.sum():.1f} example/sec '
          f' on {str(device)}')'''


def train_my4(net, train_X, train_y, test_X, test_y, num_epochs, lr, batch_size, device):
    """Train ``net`` with gradient accumulation over ``batch_size`` samples.

    Like ``train_my3``, but ``optimizer.step()`` runs only after the
    gradients of ``batch_size`` consecutive single-sample losses have
    accumulated (backward is called per sample without zeroing in between).

    Args:
        net: model to train.
        train_X, train_y: training inputs and targets, indexable per row.
        test_X, test_y: accepted for interface symmetry but unused here.
        num_epochs: number of passes over the training data.
        lr: SGD learning rate.
        batch_size: number of samples whose gradients accumulate per step.
        device: target device (the per-sample transfer is commented out).

    NOTE(review): gradients of a trailing partial batch (fewer than
    ``batch_size`` samples at the end of an epoch) are discarded by the
    ``zero_grad`` at the start of the next epoch.
    """

    def init_weights(m):
        # Xavier-initialize every linear/conv submodule before training.
        if type(m) == nn.Linear or type(m) == nn.Conv2d:
            nn.init.xavier_uniform_(m.weight)

    net.apply(init_weights)
    print('training on', device)
    net.to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    # loss = nn.MSELoss()
    loss = nn.L1Loss(reduction='mean')

    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
                            legend=['train loss', 'train acc', 'test acc'])
    timer, num_batches = d2l.Timer(), len(train_y)

    for epoch in range(num_epochs):
        # Accumulates: sum of training loss, sum of training accuracy, sample count.
        metric = d2l.Accumulator(3)
        net.train()
        j = 0  # samples accumulated since the last optimizer step
        optimizer.zero_grad()
        for i in range(len(train_y)):
            X = torch.tensor(train_X[i, :])
            y = torch.tensor(train_y[i])
            X = X.to(torch.float)
            y = y.to(torch.float)
            timer.start()

            # X, y = X.to(device), y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)
            # l = y_hat - y
            # l = y - y_hat
            # Prints "current loss / current epoch / current i"; the loss is
            # scaled by 6409 — presumably back to km; TODO confirm.
            print('当前损失：', l * 6409, '当前epoch:', epoch, '当前i:', i)

            l.backward()
            j = j + 1
            if j == batch_size:
                j = 0
                # print(net[0].state_dict())
                optimizer.step()
                optimizer.zero_grad()
                # Metrics/animator only update once per accumulated batch,
                # using the last sample's loss and prediction.
                with torch.no_grad():
                    metric.add(l * X.shape[0], d2l.accuracy(y_hat, y), X.shape[0])
                timer.stop()
                train_l = metric[0] / metric[2]
                train_acc = metric[1] / metric[2]
                if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                    animator.add(epoch + (i + 1) / num_batches,
                                 (train_l, train_acc, None))

        print(net[0].state_dict())
        k = 0


def train_my_cross(net, train_X, train_y, test_X, test_y, num_epochs, lr, batch_size, device):
    """Train the cross-product network with a two-term L1 objective.

    The network's forward pass is expected to return two scalar-valued
    outputs (``y_hat[0]`` and ``y_hat[1]`` — in this script,
    ``CenteredLayer_cross`` returns a list of two norms).  Each is compared
    against its target with ``nn.L1Loss(reduction='none')`` and the two
    losses are summed.  Gradients accumulate for ``batch_size`` samples
    before each optimizer step.

    Args:
        net: model to train.
        train_X, train_y: training inputs and 2-column targets, indexable per row.
        test_X, test_y: accepted for interface symmetry but unused here.
        num_epochs: number of passes over the training data.
        lr: SGD learning rate.
        batch_size: number of samples whose gradients accumulate per step.
        device: target device (the per-sample transfer is commented out).

    NOTE(review): like train_my4, gradients of a trailing partial batch are
    discarded by the ``zero_grad`` at the start of the next epoch.
    """

    def init_weights(m):
        # Xavier-initialize every linear/conv submodule before training.
        if type(m) == nn.Linear or type(m) == nn.Conv2d:
            nn.init.xavier_uniform_(m.weight)

    net.apply(init_weights)
    print('training on', device)
    net.to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    # loss = nn.MSELoss()
    loss = nn.L1Loss(reduction='none')

    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
                            legend=['train loss', 'train acc', 'test acc'])
    timer, num_batches = d2l.Timer(), len(train_y)

    for epoch in range(num_epochs):
        # Accumulator kept from the template; never updated in this variant.
        metric = d2l.Accumulator(3)
        net.train()
        j = 0  # samples accumulated since the last optimizer step
        optimizer.zero_grad()
        for i in range(len(train_y)):
            X = torch.tensor(train_X[i, :])
            y = torch.tensor(train_y[i, :])
            X = X.to(torch.float)
            y = y.to(torch.float)
            timer.start()

            # X, y = X.to(device), y.to(device)
            y_hat = net(X)

            # Separate L1 terms for the two outputs, summed into one scalar.
            l1 = loss(y_hat[0], y[0])
            l2 = loss(y_hat[1], y[1])
            l = l1 + l2
            # l = [l1,l2]
            # l.requires_grad_(True)

            # l = y_hat - y
            # l = y - y_hat
            # Prints "current loss / current epoch / current i".
            print('当前损失：', l, '当前epoch:', epoch, '当前i:', i)
            # if l.grad_fn is None:
            #    raise RuntimeError('张量没有梯度函数')
            l.backward()
            # l.sum().backward()
            # print(l.retain_grad)
            j = j + 1
            if j == batch_size:
                j = 0
                # print(net[0].state_dict())
                optimizer.step()
                optimizer.zero_grad()
        print(net[0].state_dict())
        k = 0


# Custom PyTorch layers.
class my_dense1(nn.Module):
    """Subtract a learnable 3-vector offset from the input.

    The offset starts at zero, so the layer is initially the identity.
    """

    def __init__(self):
        super().__init__()
        self.params = nn.Parameter(torch.zeros(3))

    def forward(self, x):
        return x - self.params


class my_dense2(nn.Module):
    """Apply a learnable 3x3 matrix to the first three input entries.

    The weight starts as the identity; the forward pass also prints the
    current weight (debug behavior kept from the original).
    """

    def __init__(self):
        super().__init__()
        self.params = nn.Parameter(torch.eye(3))

    def forward(self, x):
        print(self.params[:])
        vec = torch.tensor([x[0], x[1], x[2]]).to(torch.float32)
        return torch.matmul(self.params[:], vec)


class my_dense3(nn.Module):
    """Affine map on the first three inputs: W @ x[:3] - b.

    Initialized as the identity (W = I, b = 0), so the layer initially
    passes its first three inputs through unchanged.
    """

    def __init__(self):
        super().__init__()
        self.weight = nn.Parameter(torch.eye(3))
        self.bias = nn.Parameter(torch.zeros(3))

    def forward(self, x):
        vec = torch.tensor([x[0], x[1], x[2]]).to(torch.float32)
        rotated = torch.matmul(self.weight, vec)
        return rotated - self.bias


class my_dense4(nn.Module):
    """Fixed 3x3 linear map followed by a learnable bias subtraction.

    The matrix is a hard-coded constant (presumably a previously fitted
    rotation/scaling); only the bias is trainable.  Construction prints
    the bias parameter (debug behavior kept from the original).
    """

    def __init__(self):
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(3))
        print(type(self.bias))
        print(self.bias)

    def forward(self, x):
        vec = torch.tensor([x[0], x[1], x[2]]).to(torch.float32)
        # Frozen transform taken from the original code; not a parameter.
        fixed = torch.tensor([[0.9826, -0.0030, -0.0338],
                              [-0.0041, 0.9671, -0.0096],
                              [-0.0299, -0.0117, 1.0434]])
        return torch.matmul(fixed, vec) - self.bias


class CenteredLayer(nn.Module):
    """Reduce the input along its leading axis to a Euclidean norm."""

    def __init__(self):
        super().__init__()

    def forward(self, X):
        # Equivalent to sqrt(sum of squares) over the first dimension.
        return torch.sqrt((X ** 2).sum(dim=0))


class my_dense_cross(nn.Module):
    """Affine-correct the last three inputs and append a cross product.

    For a 6-vector x, computes v = W @ (x[3:6] - b) and returns the
    6-vector [v, x[0:3] x v].  W starts as the identity and b at zero.
    """

    def __init__(self):
        super().__init__()
        self.weight = nn.Parameter(torch.eye(3))
        self.bias = nn.Parameter(torch.zeros(3))

    def forward(self, x):
        shifted = torch.tensor([x[3], x[4], x[5]]) - self.bias[:]
        shifted = shifted.to(torch.float32)
        corrected = torch.matmul(self.weight[:], shifted)
        crossed = torch.cross(torch.tensor([x[0], x[1], x[2]]), corrected)
        return torch.cat([corrected, crossed], dim=0)


class CenteredLayer_cross(nn.Module):
    """Return the norms of the first and last three entries of a 6-vector."""

    def __init__(self):
        super().__init__()

    def forward(self, X):
        # NOTE: returns a plain Python list of two scalar tensors (not a
        # stacked tensor) — downstream code indexes it as y_hat[0]/y_hat[1].
        head_norm = torch.sqrt((X[:3] ** 2).sum(dim=0))
        tail_norm = torch.sqrt((X[3:] ** 2).sum(dim=0))
        return [head_norm, tail_norm]


# NOTE(review): dead code disabled as a module-level string; kept verbatim.
'''net = my_dense1()
print(net)
net = my_dense2()
print(net)

# 构建神经网络模型
mix_net = nn.Sequential(
    my_dense1(),
    my_dense2(),
    CenteredLayer(),
)
'''
# Build the network: learnable affine/cross layer followed by the norm layer.
mix_net = nn.Sequential(
    my_dense_cross(),
    CenteredLayer_cross(),
)
# rain_iter = zip(train_X,train_y)
# test_iter = zip(test_X,test_y)
# lr, num_epochs = 0.0001, 3
# lr, num_epochs = 0.0000001,1000#my_dense3
# train_my3(mix_net, train_X, train_y, test_X, test_y, num_epochs, lr, d2l.try_gpu())
lr, num_epochs, batch_size = 0.0001, 100, 10
train_my_cross(mix_net, train_X, train_y, test_X, test_y, num_epochs, lr, batch_size, d2l.try_gpu())

k = 0  # NOTE(review): leftover debug sentinel; serves no purpose.