import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np

# Reproducibility: pin both RNGs so every run produces the same curves.
torch.manual_seed(1)
np.random.seed(1)

LR_G = 0.0001       # generator learning rate
LR_D = 0.0001       # discriminator learning rate
BATCH_SIZE = 64     # paintings per training batch
N_IDEAS = 5         # dimension of the random "idea" vector fed to G

# Number of x-points making up one painting ("componets" kept for
# compatibility with the rest of the file).
ART_COMPONETS = 15
# One identical x-grid on [-1, 1] per painting in a batch: (BATCH_SIZE, ART_COMPONETS).
PAINT_POINTS = np.tile(np.linspace(-1, 1, ART_COMPONETS), (BATCH_SIZE, 1))


# print(PAINT_POINTS[0])


# plt.plot(PAINT_POINTS[0], 2 * np.power(PAINT_POINTS[0], 2) + 1, c='#74BCFF', lw=3, label='upper bound')    #2 * x^2 + 1
# plt.plot(PAINT_POINTS[0], 1 * np.power(PAINT_POINTS[0], 2) + 0, c='#FF9359', lw=3, label='lower bound')    #   x^2
# plt.legend(loc='upper right')           #标签位置
# plt.show()


def artist_work(batch_size=None, paint_points=None):
    """Produce a batch of 'real' paintings for the discriminator.

    Each painting is a quadratic curve a * x^2 + (a - 1) with a drawn
    uniformly from [1, 2), so every curve lies between the lower bound
    x^2 (a = 1) and the upper bound 2 * x^2 + 1 (a = 2).

    Args:
        batch_size: number of curves; defaults to the module-level BATCH_SIZE.
        paint_points: x-grid of shape (batch_size, n_points); defaults to the
            module-level PAINT_POINTS.

    Returns:
        torch.FloatTensor of shape (batch_size, n_points).
    """
    if batch_size is None:
        batch_size = BATCH_SIZE
    if paint_points is None:
        paint_points = PAINT_POINTS
    # One coefficient per painting, broadcast over the x-grid columns.
    a = np.random.uniform(1, 2, size=batch_size)[:, np.newaxis]
    paints = a * np.power(paint_points, 2) + (a - 1)
    return torch.from_numpy(paints).float()


# a=np.random.uniform(1,2,size=BATCH_SIZE)[:,np.newaxis]
# # b = np.random.uniform(1,2,size=BATCH_SIZE)
# # print(a)
# paints = a * np.power(PAINT_POINTS,2) + (a-1)
# # print(paints)

# Width of the single hidden layer shared by both networks.
_HIDDEN_UNITS = 128

# Generator: random "idea" vector (N_IDEAS,) -> painting (ART_COMPONETS,).
_g_layers = [
    nn.Linear(N_IDEAS, _HIDDEN_UNITS),
    nn.ReLU(),
    nn.Linear(_HIDDEN_UNITS, ART_COMPONETS),
]
G = nn.Sequential(*_g_layers)

# Discriminator: painting (ART_COMPONETS,) -> score in (0, 1),
# where values near 1 mean "looks like a real artist's work".
_d_layers = [
    nn.Linear(ART_COMPONETS, _HIDDEN_UNITS),
    nn.ReLU(),
    nn.Linear(_HIDDEN_UNITS, 1),
    nn.Sigmoid(),
]
D = nn.Sequential(*_d_layers)

# Independent Adam optimizers so G and D can be stepped separately.
optimizer_g = torch.optim.Adam(G.parameters(), lr=LR_G)
optimizer_d = torch.optim.Adam(D.parameters(), lr=LR_D)

plt.ion()

# Only needed to guard log(0) if a log-based GAN loss is used; unused with
# the MSE loss below, kept for easy switching.
eps = 1e-6

# Discriminator targets. They must be shaped (BATCH_SIZE, 1) to match D's
# output: a (BATCH_SIZE,) target would make MSELoss broadcast the two
# tensors to (BATCH_SIZE, BATCH_SIZE) and compute a meaningless loss.
# Real paintings should score 1, forgeries should score 0.
real_expected_output = torch.ones((BATCH_SIZE, 1))
fake_expected_output = torch.zeros((BATCH_SIZE, 1))

# MSE (least-squares-GAN style) instead of the original log loss: simpler
# and fast enough for this toy problem.
criterion = nn.MSELoss()

for step in range(10000):
    # A batch of genuine paintings from the artist.
    real_data = artist_work()
    # D's score for real data; training pushes this toward 1.
    d_real_output = D(real_data)

    # Random ideas (BATCH_SIZE, N_IDEAS) fed to the generator.
    g_fake_input = torch.randn(BATCH_SIZE, N_IDEAS)
    # G's forged paintings.
    g_fake_output = G(g_fake_input)
    # D's score for the forgeries, with gradients flowing back into G.
    d_fake_output = D(g_fake_output)

    # G's objective: make D score its forgeries as real (target 1).
    # MSELoss signature is (input, target).
    g_loss = criterion(d_fake_output, real_expected_output)

    optimizer_g.zero_grad()
    g_loss.backward()
    optimizer_g.step()

    # D's objective: real -> 1, fake -> 0. Detach G's output so D's update
    # does not backpropagate into G (G already received its update from
    # g_loss above); this also removes the need for retain_graph and avoids
    # recomputing G's forward pass.
    d_loss = (criterion(d_real_output, real_expected_output)
              + criterion(D(g_fake_output.detach()), fake_expected_output))
    optimizer_d.zero_grad()
    d_loss.backward()
    optimizer_d.step()

    # Periodic progress report (scalars, not raw tensors).
    if step % 200 == 0:
        print(f"step {step}: g_loss={g_loss.item():.6f}, d_loss={d_loss.item():.6f}")
