import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd 
import time

# Select the compute device: first CUDA GPU when available, otherwise CPU.
cuda_available = torch.cuda.is_available()
device = torch.device("cuda:0" if cuda_available else "cpu")
print(f"使用设备: {device}")

# Report the PyTorch / CUDA environment.
print(f"PyTorch Version: {torch.__version__}")
print(f"CUDA Available: {cuda_available}")
print(f"CUDA Version: {torch.version.cuda}")

# When a GPU is present, show its name and total memory in GiB.
if cuda_available:
    print(f"GPU Device: {torch.cuda.get_device_name(0)}")
    print(f"GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.2f} GB")

def generate_random_image(size):
    """Return a tensor of shape `size` with values drawn uniformly from [0, 1)."""
    return torch.rand(size)

def generate_random_seed(size):
    """Return a tensor of shape `size` sampled from the standard normal distribution."""
    return torch.randn(size)

class Discriminator(nn.Module):
    """Binary classifier scoring a flat 784-pixel image as real (1.0) or fake (0.0)."""

    def __init__(self):
        super().__init__()

        # 784 pixels -> 200 hidden units -> single sigmoid score.
        self.model = nn.Sequential(
            nn.Linear(784, 200),
            nn.LeakyReLU(0.02),
            nn.LayerNorm(200),
            nn.Linear(200, 1),
            nn.Sigmoid(),
        )

        # Binary cross-entropy pairs with the sigmoid output layer.
        self.loss_fn = nn.BCELoss()

        # Adam over all model parameters.
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.001)

        # Bookkeeping: number of training steps taken and sampled loss history.
        self.counter = 0
        self.progress = []

        # Move parameters to the globally selected device (GPU when available).
        self.to(device)

    def forward(self, inputs):
        """Score a batch of images; inputs are moved to the model's device first."""
        return self.model(inputs.to(device))

    def train(self, inputs, targets):
        """Run one optimization step toward `targets` and return the loss.

        NOTE(review): this shadows nn.Module.train(mode); calling D.train() /
        D.eval() for mode switching will not behave as usual.
        """
        outputs = self.forward(inputs)
        loss = self.loss_fn(outputs, targets)

        # Sample the loss every 10 steps; report progress every 10000 steps.
        self.counter += 1
        if self.counter % 10 == 0:
            self.progress.append(loss.item())
        if self.counter % 10000 == 0:
            print("counter: ", self.counter)

        # Standard backprop: clear gradients, backpropagate, apply the update.
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return loss

    def plot_progress(self):
        """Plot the sampled loss history on a fixed [0, 1] scale."""
        df = pd.DataFrame(self.progress, columns=['loss'])
        df.plot(ylim=(0, 1.0), figsize=(16, 8), alpha=0.1, marker='.',
                grid=True, yticks=(0, 0.25, 0.5, 0.75, 1.0))
        plt.show()


class Generator(nn.Module):
    def __init__(self):
        super().__init__()
        
        # 定义模型
        self.model = nn.Sequential(
            nn.Linear(100, 200),
            nn.LeakyReLU(0.02),

            nn.LayerNorm(200),

            nn.Linear(200, 784),
            nn.Sigmoid()
        )

        # 定义损失函数
        # self.loss_fn = nn.MSELoss()
        self.loss_fn = nn.BCELoss()  

        # 定义优化器
        # self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.01)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.001)

        # 定义计数器和进度
        self.counter = 0
        self.progress = []  

        # 将模型移到GPU
        self.to(device)

    def forward(self, inputs):
        # 运行模型
        outputs = self.model(inputs)
        return outputs
    
    def train(self, D, inputs, targets):
        # 计算网络输出  
        g_outputs = self.forward(inputs)
        # 计算判别器输出
        d_outputs = D.forward(g_outputs)
        # 计算损失
        loss = self.loss_fn(d_outputs, targets) 

        # 更新计数器和进度
        self.counter += 1
        if self.counter % 10 == 0:
            self.progress.append(loss.item())

        # 反向传播
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return loss 
    
    def plot_progress(self):
        df = pd.DataFrame(self.progress, columns=['loss'])
        df.plot(ylim=(0, 1.0), figsize=(16,8), alpha=0.1, marker='.', grid=True, yticks=(0, 0.25, 0.5, 0.75, 1.0))
        plt.show()  
        pass

class MnistDataset(Dataset):
    """MNIST samples read from a headerless CSV file.

    Each row is: label (column 0) followed by 784 pixel intensities in 0-255.
    """

    def __init__(self, file_path):
        # Load the whole CSV into memory; the file has no header row.
        self.data_df = pd.read_csv(file_path, header=None)

    def __len__(self):
        return len(self.data_df)

    def __getitem__(self, idx):
        """Return (label, image tensor scaled to [0, 1], one-hot target)."""
        # Class label (0-9).
        label = self.data_df.iloc[idx, 0]
        # One-hot target vector for the label.
        target = torch.zeros(10)
        target[label] = 1.0
        # Pixel values scaled from 0-255 down to 0-1.
        image_values = torch.FloatTensor(self.data_df.iloc[idx, 1:].values) / 255.0
        return label, image_values, target

    def plot_image(self, idx):
        """Render sample `idx` as a 28x28 image titled with its label."""
        arr = self.data_df.iloc[idx, 1:].values.reshape(28, 28)
        # Fixed typo: title previously read "laber".
        plt.title("label = " + str(self.data_df.iloc[idx, 0]))
        plt.imshow(arr, interpolation='none', cmap='Blues')
        plt.show()

# Load the dataset (headerless CSV: label followed by 784 pixel values per row).
mnist_dataset = MnistDataset('mnist_test_10000.csv')

D = Discriminator()
G = Generator()

# Time the training run.
start_time = time.time()

# One pass over the dataset: three GAN updates per real sample.
for label, image_data_tensor, target_tensor in mnist_dataset:
    image_data_tensor = image_data_tensor.to(device)

    # 1) Train the discriminator on a real image (target 1.0).
    D.train(image_data_tensor, torch.FloatTensor([1.0]).to(device))

    # 2) Train the discriminator on a generated image (target 0.0).
    #    BUG FIX: detach the GENERATOR OUTPUT, not the random seed — detaching
    #    the seed was a no-op, so this step backpropagated through G for nothing.
    D.train(G.forward(generate_random_seed(100).to(device)).detach(),
            torch.FloatTensor([0.0]).to(device))

    # 3) Train the generator to fool the discriminator (target 1.0).
    G.train(D, generate_random_seed(100).to(device),
            torch.FloatTensor([1.0]).to(device))

end_time = time.time()
print("训练时间：", end_time - start_time)

# Plot discriminator loss history.
D.plot_progress()
# Plot generator loss history.
G.plot_progress()

# Show a 2x3 grid of images sampled from the trained generator.
f, axarr = plt.subplots(2, 3, figsize=(16, 8))
for i in range(2):
    for j in range(3):
        # BUG FIX: the seed must live on the same device as G, and the output
        # must come back to the CPU before .numpy() (which fails on CUDA tensors).
        output = G.forward(generate_random_seed(100).to(device))
        img = output.detach().cpu().numpy().reshape(28, 28)
        axarr[i, j].imshow(img, interpolation='none', cmap='Blues')
        axarr[i, j].axis('off')
plt.show()

def clear_cuda_memory():
    """Release cached GPU memory back to the driver (no-op without CUDA)."""
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()

# Free cached GPU memory now that training and plotting are done.
clear_cuda_memory()










