import torch
import numpy as np
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter


def generate_linearly_correlated_vectors(m, n, base_vector, factor_range=(0.1, 1.0), noise_level=0.01, seed=None):
    """Generate m vectors of length n that are noisily proportional to base_vector.

    Each row is ``base_vector * factor + noise`` where ``factor`` is drawn
    uniformly from ``factor_range`` and ``noise`` is i.i.d. Gaussian with
    standard deviation ``noise_level``.

    Args:
        m: number of vectors (rows) to generate.
        n: dimensionality of each vector; expected to match len(base_vector).
        base_vector: 1-D array-like of length n every row is correlated with.
        factor_range: (low, high) bounds for the uniform scaling factor.
        noise_level: standard deviation of the additive Gaussian noise.
        seed: optional seed for the global NumPy RNG, for reproducibility.

    Returns:
        np.ndarray of shape (m, n).
    """
    if seed is not None:
        np.random.seed(seed)

    # Vectorized: draw one scaling factor per row and the whole noise matrix
    # in a single call instead of looping over rows in Python.
    factors = np.random.uniform(factor_range[0], factor_range[1], size=(m, 1))
    noise = np.random.normal(0, noise_level, size=(m, n))
    return np.asarray(base_vector) * factors + noise


def generate_random_vectors(m, n, seed=None):
    """Return an (m, n) matrix of uniform [0, 1) random values.

    Args:
        m: number of rows.
        n: number of columns.
        seed: optional seed for the global NumPy RNG, for reproducibility.

    Returns:
        np.ndarray of shape (m, n).
    """
    if seed is not None:
        np.random.seed(seed)
    vectors = np.random.rand(m, n)
    return vectors


def gen_2cls_dataset(num, dim, base_vector, factor_range, noise_level=0.01, seed=None):
    """Build a balanced two-class dataset.

    The first half of the rows are noisy scalar multiples of ``base_vector``
    (label 1); the second half are uniform random vectors (label 0).

    Args:
        num: requested total sample count; rounded down to an even number so
            the two classes stay balanced.
        dim: dimensionality of each sample.
        base_vector: 1-D array of length dim used for the positive class.
        factor_range: (low, high) uniform range for the scaling factor.
        noise_level: std-dev of Gaussian noise for the positive class.
        seed: optional RNG seed, forwarded to both generators.

    Returns:
        (dataset, labels): arrays of shape (2*(num//2), dim) and (2*(num//2),).
    """
    m = num // 2

    true_data = generate_linearly_correlated_vectors(m, dim, base_vector, factor_range=factor_range, noise_level=noise_level, seed=seed)
    false_data = generate_random_vectors(m, dim, seed=seed)
    dataset = np.vstack((true_data, false_data))

    # Bug fix: labels must match the actual row count (2*m), not `num` —
    # the two differ when `num` is odd, which left dataset/labels misaligned.
    labels = np.zeros(2 * m)
    labels[:m] = 1
    return dataset, labels


class MyDataset(Dataset):
    """Map-style dataset pairing feature vectors with scalar labels.

    Both inputs are converted once to float32 tensors at construction time,
    so indexing is a cheap tensor lookup.
    """

    def __init__(self, vectors, labels):
        super().__init__()
        self.vectors = torch.tensor(vectors, dtype=torch.float32)
        self.labels = torch.tensor(labels, dtype=torch.float32)

    def __len__(self):
        # One sample per feature row.
        return self.vectors.shape[0]

    def __getitem__(self, item):
        # Returns (feature_vector, label) for the given index.
        return self.vectors[item], self.labels[item]


class MyModel(nn.Module):
    """Small MLP for binary classification of 10-dimensional vectors.

    Architecture: 10 -> 32 -> 10 -> 1 with ReLU activations and a sigmoid
    head, so the forward pass yields one probability in (0, 1) per sample.
    """

    def __init__(self):
        super().__init__()
        layers = [
            nn.Linear(10, 32),
            nn.ReLU(),
            nn.Linear(32, 10),
            nn.ReLU(),
            nn.Linear(10, 1),
            nn.Sigmoid(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, inputs):
        # Drop the trailing singleton dimension: (batch, 1) -> (batch,).
        probs = self.model(inputs)
        return probs.squeeze(-1)


def train():
    """Train the MLP on a synthetic two-class dataset and log to TensorBoard.

    Builds matched train/test splits from the same base vector, runs
    `max_epoch` epochs of Adam + BCE training, and records per-iteration
    train loss plus per-epoch test loss/accuracy.
    """
    # --- experiment configuration ---
    seed = None
    num, dim = 20000, 10
    factor_range = (0.1, 1.0)
    noise_level = 0.5

    thr = 0.5  # probability threshold for predicting class 1

    lr = 0.02
    max_epoch = 100
    batch_size = 256
    weight_decay = 1e-5

    tb_writer = SummaryWriter(comment='_2cls')

    # Use the same base_vector for train and test so both splits come from
    # the same underlying distribution.
    base_vector = np.random.rand(dim)

    raw_train_data, raw_train_label = gen_2cls_dataset(num=num, dim=dim, base_vector=base_vector, factor_range=factor_range, noise_level=noise_level, seed=seed)
    train_data = MyDataset(raw_train_data, raw_train_label)
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)

    raw_test_data, raw_test_label = gen_2cls_dataset(num=num, dim=dim, base_vector=base_vector, factor_range=factor_range, noise_level=noise_level, seed=seed)
    test_data = MyDataset(raw_test_data, raw_test_label)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)

    model = MyModel()

    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    criterion = torch.nn.BCELoss()

    # Log ~10 times per epoch; max(1, ...) avoids a modulo-by-zero when the
    # loader has fewer than 10 batches.
    log_interval = max(1, len(train_loader) // 10)

    for epoch in range(max_epoch):
        model.train()
        for i, (vector, label) in enumerate(train_loader):
            out = model(vector)
            loss = criterion(out, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # (Removed the per-batch `pred_cls` / `i_right_num` computation
            # that was never read — dead work on every training step.)
            iters = epoch * len(train_loader) + i
            if i % log_interval == 0:
                print(f"Epoch {epoch}, Iteration {i}: Loss = {loss.item():.6f}")
                tb_writer.add_scalar('train/iter_loss', loss.item(), iters)

        # --- evaluation on the held-out set ---
        model.eval()
        right_sum = 0
        loss_sum = 0
        total_samples = 0
        with torch.no_grad():
            for vector, label in test_loader:
                out = model(vector)
                loss = criterion(out, label)

                # Use a distinct name — the original shadowed the configured
                # `batch_size`. The last batch may be smaller, so weight the
                # mean BCE loss by the actual sample count.
                n_samples = label.size(0)
                loss_sum += n_samples * loss.item()
                pred_cls = torch.where(out > thr, 1, 0)
                right_sum += torch.sum(pred_cls == label).item()
                total_samples += n_samples

        avg_loss = loss_sum / total_samples
        epoch_acc = right_sum / total_samples
        tb_writer.add_scalar('test/epoch_loss', avg_loss, epoch)
        tb_writer.add_scalar('test/epoch_acc', epoch_acc, epoch)
        print(f"Epoch {epoch}: Test Accuracy = {epoch_acc}, Test Loss = {avg_loss:.6f}")

    tb_writer.close()


if __name__ == '__main__':
    # Script entry point: run the full training/evaluation loop.
    train()
