import numpy as np
import torch
import matplotlib.pyplot as plt
import random
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
from sklearn.metrics import f1_score

# ---- hyperparameters and runtime configuration ----
batch_size = 4
learning_rate = 1e-3
max_epoch = 100
# Prefer GPU when available; batches (and the model) are moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Seed every RNG source used below (data noise, shuffling, weight init)
# so runs are reproducible.
seed = 100
random.seed(seed)
np.random.seed(seed)
torch.random.manual_seed(seed)
# NOTE(review): seeds only the current CUDA device; multi-GPU runs would need
# torch.cuda.manual_seed_all — confirm single-GPU assumption.
torch.cuda.manual_seed(seed)

def create_data():
    """Generate a toy two-class 2-D dataset.

    Creates 200 points: 100 centered at (1, 1) labeled 0 and 100 centered at
    (4, 4) labeled 1, with each coordinate perturbed by unit Gaussian noise
    so the classes are not perfectly separable.

    Returns:
        x1 (np.ndarray): first feature, shape (200,), float64.
        x2 (np.ndarray): second feature, shape (200,), float64.
        y (np.ndarray): class labels in {0, 1}, shape (200,), int32.
    """
    x1 = np.array([1.0] * 100 + [4.0] * 100, dtype=np.float64)
    x2 = np.array([1.0] * 100 + [4.0] * 100, dtype=np.float64)
    y = np.array([0] * 100 + [1] * 100, dtype=np.int32)

    # Unit-variance Gaussian noise around each cluster center.
    x1 += np.random.randn(x1.shape[0]) * 1.0
    x2 += np.random.randn(x2.shape[0]) * 1.0

    return x1, x2, y

def split_data(x1, x2, y, train_rate, val_rate):
    """Shuffle the dataset and cut it into train/validation/test partitions.

    Args:
        x1, x2: feature arrays, shape (n,).
        y: label array, shape (n,).
        train_rate: fraction of samples assigned to training.
        val_rate: fraction assigned to validation; the remainder is the test set.

    Returns:
        Nine arrays: (train_x1, train_x2, train_y,
                      val_x1, val_x2, val_y,
                      test_x1, test_x2, test_y).
    """
    n = x1.shape[0]
    order = np.arange(n)
    np.random.shuffle(order)

    n_train = int(n * train_rate)
    n_val = int(n * val_rate)

    # Index groups for each partition, taken from the shuffled order.
    train_idx = order[:n_train]
    val_idx = order[n_train:n_train + n_val]
    test_idx = order[n_train + n_val:]

    parts = []
    for idx in (train_idx, val_idx, test_idx):
        parts.extend((x1[idx], x2[idx], y[idx]))
    return tuple(parts)

# dataset and dataloader
# dataset and dataloader
class MyDataset(Dataset):
    """Wrap two 1-D feature arrays and a label array as a (n, 2) tensor dataset."""

    def __init__(self, x1, x2, y):
        super(MyDataset, self).__init__()
        feature_columns = [
            torch.tensor(x1, dtype=torch.float32),
            torch.tensor(x2, dtype=torch.float32),
        ]
        # Combine the two 1-D feature vectors column-wise: (n,) + (n,) -> (n, 2).
        self.x = torch.stack(feature_columns, dim=1)
        self.y = torch.tensor(y)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, index):
        return self.x[index], self.y[index]

# model
# model
class LogisticRegressionModel(nn.Module):
    """Logistic regression over 2 features: sigmoid(W @ x + b)."""

    def __init__(self):
        super(LogisticRegressionModel, self).__init__()
        self.fc = nn.Linear(2, 1)

    def forward(self, x):
        """Map a (batch, 2) input to per-sample probabilities of shape (batch,).

        BUG FIX: squeeze only the trailing feature dimension. The previous
        bare torch.squeeze(x) removed ALL size-1 dims, so a batch of size 1
        collapsed to a 0-d tensor and no longer matched the (batch,) target
        expected by BCELoss.
        """
        logits = self.fc(x)
        probs = torch.sigmoid(logits)
        return probs.squeeze(-1)

# train
# train
def train(model, train_dataloader, val_dataloader):
    """Train with BCE loss, validating each epoch and checkpointing the best model.

    Relies on module-level `learning_rate`, `max_epoch`, `device`, and the
    sibling `test()` function. The weights with the best validation macro-F1
    are saved to "logistic_regression_best_model.pt".

    Args:
        model: an nn.Module producing per-sample probabilities of shape (batch,).
        train_dataloader: yields (x, y) training batches.
        val_dataloader: yields (x, y) validation batches.
    """
    criterion = nn.BCELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    best_f1 = 0
    for epoch in range(max_epoch):
        # test() switches the model to eval mode every epoch, so re-enable
        # training mode at the top of each epoch (was set only once before).
        model.train()
        for step, batch in enumerate(train_dataloader):
            batch_x, batch_y = batch
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)
            pred_y = model(batch_x)
            # BCELoss requires a float target matching the prediction shape.
            loss = criterion(pred_y, batch_y.to(torch.float32))

            # BUG FIX: was `optimizer.zero_grad` (attribute access, never
            # called), so gradients accumulated across every step.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % 4 == 0:
                print("Epoch: {:d}, Step: {:d}, Loss: {:.4f}".format(epoch, step, loss.item()))
        f1 = test(model, val_dataloader)
        print("Validation F1: {:.4f}".format(f1))
        if f1 > best_f1:
            best_f1 = f1
            torch.save(model.state_dict(), "logistic_regression_best_model.pt")

def test(model, dataloader):
    """Evaluate the model on `dataloader` and return the macro-averaged F1 score.

    Puts the model in eval mode (the caller is responsible for restoring
    training mode). Uses the module-level `device`.
    """
    model.eval()
    golden_list = []
    predicted_list = []
    # Inference only: disable autograd to avoid building graphs / wasting memory.
    with torch.no_grad():
        for step, batch in enumerate(dataloader):
            batch_x, batch_y = batch
            batch_x = batch_x.to(device)
            batch_y = batch_y.to(device)
            pred_y = model(batch_x)
            # Threshold probabilities at 0.5 to get hard class labels.
            pred_y = pred_y >= 0.5
            predicted_list.extend(pred_y.cpu().numpy().tolist())
            golden_list.extend(batch_y.cpu().numpy().tolist())

    f1 = f1_score(golden_list, predicted_list, average="macro")
    return f1


if __name__ == '__main__':
    # Build the synthetic dataset and carve it into 80%/10%/10% splits.
    x1, x2, y = create_data()
    train_x1, train_x2, train_y, val_x1, val_x2, val_y, test_x1, test_x2, test_y = split_data(x1, x2, y, 0.8, 0.1)

    train_dataset = MyDataset(train_x1, train_x2, train_y)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_dataset = MyDataset(val_x1, val_x2, val_y)
    val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    # BUG FIX: move the model to `device`. train()/test() move every batch to
    # `device`, but the model previously stayed on the CPU, which crashes with
    # a device-mismatch error whenever CUDA is available.
    model = LogisticRegressionModel().to(device)
    train(model, train_loader, val_dataloader)
