from itertools import zip_longest

import numpy as np
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR

def compare_files(file1, file2):
    """Return the number of differing lines between two text files.

    Lines are compared positionally after splitting on line boundaries.
    If the files have different line counts, every extra line in the
    longer file counts as a difference.  (The previous implementation
    used ``zip``, which truncates at the shorter file and silently
    ignored trailing extra lines.)
    """
    with open(file1, 'r') as f1, open(file2, 'r') as f2:
        lines1 = f1.read().splitlines()
        lines2 = f2.read().splitlines()

    # Sentinel that can never equal a real line, so unmatched tail lines
    # always register as differences.
    missing = object()

    num_differences = 0
    for line1, line2 in zip_longest(lines1, lines2, fillvalue=missing):
        if line1 != line2:
            num_differences += 1

    return num_differences

def fizz_buzz_encode(i):
    """Map an integer to its fizz-buzz class label.

    Returns 3 for multiples of 15 ("fizzbuzz"), 2 for multiples of 5
    ("buzz"), 1 for multiples of 3 ("fizz"), and 0 otherwise.
    """
    divisible_by_three = i % 3 == 0
    divisible_by_five = i % 5 == 0
    if divisible_by_three and divisible_by_five:
        return 3
    if divisible_by_five:
        return 2
    if divisible_by_three:
        return 1
    return 0


def fizz_buzz_decode(i, prediction):
    """Turn a class label back into its printable fizz-buzz string.

    Class 0 means "print the number itself"; classes 1-3 map to the
    fixed fizz-buzz words.
    """
    if prediction == 0:
        return str(i)
    words = {1: "fizz", 2: "buzz", 3: "fizzbuzz"}
    return words[prediction]

NUM_DIGITS = 10   # bit width of the binary input encoding (10 bits covers 1..1023)
BATCH_SIZE = 128  # mini-batch size; the training set below has 100 samples, so each epoch is one batch

def binary_encode(i, num_digits):
    """Encode ``i`` as a numpy array of its low ``num_digits`` bits, MSB first."""
    bits = [(i >> shift) & 1 for shift in reversed(range(num_digits))]
    return np.array(bits)

# Training set: the binary encodings of 1..100 paired with their
# fizz-buzz class labels (LongTensor, as required by CrossEntropyLoss).
trX = torch.Tensor([binary_encode(n, NUM_DIGITS) for n in range(1, 101)])
trY = torch.LongTensor([fizz_buzz_encode(n) for n in range(1, 101)])

# Three-layer MLP classifier: NUM_DIGITS input bits -> 512 -> 256 -> 4
# class logits (no final softmax; CrossEntropyLoss below expects raw logits).
# NOTE(review): layer construction order also fixes the RNG draws for the
# initial weights, so keep the layers in this order.
model = torch.nn.Sequential(
    torch.nn.Linear(NUM_DIGITS, 512),
    torch.nn.ReLU(),
    torch.nn.Linear(512, 256),
    torch.nn.ReLU(),
    torch.nn.Linear(256, 4)
)

# Decide the training device once: prefer CUDA when it is available.
use_cuda = torch.cuda.is_available()
print('cuda: ', use_cuda)

device = torch.device("cuda" if use_cuda else "cpu")

model = model.to(device)
# Cross-entropy over the 4 fizz-buzz classes, Adam at lr=1e-3, and a step
# schedule that halves the learning rate every 2000 scheduler steps.
loss_fn = torch.nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
scheduler = StepLR(optimizer, step_size=2000, gamma=0.5)

# Print-once flag: reset at the top of every epoch so the loss line is
# emitted at most once per epoch (on the first batch of that epoch).
flg = 0

for epoch in range(5000):
    flg = 0
    for start in range(0, len(trX), BATCH_SIZE):
        end = start + BATCH_SIZE
        # Move the current mini-batch to the training device.
        # (With 100 samples and BATCH_SIZE=128 this is one batch per epoch.)
        batchX = trX[start:end].to(device)
        batchY = trY[start:end].to(device)

        y_pred = model(batchX)          # raw class logits, shape (N, 4)
        loss = loss_fn(y_pred, batchY)

        # Log learning rate and loss every 100 epochs, first batch only.
        if epoch % 100 == 0 and flg == 0:
            lr = scheduler.get_last_lr()[0]
            print("epoch", epoch, "lr", lr, "loss", loss.item())
            flg = 1

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Advance the LR schedule once per epoch (lr halves every 2000 epochs).
    scheduler.step()

# Evaluate on the same 1..100 range the model was trained on.
testX = torch.Tensor([binary_encode(n, NUM_DIGITS) for n in range(1, 101)]).to(device)

with torch.no_grad():
    testY = model(testX)

# Argmax over the 4 logits gives the predicted class for each number.
predicted_classes = testY.max(1)[1].cpu().data.tolist()

# Write one decoded fizz-buzz line per number, then diff against the
# reference file produced by the straightforward implementation.
file_path = 'output.txt'
with open(file_path, 'w') as file:
    for n, cls in zip(range(1, 101), predicted_classes):
        file.write(fizz_buzz_decode(n, cls) + '\n')

print("Number of differences: ", compare_files("simple.txt", "output.txt"))
