import wandb
import numpy as np
import random



# Define training function that takes in hyperparameter values from `wandb.config` and uses them to train a model and return metric
def train_one_epoch(epoch, lr, bs):
  """Simulate one training epoch and return a synthetic (accuracy, loss) pair.

  `lr` and `bs` mirror a real trainer's signature but do not influence the
  fake metrics, which simply trend in the right direction as `epoch` grows
  plus a little random noise.
  """
  simulated_acc = 0.25 + ((epoch / 30) + (random.random() / 10))
  simulated_loss = 0.2 + (1 - ((epoch - 1) / 10 + random.random() / 5))
  return simulated_acc, simulated_loss

def evaluate_one_epoch(epoch):
  """Simulate one validation pass and return a synthetic (accuracy, loss) pair.

  Like `train_one_epoch`, the values are fabricated: accuracy climbs and loss
  falls with `epoch`, perturbed by random noise.
  """
  simulated_acc = 0.1 + ((epoch / 20) + (random.random() / 10))
  simulated_loss = 0.25 + (1 - ((epoch - 1) / 10 + random.random() / 6))
  return simulated_acc, simulated_loss

def main():
    """Run one sweep trial: read hyperparameters from `wandb.config`,
    simulate training/validation, and log per-epoch metrics.

    Bug fix: the original version left the train/eval loop commented out and
    logged only the hyperparameters, so `val_acc` — the metric the sweep is
    configured to maximize — was never logged and the sweep had nothing to
    optimize. The loop is restored below.
    """
    wandb.login()
    run = wandb.init()

    # Hyperparameter values come from the sweep controller via `wandb.config`
    # instead of being hard-coded. (The original read `batch_size` twice into
    # two variables; one read suffices.)
    lr = wandb.config.lr
    bs = wandb.config.batch_size
    epochs = wandb.config.epochs

    # Inclusive upper bound so all `epochs` epochs run; the original
    # `np.arange(1, epochs)` stopped one epoch short.
    for epoch in range(1, epochs + 1):
        train_acc, train_loss = train_one_epoch(epoch, lr, bs)
        val_acc, val_loss = evaluate_one_epoch(epoch)

        wandb.log({
            'epoch': epoch,
            'lr': lr,
            'bs': bs,
            'train_acc': train_acc,
            'train_loss': train_loss,
            'val_acc': val_acc,
            'val_loss': val_loss,
        })

if __name__ == '__main__':
    # Sweep configuration: random search over the hyperparameter grid below,
    # maximizing the validation accuracy each trial logs.
    parameter_space = {
        'batch_size': {'values': [16, 32, 64]},
        'epochs': {'values': [5, 10, 15]},
        'lr': {'values': [1, 10, 100]},
    }
    sweep_configuration = {
        'method': 'random',
        'name': 'sweep',
        'metric': {'goal': 'maximize', 'name': 'val_acc'},
        'parameters': parameter_space,
    }

    # Register the sweep with the project (name optional), then launch an
    # agent that executes `main` for 4 trials.
    sweep_id = wandb.sweep(sweep=sweep_configuration, project='my-first-sweep')
    wandb.agent(sweep_id, function=main, count=4)