import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm

from eval.evaluation import evaluate
from models.tgat import TGAT
from utils.dataset import LocationTweetDataset, load_geoloc, tweets_collate_fn


def location_regularization(preds, labels, class_lat_median, class_lon_median):
    """Placeholder regularization term added to the training loss.

    Currently a no-op — it always returns ``0.0`` and ignores every argument.
    The signature accepts the predictions, labels and per-class median
    coordinates so the call site in ``train`` stays unchanged once a real
    penalty is implemented (presumably a distance-based penalty using the
    class lat/lon medians — TODO confirm intended semantics).

    Returns:
        float: always ``0.0``.
    """
    return 0.0


def train(model, optimizer, dataloader, graph, graph_feat, class_lat_median, class_lon_median):
    """Run one training epoch and print loss / accuracy / distance metrics.

    Args:
        model: TGAT model; must expose ``loss(pred, target)``.
        optimizer: torch optimizer over ``model.parameters()``.
        dataloader: yields ``(users, tweets, tweets_num, labels)`` batches.
        graph, graph_feat: social graph and node features passed to the model
            (assumed already on the correct device — TODO confirm).
        class_lat_median, class_lon_median: per-class median coordinates used
            by ``evaluate`` to turn class predictions into distances.
    """
    model.train()
    total_loss = 0.0
    batch_num = len(dataloader)
    y_preds = []
    y_labels = []
    for users, tweets, tweets_num, labels in tqdm(dataloader):
        users = users.cuda()
        tweets = tweets.cuda()
        optimizer.zero_grad()
        pred = model(graph, graph_feat, users, tweets, tweets_num)
        loss = model.loss(pred, torch.as_tensor(labels, dtype=torch.long).cuda())
        # Manual L2 penalty on all parameters (0.0005 * sum of 2-norms).
        # torch.norm already returns a scalar, so no extra sum is needed.
        l2_loss = 0
        for param in model.parameters():
            l2_loss = l2_loss + torch.norm(param, 2)
        loss = loss + l2_loss * 0.0005 + location_regularization(pred, labels, class_lat_median, class_lon_median)
        loss.backward()

        # .item() frees the autograd graph; accumulating the tensor itself
        # would keep every batch's graph alive and leak GPU memory.
        total_loss += loss.item()
        optimizer.step()

        # Detach so stored predictions don't retain gradient history.
        y_preds.extend(pred.detach())
        y_labels.extend(labels)
    print('Training Loss: {}'.format(total_loss / batch_num))
    y_preds = torch.stack(y_preds, dim=0)
    acc, median, mean = evaluate(y_preds, y_labels, class_lat_median, class_lon_median)

    print(' - {header:12} loss: {loss: 8.5f}, acc@161: {acc:3.3f} %, '
          'median: {median:3.3f}, mean: {mean:3.3f}'
          .format(header=f"({'__Train__'})", loss=total_loss / batch_num, acc=100 * acc,
                  median=median, mean=mean))


# NOTE(review): this function shadows the builtin ``eval``; renaming it (e.g.
# to ``evaluate_split``) would be cleaner but is kept for the existing callers.
def eval(model, dataloader, graph, graph_feat, class_lat_median, class_lon_median):
    """Evaluate the model on one dataloader and print loss / accuracy / distances.

    Runs under ``torch.no_grad`` so no autograd state is built.

    Args:
        model: TGAT model exposing ``loss(pred, target)``.
        dataloader: yields ``(users, tweets, tweets_num, labels)`` batches.
        graph, graph_feat: social graph and node features passed to the model.
        class_lat_median, class_lon_median: per-class median coordinates used
            by ``evaluate`` to compute distance errors.
    """
    model.eval()
    total_loss = 0.0
    batch_num = len(dataloader)
    y_preds = []
    y_labels = []
    with torch.no_grad():
        for users, tweets, tweets_num, labels in tqdm(dataloader):
            users = users.cuda()
            tweets = tweets.cuda()
            pred = model(graph, graph_feat, users, tweets, tweets_num)
            loss = model.loss(pred, torch.as_tensor(labels, dtype=torch.long).cuda())
            # .item() keeps the running loss as a plain float instead of
            # accumulating GPU tensors batch after batch.
            total_loss += loss.item()

            y_preds.extend(pred)
            y_labels.extend(labels)
        y_preds = torch.stack(y_preds, dim=0)
        acc, median, mean = evaluate(y_preds, y_labels, class_lat_median, class_lon_median)

        print(' - {header:12} loss: {loss: 8.5f}, acc@161: {acc:3.3f} %, '
              'median: {median:3.3f}, mean: {mean:3.3f}'
              .format(header=f"({'__Test__'})", loss=total_loss / batch_num, acc=100 * acc,
                      median=median, mean=mean))


def run(config):
    """Build datasets, model and optimizer from *config*, then train/evaluate.

    Trains for ``config['epoches']`` epochs, printing test and validation
    metrics after every epoch.
    """
    dataset_name = config['dataset']

    # -------------- Loading dataset -----------------
    splits = ('train', 'test', 'valid')
    datasets = {
        split: LocationTweetDataset('../dataset', dataset_name, suffix=split)
        for split in splits
    }
    # Only the training split is shuffled.
    loaders = {
        split: DataLoader(datasets[split],
                          batch_size=config['batch_size'],
                          shuffle=(split == 'train'),
                          collate_fn=tweets_collate_fn)
        for split in splits
    }

    class_lat_median, class_lon_median = load_geoloc('../dataset', dataset_name)
    graph = datasets['train'].graph
    graph_feat = datasets['train'].graph_feat

    # -------------- Loading model -----------------
    config['d_user_feat'] = graph_feat.shape[1]
    model = TGAT(config=config).cuda()
    optimizer = optim.Adam(params=model.parameters(), lr=config['lr'])

    # -------------- Training model -----------------
    for epoch in range(1, config['epoches'] + 1):
        print('------------- Epoch {}/{} -------------'.format(epoch, config['epoches']))
        train(model, optimizer, loaders['train'], graph, graph_feat, class_lat_median, class_lon_median)
        eval(model, loaders['test'], graph, graph_feat, class_lat_median, class_lon_median)
        eval(model, loaders['valid'], graph, graph_feat, class_lat_median, class_lon_median)



if __name__ == '__main__':
    # Number of output classes (one per location cluster) for each dataset.
    dataset_config = {
        'cmu': 129,
        'twitter-us': 256,
        'twitter-world': 930,
    }

    dataset = 'cmu'
    run({
        'input_type': 'network',
        'dataset': dataset,
        'd_model': 256,
        'd_output': dataset_config[dataset],
        'lr': 0.001,
        'batch_size': 16,
        'epoches': 100,
    })
