import os
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from loguru import logger
import time
import numpy as np
import argparse

from dataset import MovieRating, item_process
from model import TwoTower
from preprocess import preprocess_data

def parse_args():
    """Parse command-line options for this script.

    Returns:
        argparse.Namespace with:
            test  (bool): run inference from a saved checkpoint instead of training.
            ntest (int):  number of random samples to score in test mode.
    """
    cli = argparse.ArgumentParser(description='Movie Recommendation')
    cli.add_argument('--test', action='store_true', default=False)
    cli.add_argument('--ntest', type=int, default=1)
    return cli.parse_args()



def carry_dict_data_to_device(data: dict, device):
    """Move every tensor value of *data* onto *device*.

    Mutates *data* in place (callers rely on this) and also returns it
    for convenient chaining.
    """
    for key in data:
        data[key] = data[key].to(device)
    return data


def train(epoch, dataloader, model, optimizer, criterion, device, logger):
    """Run one training epoch over *dataloader*.

    Args:
        epoch: current epoch index (used only in log messages).
        dataloader: yields dicts with keys 'user_info' and 'movie_info'
            (dicts of feature tensors) and 'rating' (target tensor).
        model: called as ``model(user_info, movie_info)``; expected to
            return a 3-tuple whose first element is the predicted score.
        optimizer: torch optimizer over ``model``'s parameters.
        criterion: loss function (MSE in this project).
        device: device the batches are moved onto.
        logger: object exposing ``.info(str)``.

    Returns:
        Mean training loss over the epoch, or ``None`` when the loader
        is empty (previously this raised ZeroDivisionError).
    """
    model.train()
    total_loss = []
    loader_size = len(dataloader)
    for i, data in enumerate(dataloader):
        user_info = carry_dict_data_to_device(data['user_info'], device)
        movie_info = carry_dict_data_to_device(data['movie_info'], device)

        target = data['rating'].to(device)

        optimizer.zero_grad()
        output, _, _ = model(user_info, movie_info)
        # Drop the trailing singleton dim so output matches target's shape
        # and MSELoss does not broadcast unexpectedly.
        output = output.squeeze()
        loss = criterion(output, target)
        loss.backward()
        total_loss.append(loss.item())
        optimizer.step()
        if (i+1) % 200 == 0:
            logger.info(f'Epoch[{epoch}][{i}/{loader_size}](train) Loss: {loss.item():.4f}')

    if not total_loss:
        # Empty dataloader: nothing was trained; avoid division by zero.
        logger.info(f'Epoch[{epoch}] dataloader is empty, no training performed')
        return None
    avg_loss = sum(total_loss) / len(total_loss)
    logger.info(f'Epoch[{epoch}] Total Train Loss: {avg_loss:.4f}')
    return avg_loss

# Lowest mean test MSE seen so far across epochs; evaluate() checkpoints
# the model each time this improves.  Starting at +inf guarantees the first
# evaluated epoch is always saved -- the previous hard-coded 100 silently
# skipped checkpointing whenever the initial loss exceeded 100.
min_mse_loss = float('inf')

@torch.no_grad()
def evaluate(epoch, dataloader, model, criterion, device, logger):
    """Run one evaluation epoch and checkpoint the model when it improves.

    Args:
        epoch: current epoch index (used only in log messages).
        dataloader: yields dicts with keys 'user_info' and 'movie_info'
            (dicts of feature tensors) and 'rating' (target tensor).
        model: called as ``model(user_info, movie_info)``; expected to
            return a 3-tuple whose first element is the predicted score.
        criterion: loss function (MSE in this project).
        device: device the batches are moved onto.
        logger: object exposing ``.info(str)``.

    Returns:
        Mean test loss for the epoch, or ``None`` when the loader is
        empty (previously this raised ZeroDivisionError).

    Side effects:
        Updates the module-level ``min_mse_loss`` and writes
        'best_model_params.pt' whenever the mean loss improves.
    """
    global min_mse_loss
    model.eval()
    total_loss = []
    loader_size = len(dataloader)
    for i, data in enumerate(dataloader):
        user_info = carry_dict_data_to_device(data['user_info'], device)
        movie_info = carry_dict_data_to_device(data['movie_info'], device)

        target = data['rating'].to(device)

        output, _, _ = model(user_info, movie_info)
        output = output.squeeze()
        loss = criterion(output, target)
        total_loss.append(loss.item())
        if (i+1) % 200 == 0:
            logger.info(f'Epoch[{epoch}][{i}/{loader_size}](test) Loss: {loss.item():.4f}')

    if not total_loss:
        # Empty dataloader: no mean to compute, nothing to checkpoint.
        logger.info(f'Epoch[{epoch}] dataloader is empty, no evaluation performed')
        return None

    total_mse_loss = sum(total_loss) / len(total_loss)
    if total_mse_loss < min_mse_loss:
        min_mse_loss = total_mse_loss
        torch.save(model.state_dict(), 'best_model_params.pt')

    logger.info(f'Epoch[{epoch}] Total Test Loss: {total_mse_loss:.4f}(min: {min_mse_loss:.4f})')
    return total_mse_loss

@torch.no_grad()
def inference(model, dataset, logger):
    """Score every sample in *dataset* and log true vs. predicted ratings.

    Each sample is a dict with 'user_info' / 'movie_info' (dicts of feature
    tensors) and a scalar 'rating' tensor.  Feature tensors are unsqueezed
    in place to form a batch of size 1 before calling the model.
    """
    model.eval()
    true_rating = []
    pred_rating = []
    for sample in dataset:
        user_info = sample['user_info']
        movie_info = sample['movie_info']
        # Prepend a batch dimension of 1 to every feature tensor (in place).
        for feats in (user_info, movie_info):
            for name, tensor in feats.items():
                feats[name] = tensor.unsqueeze(0)
        score, _, _ = model(user_info, movie_info)
        true_rating.append(sample['rating'].item())
        pred_rating.append(round(score.squeeze().item(), 1))
    logger.info(f'True rating: {true_rating}')
    logger.info(f'Pred rating: {pred_rating}')



def main():
    """Train/evaluate the two-tower rating model, or score random samples.

    Behavior branches on the module-level ``args`` namespace (assigned in
    the ``__main__`` guard below -- calling main() without that assignment
    raises NameError):

    * ``--test``: load the best checkpoint and run CPU inference on
      ``--ntest`` randomly chosen rows, then return.
    * otherwise: train for 5 epochs, checkpointing the best model via
      evaluate() and the final weights at the end.
    """
    # One log file per run; loguru expands {time} to a timestamp.
    logger.add("./log/{time}.log")
    # Build the preprocessed cache only on the first run.
    if os.path.exists('preprocessed_data.pt') is False:
        preprocess_data()

    # weights_only=False: the checkpoint stores arbitrary Python objects
    # ('merge_data' is indexed with .iloc below, so presumably a pandas
    # DataFrame), not just tensors.  NOTE(review): only safe for trusted
    # local files -- torch.load unpickles.
    data_processed = torch.load('preprocessed_data.pt', weights_only=False)
    logger.info(f'The processed data contains the following keys: {data_processed.keys()}')
    logger.info(f'The processed data statistics are: {data_processed["n_statistic"]}')


    # 'n_statistic' is splatted into the constructor -- presumably vocabulary
    # sizes for the embedding layers; TODO confirm against TwoTower's signature.
    model = TwoTower(
        n_name_token_dim=768,
        embed_dim=128,
        hidden_size=256,
        **data_processed['n_statistic']
    )

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    merge_data = data_processed['merge_data']

    if args.test:
        # Inference-only path: restore the best checkpoint (tensors only,
        # hence weights_only=True) and score a few random rows on CPU.
        model.load_state_dict(torch.load('best_model_params.pt', weights_only=True))
        model = model.to("cpu")
        merge_data_size = len(merge_data)
        # np.random.randint samples with replacement, so duplicates are possible.
        random_idx = np.random.randint(0, merge_data_size, args.ntest).tolist()
        item = merge_data.iloc[random_idx]
        dataset = MovieRating(item)
        inference(model, dataset, logger)
        return

    # 80/20 random train/test split of the merged ratings table.
    train_data, test_data = train_test_split(merge_data, test_size=0.2)
    train_dataset = MovieRating(train_data)
    test_dataset = MovieRating(test_data)

    loader_params = {
        'batch_size': 64,
        'pin_memory': True,
        'num_workers': 4
    }

    train_loader = DataLoader(train_dataset, shuffle=True, **loader_params)
    test_loader = DataLoader(test_dataset, shuffle=False, **loader_params)

    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    n_epochs = 5
    total_time = time.time()
    for i in range(n_epochs):
        logger.info(f'Epoch[{i}/{n_epochs}]')
        train_time = time.time()
        train(i, train_loader, model, optimizer, criterion, device, logger)
        logger.info(f'Epoch[{i}/{n_epochs}] Training time: {time.time()-train_time:.2f}s')
        test_time = time.time()
        evaluate(i, test_loader, model, criterion, device, logger)
        logger.info(f'Epoch[{i}/{n_epochs}] Testing time: {time.time()-test_time:.2f}s')
    logger.info(f'Total time: {time.time()-total_time:.2f}s')
    # evaluate() already saved the best checkpoint; this additionally keeps
    # the final-epoch weights.
    torch.save(model.state_dict(), 'last_model_params.pt')

if __name__ == '__main__':
    # args is intentionally module-level: main() reads it as a global.
    args = parse_args()
    main()

