import pandas as pd
import torch
import time
from torch.utils.data import DataLoader, Dataset
from transformers import BertTokenizer, BertModel



def load_data():
    """Read the MovieLens-1M ``.dat`` files into three DataFrames.

    Returns:
        tuple: ``(user_data, movie_data, rating_data)`` DataFrames with
        named columns; the ``::``-separated files have no header row.
    """
    # The "::" multi-char separator forces the python engine.
    read_kwargs = dict(sep="::", header=None, engine="python")

    movie_data = pd.read_csv(
        "./data/ml-1m/movies.dat",
        names=['movie_id', 'name', 'genre'],
        **read_kwargs,
    )
    user_data = pd.read_csv(
        "./data/ml-1m/users.dat",
        names=['user_id', 'gender', 'age', 'occupation', 'ZIP'],
        **read_kwargs,
    )
    rating_data = pd.read_csv(
        "./data/ml-1m/ratings.dat",
        names=['user_id', 'movie_id', 'rating', 'timestamp'],
        **read_kwargs,
    )
    return user_data, movie_data, rating_data


def preprocess_user_data(user_data: pd.DataFrame):
    """Encode categorical user columns as integer labels, in place.

    ``gender`` is mapped M->0 / F->1 and ``ZIP`` codes are label-encoded
    in first-occurrence order (first distinct ZIP seen becomes 0, etc.).

    Args:
        user_data: DataFrame with at least ``gender`` and ``ZIP`` columns.
            Mutated in place.

    Returns:
        tuple: ``(user_data, maps)`` where ``maps`` holds the ``gender``
        and ``ZIP`` value-to-label dictionaries used for the encoding.
    """
    gender_map = {'M': 0, 'F': 1}
    user_data["gender"] = user_data["gender"].map(gender_map)

    # dict.fromkeys keeps first-occurrence order, so labels match the
    # original incremental-counter loop exactly.
    zip_map = {z: i for i, z in enumerate(dict.fromkeys(user_data['ZIP']))}
    user_data['ZIP'] = user_data['ZIP'].map(zip_map)

    maps = {
        'gender': gender_map,
        'ZIP': zip_map
    }
    return user_data, maps


def preprocess_movie_data(movie_data: pd.DataFrame):
    """Label-encode movie genres and embed movie names as BERT tensors.

    Genre labels start at 1 (0 is left unused, presumably reserved for
    padding in the downstream embedding — verify against the model).
    The trailing " (YYYY)" year suffix (7 characters) is stripped from
    each title before it is embedded.

    Args:
        movie_data: DataFrame with ``movie_id``, ``name`` and ``genre``
            columns; ``genre`` holds '|'-separated genre strings.

    Returns:
        tuple: ``(processed_copy, maps)`` where the copy has ``genre``
        replaced by lists of integer labels and ``name`` replaced by CLS
        embedding tensors, plus the genre-to-label map.
    """
    genre_map = {}
    next_label = 1
    processed = movie_data.copy()

    for row, genre_field in enumerate(movie_data['genre'].values):
        labels = []
        for genre in genre_field.split('|'):
            if genre not in genre_map:
                genre_map[genre] = next_label
                next_label += 1
            labels.append(genre_map[genre])
        processed.at[row, 'genre'] = labels
        # Drop the trailing " (YYYY)" year suffix from the title.
        processed.at[row, 'name'] = movie_data.at[row, 'name'][:-7]

    titles = processed['name'].values.tolist()
    start = time.time()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    title_tensors = movie_name_to_tensor(titles, device)
    print(f"Time taken to convert movie name to tensor: {time.time() - start:.4f}s")
    processed['name'] = title_tensors

    maps = {
        'genre': genre_map
    }
    return processed, maps

def preprocess_data():
    """Run the full preprocessing pipeline and persist the result.

    Loads the raw MovieLens data, label-encodes the user and movie
    tables, merges everything with the ratings, and saves the merged
    frame plus vocabulary sizes and encoding maps to
    ``preprocessed_data.pt`` via ``torch.save``.
    """
    user_data, movie_data, rating_data = load_data()
    processed_user_data, user_dict = preprocess_user_data(user_data)
    processed_movie_data, movie_dict = preprocess_movie_data(movie_data)

    # Vocabulary sizes for the downstream embedding layers.  The "+ 1"
    # terms make room for index 0 (ids are used directly as indices).
    n_statistic = {
        'n_user': max(processed_user_data["user_id"]) + 1,
        'n_gender': len(user_dict['gender']),
        'n_zip': len(user_dict['ZIP']),
        'n_age': max(processed_user_data["age"]) + 1,
        'n_job': max(processed_user_data["occupation"]) + 1,
        'n_movie': max(processed_movie_data["movie_id"]) + 1,
        'n_genre': len(movie_dict['genre']) + 1,
    }

    # Attach user features, then movie features, to every rating row.
    merge_data = pd.merge(
        pd.merge(rating_data, processed_user_data, on='user_id'),
        processed_movie_data,
        on='movie_id',
    )

    save_data = {
        'merge_data': merge_data,
        'n_statistic': n_statistic,
        'user_dict': user_dict,
        'movie_dict': movie_dict,
        'processed_user_data': processed_user_data,
        'processed_movie_data': processed_movie_data
    }
    torch.save(save_data, 'preprocessed_data.pt')
    print("Data saved successfully")


class MovieNameDataset(Dataset):
    """Tokenizes movie names into fixed-length BERT model inputs.

    Each item is a dict with ``input_ids`` and ``attention_mask`` 1-D
    LongTensors of exactly ``max_length`` elements, right-padded with 0
    (BERT's [PAD] id) and with padding masked out.
    """

    def __init__(self, movie_names: list[str], tokenizer, max_length: int):
        self.movie_names = movie_names  # raw title strings
        self.tokenizer = tokenizer      # HuggingFace-style callable tokenizer
        self.max_length = max_length    # fixed output sequence length

    def __len__(self):
        return len(self.movie_names)

    def __getitem__(self, idx):
        # Let the tokenizer truncate to max_length itself so the final
        # [SEP] token is preserved.  (Previously truncation happened at
        # the model max and a hard slice input_ids[:max_length] dropped
        # [SEP] on over-long titles.)
        encoded_input = self.tokenizer(
            self.movie_names[idx],
            truncation=True,
            max_length=self.max_length,
            return_tensors='pt',
        )
        input_ids = encoded_input['input_ids'].squeeze(0)
        attention_mask = encoded_input['attention_mask'].squeeze(0)

        padding_length = self.max_length - len(input_ids)
        if padding_length > 0:
            # Right-pad with [PAD]=0 and extend the mask with zeros.
            input_ids = torch.cat(
                [input_ids, torch.zeros(padding_length, dtype=torch.long)], dim=0)
            attention_mask = torch.cat(
                [attention_mask, torch.zeros(padding_length, dtype=torch.long)], dim=0)

        return {"input_ids": input_ids, "attention_mask": attention_mask}


@torch.no_grad()
def movie_name_to_tensor(movie_name: list[str], device, batch_size: int = 8, num_worker: int = 4, max_length: int = 32):
    """Embed each movie name as its BERT [CLS] vector.

    Args:
        movie_name: List of title strings to embed.
        device: torch device the model and batches run on.
        batch_size: DataLoader batch size.
        num_worker: DataLoader worker-process count.
        max_length: Fixed token-sequence length per title.

    Returns:
        list[torch.Tensor]: One CPU tensor of shape ``(hidden_size,)``
        per input title, in input order.
    """
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    # BUG FIX: the model was previously loaded with
    # ``max_position_embeddings=max_length`` and
    # ``ignore_mismatched_sizes=True``; the pretrained (512, 768)
    # position-embedding matrix then mismatched the shrunken config and
    # was silently re-initialised at random, corrupting the embeddings.
    # The stock config is correct: sequences of max_length <= 512 fit.
    model = BertModel.from_pretrained("bert-base-uncased").to(device)
    model.eval()

    dataset = MovieNameDataset(movie_name, tokenizer, max_length)
    dataloader = DataLoader(dataset, batch_size=batch_size, pin_memory=True, num_workers=num_worker)
    tensor_name = []
    for batch in dataloader:
        batch = {key: value.to(device) for key, value in batch.items()}
        outputs = model(**batch)
        # [CLS] is position 0 of the last hidden state: (batch, hidden).
        cls_embedding = outputs.last_hidden_state[:, 0, :]
        # One (hidden,) CPU tensor per title, preserving input order.
        tensor_name.extend(cls_embedding.cpu().unbind(0))
    return tensor_name



if __name__ == "__main__":
    # NOTE(review): leftover debug scaffolding — test_text is never used
    # and the preprocessing pipeline is not invoked here; presumably this
    # should call preprocess_data().  TODO confirm the intended entry point.
    test_text = "Toy Story"