import os
from time import time
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import (
    Dataset,
    DataLoader
)
from rich.console import Console
from rich.traceback import install
from warnings import filterwarnings
from utils import (
    DEVICE,
    Logger,
    jieba_cut
)

install()                 # Enable rich's pretty tracebacks globally
filterwarnings('ignore')  # Suppress all Python warnings for cleaner output
console = Console()       # Shared rich console used for styled printing below

class LyricsDataset(Dataset):
    """Sliding-window next-token dataset over a tokenized lyrics corpus.

    Each sample is an ``(x, y)`` pair of index sequences of length
    ``num_chars``, where ``y`` is ``x`` shifted one token to the right
    (standard next-token language-modeling targets).
    """

    # Initialize with token indices and sequence length
    def __init__(self, corpus_idx: list[int], num_chars: int, device: torch.device=DEVICE):
        self.device = device                             # Device tensors are moved to in __getitem__
        self.corpus_idx = corpus_idx                     # Tokenized corpus as vocabulary indices (ints)
        self.num_chars = num_chars                       # Sequence length per training sample
        self.word_count = len(self.corpus_idx)           # Total tokens in corpus
        self.number = self.word_count // self.num_chars  # Number of samples exposed via __len__

    # Return total number of training sequences
    def __len__(self) -> int:
        return self.number

    # Calculate sequence boundaries with safety checks
    def __getitem__(self, idx: int):
        # NOTE(review): `idx` is used directly as the start offset (not
        # `idx * num_chars`), so consecutive samples overlap and only the
        # first `number` start positions are ever visited — confirm this
        # windowing is intended rather than disjoint chunks.
        # Clamp so both x and the shifted y stay fully inside the corpus.
        start = min(max(idx, 0), self.word_count - self.num_chars - 1)
        end = start + self.num_chars
        # Get input sequence and target sequence (shifted by 1)
        x = self.corpus_idx[start: end]          # Input sequence
        y = self.corpus_idx[start + 1: end + 1]  # Target sequence
        # Convert to PyTorch tensors with CPU/GPU
        return torch.tensor(x).to(self.device), torch.tensor(y).to(self.device)

class TextGenerator(nn.Module):
    """Simple RNN language model: Embedding -> single-layer RNN -> Linear.

    Given a batch of token-index sequences and a hidden state, produces
    vocabulary-sized logits for every position, flattened to
    ``(seq_len * batch_size, vocab_size)``.
    """

    def __init__(self, word_count: int, device: torch.device=DEVICE):
        super(TextGenerator, self).__init__()
        self.device = device
        # Token indices -> 128-dim dense vectors
        self.ebd = nn.Embedding(word_count, 128).to(self.device)
        # Single recurrent layer with 256 hidden units
        self.rnn = nn.RNN(128, 256, 1).to(self.device)
        # Project hidden states back to vocabulary logits
        self.out = nn.Linear(256, word_count).to(self.device)

    def forward(self, inputs, hidden):
        # Embed, then move to RNN's expected (seq_len, batch, emb_dim) layout
        emb = self.ebd(inputs).transpose(0, 1)
        rnn_out, hidden = self.rnn(emb, hidden)
        # Flatten time and batch dims before the output projection:
        # (seq_len * batch, hidden_dim) -> (seq_len * batch, vocab_size)
        logits = self.out(rnn_out.reshape(-1, rnn_out.shape[-1]))
        return logits, hidden

    def init_hidden(self, bs: int):
        # Zero hidden state shaped (num_layers, batch_size, hidden_dim)
        return torch.zeros(1, bs, 256, device=self.device)

def model_train(file_name: str, model, logger: Logger | None=None) -> None:
    """
    Train the model on the given corpus and save its weights.

    Args:
        file_name (str): File name for training.
        model (Any): Model for training.
        logger (Logger, optional): Logger for logging; falls back to the
            rich console when not provided.
    """
    # Get preprocessed corpus indices from Jieba segmentation
    corpus_idx = jieba_cut(file_name)[3]
    # Initialize dataset and dataloader with sequence length of 32 characters.
    # drop_last=True: the hidden state is sized per-batch below, and a partial
    # final batch would otherwise still work — but dropping it keeps every
    # optimization step on full batches for stable loss averaging.
    lyrics = LyricsDataset(corpus_idx, 32)
    dl = DataLoader(lyrics, batch_size=5, shuffle=True, drop_last=True)
    # Define loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.00001)
    # Training configuration
    num_epochs = 100
    for epoch in range(num_epochs):
        total_loss, total_num, start = 0.0, 0, time()
        # Batch training loop
        for x, y in dl:
            # Initialize hidden state sized to the ACTUAL batch (the original
            # hard-coded bs=5 crashed on any batch smaller than 5)
            hidden = model.init_hidden(bs=x.size(0))
            # Forward pass
            output, hidden = model(x, hidden)
            # Reshape targets to (seq_len * batch,) to match output logits
            y = torch.transpose(y, 0, 1).reshape(shape=(-1,))
            # Calculate loss
            loss = criterion(output, y)
            # Backpropagation and optimization
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Accumulate metrics
            total_num += 1
            total_loss += loss.item()
        # Guard against an empty dataloader (corpus shorter than one batch)
        avg_loss = total_loss / max(total_num, 1)
        # Logging and progress reporting
        if logger is not None:
            logger.info('epoch: %03d, loss: %.2f, time: %.2fs'
                        % (epoch + 1, avg_loss, time() - start))
        else:
            console.print(
                '[bold green]epoch: [bold cyan]%03d[/],' % (epoch + 1),
                '[bold green]loss: [bold cyan]%.2f[/],' % avg_loss,
                '[bold green]time: [bold cyan]%.2fs[/]' % (time() - start)
            )
    # Model saving logic
    torch.save(model.state_dict(), './model/jaychou_lyrics.pth')
    info = 'Model `jaychou_lyrics.pth` saved successfully!'
    if logger is not None:
        logger.info(info)
    else:
        console.print(info, style='bold green')

def model_predict(file_name: str, start_word: str, sentence_len: int, model) -> None:
    """
    Generate and print lyrics starting from a seed word.

    Args:
        file_name (str): File name for training.
        start_word (str): Start word for prediction.
        sentence_len (int): Length of the sentence to generate.
        model (Any): Model for prediction.

    Raises:
        KeyError: If `start_word` is not in the corpus vocabulary.
    """
    # Get vocabulary and word mappings from Jieba processing
    unique_words, word2index, _, _ = jieba_cut(file_name)
    # Fail fast with a clear message instead of a bare KeyError below
    if start_word not in word2index:
        raise KeyError(f'Start word `{start_word}` is not in the vocabulary!')
    # Initialize hidden state for RNN (batch of 1 for generation)
    hidden = model.init_hidden(bs=1)
    # Convert starting word to index
    word_idx = word2index[start_word]
    # Initialize generated sentence with starting word index
    sentence_gen = [word_idx]
    # No gradients needed at inference time
    with torch.no_grad():
        # Generate sequence of specified length
        for _ in range(sentence_len):
            # Get model prediction for next token
            output, hidden = model(torch.tensor([[word_idx]]).to(DEVICE), hidden)
            # .item() unwraps the 0-dim argmax tensor to a plain int (the
            # original kept a tensor and re-wrapped it every iteration)
            word_idx = torch.argmax(output).item()
            # Append generated index to sequence
            sentence_gen.append(word_idx)
    # Convert indices to words and print generated text
    for i in sentence_gen:
        console.print(unique_words[i], end='', style='bold blue')

def run(file_name: str, start_word: str, sentence_len: int, logger: Logger | None=None):
    """
    Train and predict the model.

    Args:
        file_name (str): File name for training.
        start_word (str): Start word for prediction.
        sentence_len (int): Length of the sentence to generate.
        logger (Logger): Logger for logging.
    """
    os.makedirs('./model', exist_ok=True)
    model_name = './model/jaychou_lyrics.pth'
    # Get vocabulary size from Jieba processing results
    word_count = jieba_cut(file_name)[2]
    # Initialize model and move to appropriate device (GPU/CPU)
    model = TextGenerator(word_count)
    if not os.path.exists(model_name):
        # Train model; weights are saved to model_name inside model_train
        model_train(file_name, model, logger)
    else:
        warning = f'Model `{os.path.basename(model_name)}` already exists!'
        if logger is not None:
            logger.warning(warning)
        else:
            console.print(warning, style='bold red')
    # map_location keeps the checkpoint loadable even when it was saved on a
    # different device (e.g. trained on GPU, predicting on CPU)
    model.load_state_dict(torch.load(model_name, map_location=DEVICE))
    # Generate lyrics using trained model
    model_predict(file_name, start_word, sentence_len, model)
