#!/usr/bin/env python3

# Copyright © 2025 Wenze Wei
#
# This file is part of Pisces L1.
#
# Licensed under the Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0).
# You may not use this file except in compliance with the License.
# Commercial use is strictly prohibited.
# You may obtain a copy of the License at
#
#     https://creativecommons.org/licenses/by-nc/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

def setup_device(device_pref):
    """
    Set up the computing device based on the given preference.

    Args:
        device_pref (str): Device preference. If "auto", automatically select CUDA if available, otherwise use CPU.
                           Otherwise, use the specified device string (e.g. "cpu", "cuda:1").

    Returns:
        torch.device: The selected computing device.
    """
    import torch
    # Determine the device based on the preference
    if device_pref == "auto":
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    else:
        device = torch.device(device_pref)
    print(f"✅\tUsing device: {device}")
    # Report GPU details only when the *selected* device is CUDA.
    # (Previously this was gated on torch.cuda.is_available(), which printed
    # GPU info even when the user explicitly requested "cpu".)
    if device.type == "cuda" and torch.cuda.is_available():
        # device.index is None for a bare "cuda" device; default to GPU 0.
        gpu_index = device.index if device.index is not None else 0
        print(f"✅\tGPU: {torch.cuda.get_device_name(gpu_index)}")
        print(f"✅\tGPU Memory: {torch.cuda.get_device_properties(gpu_index).total_memory / 1024**3:.1f} GB")
    else:
        print("❌\tNo GPU available, using CPU")
    return device

def _top_k_top_p_filter(logits, top_k, top_p):
    """
    Mask logits outside the top-k / nucleus (top-p) candidate sets with -inf.

    Args:
        logits (torch.Tensor): Next-token logits of shape (1, vocab_size).
            NOTE(review): the boolean-index write below flattens across the
            batch dimension, so this is only correct for batch size 1 — which
            is all `infer` ever passes.
        top_k (int): Keep only the k highest-probability tokens (0 disables).
        top_p (float): Keep the smallest prefix of tokens whose cumulative
            probability exceeds top_p (values outside (0, 1) disable).

    Returns:
        torch.Tensor: A filtered clone of `logits`; the input is not mutated.
    """
    import torch
    import torch.nn.functional as F
    filtered = logits.clone()
    # Top-k: everything below the k-th largest logit is removed.
    if top_k > 0:
        k = min(top_k, filtered.size(-1))
        kth_value = torch.topk(filtered, k).values[:, -1].unsqueeze(-1)
        filtered[filtered < kth_value] = -float('Inf')
    # Top-p (nucleus): remove the tail once cumulative probability passes top_p.
    if 0 < top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(filtered, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        sorted_indices_to_remove = cumulative_probs > top_p
        # Shift the mask right by one so the first token that crosses the
        # threshold is itself kept.
        if sorted_indices_to_remove[..., 1:].size(-1) > 0:
            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        indices_to_remove = sorted_indices[sorted_indices_to_remove]
        filtered[0, indices_to_remove] = -float('Inf')
    return filtered

def infer(args):
    """
    Perform inference using the Pisces model.

    Args:
        args: Command line arguments. Fields read here: `model_size`
            (optional, default "0.5B"), `ckpt` (checkpoint path or falsy),
            `prompt` (str), `image` (optional path), `max_length`
            (optional, default 100).

    Returns:
        None: Prints the generated response to the console.
    """
    import torch
    from PIL import Image
    from model.tokenizer import get_tokenizer
    from model import PiscesModel, PiscesConfig
    from transformers import BitsAndBytesConfig
    from torchvision.transforms import functional as TF
    import torch.nn.functional as F
    print("✅\tStarting Pisces L1 Inference ...")
    # Set up the computing device
    device = setup_device("auto")

    # Get the model size from arguments, default to "0.5B"
    model_size = getattr(args, "model_size", "0.5B").upper()
    # Load the model configuration from the JSON file
    cfg = PiscesConfig.from_json(f"configs/{model_size}.json")
    # Automatic 4-bit/LoRA/mixed precision inference
    quant_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True
    )
    # Initialize the Pisces model
    model = PiscesModel(cfg, quantization_config=quant_config)
    lora_used = False
    if args.ckpt:
        print(f"✅\tLoading model: {args.ckpt}")
        # Load the checkpoint. NOTE(review): torch.load unpickles arbitrary
        # objects — only load checkpoints from trusted sources.
        checkpoint = torch.load(args.ckpt, map_location=device)
        # Support both raw state dicts and {"model": state_dict} wrappers.
        state_dict = checkpoint['model'] if 'model' in checkpoint else checkpoint

        # Get the vocabulary size from the checkpoint
        ckpt_vocab_size = state_dict['embed.weight'].shape[0] if 'embed.weight' in state_dict else None
        # Get the vocabulary size of the model
        model_vocab_size = model.embed.weight.shape[0]
        # Resize the token embeddings if the vocabulary sizes mismatch
        if ckpt_vocab_size and ckpt_vocab_size != model_vocab_size:
            print(f"🟧\tVocab size mismatch: checkpoint={ckpt_vocab_size}, model={model_vocab_size}. Auto resizing...")
            model.resize_token_embeddings(ckpt_vocab_size)

        # Check if the checkpoint contains LoRA keys
        lora_keys = [k for k in state_dict.keys() if k.startswith('base_model.model.') or '.lora_A.' in k or '.lora_B.' in k]
        if lora_keys:
            from peft import get_peft_model, LoraConfig, TaskType
            print("✅\tDetected LoRA/QLoRA checkpoint, wrapping PiscesModel with LoRA config...")
            # Configure the LoRA parameters. NOTE(review): r/alpha/targets are
            # hard-coded here and must match the values used at training time.
            lora_config = LoraConfig(
                r=8, lora_alpha=32, target_modules=["q_proj", "v_proj", "o_proj"],
                lora_dropout=0.05, bias="none", task_type=TaskType.CAUSAL_LM
            )
            # Wrap the model with LoRA
            lora_model = get_peft_model(model, lora_config)
            # Copy necessary attributes from the original model to the LoRA model
            for attr in ["cfg", "quantization_config", "lora_config", "forward", "prepare_inputs_for_generation"]:
                if hasattr(model, attr):
                    setattr(lora_model, attr, getattr(model, attr))
            model = lora_model
            lora_used = True
        # Move the model to the target device and set it to evaluation mode
        model = model.to(device).eval()
        # strict=False: tolerate missing/unexpected keys (e.g. LoRA wrappers).
        model.load_state_dict(state_dict, strict=False)
        print("✅\tModel loaded successfully")
    else:
        # Move the model to the target device and set it to evaluation mode
        model = model.to(device).eval()
        print("❌\tNo model file provided, using random weights")
    print("✅\tLoading Pisces BPETokenizer...")
    # Load the tokenizer
    tokenizer = get_tokenizer()
    print("✅\tPisces BPETokenizer loaded successfully")
    print(f"✅\tProcessing prompt: {args.prompt}")
    # Encode the input prompt
    input_ids = tokenizer.encode(args.prompt, return_tensors="pt").to(device)
    pixel_values = None
    if args.image and os.path.exists(args.image):
        print(f"✅\tProcessing image: {args.image}")
        try:
            # Open, convert, and resize the image
            img = Image.open(args.image).convert("RGB").resize((224, 224))
            # Convert the image to tensor and move it to the target device
            pixel_values = TF.to_tensor(img).unsqueeze(0).to(device)
            print("✅\tImage processed successfully")
        except Exception as e:
            # Best-effort: a bad image falls back to text-only inference.
            print(f"❌\tError processing image: {e}")
            pixel_values = None
    print("✅\tGenerating response (Automatic blocking/Mixed precision/4-bit)...")
    # Get the maximum generation length from arguments, default to 100
    max_gen_len = getattr(args, 'max_length', 100)

    # Get the length of the input prompt
    prompt_len = input_ids.shape[1]
    # Short prompts get broader sampling; long prompts are sampled more tightly.
    if prompt_len < 20:
        top_k = 40
        top_p = 0.9
    else:
        top_k = 20
        top_p = 0.8
    # Determine the chunk size for processing
    chunk_size = min(getattr(cfg, 'max_position_embeddings', 2048), 512)
    generated_ids = []

    # Set up the automatic mixed precision context on the *selected* device.
    # (Previously this hard-coded "cuda", which emitted a warning and silently
    # disabled autocast on CPU-only machines.)
    if hasattr(torch, "amp") and hasattr(torch.amp, "autocast"):
        autocast_ctx = torch.amp.autocast(device.type, dtype=torch.bfloat16)
    else:
        # Legacy fallback for old torch versions; cuda-only by construction.
        autocast_ctx = torch.cuda.amp.autocast(dtype=torch.bfloat16)
    with torch.no_grad(), autocast_ctx:
        cur_input = input_ids
        for _ in range(max_gen_len):
            # Only the last position of the *final* chunk feeds the next-token
            # distribution: the original code ran a forward pass per chunk and
            # torch.cat'd all logits along dim=1, then read logits[:, -1, :],
            # which comes entirely from the final chunk. Running just that
            # chunk produces identical logits with O(1) forwards per step
            # instead of O(n).
            start = ((cur_input.shape[1] - 1) // chunk_size) * chunk_size
            outputs = model(cur_input[:, start:], images=pixel_values)
            next_token_logits = outputs["logits"][:, -1, :]

            # Apply top-k / top-p filtering without mutating the loop's
            # sampling parameters.
            filtered_logits = _top_k_top_p_filter(next_token_logits, top_k, top_p)
            # Convert the filtered logits to probabilities
            probs = F.softmax(filtered_logits, dim=-1)
            # Sample the next token
            next_token = torch.multinomial(probs, num_samples=1)
            # Break if the end-of-sequence token is generated.
            # (If the tokenizer has no eos_token_id, this comparison is simply
            # never true and generation runs to max_gen_len.)
            if next_token.item() == tokenizer.eos_token_id:
                break
            generated_ids.append(next_token.item())
            # Concatenate the generated token to the current input
            cur_input = torch.cat([cur_input, next_token], dim=1)
    # Combine the input IDs and generated IDs
    output_ids = input_ids[0].tolist() + generated_ids
    # Decode the output IDs to text
    generated_text = tokenizer.decode(output_ids, skip_special_tokens=True)
    print("\n" + "="*50)
    print("✅\tGenerated Response:")
    print("="*50)
    print(generated_text)
    print("="*50)
