"""
CLI Chat Demo for GLM-4-9B/32B Models with Transformers Backend

This script allows users to interact with the GLM-4-9B or GLM-4-32B model through a command-line interface.
Model configurations are automatically adjusted based on the specified model size.

Usage:
    python trans_cli_demo_plus.py --model-size [9b|32b] [--model-path MODEL_PATH] [--max-length MAX_LENGTH] [--top-p TOP_P] [--temperature TEMPERATURE]
    python trans_cli_demo_plus.py --model-size "9b" --model-path "/data/models/llm/models/GLM-4-9B-0414"
    python trans_cli_demo_plus.py --model-size "32b" --model-path "/data/models/llm/models/GLM-4-32B-0414"

Arguments:
    --model-size        Required. Specify the model size, either '9b' or '32b'.
    --model-path        Path to the model directory. Default is the predefined MODEL_PATH.
    --max-length        Maximum generation length. Default is 8192.
    --top-p             Top-p sampling parameter. Default is 0.8.
    --temperature       Sampling temperature. Default is 0.6.

Note:
- For 9B model: Uses flash attention and float16 by default (requires flash-attn installation)
- For 32B model: Uses float32 precision without flash attention
- The script handles markdown to plain text conversion for better CLI display
- Press 'exit' or 'quit' to end the session
"""

import argparse
from threading import Thread

import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    StoppingCriteria,
    StoppingCriteriaList,
    TextIteratorStreamer,
)

# Default model path (will be overridden by command-line argument)
DEFAULT_MODEL_PATH = "/data/models/llm/models/GLM-4-9B-0414"

class StopOnTokens(StoppingCriteria):
    """Custom stopping criterion: halt generation as soon as the most
    recently generated token is one of the model's EOS token ids."""

    def __init__(self, model):
        super().__init__()
        self.model = model

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        eos = self.model.config.eos_token_id
        # eos_token_id may be a single id or a list of ids; normalize to a list.
        stop_ids = eos if isinstance(eos, list) else [eos]
        last_token = input_ids[0][-1].item()
        return last_token in stop_ids

def _build_messages(history):
    """Convert chat history ``[[user, assistant], ...]`` into the
    role/content message list expected by ``apply_chat_template``.

    The final turn is expected to hold an empty assistant slot (the reply
    about to be generated), so only its user message is emitted.
    """
    messages = []
    for idx, (user_msg, model_msg) in enumerate(history):
        if idx == len(history) - 1 and not model_msg:
            # Last turn, reply pending: emit the user prompt and stop.
            messages.append({"role": "user", "content": user_msg})
            break
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if model_msg:
            messages.append({"role": "assistant", "content": model_msg})
    return messages


def main():
    """Parse CLI arguments, load the GLM-4 model, and run the interactive
    chat loop, streaming generated tokens to stdout."""
    # Parse command-line arguments
    parser = argparse.ArgumentParser(description='GLM-4 CLI Chat Demo')
    parser.add_argument('--model-size', type=str, required=True, choices=['9b', '32b'],
                       help='Model size specification (9b or 32b)')
    parser.add_argument('--model-path', type=str, default=DEFAULT_MODEL_PATH,
                       help='Path to model directory')
    parser.add_argument('--max-length', type=int, default=8192,
                       help='Maximum generation length')
    parser.add_argument('--top-p', type=float, default=0.8,
                       help='Top-p sampling parameter')
    parser.add_argument('--temperature', type=float, default=0.6,
                       help='Sampling temperature')
    args = parser.parse_args()

    # Load tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_path, trust_remote_code=True)

    # Base loading options shared by both model sizes.
    model_config = {
        "trust_remote_code": True,
        "device_map": "auto"
    }

    if args.model_size == '9b':
        # 9B: half precision + flash attention (requires flash-attn installed).
        model_config.update({
            "attn_implementation": "flash_attention_2",
            "torch_dtype": torch.float16
        })
    elif args.model_size == '32b':
        # 32B: full float32 precision, no flash attention.
        model_config.update({
            "torch_dtype": torch.float32
        })

    # Load model
    model = AutoModelForCausalLM.from_pretrained(
        args.model_path,
        **model_config
    ).eval()

    # Initialize chat state; `stop` is reused across turns.
    history = []
    stop = StopOnTokens(model)  # pass in the model instance

    print(f"\nWelcome to GLM-4-{args.model_size.upper()} CLI Chat (config: max_length={args.max_length}, "
          f"top_p={args.top_p}, temperature={args.temperature})")
    print("Type your message below (enter 'exit' or 'quit' to end):\n")

    while True:
        try:
            user_input = input("\nYou: ")
        except (EOFError, KeyboardInterrupt):
            # Exit cleanly on Ctrl-D / Ctrl-C instead of dumping a traceback.
            print()
            break
        if user_input.lower() in ["exit", "quit"]:
            break
        history.append([user_input, ""])

        # Format accumulated history with the chat template.
        messages = _build_messages(history)

        # Prepare model inputs
        model_inputs = tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt"
        ).to(model.device)

        # Streamer setup (60s per-token timeout between streamed chunks).
        streamer = TextIteratorStreamer(
            tokenizer=tokenizer,
            timeout=60,
            skip_prompt=True,
            skip_special_tokens=True
        )

        # Generation parameters
        generate_kwargs = {
            "input_ids": model_inputs["input_ids"],
            "attention_mask": model_inputs["attention_mask"],
            "streamer": streamer,
            "max_new_tokens": args.max_length,
            "do_sample": True,
            "top_p": args.top_p,
            "temperature": args.temperature,
            "stopping_criteria": StoppingCriteriaList([stop]),
            "repetition_penalty": 1.2,
            "eos_token_id": model.config.eos_token_id,
        }

        # Run generation in the background so tokens can be streamed here.
        t = Thread(target=model.generate, kwargs=generate_kwargs)
        t.start()

        # Stream output
        print("GLM-4:", end="", flush=True)
        response_buffer = []
        for new_token in streamer:
            if new_token:
                print(new_token, end="", flush=True)
                response_buffer.append(new_token)
        # Ensure the generation thread has fully finished before the next
        # turn reuses the model and the shared stopping criterion.
        t.join()

        # Save the assistant reply into the pending history turn.
        history[-1][1] = "".join(response_buffer).strip()

if __name__ == "__main__":
    main()
