import argparse
import os
import sys
from typing import Dict, Optional

# Make the script's own directory importable (needed for the local `model` module).
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '.'))

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

from model import ComplexNetLM

def main(params: Optional[Dict] = None) -> None:
    """Load the Fairy-plus-minus-i-700M causal LM and print one generated reply.

    Downloads the tokenizer and model from the HuggingFace hub, runs a fixed
    plain-text chat prompt through ``generate``, and prints only the newly
    generated portion of the output.

    Args:
        params: Optional run parameters (e.g. ``vars(parse_args())``).
            Accepted for CLI compatibility; not consumed by the current logic.
    """
    params = {} if params is None else params
    model_id = "PKU-DS-LAB/Fairy-plus-minus-i-700M"

    # Load tokenizer and model. NOTE: tokenizers are CPU-side objects and take
    # no device placement argument — device_map belongs on the model only.
    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
        device_map="cuda:0",
    )

    # Hand-built chat transcript; presumably matches the model card's expected
    # format — TODO(review): confirm whether tokenizer.apply_chat_template
    # should be used instead.
    prompt = (
        "System: You are a helpful AI assistant.\n"
        "User: please introduce deep learning technology\n"
        "Assistant:"
    )

    chat_input = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Generate, then decode only the tokens past the prompt length so the
    # echoed prompt is not printed back.
    chat_outputs = model.generate(**chat_input, max_new_tokens=50)
    response = tokenizer.decode(
        chat_outputs[0][chat_input['input_ids'].shape[-1]:],
        skip_special_tokens=True,
    )
    print("\nAssistant Response:", response)

def parse_args(argv: Optional[list] = None) -> argparse.Namespace:
    """Parse command-line options for this script.

    Args:
        argv: Optional argument list to parse; defaults to ``sys.argv[1:]``
            when ``None`` (standard argparse behavior). Passing an explicit
            list makes the function usable from tests and other callers.

    Returns:
        The parsed namespace, with an integer ``run_mode`` (default 1).
    """
    parser = argparse.ArgumentParser()
    # action='store' and dest='run_mode' are argparse defaults — omitted.
    parser.add_argument(
        '--run_mode',
        type=int, default=1,
        help='run mode'
    )
    return parser.parse_args(argv)

if __name__ == '__main__':
    # Script entry point: forward the parsed CLI options to main() as a dict.
    cli_params = vars(parse_args())
    main(params=cli_params)

