import torch
from modelscope import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, LlamaForCausalLM
from peft import TaskType, LoraConfig, get_peft_model
from transformers import LlamaConfig

# Pick the compute device: prefer CUDA, fall back to CPU.
has_cuda = torch.cuda.is_available()
device = torch.device("cuda") if has_cuda else torch.device("cpu")
# model_name = "Qwen/Qwen3-14B"
model_name = "Qwen/Qwen3-8B"
# Tokenizer is fetched from the ModelScope hub by model id.
tokenizer = AutoTokenizer.from_pretrained(model_name)
print(f"词表大小:{tokenizer.vocab_size}")

# # 1. Train a large model from scratch
# config = LlamaConfig()  # create a default Llama config
# config.num_hidden_layers = 12  # configure the network structure
# config.hidden_size = 1024
# config.intermediate_size = 4096
# config.num_key_value_heads = 8
# # Initialize a model from the config
# model = LlamaForCausalLM(config)

# 4-bit NF4 quantization with double quantization; matmuls computed in bfloat16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16
)
# BUG FIX: calling `.to(device)` on a bitsandbytes-quantized model raises
# ValueError in transformers ("`.to` is not supported for 4-bit or 8-bit
# bitsandbytes models"). Let accelerate place the quantized weights via
# `device_map` instead of moving the model manually.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    low_cpu_mem_usage=True,
    device_map="auto",
)
print(model)

# QLoRA adapter configuration (currently disabled). Re-enable together with
# `get_peft_model` below to attach trainable low-rank adapters on top of the
# frozen 4-bit base weights.
# peft_config = LoraConfig(
#     r=2,
#     target_modules=["q_proj",
#                     "v_proj",
#                     "k_proj",
#                     "o_proj",
#                     "gate_proj",
#                     "down_proj",
#                     "up_proj"
#                     ],
#     task_type=TaskType.CAUSAL_LM,
#     lora_alpha=16,
#     lora_dropout=0.05
# )
#
# model = get_peft_model(model, peft_config)
# model.print_trainable_parameters()

# NOTE(review): with the LoRA section above commented out, a 4-bit quantized
# base model typically exposes no parameters with requires_grad=True, so the
# `loss.backward()` / `optimizer.step()` further down is expected to fail —
# confirm the intended training setup.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)

# Build a single-prompt batch and attach causal-LM training targets
# (labels == input ids; the model shifts them internally for the loss).
texts = "the capital of france is"
inputs = tokenizer(texts, return_tensors="pt").to(device)
inputs["labels"] = inputs["input_ids"].clone()
print(f'inputs: {inputs}')

# BUG FIX: splatting `inputs` (which now contains `labels`) into `generate`
# forwards `labels` on every decoding step; with the KV cache each step sees
# one new token while `labels` keeps the full prompt length, so the internal
# loss shift fails with a shape error. Strip `labels` for generation only.
gen_inputs = {k: v for k, v in inputs.items() if k != "labels"}
bb = model.generate(**gen_inputs)
print(f'generated: {tokenizer.batch_decode(bb, skip_special_tokens=True)}')

# Training-style forward pass: because `labels` is present in `inputs`, the
# model returns a cross-entropy loss alongside the logits.
outputs = model(**inputs)
print(outputs)
print(f'outputs.logits.shape: {outputs.logits.shape}')

# Greedy pick of the next token from the logits at the last position.
token_id = outputs.logits[0, -1].argmax(-1)
print(f'token_id: {token_id}')
print(tokenizer.decode(token_id, skip_special_tokens=True))

# One optimizer step on the LM loss, then persist the weights to ./out.
# NOTE(review): with the base model 4-bit quantized and the LoRA block
# disabled, there may be no parameters with requires_grad=True, in which case
# `loss.backward()` raises — confirm the intended training setup.
loss = outputs.loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
model.save_pretrained("./out")
print(f'loss: {loss.item()}')
