import torch
from llmtuner import create_ui

# The fp16 training run below requires a CUDA-capable GPU.
assert torch.cuda.is_available(), "A CUDA GPU is required for fine-tuning."

# Optional: launch the LLaMA Board web UI for interactive fine-tuning.
# launch() blocks when run as a script, so close the app before continuing.
create_ui().queue().launch(share=True)

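# Supervised fine-tuning (SFT): train a LoRA adapter for Qwen1.5-0.5B-Chat on
# the "identity" dataset plus the English and Chinese Alpaca-GPT4 datasets.
# run_exp accepts the same arguments as LLaMA Factory's CLI trainer.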
from llmtuner import run_exp
run_exp(dict(
  stage="sft",                                       # supervised fine-tuning stage
  do_train=True,
  model_name_or_path="Qwen/Qwen1.5-0.5B-Chat",
  dataset="identity,alpaca_gpt4_en,alpaca_gpt4_zh",  # comma-separated dataset names
  template="qwen",                                   # chat template matching the base model
  finetuning_type="lora",
  lora_target="all",                                 # attach LoRA to all linear layers
  output_dir="test_identity",
  per_device_train_batch_size=4,
  gradient_accumulation_steps=4,                     # effective batch size: 4 * 4 = 16
  lr_scheduler_type="cosine",
  logging_steps=10,
  save_steps=100,
  learning_rate=1e-4,
  num_train_epochs=5.0,
  max_samples=500,                                   # cap each dataset at 500 examples
  max_grad_norm=1.0,                                 # gradient clipping threshold
  fp16=True,                                         # mixed-precision training
))

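# Chat with the fine-tuned model: ChatModel loads the base weights together
# with the LoRA adapter produced above and streams replies to stdout.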
from llmtuner import ChatModel
chat_model = ChatModel(dict(
  model_name_or_path="Qwen/Qwen1.5-0.5B-Chat",
  adapter_name_or_path="test_identity",  # LoRA weights from the training run above
  finetuning_type="lora",
  template="qwen",
))
messages = []
while True:
  query = input("\nUser: ")
  if query.strip() == "exit":   # type "exit" to quit the chat loop
    break
  if query.strip() == "clear":  # type "clear" to reset the conversation history
    messages = []
    continue

  messages.append({"role": "user", "content": query})
  print("Assistant: ", end="", flush=True)
  response = ""
  # stream_chat yields the reply incrementally as it is generated.
  for new_text in chat_model.stream_chat(messages):
    print(new_text, end="", flush=True)
    response += new_text
  print()
  messages.append({"role": "assistant", "content": response})

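# Export: merge the LoRA adapter into the base weights and save a standalone
# model. Uncomment export_hub_model_id to also push it to the Hugging Face Hub.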
from llmtuner import export_model
export_model(dict(
  model_name_or_path="Qwen/Qwen1.5-0.5B-Chat",
  adapter_name_or_path="test_identity",
  finetuning_type="lora",
  template="qwen",
  export_dir="test_exported",
  # export_hub_model_id="your_hf_id/test_identity",
))
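
# A minimal sketch of loading the exported model with Hugging Face Transformers
# (assumes the export above wrote a standalone model to "test_exported"; the
# prompt and generation settings are illustrative, not tuned):
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("test_exported")
model = AutoModelForCausalLM.from_pretrained("test_exported", torch_dtype="auto", device_map="auto")

# Build a Qwen-formatted prompt via the tokenizer's built-in chat template.
prompt = tokenizer.apply_chat_template(
  [{"role": "user", "content": "Who are you?"}],
  tokenize=False,
  add_generation_prompt=True,
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
# Decode only the newly generated tokens, skipping the echoed prompt.
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))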