hiieu commited on
Commit
781ea7a
1 Parent(s): 550c910

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +51 -0
README.md CHANGED
@@ -12,6 +12,57 @@ tags:
12
  - sft
13
  ---
14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  # Uploaded model
16
 
17
  - **Developed by:** hiieu
 
12
  - sft
13
  ---
14
 
15
+ ```python
16
+ functions_metadata = [
17
+ {
18
+ "type": "function",
19
+ "function": {
20
+ "name": "get_temperature",
21
+ "description": "get temperature of a city",
22
+ "parameters": {
23
+ "type": "object",
24
+ "properties": {
25
+ "city": {
26
+ "type": "string",
27
+ "description": "name of the city"
28
+ }
29
+ },
30
+ "required": [
31
+ "city"
32
+ ]
33
+ }
34
+ }
35
+ }
36
+ ]
37
+
38
+ messages = [
39
+ { "role": "user", "content": f"""Bạn là một trợ lý hữu ích có quyền truy cập vào các chức năng sau. Sử dụng chúng nếu cần -\n{str(functions_metadata)}"""},
40
+ { "role": "user", "content": "What is the temperature in Tokyo right now?"},
41
+ # You will get the previous prediction; extract it with the tag <functioncall>
42
+ # execute the function and append it to the messages like below:
43
+ { "role": "assistant", "content": """<functioncall> {"name": "get_temperature", "arguments": '{"city": "Tokyo"}'} </functioncall>"""},
44
+ { "role": "user", "content": """<function_response> {"temperature": "30 C"} </function_response>"""}
45
+ ]
46
+
47
+ input_ids = tokenizer.apply_chat_template(
48
+ messages,
49
+ add_generation_prompt=True,
50
+ return_tensors="pt"
51
+ ).to(model.device)
52
+
53
+
54
+ outputs = model.generate(
55
+ input_ids,
56
+ max_new_tokens=256,
57
+ do_sample=True,
58
+ temperature=0.6,
59
+ top_p=0.9,
60
+ )
61
+ response = outputs[0][input_ids.shape[-1]:]
62
+ print(tokenizer.decode(response, skip_special_tokens=True))
63
+ # >> The current temperature in Tokyo is 30 degrees Celsius.
64
+ ```
65
+
66
  # Uploaded model
67
 
68
  - **Developed by:** hiieu