Upload folder using huggingface_hub

#1 by nold - opened
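
As the PR title says, this upload was made with `huggingface_hub`. A minimal sketch of how such a PR is typically created with the library's `upload_folder` helper (the local path and repo id below are illustrative assumptions, not taken from this PR):

```python
from huggingface_hub import upload_folder

# Upload every file in a local folder to a Hub model repo,
# opening the commit as a pull request instead of pushing to main.
upload_folder(
    folder_path="./Phi-3-mini-4k-instruct-function-calling-GGUF",  # assumed local path
    repo_id="nold/Phi-3-mini-4k-instruct-function-calling-GGUF",   # assumed repo id
    repo_type="model",
    create_pr=True,
    commit_message="Upload folder using huggingface_hub",
)
```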
.gitattributes CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ Phi-3-mini-4k-instruct-function-calling_Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ Phi-3-mini-4k-instruct-function-calling_Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ Phi-3-mini-4k-instruct-function-calling_Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ Phi-3-mini-4k-instruct-function-calling_Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ Phi-3-mini-4k-instruct-function-calling_Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+ Phi-3-mini-4k-instruct-function-calling_Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
Phi-3-mini-4k-instruct-function-calling_Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c71b404e958e9c726cbbc2b5aca6b9eb2925f8c5ef2d608fe63693ff0d889029
+ size 1416202976
Phi-3-mini-4k-instruct-function-calling_Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b30b5e4cb2403c2fd22dc5ab13ac5180d1cbef77d8f3cc6802c658090bc752fc
+ size 1955475680
Phi-3-mini-4k-instruct-function-calling_Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b5cde6edb83851b71bcdf0ca546aa8fbdd65f05643dbed2c88a656157c93a83
+ size 2393231072
Phi-3-mini-4k-instruct-function-calling_Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db4a02ed0ec74f46d9c9fc9e905179f934fc0bea0bfc4de0c23f949810030356
+ size 2815274720
Phi-3-mini-4k-instruct-function-calling_Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:254508b59e2e3474eb822667b31cad944289f583c26bfc3081ccc7fc8321a7ee
+ size 3135851744
Phi-3-mini-4k-instruct-function-calling_Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a01fffebf8bd8e3c3b9d53fffabb583d3d91760171eab164586393765d0d03cc
+ size 4061221088
README.md ADDED
@@ -0,0 +1,140 @@
+ ---
+ datasets:
+ - mzbac/function-calling-phi-3-format-v1.1
+ ---
+
+ # Model
+
+ Fine-tuned the Phi-3 instruct model for function calling via MLX-LM, using the dataset at https://huggingface.co/datasets/mzbac/function-calling-phi-3-format-v1.1
+
+
+ # Usage
+ ```python
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+
+ model_id = "mzbac/Phi-3-mini-4k-instruct-function-calling"
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     torch_dtype=torch.bfloat16,
+     device_map="auto",
+ )
+
+ tool = {
+     "name": "search_web",
+     "description": "Perform a web search for a given search terms.",
+     "parameter": {
+         "type": "object",
+         "properties": {
+             "search_terms": {
+                 "type": "array",
+                 "items": {"type": "string"},
+                 "description": "The search queries for which the search is performed.",
+                 "required": True,
+             }
+         },
+     },
+ }
+
+ messages = [
+     {
+         "role": "user",
+         "content": f"You are a helpful assistant with access to the following functions. Use them if required - {str(tool)}",
+     },
+     {"role": "user", "content": "Any news in Melbourne today, May 7, 2024?"},
+ ]
+
+ input_ids = tokenizer.apply_chat_template(
+     messages, add_generation_prompt=True, return_tensors="pt"
+ ).to(model.device)
+
+ terminators = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|end|>")]
+
+ outputs = model.generate(
+     input_ids,
+     max_new_tokens=256,
+     eos_token_id=terminators,
+     do_sample=True,
+     temperature=0.1,
+ )
+ response = outputs[0]
+ print(tokenizer.decode(response))
+
+ # <s><|user|> You are a helpful assistant with access to the following functions. Use them if required - {'name': 'search_web', 'description': 'Perform a web search for a given search terms.', 'parameter': {'type': 'object', 'properties': {'search_terms': {'type': 'array', 'items': {'type': 'string'}, 'description': 'The search queries for which the search is performed.', 'required': True}}}}<|end|><|assistant|>
+ # <|user|> Any news in Melbourne today, May 7, 2024?<|end|>
+ # <|assistant|> <functioncall> {"name": "search_web", "arguments": {"search_terms": ["news", "Melbourne", "May 7, 2024"]}}<|end|>
+ ```
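+
+ The decoded text ends with a `<functioncall>` tag followed by JSON arguments. Continuing from the snippet above, a minimal sketch for extracting and dispatching that call (the regex and the `search_web` stub are illustrative assumptions, not part of this repo):
+ ```python
+ import json
+ import re
+
+ def extract_function_call(text: str):
+     # Pull the JSON payload that follows the <functioncall> tag.
+     match = re.search(r"<functioncall>\s*(\{.*?\})\s*<\|end\|>", text, re.DOTALL)
+     return json.loads(match.group(1)) if match else None
+
+ def search_web(search_terms):
+     # Hypothetical stub: wire this up to a real search API.
+     return [f"results for {term!r}" for term in search_terms]
+
+ call = extract_function_call(tokenizer.decode(response))
+ if call and call["name"] == "search_web":
+     print(search_web(**call["arguments"]))
+ ```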
+
+ # Training hyperparameters
+ lora_config.yaml
+ ```yaml
+ # The path to the local model directory or Hugging Face repo.
+ model: "microsoft/Phi-3-mini-4k-instruct"
+ # Whether or not to train (boolean)
+ train: true
+
+ # Directory with {train, valid, test}.jsonl files
+ data: "data"
+
+ # The PRNG seed
+ seed: 0
+
+ # Number of layers to fine-tune
+ lora_layers: 32
+
+ # Minibatch size.
+ batch_size: 1
+
+ # Iterations to train for.
+ iters: 111000
+
+ # Number of validation batches, -1 uses the entire validation set.
+ val_batches: -1
+
+ # Adam learning rate.
+ learning_rate: 1e-6
+
+ # Number of training steps between loss reporting.
+ steps_per_report: 10
+
+ # Number of training steps between validations.
+ steps_per_eval: 200
+
+ # Load path to resume training with the given adapter weights.
+ # resume_adapter_file: "adapters/adapters.safetensors"
+
+ # Save/load path for the trained adapter weights.
+ adapter_path: "adapters"
+
+ # Save the model every N iterations.
+ save_every: 1000
+
+ # Evaluate on the test set after training
+ test: false
+
+ # Number of test set batches, -1 uses the entire test set.
+ test_batches: 100
+
+ # Maximum sequence length.
+ max_seq_length: 4096
+
+ # Use gradient checkpointing to reduce memory use.
+ grad_checkpoint: false
+
+ # LoRA parameters can only be specified in a config file
+ lora_parameters:
+   # The layer keys to apply LoRA to.
+   # These will be applied for the last lora_layers
+   keys: ['mlp.down_proj','mlp.gate_up_proj','self_attn.qkv_proj','self_attn.o_proj']
+   rank: 128
+   alpha: 256
+   scale: 10.0
+   dropout: 0.05
+ ```
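+
+ With a standard `mlx-lm` install, a config like this is typically passed to the LoRA trainer as `python -m mlx_lm.lora --config lora_config.yaml`; the exact invocation is an assumption about the usual mlx-lm workflow and is not recorded in this repo.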
+
+ ***
+
+ Quantization of the model [mzbac/Phi-3-mini-4k-instruct-function-calling](https://huggingface.co/mzbac/Phi-3-mini-4k-instruct-function-calling), created using the [llm-quantizer](https://github.com/Nold360/llm-quantizer) pipeline.
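+
+ The `.gguf` files in this repo can be loaded by any llama.cpp-compatible runtime. A minimal sketch using `llama-cpp-python` (the file name is taken from this repo; the sampling settings are illustrative):
+ ```python
+ from llama_cpp import Llama
+
+ # Load one of the quantized GGUF files from this repo.
+ llm = Llama(
+     model_path="Phi-3-mini-4k-instruct-function-calling_Q4_K_M.gguf",
+     n_ctx=4096,  # matches the model's 4k context window
+ )
+
+ out = llm.create_chat_completion(
+     messages=[{"role": "user", "content": "What is a Large Language Model?"}],
+     max_tokens=256,
+     temperature=0.1,
+ )
+ print(out["choices"][0]["message"]["content"])
+ ```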
test.log ADDED
@@ -0,0 +1 @@
+ <s> What is a Large Language Model?<|end|>