Ataliba Miguel committed on
Commit 6a18a16
1 Parent(s): 0709daa

First model version

.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+data/alpaca_synth_queries_healed.jsonl filter=lfs diff=lfs merge=lfs -text
+adapter_model.safetensors filter=lfs diff=lfs merge=lfs -text
+*.psd filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,13 +1 @@
----
-title: Shit
-emoji: ⚡
-colorFrom: red
-colorTo: green
-sdk: streamlit
-sdk_version: 1.35.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+This directory contains the jsonl data used to train this model
app.py ADDED
@@ -0,0 +1,18 @@
+import gradio as gr
+import torch
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+# Load the model and tokenizer
+model = AutoModelForSequenceClassification.from_pretrained("./", local_files_only=True)
+tokenizer = AutoTokenizer.from_pretrained("gpt2")
+
+def classify_text(text):
+    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)
+    with torch.no_grad():
+        outputs = model(**inputs)
+    logits = outputs.logits
+    predicted_class_id = logits.argmax().item()
+    return "Proper Naming Notfcn" if predicted_class_id == 1 else "Wrong Naming Notificn"
+
+iface = gr.Interface(fn=classify_text, inputs="text", outputs="text", title="Classification Naming", description="Classify naming notifications as proper or wrong.")
+iface.launch()
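Once app.py is running, the interface can also be exercised programmatically. A minimal sketch using gradio_client, assuming a local launch on Gradio's default port; the URL, sample input, and endpoint name are illustrative assumptions, not part of this commit:

from gradio_client import Client

# Assumes app.py is running locally; a hosted Space URL works the same way.
client = Client("http://127.0.0.1:7860")

# A single-input/single-output gr.Interface exposes a default /predict endpoint.
result = client.predict("EQUIP-PUMP-014B trip alarm", api_name="/predict")
print(result)  # "Proper Naming Notfcn" or "Wrong Naming Notificn"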
data/README.md ADDED
@@ -0,0 +1 @@
+This directory contains the jsonl data used to train this model
data/adapter_config.json ADDED
@@ -0,0 +1,31 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
+  "bias": "none",
+  "fan_in_fan_out": null,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 32,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "gate_proj",
+    "v_proj",
+    "o_proj",
+    "up_proj",
+    "q_proj",
+    "k_proj",
+    "down_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
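This config describes a LoRA adapter (r=32, lora_alpha=16, dropout 0.05) over all linear projections of mistralai/Mistral-7B-v0.1 with task_type CAUSAL_LM. A minimal loading sketch with the peft library, assuming data/ holds both adapter_config.json and adapter_model.safetensors; the prompt text and dtype choice are assumptions:

import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Reads base_model_name_or_path from adapter_config.json, loads the base
# Mistral-7B weights, then attaches the LoRA adapter stored in data/.
model = AutoPeftModelForCausalLM.from_pretrained("data/", torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")

# Illustrative alpaca-style prompt; the exact training template lives in configs_hc.yml.
prompt = "### Instruction:\nClassify the notification name EQUIP-PUMP-014B.\n\n### Response:\n"
inputs = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))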
data/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:151496a8692deab90e4393beefaf341e8f67bdda2df9d2c62e2cf913122aeba5
+size 335604696
data/configs_hc.yml ADDED
@@ -0,0 +1,91 @@
+base_model: mistralai/Mistral-7B-v0.1
+model_type: MistralForCausalLM
+tokenizer_type: LlamaTokenizer
+is_mistral_derived_model: true
+
+load_in_8bit: false
+load_in_4bit: true
+strict: false
+
+lora_fan_in_fan_out: false
+data_seed: 49
+seed: 49
+
+datasets:
+  - path: /teamspace/studios/this_studio/axolotl/finetune_maven/hc-mistral-alpaca/data/alpaca_synth_queries_healed.jsonl
+    type: sharegpt
+    conversation: alpaca
+dataset_prepared_path: last_run_prepared
+val_set_size: 0.1
+output_dir: ./qlora-alpaca-out
+hub_model_id: valLabsz/clssfcn-naming
+#hub_model_id: hamel/hc-mistral-alpaca
+
+
+adapter: qlora
+lora_model_dir:
+
+sequence_len: 896
+sample_packing: false
+pad_to_sequence_len: true
+
+lora_r: 32
+lora_alpha: 16
+lora_dropout: 0.05
+lora_target_linear: true
+lora_fan_in_fan_out:
+lora_target_modules:
+  - gate_proj
+  - down_proj
+  - up_proj
+  - q_proj
+  - v_proj
+  - k_proj
+  - o_proj
+
+#wandb_project: hc-axolotl-mistral
+#wandb_entity: hamelsmu
+
+gradient_accumulation_steps: 4
+micro_batch_size: 16
+eval_batch_size: 16
+num_epochs: 3
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.0002
+max_grad_norm: 1.0
+adam_beta2: 0.95
+adam_epsilon: 0.00001
+save_total_limit: 12
+
+train_on_inputs: false
+group_by_length: false
+bf16: true
+fp16: false
+tf32: false
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+loss_watchdog_threshold: 5.0
+loss_watchdog_patience: 3
+
+warmup_steps: 20
+evals_per_epoch: 4
+eval_table_size:
+eval_table_max_new_tokens: 128
+saves_per_epoch: 6
+debug:
+weight_decay: 0.0
+fsdp:
+fsdp_config:
+special_tokens:
+  bos_token: "<s>"
+  eos_token: "</s>"
+  unk_token: "<unk>"
+save_safetensors: true
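This is an axolotl QLoRA config; a training run with it is normally launched via accelerate launch -m axolotl.cli.train data/configs_hc.yml. One consequence of its batch settings is the effective batch size per optimizer step; a quick check using only values from the config, with single-GPU training as the stated assumption:

# Effective batch size implied by configs_hc.yml.
micro_batch_size = 16             # per-device batch from the config
gradient_accumulation_steps = 4   # one optimizer step every 4 micro-batches
num_gpus = 1                      # assumption; scales linearly with devices

effective_batch_size = micro_batch_size * gradient_accumulation_steps * num_gpus
print(effective_batch_size)  # 64 samples per optimizer update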
data/notfcn_B17.csv ADDED
The diff for this file is too large to render. See raw diff
 
data/notfcn_B17.jsonl ADDED
@@ -0,0 +1,2 @@
+{"text": "No"}
+{"text": "Yes"}