DavidLanz committed on
Commit
859a063
1 Parent(s): e32cb7b

Upload folder using huggingface_hub

Files changed (3)
  1. README.md +131 -3
  2. adapter_config.json +29 -0
  3. adapter_model.safetensors +3 -0
README.md CHANGED
@@ -1,3 +1,131 @@
- ---
- license: apache-2.0
- ---
+ ---
+ language:
+ - en
+ license: apache-2.0
+ library_name: peft
+ tags:
+ - facebook
+ - meta
+ - pytorch
+ - llama
+ - llama-3
+ base_model: DavidLanz/Meta-Llama-3-8B-Instruct
+ model_name: Llama 3 8B Instruct
+ inference: false
+ model_creator: Meta Llama 3
+ model_type: llama
+ pipeline_tag: text-generation
+ quantized_by: QLoRA
+ ---
+
+ # Model Card for DavidLanz/llama3_8b_taiwan_stock_qlora
+
+ This PEFT adapter is designed to predict the prices of the following five Taiwan stocks:
+
+ | Ticker | Name |
+ |--------|------|
+ | 3661 | 世芯-KY (Alchip) |
+ | 2330 | 台積電 (TSMC) |
+ | 3017 | 奇鋐 (AVC) |
+ | 2618 | 長榮航 (EVA Air) |
+ | 2317 | 鴻海 (Hon Hai) |
+
+ Disclaimer: This model is an experiment in applying an LLM to a time-series problem. It is not investment advice, and its predictions must not be used as a basis for investment decisions.
+
+ ## Model Details
+
+ The training data comes from the [Taiwan Stock Exchange (臺灣證券交易所)](https://www.twse.com.tw/).
+
+ ### Model Description
+
+ This repo contains a QLoRA-trained PEFT adapter for [Meta's Llama 3 8B Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct).
+
+ ## Uses
+
+ The example below loads the base model in 4-bit (NF4) quantization, attaches this adapter, and asks it to predict a closing price from one day of trading data:
+
+ ```python
+ import torch
+ from peft import PeftModel
+ from transformers import (
+     AutoModelForCausalLM,
+     AutoTokenizer,
+     BitsAndBytesConfig,
+     pipeline,
+ )
+
+ # QLoRA-style 4-bit NF4 quantization with float16 compute
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.float16,
+     bnb_4bit_use_double_quant=False,
+ )
+
+ base_model_path = "meta-llama/Meta-Llama-3-8B-Instruct"
+ adapter_path = "DavidLanz/llama3_8b_taiwan_stock_qlora"
+
+ # Load the quantized base model on GPU 0, then attach the LoRA adapter
+ base_model = AutoModelForCausalLM.from_pretrained(
+     base_model_path,
+     low_cpu_mem_usage=True,
+     return_dict=True,
+     quantization_config=bnb_config,
+     torch_dtype=torch.float16,
+     device_map={"": 0},
+ )
+ model = PeftModel.from_pretrained(base_model, adapter_path)
+
+ tokenizer = AutoTokenizer.from_pretrained(base_model_path)
+
+ text_gen_pipeline = pipeline(
+     "text-generation",
+     model=model,
+     tokenizer=tokenizer,
+ )
+
+ # System prompt: "You are a professional Taiwan stock market trading analyst."
+ # User prompt: yesterday's open/high/low/close, day-over-day change, volume,
+ # and turnover for TSMC (2330), then a request to predict today's close.
+ messages = [
+     {
+         "role": "system",
+         "content": "你是一位專業的台灣股市交易分析師",
+     },
+     {"role": "user", "content": "股票名稱為台積電,股票代號為2330。關於昨日的表現,開盤價為761,當日最高價為761,最低價為752,收盤價為754,與前一日相比漲了12,交易量為32,067,682,成交金額為24,247,217,869。請預測今天的收盤價?"},
+ ]
+
+ prompt = text_gen_pipeline.tokenizer.apply_chat_template(
+     messages,
+     tokenize=False,
+     add_generation_prompt=True,
+ )
+
+ # Llama 3 ends chat turns with <|eot_id|>, so stop on it as well as EOS
+ terminators = [
+     text_gen_pipeline.tokenizer.eos_token_id,
+     text_gen_pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
+ ]
+
+ outputs = text_gen_pipeline(
+     prompt,
+     max_new_tokens=256,
+     eos_token_id=terminators,
+     do_sample=True,
+     temperature=0.6,
+     top_p=0.9,
+ )
+ print(outputs[0]["generated_text"][len(prompt):])
+ ```
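+
+ The user message follows a fixed template. A hypothetical helper (not part of this repo; field order and wording are copied from the example above) for building it from raw daily figures:
+
+ ```python
+ # Hypothetical prompt builder following the template shown above. The example
+ # only demonstrates a rising day ("漲了", "rose by"); using "跌了" ("fell by")
+ # for a down day is an assumption.
+ def make_user_prompt(name, code, open_p, high, low, close, change, volume, turnover):
+     direction = "漲了" if change >= 0 else "跌了"
+     return (
+         f"股票名稱為{name},股票代號為{code}。關於昨日的表現,"
+         f"開盤價為{open_p},當日最高價為{high},最低價為{low},收盤價為{close},"
+         f"與前一日相比{direction}{abs(change)},交易量為{volume:,},"
+         f"成交金額為{turnover:,}。請預測今天的收盤價?"
+     )
+
+ # Reproduces the user message from the example above
+ msg = make_user_prompt("台積電", 2330, 761, 761, 752, 754, 12, 32067682, 24247217869)
+ ```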
+
+ ### Framework versions
+
+ - PEFT 0.10.0
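+
+ To check which base model and settings the adapter was saved with, PEFT's config loader can read just the adapter config (a minimal sketch; it fetches only the small adapter_config.json, not the weights):
+
+ ```python
+ from peft import PeftConfig
+
+ cfg = PeftConfig.from_pretrained("DavidLanz/llama3_8b_taiwan_stock_qlora")
+ print(cfg.base_model_name_or_path)  # base model this adapter expects
+ print(cfg.peft_type)                # PeftType.LORA
+ ```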
adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "DavidLanz/Meta-Llama-3-8B-Instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
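
For reference, a `LoraConfig` mirroring the values above (a sketch for reproducing the fine-tune; the hyperparameters come straight from adapter_config.json, while everything else about the training run is unspecified here):

```python
from peft import LoraConfig

# Rank-64 LoRA with alpha 16 and dropout 0.1 on the attention
# query/value projections, as recorded in adapter_config.json.
lora_config = LoraConfig(
    r=64,
    lora_alpha=16,
    lora_dropout=0.1,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
```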
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83f4837c5d0ffaa6c6346d274d466394f32114230f5c0ef96ba5ef5b59d4eed5
+ size 109069176