Thouph committed
Commit 4edfe1c
1 Parent(s): 5994619

Upload 10 files
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+{
+  "<|beginoftext|>": 14914,
+  "[PAD]": 14915
+}
config.json ADDED
@@ -0,0 +1,128 @@
+{
+  "_commit_hash": null,
+  "_name_or_path": "D:\\CLIP-Git-GPT-E6-small",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GitForCausalLM"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "attn_pdrop": 0.1,
+  "bos_token_id": 14914,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 0,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "layer_norm_epsilon": 1e-05,
+  "max_position_embeddings": 1024,
+  "model_type": "git",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "num_attention_heads": 12,
+  "num_hidden_layers": 6,
+  "num_image_with_embedding": null,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "tie_word_embeddings": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.28.1",
+  "use_cache": true,
+  "vision_config": {
+    "_name_or_path": "",
+    "add_cross_attention": false,
+    "architectures": null,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": null,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "quick_gelu",
+    "hidden_size": 768,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "image_size": 224,
+    "initializer_range": 0.02,
+    "intermediate_size": 3072,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "git_vision_model",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 12,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_channels": 3,
+    "num_hidden_layers": 12,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": null,
+    "patch_size": 16,
+    "prefix": null,
+    "problem_type": null,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torch_dtype": null,
+    "torchscript": false,
+    "transformers_version": "4.27.3",
+    "typical_p": 1.0,
+    "use_bfloat16": false
+  },
+  "vocab_size": 14916
+}
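
For reference, a minimal sketch of loading this checkpoint with the transformers library, assuming the repository files sit in one local directory ("path/to/this/repo" is a placeholder, not part of this commit):

# Minimal loading sketch; the path is a placeholder for the local clone or Hub id.
from transformers import GitConfig, GitForCausalLM

config = GitConfig.from_pretrained("path/to/this/repo")
print(config.num_hidden_layers, config.hidden_size, config.vocab_size)  # 6 768 14916, per the config above

model = GitForCausalLM.from_pretrained("path/to/this/repo")  # resolves pytorch_model.bin against this config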
generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.27.3"
+}
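
Note that the bos/eos ids here (50256) are the GPT-2 defaults, while config.json above uses bos_token_id 14914 and eos_token_id 0; if generation needs the latter, the ids can be overridden at load time. A small sketch (placeholder path, overridden values are an assumption taken from config.json):

from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("path/to/this/repo")
gen.bos_token_id = 14914  # assumption: align with config.json / added_tokens.json
gen.eos_token_id = 0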
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
+{
+  "crop_size": {
+    "height": 224,
+    "width": 224
+  },
+  "do_center_crop": true,
+  "do_convert_rgb": true,
+  "do_normalize": true,
+  "do_rescale": true,
+  "do_resize": true,
+  "feature_extractor_type": "CLIPFeatureExtractor",
+  "image_mean": [
+    0.48145466,
+    0.4578275,
+    0.40821073
+  ],
+  "image_processor_type": "CLIPImageProcessor",
+  "image_std": [
+    0.26862954,
+    0.26130258,
+    0.27577711
+  ],
+  "processor_class": "GitProcessor",
+  "resample": 3,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "shortest_edge": 224
+  }
+}
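
A short sketch of the image preprocessing this config describes, assuming AutoProcessor can assemble the GitProcessor from the files in this repo (the repo path and example.png are placeholders):

from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("path/to/this/repo")  # placeholder path; builds a GitProcessor
image = Image.open("example.png").convert("RGB")                # example.png is a stand-in input image
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # expected torch.Size([1, 3, 224, 224]) given the 224x224 crop above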
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffbb9e0c5f1fdcdb3334769412ee67026be96465ca392c8a6f3517099e0a4bf8
+size 610649049
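
The weights are stored as a Git LFS pointer. After fetching with git lfs, the local file can be checked against the oid and size above; a sketch, assuming pytorch_model.bin sits in the working directory:

import hashlib, os

expected_oid = "ffbb9e0c5f1fdcdb3334769412ee67026be96465ca392c8a6f3517099e0a4bf8"
h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
print(h.hexdigest() == expected_oid)                      # True if the download is intact
print(os.path.getsize("pytorch_model.bin") == 610649049)  # size from the pointer above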
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<|beginoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": "[PAD]",
+  "unk_token": "<|endoftext|>"
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,10 @@
+{
+  "add_prefix_space": false,
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "model_max_length": 1024,
+  "processor_class": "GitProcessor",
+  "special_tokens_map_file": null,
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
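
A sketch of loading the tokenizer with these settings (placeholder path); vocab.json, merges.txt, tokenizer.json and the added-token files above are picked up automatically:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder for the local clone or Hub id
print(tok.convert_tokens_to_ids("<|beginoftext|>"))       # 14914, per added_tokens.json
print(tok.convert_tokens_to_ids("[PAD]"))                 # 14915
print(len(tok))                                           # expected 14916, matching config.vocab_size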
vocab.json ADDED
The diff for this file is too large to render. See raw diff