John Thickstun committed
Commit e206a88
1 Parent(s): 5643405
Files changed (3)
  1. README.md +12 -0
  2. config.json +1 -0
  3. model.safetensors +3 -0
README.md CHANGED
@@ -1,3 +1,15 @@
 ---
 license: apache-2.0
 ---
+
+This is a Large (780M parameter) Transformer trained for 800k steps on arrival-time encoded music from the [Lakh MIDI dataset](https://colinraffel.com/projects/lmd/), the [MetaMIDI dataset](https://github.com/jeffreyjohnens/MetaMIDIDataset), and transcripts of the [FMA audio dataset](https://github.com/mdeff/fma) and 450k commercial music records (transcribed using Google Magenta's [ISMIR 2022](https://ismir2022program.ismir.net/poster_287.html) music transcription model). This model was trained with anticipation.
+
+# References for the Anticipatory Music Transformer
+
+The Anticipatory Music Transformer paper is available on [arXiv](http://arxiv.org/abs/2306.08620).
+
+The full model card is available [here](https://johnthickstun.com/assets/pdf/music-modelcard.pdf).
+
+Code for using this model is available on [GitHub](https://github.com/jthickstun/anticipation/).
+
+See the accompanying [blog post](https://crfm.stanford.edu/2023/06/16/anticipatory-music-transformer.html) for additional discussion of anticipatory models.
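The README defers full usage to the anticipation GitHub repository. As a minimal sketch (the Hugging Face repository ID below is a placeholder, not named anywhere in this commit), the checkpoint can be loaded with the stock GPT-2 classes that the config.json added below declares:

```python
from transformers import GPT2LMHeadModel

# Placeholder repo ID: substitute the actual Hub ID of this model repository.
MODEL_ID = "<user>/<this-model>"

# config.json lists architectures=["GPT2LMHeadModel"], so the standard GPT-2 class applies.
model = GPT2LMHeadModel.from_pretrained(MODEL_ID)
model.eval()

# Note: the 55,030-token vocabulary encodes arrival-time music events, not text;
# inputs are token sequences produced by the anticipation library's MIDI encoders.
```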
config.json ADDED
@@ -0,0 +1 @@
+{"vocab_size": 55030, "n_positions": 1024, "n_embd": 1280, "n_layer": 36, "n_head": 20, "n_inner": null, "activation_function": "gelu_new", "resid_pdrop": 0.1, "embd_pdrop": 0.1, "attn_pdrop": 0.0, "layer_norm_epsilon": 1e-05, "initializer_range": 0.02, "summary_type": "cls_index", "summary_use_proj": true, "summary_activation": null, "summary_first_dropout": 0.1, "summary_proj_to_labels": true, "scale_attn_weights": true, "use_cache": true, "scale_attn_by_inverse_layer_idx": true, "reorder_and_upcast_attn": false, "bos_token_id": 50256, "eos_token_id": 50256, "return_dict": true, "output_hidden_states": false, "output_attentions": false, "torchscript": false, "torch_dtype": null, "use_bfloat16": false, "tf_legacy_loss": false, "pruned_heads": {}, "tie_word_embeddings": true, "is_encoder_decoder": false, "is_decoder": false, "cross_attention_hidden_size": null, "add_cross_attention": false, "tie_encoder_decoder": false, "max_length": 20, "min_length": 0, "do_sample": false, "early_stopping": false, "num_beams": 1, "num_beam_groups": 1, "diversity_penalty": 0.0, "temperature": 1.0, "top_k": 50, "top_p": 1.0, "typical_p": 1.0, "repetition_penalty": 1.0, "length_penalty": 1.0, "no_repeat_ngram_size": 0, "encoder_no_repeat_ngram_size": 0, "bad_words_ids": null, "num_return_sequences": 1, "chunk_size_feed_forward": 0, "output_scores": false, "return_dict_in_generate": false, "forced_bos_token_id": null, "forced_eos_token_id": null, "remove_invalid_values": false, "exponential_decay_length_penalty": null, "suppress_tokens": null, "begin_suppress_tokens": null, "architectures": ["GPT2LMHeadModel"], "finetuning_task": null, "id2label": {"0": "LABEL_0", "1": "LABEL_1"}, "label2id": {"LABEL_0": 0, "LABEL_1": 1}, "tokenizer_class": null, "prefix": null, "pad_token_id": null, "sep_token_id": null, "decoder_start_token_id": null, "task_specific_params": null, "problem_type": null, "_name_or_path": "", "transformers_version": "4.32.1", "model_type": "gpt2"}
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83fb8b9546eacce77a90bb10006b3f569ba342361dce046d078cf2429975e09f
+size 3120598456
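What the commit stores here is a Git LFS pointer rather than the weights themselves: the spec version, the SHA-256 object ID, and the object size in bytes (about 3.1 GB). A hedged sketch for inspecting the real weight file once it has been fetched (e.g. via `git lfs pull`, or transparently by `from_pretrained`):

```python
from safetensors.torch import load_file

# Hypothetical local path after the LFS object has been downloaded.
state_dict = load_file("model.safetensors")

# Each entry is a named weight tensor of the GPT-2 style model described in config.json.
print(len(state_dict), "tensors")
for name, tensor in list(state_dict.items())[:5]:
    print(name, tuple(tensor.shape))
```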