{
    "model_card": {
        "Model Card": [
            "https://huggingface.co/google/t5-v1_1-base"
        ],
        "License Information": [
            "apache-2.0"
        ],
        "Citation Information": [
            "\n@inproceedings{Wolf_Transformers_State-of-the-Art_Natural_2020,\n  author = {Wolf, Thomas and Debut, Lysandre and Sanh, Victor and Chaumond, Julien",
            "\n@Misc{peft,\n  title =        {PEFT: State-of-the-art Parameter-Efficient Fine-Tuning methods},\n  author =       {Sourab Mangrulkar and Sylvain Gugger and Lysandre Debut and Younes"
        ]
    },
    "data_card": {
        "Generate Research Paper Abstracts": {
            "Model Name": [
                "gpt-4"
            ],
            "Model Card": [
                "https://cdn.openai.com/papers/gpt-4-system-card.pdf"
            ],
            "License Information": [
                "https://openai.com/policies"
            ],
            "Citation Information": [
                "@article{OpenAI2023GPT4TR,\n  title={GPT-4 Technical Report},\n  author={OpenAI},\n  journal={ArXiv},\n  year={2023},\n  volume={abs/2303.08774},\n  url={https://api.semanticscholar.org/CorpusID:257532815}\n}",
                "@article{ouyang2022training,\n  title={Training language models to follow instructions with human feedback},\n  author={Ouyang, Long and Wu, Jeffrey and Jiang, Xu and Almeida, Diogo and Wainwright, Carroll and Mishkin, Pamela and Zhang, Chong and Agarwal, Sandhini and Slama, Katarina and Ray, Alex and others},\n  journal={Advances in Neural Information Processing Systems},\n  volume={35},\n  pages={27730--27744},\n  year={2022}\n}"
            ]
        },
        "Generate Tweets from Abstracts": {
            "Model Name": [
                "gpt-4"
            ],
            "Model Card": [
                "https://cdn.openai.com/papers/gpt-4-system-card.pdf"
            ],
            "License Information": [
                "https://openai.com/policies"
            ],
            "Citation Information": [
                "@article{OpenAI2023GPT4TR,\n  title={GPT-4 Technical Report},\n  author={OpenAI},\n  journal={ArXiv},\n  year={2023},\n  volume={abs/2303.08774},\n  url={https://api.semanticscholar.org/CorpusID:257532815}\n}",
                "@article{ouyang2022training,\n  title={Training language models to follow instructions with human feedback},\n  author={Ouyang, Long and Wu, Jeffrey and Jiang, Xu and Almeida, Diogo and Wainwright, Carroll and Mishkin, Pamela and Zhang, Chong and Agarwal, Sandhini and Slama, Katarina and Ray, Alex and others},\n  journal={Advances in Neural Information Processing Systems},\n  volume={35},\n  pages={27730--27744},\n  year={2022}\n}"
            ]
        }
    },
    "__version__": "0.1.0",
    "datetime": "2024-01-18T18:59:52.071528",
    "type": "TrainHFFineTune",
    "name": "Train an Abstract => Tweet Model",
    "version": 1.0,
    "fingerprint": "3e30bb93dbb5e421",
    "req_versions": {
        "dill": "0.3.7",
        "sqlitedict": "2.1.0",
        "torch": "2.1.2",
        "numpy": "1.26.3",
        "transformers": "4.36.2",
        "datasets": "2.16.1",
        "huggingface_hub": "0.20.2",
        "accelerate": "0.26.1",
        "peft": "0.7.1",
        "tiktoken": "0.5.2",
        "tokenizers": "0.15.0",
        "openai": "1.8.0",
        "ctransformers": "0.2.27",
        "optimum": "1.16.1",
        "bitsandbytes": "0.42.0",
        "litellm": "1.15.3",
        "trl": "0.7.6",
        "setfit": "1.0.3"
    },
    "interpreter": "3.10.13 (main, Sep 11 2023, 13:44:35) [GCC 11.2.0]"
}