---
language:
  - en
license: apache-2.0
tags:
  - maths
  - gpt2
  - mathgpt2
datasets:
  - meta-math/MetaMathQA
  - ArtifactAI/arxiv-math-instruct-50k
pipeline_tag: text-generation
widget:
  - text: Which motion is formed by an incident particle?
    example_title: Example 1
  - text: What type of diffusional modeling is used for diffusion?
    example_title: Example 2
---

This model is a fine-tuned version of [Sharathhebbar24/math_gpt2](https://huggingface.co/Sharathhebbar24/math_gpt2), further trained on the [meta-math/MetaMathQA](https://huggingface.co/datasets/meta-math/MetaMathQA) dataset.

## Model description

GPT-2 is a transformer model pre-trained on a very large corpus of English data in a self-supervised fashion. This means it was pre-trained on raw text only, with no human labeling of any kind (which is why it can use lots of publicly available data), using an automatic process to generate inputs and labels from those texts. Concretely, it was trained to guess the next word in sentences.

More precisely, inputs are sequences of continuous text of a certain length, and the targets are the same sequence shifted one token (a word or piece of a word) to the right. The model uses a causal masking mechanism to make sure the prediction for token `i` only uses the inputs from `1` to `i` and never the future tokens.
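The snippet below is a minimal illustration of this setup (not taken from the training code): when `labels` are set to the input ids, the `transformers` library shifts them internally so each position is scored on predicting the next token. The base `gpt2` checkpoint is used here purely as an example.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Illustrative only: inputs and labels are the same token sequence;
# the library shifts the labels by one position internally, so the
# logits at position i are scored against the token at position i+1.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

enc = tokenizer("The derivative of x^2 is 2x.", return_tensors="pt")
out = model(input_ids=enc["input_ids"], labels=enc["input_ids"])
print(out.loss)  # next-token cross-entropy loss
```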

This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks. The model is, however, best at what it was trained for: generating text from a prompt.

## To use this model

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "Sharathhebbar24/math_gpt2_sft"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

def generate_text(prompt):
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    outputs = model.generate(inputs, max_length=64, pad_token_id=tokenizer.eos_token_id)
    generated = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Trim the output at the last full stop so it ends on a complete sentence.
    return generated[: generated.rfind(".") + 1]

prompt = "Gracie and Joe are choosing numbers on the complex plane. Joe chooses the point $1+2i$. Gracie chooses $-1+i$. How far apart are Gracie and Joe's points?"
res = generate_text(prompt)
print(res)
```
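For reference, the expected answer to the example prompt (worked out here, not produced by the model) is the modulus of the difference between the two points:

$$
|(1+2i) - (-1+i)| = |2 + i| = \sqrt{2^2 + 1^2} = \sqrt{5}
$$

so a good completion should arrive at $\sqrt{5}$.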

## Benchmark / Evaluation

| Model | Average | ARC | HellaSwag | MMLU | TruthfulQA | Winogrande | GSM8K |
|---|---|---|---|---|---|---|---|
| Sharathhebbar24/math_gpt2_sft | 28.503 | 22.87 | 30.41 | 25.06 | 37.62 | 51.54 | 0.68 |
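These scores follow the Open LLM Leaderboard task set. As a rough reproduction sketch (an assumption about tooling and versions, not the exact command used for this card), individual tasks can be re-run with the EleutherAI lm-evaluation-harness Python API:

```python
# Assumes lm-evaluation-harness >= 0.4 (pip install lm_eval); the task name,
# few-shot count, and batch size below are illustrative choices.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=Sharathhebbar24/math_gpt2_sft",
    tasks=["gsm8k"],   # e.g. also arc_challenge, hellaswag, winogrande
    num_fewshot=5,
    batch_size=8,
)
print(results["results"]["gsm8k"])
```

The full per-task harness output is reproduced below.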
```json
{
    "all": {
        "acc": 0.25082189621988066,
        "acc_stderr": 0.030526589726831692,
        "acc_norm": 0.25112870356236633,
        "acc_norm_stderr": 0.03129390389566968,
        "mc1": 0.24112607099143207,
        "mc1_stderr": 0.014974827279752334,
        "mc2": 0.3762297840067963,
        "mc2_stderr": 0.01445991036363257
    },
    "harness|arc:challenge|25": {
        "acc": 0.20563139931740615,
        "acc_stderr": 0.01181074526074258,
        "acc_norm": 0.22866894197952217,
        "acc_norm_stderr": 0.012272853582540799
    },
    "harness|hellaswag|10": {
        "acc": 0.2884883489344752,
        "acc_stderr": 0.004521334761709224,
        "acc_norm": 0.30412268472415854,
        "acc_norm_stderr": 0.00459094683972719
    },
    "harness|hendrycksTest-abstract_algebra|5": {
        "acc": 0.19,
        "acc_stderr": 0.03942772444036625,
        "acc_norm": 0.19,
        "acc_norm_stderr": 0.03942772444036625
    },
    "harness|hendrycksTest-anatomy|5": {
        "acc": 0.2074074074074074,
        "acc_stderr": 0.03502553170678319,
        "acc_norm": 0.2074074074074074,
        "acc_norm_stderr": 0.03502553170678319
    },
    "harness|hendrycksTest-astronomy|5": {
        "acc": 0.17763157894736842,
        "acc_stderr": 0.031103182383123398,
        "acc_norm": 0.17763157894736842,
        "acc_norm_stderr": 0.031103182383123398
    },
    "harness|hendrycksTest-business_ethics|5": {
        "acc": 0.19,
        "acc_stderr": 0.03942772444036622,
        "acc_norm": 0.19,
        "acc_norm_stderr": 0.03942772444036622
    },
    "harness|hendrycksTest-clinical_knowledge|5": {
        "acc": 0.2188679245283019,
        "acc_stderr": 0.025447863825108618,
        "acc_norm": 0.2188679245283019,
        "acc_norm_stderr": 0.025447863825108618
    },
    "harness|hendrycksTest-college_biology|5": {
        "acc": 0.25,
        "acc_stderr": 0.03621034121889507,
        "acc_norm": 0.25,
        "acc_norm_stderr": 0.03621034121889507
    },
    "harness|hendrycksTest-college_chemistry|5": {
        "acc": 0.2,
        "acc_stderr": 0.04020151261036845,
        "acc_norm": 0.2,
        "acc_norm_stderr": 0.04020151261036845
    },
    "harness|hendrycksTest-college_computer_science|5": {
        "acc": 0.32,
        "acc_stderr": 0.046882617226215034,
        "acc_norm": 0.32,
        "acc_norm_stderr": 0.046882617226215034
    },
    "harness|hendrycksTest-college_mathematics|5": {
        "acc": 0.24,
        "acc_stderr": 0.042923469599092816,
        "acc_norm": 0.24,
        "acc_norm_stderr": 0.042923469599092816
    },
    "harness|hendrycksTest-college_medicine|5": {
        "acc": 0.21965317919075145,
        "acc_stderr": 0.031568093627031744,
        "acc_norm": 0.21965317919075145,
        "acc_norm_stderr": 0.031568093627031744
    },
    "harness|hendrycksTest-college_physics|5": {
        "acc": 0.23529411764705882,
        "acc_stderr": 0.04220773659171453,
        "acc_norm": 0.23529411764705882,
        "acc_norm_stderr": 0.04220773659171453
    },
    "harness|hendrycksTest-computer_security|5": {
        "acc": 0.23,
        "acc_stderr": 0.04229525846816505,
        "acc_norm": 0.23,
        "acc_norm_stderr": 0.04229525846816505
    },
    "harness|hendrycksTest-conceptual_physics|5": {
        "acc": 0.2680851063829787,
        "acc_stderr": 0.028957342788342347,
        "acc_norm": 0.2680851063829787,
        "acc_norm_stderr": 0.028957342788342347
    },
    "harness|hendrycksTest-econometrics|5": {
        "acc": 0.24561403508771928,
        "acc_stderr": 0.040493392977481404,
        "acc_norm": 0.24561403508771928,
        "acc_norm_stderr": 0.040493392977481404
    },
    "harness|hendrycksTest-electrical_engineering|5": {
        "acc": 0.2482758620689655,
        "acc_stderr": 0.036001056927277716,
        "acc_norm": 0.2482758620689655,
        "acc_norm_stderr": 0.036001056927277716
    },
    "harness|hendrycksTest-elementary_mathematics|5": {
        "acc": 0.24074074074074073,
        "acc_stderr": 0.0220190800122179,
        "acc_norm": 0.24074074074074073,
        "acc_norm_stderr": 0.0220190800122179
    },
    "harness|hendrycksTest-formal_logic|5": {
        "acc": 0.23015873015873015,
        "acc_stderr": 0.03764950879790605,
        "acc_norm": 0.23015873015873015,
        "acc_norm_stderr": 0.03764950879790605
    },
    "harness|hendrycksTest-global_facts|5": {
        "acc": 0.18,
        "acc_stderr": 0.038612291966536934,
        "acc_norm": 0.18,
        "acc_norm_stderr": 0.038612291966536934
    },
    "harness|hendrycksTest-high_school_biology|5": {
        "acc": 0.25483870967741934,
        "acc_stderr": 0.024790118459332208,
        "acc_norm": 0.25483870967741934,
        "acc_norm_stderr": 0.024790118459332208
    },
    "harness|hendrycksTest-high_school_chemistry|5": {
        "acc": 0.19704433497536947,
        "acc_stderr": 0.02798672466673622,
        "acc_norm": 0.19704433497536947,
        "acc_norm_stderr": 0.02798672466673622
    },
    "harness|hendrycksTest-high_school_computer_science|5": {
        "acc": 0.22,
        "acc_stderr": 0.041633319989322695,
        "acc_norm": 0.22,
        "acc_norm_stderr": 0.041633319989322695
    },
    "harness|hendrycksTest-high_school_european_history|5": {
        "acc": 0.19393939393939394,
        "acc_stderr": 0.0308741451365621,
        "acc_norm": 0.19393939393939394,
        "acc_norm_stderr": 0.0308741451365621
    },
    "harness|hendrycksTest-high_school_geography|5": {
        "acc": 0.3484848484848485,
        "acc_stderr": 0.033948539651564025,
        "acc_norm": 0.3484848484848485,
        "acc_norm_stderr": 0.033948539651564025
    },
    "harness|hendrycksTest-high_school_government_and_politics|5": {
        "acc": 0.32124352331606215,
        "acc_stderr": 0.033699508685490674,
        "acc_norm": 0.32124352331606215,
        "acc_norm_stderr": 0.033699508685490674
    },
    "harness|hendrycksTest-high_school_macroeconomics|5": {
        "acc": 0.23333333333333334,
        "acc_stderr": 0.021444547301560476,
        "acc_norm": 0.23333333333333334,
        "acc_norm_stderr": 0.021444547301560476
    },
    "harness|hendrycksTest-high_school_mathematics|5": {
        "acc": 0.2851851851851852,
        "acc_stderr": 0.027528599210340492,
        "acc_norm": 0.2851851851851852,
        "acc_norm_stderr": 0.027528599210340492
    },
    "harness|hendrycksTest-high_school_microeconomics|5": {
        "acc": 0.29831932773109243,
        "acc_stderr": 0.029719142876342856,
        "acc_norm": 0.29831932773109243,
        "acc_norm_stderr": 0.029719142876342856
    },
    "harness|hendrycksTest-high_school_physics|5": {
        "acc": 0.2781456953642384,
        "acc_stderr": 0.03658603262763744,
        "acc_norm": 0.2781456953642384,
        "acc_norm_stderr": 0.03658603262763744
    },
    "harness|hendrycksTest-high_school_psychology|5": {
        "acc": 0.26788990825688075,
        "acc_stderr": 0.018987462257978652,
        "acc_norm": 0.26788990825688075,
        "acc_norm_stderr": 0.018987462257978652
    },
    "harness|hendrycksTest-high_school_statistics|5": {
        "acc": 0.4351851851851852,
        "acc_stderr": 0.03381200005643525,
        "acc_norm": 0.4351851851851852,
        "acc_norm_stderr": 0.03381200005643525
    },
    "harness|hendrycksTest-high_school_us_history|5": {
        "acc": 0.2647058823529412,
        "acc_stderr": 0.0309645179269234,
        "acc_norm": 0.2647058823529412,
        "acc_norm_stderr": 0.0309645179269234
    },
    "harness|hendrycksTest-high_school_world_history|5": {
        "acc": 0.28270042194092826,
        "acc_stderr": 0.029312814153955927,
        "acc_norm": 0.28270042194092826,
        "acc_norm_stderr": 0.029312814153955927
    },
    "harness|hendrycksTest-human_aging|5": {
        "acc": 0.31390134529147984,
        "acc_stderr": 0.031146796482972465,
        "acc_norm": 0.31390134529147984,
        "acc_norm_stderr": 0.031146796482972465
    },
    "harness|hendrycksTest-human_sexuality|5": {
        "acc": 0.2595419847328244,
        "acc_stderr": 0.03844876139785271,
        "acc_norm": 0.2595419847328244,
        "acc_norm_stderr": 0.03844876139785271
    },
    "harness|hendrycksTest-international_law|5": {
        "acc": 0.2231404958677686,
        "acc_stderr": 0.03800754475228733,
        "acc_norm": 0.2231404958677686,
        "acc_norm_stderr": 0.03800754475228733
    },
    "harness|hendrycksTest-jurisprudence|5": {
        "acc": 0.25925925925925924,
        "acc_stderr": 0.042365112580946336,
        "acc_norm": 0.25925925925925924,
        "acc_norm_stderr": 0.042365112580946336
    },
    "harness|hendrycksTest-logical_fallacies|5": {
        "acc": 0.25153374233128833,
        "acc_stderr": 0.03408997886857529,
        "acc_norm": 0.25153374233128833,
        "acc_norm_stderr": 0.03408997886857529
    },
    "harness|hendrycksTest-machine_learning|5": {
        "acc": 0.29464285714285715,
        "acc_stderr": 0.043270409325787296,
        "acc_norm": 0.29464285714285715,
        "acc_norm_stderr": 0.043270409325787296
    },
    "harness|hendrycksTest-management|5": {
        "acc": 0.17475728155339806,
        "acc_stderr": 0.037601780060266224,
        "acc_norm": 0.17475728155339806,
        "acc_norm_stderr": 0.037601780060266224
    },
    "harness|hendrycksTest-marketing|5": {
        "acc": 0.20085470085470086,
        "acc_stderr": 0.026246772946890488,
        "acc_norm": 0.20085470085470086,
        "acc_norm_stderr": 0.026246772946890488
    },
    "harness|hendrycksTest-medical_genetics|5": {
        "acc": 0.3,
        "acc_stderr": 0.046056618647183814,
        "acc_norm": 0.3,
        "acc_norm_stderr": 0.046056618647183814
    },
    "harness|hendrycksTest-miscellaneous|5": {
        "acc": 0.23499361430395913,
        "acc_stderr": 0.01516202415227844,
        "acc_norm": 0.23499361430395913,
        "acc_norm_stderr": 0.01516202415227844
    },
    "harness|hendrycksTest-moral_disputes|5": {
        "acc": 0.23699421965317918,
        "acc_stderr": 0.02289408248992599,
        "acc_norm": 0.23699421965317918,
        "acc_norm_stderr": 0.02289408248992599
    },
    "harness|hendrycksTest-moral_scenarios|5": {
        "acc": 0.23798882681564246,
        "acc_stderr": 0.014242630070574915,
        "acc_norm": 0.23798882681564246,
        "acc_norm_stderr": 0.014242630070574915
    },
    "harness|hendrycksTest-nutrition|5": {
        "acc": 0.23202614379084968,
        "acc_stderr": 0.024170840879341005,
        "acc_norm": 0.23202614379084968,
        "acc_norm_stderr": 0.024170840879341005
    },
    "harness|hendrycksTest-philosophy|5": {
        "acc": 0.1864951768488746,
        "acc_stderr": 0.02212243977248077,
        "acc_norm": 0.1864951768488746,
        "acc_norm_stderr": 0.02212243977248077
    },
    "harness|hendrycksTest-prehistory|5": {
        "acc": 0.24074074074074073,
        "acc_stderr": 0.02378858355165854,
        "acc_norm": 0.24074074074074073,
        "acc_norm_stderr": 0.02378858355165854
    },
    "harness|hendrycksTest-professional_accounting|5": {
        "acc": 0.2695035460992908,
        "acc_stderr": 0.026469036818590627,
        "acc_norm": 0.2695035460992908,
        "acc_norm_stderr": 0.026469036818590627
    },
    "harness|hendrycksTest-professional_law|5": {
        "acc": 0.2529335071707953,
        "acc_stderr": 0.011102268713839989,
        "acc_norm": 0.2529335071707953,
        "acc_norm_stderr": 0.011102268713839989
    },
    "harness|hendrycksTest-professional_medicine|5": {
        "acc": 0.4411764705882353,
        "acc_stderr": 0.030161911930767102,
        "acc_norm": 0.4411764705882353,
        "acc_norm_stderr": 0.030161911930767102
    },
    "harness|hendrycksTest-professional_psychology|5": {
        "acc": 0.25,
        "acc_stderr": 0.01751781884501444,
        "acc_norm": 0.25,
        "acc_norm_stderr": 0.01751781884501444
    },
    "harness|hendrycksTest-public_relations|5": {
        "acc": 0.21818181818181817,
        "acc_stderr": 0.03955932861795833,
        "acc_norm": 0.21818181818181817,
        "acc_norm_stderr": 0.03955932861795833
    },
    "harness|hendrycksTest-security_studies|5": {
        "acc": 0.20408163265306123,
        "acc_stderr": 0.025801283475090506,
        "acc_norm": 0.20408163265306123,
        "acc_norm_stderr": 0.025801283475090506
    },
    "harness|hendrycksTest-sociology|5": {
        "acc": 0.24378109452736318,
        "acc_stderr": 0.03036049015401465,
        "acc_norm": 0.24378109452736318,
        "acc_norm_stderr": 0.03036049015401465
    },
    "harness|hendrycksTest-us_foreign_policy|5": {
        "acc": 0.24,
        "acc_stderr": 0.04292346959909281,
        "acc_norm": 0.24,
        "acc_norm_stderr": 0.04292346959909281
    },
    "harness|hendrycksTest-virology|5": {
        "acc": 0.22289156626506024,
        "acc_stderr": 0.03240004825594687,
        "acc_norm": 0.22289156626506024,
        "acc_norm_stderr": 0.03240004825594687
    },
    "harness|hendrycksTest-world_religions|5": {
        "acc": 0.3216374269005848,
        "acc_stderr": 0.03582529442573122,
        "acc_norm": 0.3216374269005848,
        "acc_norm_stderr": 0.03582529442573122
    },
    "harness|truthfulqa:mc|0": {
        "mc1": 0.24112607099143207,
        "mc1_stderr": 0.014974827279752334,
        "mc2": 0.3762297840067963,
        "mc2_stderr": 0.01445991036363257
    },
    "harness|winogrande|5": {
        "acc": 0.5153906866614049,
        "acc_stderr": 0.014045826789783668
    },
    "harness|gsm8k|5": {
        "acc": 0.006823351023502654,
        "acc_stderr": 0.0022675371022544823
    }
}
```