{
"_name_or_path": "sileod/deberta-v3-base-tasksource-nli",
"architectures": [
"DebertaV2ForSequenceClassification"
],
"attention_probs_dropout_prob": 0.1,
"classifiers_size": [
3,
2,
2,
2,
2,
2,
1,
2,
3,
2,
2,
2,
3,
3,
3,
3,
1,
3,
3,
2,
2,
3,
2,
6,
2,
2,
2,
2,
2,
2,
2,
2,
2,
3,
3,
3,
3,
3,
3,
3,
2,
2,
2,
2,
5,
3,
3,
3,
3,
3,
3,
3,
3,
2,
2,
2,
3,
3,
3,
3,
3,
3,
3,
3,
2,
2,
2,
2,
47,
23,
9,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
20,
50,
3,
3,
4,
2,
8,
3,
2,
2,
2,
4,
20,
3,
3,
3,
3,
3,
174,
2,
2,
41,
2,
2,
51,
2,
3,
2,
2,
2,
3,
16,
2,
18,
8,
2,
17,
3,
2,
4,
7,
12,
7,
3,
3,
42,
11,
100,
13,
100,
8,
1,
20,
2,
2,
4,
5,
3,
4,
14,
2,
6,
4,
2,
1,
3,
10,
3,
10,
4,
2,
7,
6,
28,
3,
6,
3,
6,
5,
7,
4,
2,
2,
2,
6,
2,
2,
7,
20,
2,
9,
2,
3,
13,
2,
3,
2,
4,
4,
2,
2,
2,
2,
4,
1,
2,
1,
13,
3,
5,
11,
37,
2,
49,
40,
10,
4,
1,
2,
2,
1,
5,
2,
3,
2,
2,
12,
3,
3,
2,
19,
3,
1,
2,
2,
2,
2,
2,
1,
2,
2,
1,
1,
2,
3,
2,
1,
4,
3,
1,
1,
1,
2,
3,
2,
3,
1,
1,
2,
1,
3,
2,
2,
2,
2,
2,
3,
2,
2,
2,
1,
3,
2,
2,
1,
1,
1,
1,
2,
1,
1,
1,
1,
4,
1,
1,
1,
1,
3,
1,
3,
1,
2,
2,
1,
2,
3,
3,
2,
1,
3,
1,
1,
3,
1,
3,
2,
1,
1,
1,
2,
2,
50,
50,
50,
50,
2,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
77,
2,
1,
3,
2,
2,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
3,
18,
13,
2,
2,
2,
2,
2,
2,
4,
2,
24,
23,
67,
279,
3,
2,
2,
1,
2,
2,
3,
1,
2,
3,
2,
3,
3,
2,
2,
4,
1,
17,
3,
2,
3,
2,
3,
3,
2,
1,
1,
3,
2,
2,
3,
3,
3,
1,
1
],
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"id2label": {
"0": "multi",
"1": "simple",
"2": "compare"
},
"initializer_range": 0.02,
"intermediate_size": 3072,
"label2id": {
"compare": 2,
"multi": 0,
"simple": 1
},
"layer_norm_eps": 1e-07,
"max_position_embeddings": 512,
"max_relative_positions": -1,
"model_type": "deberta-v2",
"norm_rel_ebd": "layer_norm",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 0,
"pooler_dropout": 0,
"pooler_hidden_act": "gelu",
"pooler_hidden_size": 768,
"pos_att_type": [
"p2c",
"c2p"
],
"position_biased_input": false,
"position_buckets": 256,
"relative_attention": true,
"share_att_key": true,
"tasks": [
"glue/mnli",
"glue/qnli",
"glue/rte",
"glue/wnli",
"glue/mrpc",
"glue/qqp",
"glue/stsb",
"super_glue/boolq",
"super_glue/cb",
"super_glue/multirc",
"super_glue/wic",
"super_glue/axg",
"anli/a1",
"anli/a2",
"anli/a3",
"sick/label",
"sick/relatedness",
"sick/entailment_AB",
"snli",
"scitail/snli_format",
"hans",
"WANLI",
"recast/recast_verbnet",
"recast/recast_kg_relations",
"recast/recast_ner",
"recast/recast_factuality",
"recast/recast_puns",
"recast/recast_megaveridicality",
"recast/recast_sentiment",
"recast/recast_verbcorner",
"probability_words_nli/usnli",
"probability_words_nli/reasoning_1hop",
"probability_words_nli/reasoning_2hop",
"nan-nli/joey234--nan-nli",
"nli_fever",
"breaking_nli",
"conj_nli",
"fracas",
"dialogue_nli",
"mpe",
"dnc",
"recast_white/fnplus",
"recast_white/sprl",
"recast_white/dpr",
"joci",
"robust_nli/IS_CS",
"robust_nli/LI_LI",
"robust_nli/ST_WO",
"robust_nli/PI_SP",
"robust_nli/PI_CD",
"robust_nli/ST_SE",
"robust_nli/ST_NE",
"robust_nli/ST_LM",
"robust_nli_is_sd",
"robust_nli_li_ts",
"add_one_rte",
"imppres/implicature_numerals_10_100/log",
"imppres/implicature_connectives/log",
"imppres/implicature_modals/log",
"imppres/implicature_gradable_verb/log",
"imppres/implicature_gradable_adjective/log",
"imppres/implicature_numerals_2_3/log",
"imppres/implicature_quantifiers/log",
"glue_diagnostics/diagnostics",
"hlgd",
"paws/labeled_final",
"paws/labeled_swap",
"medical_questions_pairs",
"conll2003/pos_tags",
"conll2003/chunk_tags",
"conll2003/ner_tags",
"hh-rlhf",
"model-written-evals",
"truthful_qa/multiple_choice",
"fig-qa",
"bigbench/physical_intuition",
"bigbench/authorship_verification",
"bigbench/implicit_relations",
"bigbench/dyck_languages",
"bigbench/novel_concepts",
"bigbench/moral_permissibility",
"bigbench/metaphor_understanding",
"bigbench/temporal_sequences",
"bigbench/sports_understanding",
"bigbench/analytic_entailment",
"bigbench/social_support",
"bigbench/emoji_movie",
"bigbench/dark_humor_detection",
"bigbench/suicide_risk",
"bigbench/fact_checker",
"bigbench/hhh_alignment",
"bigbench/formal_fallacies_syllogisms_negation",
"bigbench/bbq_lite_json",
"bigbench/cause_and_effect",
"bigbench/logic_grid_puzzle",
"bigbench/empirical_judgments",
"bigbench/human_organs_senses",
"bigbench/misconceptions",
"bigbench/strange_stories",
"bigbench/logical_args",
"bigbench/known_unknowns",
"bigbench/cs_algorithms",
"bigbench/emojis_emotion_prediction",
"bigbench/cifar10_classification",
"bigbench/penguins_in_a_table",
"bigbench/odd_one_out",
"bigbench/intent_recognition",
"bigbench/physics",
"bigbench/conceptual_combinations",
"bigbench/logical_deduction",
"bigbench/causal_judgment",
"bigbench/winowhy",
"bigbench/arithmetic",
"bigbench/undo_permutation",
"bigbench/analogical_similarity",
"bigbench/social_iqa",
"bigbench/key_value_maps",
"bigbench/implicatures",
"bigbench/real_or_fake_text",
"bigbench/disambiguation_qa",
"bigbench/similarities_abstraction",
"bigbench/movie_dialog_same_or_different",
"bigbench/english_proverbs",
"bigbench/presuppositions_as_nli",
"bigbench/entailed_polarity",
"bigbench/snarks",
"bigbench/goal_step_wikihow",
"bigbench/crass_ai",
"bigbench/play_dialog_same_or_different",
"bigbench/hindu_knowledge",
"bigbench/international_phonetic_alphabet_nli",
"bigbench/understanding_fables",
"bigbench/geometric_shapes",
"bigbench/code_line_description",
"bigbench/riddle_sense",
"bigbench/symbol_interpretation",
"bigbench/irony_identification",
"bigbench/anachronisms",
"bigbench/navigate",
"bigbench/crash_blossom",
"bigbench/identify_odd_metaphor",
"bigbench/simple_ethical_questions",
"bigbench/contextual_parametric_knowledge_conflicts",
"bigbench/date_understanding",
"bigbench/figure_of_speech_detection",
"bigbench/question_selection",
"bigbench/elementary_math_qa",
"bigbench/nonsense_words_grammar",
"bigbench/salient_translation_error_detection",
"bigbench/epistemic_reasoning",
"bigbench/movie_recommendation",
"bigbench/strategyqa",
"bigbench/tracking_shuffled_objects",
"bigbench/unit_interpretation",
"bigbench/reasoning_about_colored_objects",
"bigbench/discourse_marker_prediction",
"bigbench/logical_fallacy_detection",
"bigbench/general_knowledge",
"bigbench/abstract_narrative_understanding",
"bigbench/color",
"bigbench/hyperbaton",
"bigbench/logical_sequence",
"bigbench/mnist_ascii",
"bigbench/fantasy_reasoning",
"bigbench/mathematical_induction",
"bigbench/timedial",
"bigbench/identify_math_theorems",
"bigbench/checkmate_in_one",
"bigbench/phrase_relatedness",
"bigbench/ruin_names",
"bigbench/gre_reading_comprehension",
"bigbench/metaphor_boolean",
"bigbench/sentence_ambiguity",
"bigbench/vitaminc_fact_verification",
"bigbench/evaluating_information_essentiality",
"cos_e/v1.0",
"cosmos_qa",
"dream",
"openbookqa",
"qasc",
"quartz",
"quail",
"head_qa/en",
"sciq",
"social_i_qa",
"wiki_hop/original",
"wiqa",
"piqa",
"hellaswag",
"super_glue/copa",
"balanced-copa",
"e-CARE",
"art",
"winogrande/winogrande_xl",
"codah/codah",
"ai2_arc/ARC-Challenge/challenge",
"ai2_arc/ARC-Easy/challenge",
"definite_pronoun_resolution",
"swag/regular",
"math_qa",
"glue/cola",
"glue/sst2",
"utilitarianism",
"amazon_counterfactual/en",
"insincere-questions",
"toxic_conversations",
"TuringBench",
"trec",
"vitaminc/tals--vitaminc",
"hope_edi/english",
"rumoureval_2019/RumourEval2019",
"ethos/binary",
"ethos/multilabel",
"tweet_eval/sentiment",
"tweet_eval/irony",
"tweet_eval/offensive",
"tweet_eval/hate",
"tweet_eval/emotion",
"tweet_eval/emoji",
"tweet_eval/stance_abortion",
"tweet_eval/stance_atheism",
"tweet_eval/stance_climate",
"tweet_eval/stance_feminist",
"tweet_eval/stance_hillary",
"discovery/discovery",
"pragmeval/squinky-informativeness",
"pragmeval/emobank-arousal",
"pragmeval/switchboard",
"pragmeval/squinky-implicature",
"pragmeval/emobank-valence",
"pragmeval/mrda",
"pragmeval/squinky-formality",
"pragmeval/verifiability",
"pragmeval/emobank-dominance",
"pragmeval/persuasiveness-specificity",
"pragmeval/persuasiveness-strength",
"pragmeval/persuasiveness-claimtype",
"pragmeval/pdtb",
"pragmeval/sarcasm",
"pragmeval/stac",
"pragmeval/persuasiveness-premisetype",
"pragmeval/persuasiveness-eloquence",
"pragmeval/gum",
"pragmeval/emergent",
"pragmeval/persuasiveness-relevance",
"silicone/dyda_da",
"silicone/dyda_e",
"silicone/maptask",
"silicone/meld_e",
"silicone/meld_s",
"silicone/sem",
"silicone/oasis",
"silicone/iemocap",
"lex_glue/eurlex",
"lex_glue/scotus",
"lex_glue/ledgar",
"lex_glue/unfair_tos",
"lex_glue/case_hold",
"language-identification",
"imdb",
"rotten_tomatoes",
"ag_news",
"yelp_review_full/yelp_review_full",
"financial_phrasebank/sentences_allagree",
"poem_sentiment",
"dbpedia_14/dbpedia_14",
"amazon_polarity/amazon_polarity",
"app_reviews",
"hate_speech18",
"sms_spam",
"humicroedit/subtask-1",
"humicroedit/subtask-2",
"snips_built_in_intents",
"hate_speech_offensive",
"yahoo_answers_topics",
"stackoverflow-questions",
"hyperpartisan_news",
"sciie",
"citation_intent",
"go_emotions/simplified",
"scicite",
"liar",
"lexical_relation_classification/ROOT09",
"lexical_relation_classification/BLESS",
"lexical_relation_classification/CogALexV",
"lexical_relation_classification/EVALution",
"lexical_relation_classification/K&H+N",
"linguisticprobing/coordination_inversion",
"linguisticprobing/obj_number",
"linguisticprobing/past_present",
"linguisticprobing/sentence_length",
"linguisticprobing/subj_number",
"linguisticprobing/odd_man_out",
"linguisticprobing/tree_depth",
"linguisticprobing/top_constituents",
"linguisticprobing/bigram_shift",
"crowdflower/political-media-message",
"crowdflower/political-media-audience",
"crowdflower/economic-news",
"crowdflower/text_emotion",
"crowdflower/political-media-bias",
"crowdflower/airline-sentiment",
"crowdflower/tweet_global_warming",
"crowdflower/corporate-messaging",
"crowdflower/sentiment_nuclear_power",
"ethics/commonsense",
"ethics/deontology",
"ethics/justice",
"ethics/virtue",
"emo/emo2019",
"google_wellformed_query",
"tweets_hate_speech_detection",
"has_part",
"wnut_17/wnut_17",
"ncbi_disease/ncbi_disease",
"acronym_identification",
"jnlpba/jnlpba",
"ontonotes_english/SpeedOfMagic--ontonotes_english",
"blog_authorship_corpus/gender",
"blog_authorship_corpus/age",
"blog_authorship_corpus/job",
"open_question_type",
"health_fact",
"commonsense_qa",
"mc_taco",
"ade_corpus_v2/Ade_corpus_v2_classification",
"discosense",
"circa",
"phrase_similarity",
"scientific-exaggeration-detection",
"quarel",
"fever-evidence-related/mwong--fever-related",
"numer_sense",
"dynasent/dynabench.dynasent.r1.all/r1",
"dynasent/dynabench.dynasent.r2.all/r2",
"Sarcasm_News_Headline",
"sem_eval_2010_task_8",
"auditor_review/demo-org--auditor_review",
"medmcqa",
"Dynasent_Disagreement",
"Politeness_Disagreement",
"SBIC_Disagreement",
"SChem_Disagreement",
"Dilemmas_Disagreement",
"logiqa",
"wiki_qa",
"cycic_classification",
"cycic_multiplechoice",
"sts-companion",
"commonsense_qa_2.0",
"lingnli",
"monotonicity-entailment",
"arct",
"scinli",
"naturallogic",
"onestop_qa",
"moral_stories/full",
"prost",
"dynahate",
"syntactic-augmentation-nli",
"autotnli",
"CONDAQA",
"webgpt_comparisons",
"synthetic-instruct-gptj-pairwise",
"scruples",
"wouldyourather",
"attempto-nli",
"defeasible-nli/snli",
"defeasible-nli/atomic",
"help-nli",
"nli-veridicality-transitivity",
"natural-language-satisfiability",
"lonli",
"dadc-limit-nli",
"FLUTE",
"strategy-qa",
"summarize_from_feedback/comparisons",
"folio",
"tomi-nli",
"avicenna",
"SHP",
"MedQA-USMLE-4-options-hf",
"wikimedqa/medwiki",
"cicero",
"CREAK",
"mutual",
"NeQA",
"quote-repetition",
"redefine-math",
"puzzte",
"implicatures",
"race/high",
"race/middle",
"race-c",
"spartqa-yn",
"spartqa-mchoice",
"temporal-nli",
"riddle_sense",
"clcd-english",
"twentyquestions",
"reclor",
"counterfactually-augmented-imdb",
"counterfactually-augmented-snli",
"cnli",
"boolq-natural-perturbations",
"acceptability-prediction",
"equate",
"ScienceQA_text_only",
"ekar_english",
"implicit-hate-stg1",
"chaos-mnli-ambiguity",
"headline_cause/en_simple",
"logiqa-2.0-nli",
"oasst1_dense_flat/quality",
"oasst1_dense_flat/toxicity",
"oasst1_dense_flat/helpfulness",
"PARARULE-Plus",
"mindgames",
"universal_dependencies/en_lines/deprel",
"universal_dependencies/en_partut/deprel",
"universal_dependencies/en_ewt/deprel",
"universal_dependencies/en_gum/deprel",
"ambient",
"path-naturalness-prediction",
"civil_comments/toxicity",
"civil_comments/severe_toxicity",
"civil_comments/obscene",
"civil_comments/threat",
"civil_comments/insult",
"civil_comments/identity_attack",
"civil_comments/sexual_explicit",
"cloth",
"dgen",
"oasst1_pairwise_rlhf_reward",
"I2D2",
"args_me",
"Touche23-ValueEval",
"starcon",
"banking77",
"ruletaker",
"lsat_qa/all",
"ConTRoL-nli",
"tracie",
"sherliic",
"sen-making/1",
"sen-making/2",
"winowhy",
"mbib-base/cognitive-bias",
"mbib-base/fake-news",
"mbib-base/gender-bias",
"mbib-base/hate-speech",
"mbib-base/linguistic-bias",
"mbib-base/political-bias",
"mbib-base/racial-bias",
"mbib-base/text-level-bias",
"robustLR",
"v1/gen_train234_test2to10",
"logical-fallacy",
"parade",
"cladder",
"subjectivity",
"MOH",
"VUAC",
"TroFi",
"sharc_modified/mod",
"conceptrules_v2",
"disrpt/eng.dep.scidtb.rels",
"conll2000",
"few-nerd/supervised",
"finer-139",
"zero-shot-label-nli",
"com2sense",
"scone",
"winodict",
"fool-me-twice",
"monli",
"corr2cause",
"lsat_qa/all",
"apt",
"twitter-financial-news-sentiment",
"icl-symbol-tuning-instruct",
"SpaceNLI",
"propsegment/nli",
"HatemojiBuild",
"regset",
"esci",
"chatbot_arena_conversations",
"dnd_style_intents",
"FLD.v2",
"SDOH-NLI",
"scifact_entailment",
"feasibilityQA",
"simple_pair",
"AdjectiveScaleProbe-nli",
"resnli",
"SpaRTUN",
"ReSQ",
"semantic_fragments_nli",
"dataset_train_nli",
"babi_nli",
"gen_debiased_nli",
"imppres/presupposition",
"/prag",
"blimp-2",
"mmlu-4"
],
"torch_dtype": "float32",
"transformers_version": "4.38.2",
"type_vocab_size": 0,
"vocab_size": 128100
}