{ "_name_or_path": "vision-transformer-moderator", "architectures": [ "ViTForImageClassification" ], "attention_probs_dropout_prob": 0.0, "encoder_stride": 16, "hidden_act": "gelu", "hidden_dropout_prob": 0.0, "hidden_size": 768, "id2label": { "0": "NA: None applying / Safe", "1": "O1: Hate, Humiliation, Harassment", "2": "O2: Violence, Harm, or Cruelty", "3": "O3: Sexual Content", "4": "O4: Nudity Content", "5": "O5: Criminal Planning", "6": "O6: Weapons or Substance Abuse", "7": "O7: Self-Harm", "8": "O8: Animal Cruelty", "9": "O9: Disasters or Emergencies", "10": "10: Political Content" }, "image_size": 224, "initializer_range": 0.02, "intermediate_size": 3072, "label2id": { "10: Political Content": 10, "NA: None applying / Safe": 0, "O1: Hate, Humiliation, Harassment": 1, "O2: Violence, Harm, or Cruelty": 2, "O3: Sexual Content": 3, "O4: Nudity Content": 4, "O5: Criminal Planning": 5, "O6: Weapons or Substance Abuse": 6, "O7: Self-Harm": 7, "O8: Animal Cruelty": 8, "O9: Disasters or Emergencies": 9 }, "layer_norm_eps": 1e-12, "model_type": "vit", "num_attention_heads": 12, "num_channels": 3, "num_hidden_layers": 12, "patch_size": 16, "problem_type": "single_label_classification", "qkv_bias": true, "torch_dtype": "float32", "transformers_version": "4.42.4" }