What preprocessor should I use to train a handwritten Arabic OCR on this ArOCR base model?
Hey, I'm trying to train a handwritten Arabic OCR using ArOCR and TrOCR-Ar-Small, but the model's preprocessor doesn't load, so I used "giganticode/roberta-base-ar_miner" as the tokenizer instead.
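For reference, this is the straightforward load that fails for me (hence the tokenizer workaround):

processor = TrOCRProcessor.from_pretrained("gagan3012/TrOCR-Ar-Small")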
When I check the validation-set predictions, every output is the same repeated garbage, as shown.
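For context, this is roughly how I generate those predictions (a minimal sketch; val_images is a placeholder for a batch of PIL images from my validation set, and model, processor, and device come from the loaders below):

# Minimal sketch of how I inspect validation predictions.
pixel_values = processor(images=val_images, return_tensors="pt").pixel_values.to(device)
generated_ids = model.generate(pixel_values)
preds = processor.batch_decode(generated_ids, skip_special_tokens=True)
print(preds)  # every entry comes out as the same repeated garbage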
Here's my loading code:
import torch
from transformers import (AutoTokenizer, TrOCRProcessor,
                          ViTFeatureExtractor, VisionEncoderDecoderModel)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def load_model(from_disk: bool) -> VisionEncoderDecoderModel:
    # I also tried building the model with
    # .from_encoder_decoder_pretrained("google/vit-base-patch16-224-in21k", "giganticode/roberta-base-ar_miner")
    model = VisionEncoderDecoderModel.from_pretrained("gagan3012/TrOCR-Ar-Small")
    print(f"Using device {device}.")
    model.to(device)
    return model
def init_model_for_training(model: VisionEncoderDecoderModel, processor: TrOCRProcessor) -> None:
    # Wire the tokenizer's special tokens into the encoder-decoder config.
    model.config.decoder_start_token_id = processor.tokenizer.cls_token_id  # I also tried hard-coding 0 here
    model.config.pad_token_id = processor.tokenizer.pad_token_id
    model.config.vocab_size = model.config.decoder.vocab_size
    model.config.bos_token_id = processor.tokenizer.bos_token_id
    # Make sure the decoder half runs as a decoder with cross-attention over the image features.
    model.config.decoder.is_decoder = True
    model.config.decoder.add_cross_attention = True
def load_processor() -> TrOCRProcessor:
    # Image preprocessing from the ViT checkpoint; tokenizer swapped in
    # because the model repo's own processor doesn't load.
    feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-384")
    tokenizer = AutoTokenizer.from_pretrained("giganticode/roberta-base-ar_miner")
    return TrOCRProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
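And for completeness, this is how the three pieces get wired together before training:

model = load_model(from_disk=False)
processor = load_processor()
init_model_for_training(model, processor)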
I think the original author used UBC-NLP/MARBERT as the tokenizer, if I'm not mistaken.
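If it is MARBERT, I'd guess load_processor becomes something like this (untested sketch, assuming MARBERT's tokenizer loads via AutoTokenizer):

def load_processor() -> TrOCRProcessor:
    feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-384")
    # MARBERT's vocab and special-token ids differ from roberta-base-ar_miner,
    # so init_model_for_training would need to be rerun with this processor.
    tokenizer = AutoTokenizer.from_pretrained("UBC-NLP/MARBERT")
    return TrOCRProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

Would that be the right preprocessor here, or is there a proper processor for this model that I'm missing?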