# --- Hugging Face Space page residue (not code), commented out so the file parses ---
# Spaces: Runtime error
# File size: 1,378 Bytes
# Commit hashes from the scraped page view:
# de4975e 25f0ea1 de4975e 25f0ea1 fd5008e 25f0ea1 fd5008e 25f0ea1 de4975e 25f0ea1 de4975e 25f0ea1 de4975e 25f0ea1
# (line-number gutter from the page view removed)
from transformers import AutoAdapterModel, AutoTokenizer, TextClassificationPipeline
from huggingface_hub import Repository
# app = Flask(__name__)
#define model
# Build the Arabic-dialect region classifier: MARBERT backbone with three task
# adapters (aoc3, aoc4, sarcasm) combined via an AdapterFusion layer, exposed
# as a text-classification pipeline.
tokenizer = AutoTokenizer.from_pretrained("UBC-NLP/MARBERT")

# Clone each adapter repo from the Hub into a RELATIVE directory under the
# current working directory (local_dir is relative, not rooted at "/").
sarcasm_adapter = Repository(local_dir="sarcasm_adapter", clone_from="nehalelkaref/sarcasm_adapter")
aoc3_adapter = Repository(local_dir="aoc3_adapter", clone_from="nehalelkaref/aoc3_adapter")
aoc4_adapter = Repository(local_dir="aoc4_adapter", clone_from="nehalelkaref/aoc4_adapter")
fusion_adapter = Repository(local_dir="fusion_adapter", clone_from="nehalelkaref/region_fusion")

model = AutoAdapterModel.from_pretrained("UBC-NLP/MARBERT")
# BUG FIX: the original code loaded adapters from absolute paths
# ("/aoc3_adapter", "/fusion_adapter/..."), but the repos above are cloned
# into relative directories — the absolute paths do not exist and would raise
# at startup (matching the "Runtime error" status on the Space). Load from
# the actual clone locations instead.
model.load_adapter("aoc3_adapter", set_active=True, with_head=False)
model.load_adapter("aoc4_adapter", set_active=True, with_head=False)
model.load_adapter("sarcasm_adapter", set_active=True, with_head=False)
# The fusion checkpoint ships a classification head, so with_head=True here.
model.load_adapter_fusion("fusion_adapter/aoc(3),aoc(4),sarcasm", with_head=True, set_active=True)
pipe = TextClassificationPipeline(tokenizer=tokenizer, model=model)
# @app.route('/predict', methods=['POST'])
# def predict():
# text = request.json['inputs']
# prediction = pipe(text)
# labels = {"LABEL_0":"GULF", "LABEL_1":"LEVANT","LABEL_2":"EGYPT"}
# regions = []
# for res in prediction:
# regions.append(labels[res['label']])
# return jsonify({'response': regions})
# (end of scraped page residue)