# Twitter-XLM-Roberta-base
This is an XLM-RoBERTa-base model trained on ~198M multilingual tweets, described and evaluated in the reference paper. To evaluate this and other language models on Twitter-specific data, please refer to the main repository. A usage example is provided below.
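Since this is a masked language model, a quick way to sanity-check the checkpoint is the `fill-mask` pipeline. A minimal sketch, assuming the `cardiffnlp/twitter-xlm-roberta-base` checkpoint name (inferred from this card's title):

```python
from transformers import pipeline

# Checkpoint name assumed from this card's title.
fill = pipeline("fill-mask", model="cardiffnlp/twitter-xlm-roberta-base")

# XLM-RoBERTa uses <mask> as its mask token.
for pred in fill("I love <mask>!"):
    print(pred["token_str"], round(pred["score"], 3))
```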
## Computing tweet similarity
```python
from collections import defaultdict

import numpy as np
from scipy.spatial.distance import cosine
from transformers import AutoTokenizer, AutoModel

# Checkpoint name assumed from this card's title.
MODEL = "cardiffnlp/twitter-xlm-roberta-base"
tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModel.from_pretrained(MODEL)

def preprocess(text):
    """Replace user handles and URLs with placeholder tokens."""
    new_text = []
    for t in text.split(" "):
        t = '@user' if t.startswith('@') and len(t) > 1 else t
        t = 'http' if t.startswith('http') else t
        new_text.append(t)
    return " ".join(new_text)

def get_embedding(text):
    """Embed a tweet as the mean of its token representations."""
    text = preprocess(text)
    encoded_input = tokenizer(text, return_tensors='pt')
    features = model(**encoded_input)
    features = features[0].detach().numpy()
    features_mean = np.mean(features[0], axis=0)
    return features_mean

query = "Acabo de pedir pollo frito 🐣"  # spanish

tweets = ["We had a great time! ⚽️",  # english
          "We hebben een geweldige tijd gehad! ⛩",  # dutch
          "Nous avons passé un bon moment! 🤩",  # french
          "Ci siamo divertiti! 😍"]  # italian

# Rank the candidate tweets by cosine similarity to the query.
d = defaultdict(int)
for tweet in tweets:
    sim = 1 - cosine(get_embedding(query), get_embedding(tweet))
    d[tweet] = sim

print('Most similar to: ', query)
print('----------------------------------------')
for idx, x in enumerate(sorted(d.items(), key=lambda x: x[1], reverse=True)):
    print(idx + 1, x[0])
```
Output:

```
Most similar to:  Acabo de pedir pollo frito 🐣
----------------------------------------
1 Ci siamo divertiti! 😍
2 Nous avons passé un bon moment! 🤩
3 We had a great time! ⚽️
4 We hebben een geweldige tijd gehad! ⛩
```
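For larger sets of tweets, calling `get_embedding` one text at a time is slow, and naively averaging tokens over padded batches would let padding positions skew the mean. A batched, mask-aware variant is sketched below; it reuses the `tokenizer`, `model`, and `preprocess` objects defined above, and the helper name `get_embeddings_batched` is hypothetical:

```python
import numpy as np
import torch

def get_embeddings_batched(texts, batch_size=32):
    """Sketch: embed many tweets at once, averaging only over real tokens."""
    all_embs = []
    for i in range(0, len(texts), batch_size):
        batch = [preprocess(t) for t in texts[i:i + batch_size]]
        enc = tokenizer(batch, padding=True, truncation=True, return_tensors='pt')
        with torch.no_grad():                       # inference only, no gradients
            hidden = model(**enc)[0]                # (batch, seq_len, hidden)
        mask = enc['attention_mask'].unsqueeze(-1)  # (batch, seq_len, 1)
        summed = (hidden * mask).sum(dim=1)         # zero out padding, then sum
        counts = mask.sum(dim=1)                    # real-token count per text
        all_embs.append((summed / counts).numpy())  # mask-aware mean
    return np.concatenate(all_embs)
```

For single, unpadded inputs this reduces to the same token average as `get_embedding`; the attention mask only matters once texts of different lengths share a batch.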