from datasets import load_dataset
from pathlib import Path
import pandas as pd
import os
import pickle
import logging
import time
import evaluate
import nltk
CURRENT_PATH = Path(__file__).parent
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s',
                    datefmt='%H:%M:%S', filename=os.path.join(CURRENT_PATH, 'out', 'debug_ngrams.txt'),
                    filemode='w')
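# Note: logging writes to out/debug_ngrams.txt; the out/ directory must already
# exist, since basicConfig does not create missing directories.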
nltk.download("stopwords")
nltk.download("punkt")
def tokenizer(text):
    return nltk.tokenize.word_tokenize(text, language="portuguese")
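
# Load one pickled n-gram pipeline per training domain from models/n_grams/.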
def load_pipelines():
    in_path = os.path.join(CURRENT_PATH, 'models', 'n_grams')
    pipelines = []
    for domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
        with open(os.path.join(in_path, f'{domain}.pickle'), 'rb') as f:
            logging.info(f"Loading {domain} pipeline...")
            pipelines.append({
                'pipeline': pickle.load(f),
                'train_domain': domain,
            })
    return pipelines
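
# Evaluate a single trained pipeline on the test split of every domain,
# returning one row of metrics per (train_domain, test_domain) pair.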
def benchmark(pipeline, debug=False):
    df_results = pd.DataFrame(
        columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
    train_domain = pipeline['train_domain']
    pipeline = pipeline['pipeline']
    for test_domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
        logging.info(f"Test Domain {test_domain}...")
        dataset = load_dataset(
            'arubenruben/Portuguese_Language_Identification', test_domain, split='test')
        if debug:
            logging.info("Debug mode: using only 100 samples")
            dataset = dataset.shuffle().select(range(100))
        else:
            dataset = dataset.shuffle().select(range(min(50_000, len(dataset))))
        y = pipeline.predict(dataset['text'])
        accuracy = evaluate.load('accuracy').compute(
            predictions=y, references=dataset['label'])['accuracy']
        f1 = evaluate.load('f1').compute(
            predictions=y, references=dataset['label'])['f1']
        precision = evaluate.load('precision').compute(
            predictions=y, references=dataset['label'])['precision']
        recall = evaluate.load('recall').compute(
            predictions=y, references=dataset['label'])['recall']
        logging.info(
            f"Accuracy: {accuracy} | F1: {f1} | Precision: {precision} | Recall: {recall}")
        df_results = pd.concat([df_results, pd.DataFrame(
            [[train_domain, test_domain, accuracy, f1, precision, recall]],
            columns=df_results.columns)], ignore_index=True)
    return df_results
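
# Run every pipeline through the cross-domain benchmark and dump the combined
# results to out/n_grams.json.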
def test():
    DEBUG = False
    logging.info(f"Debug mode: {DEBUG}")
    pipelines = load_pipelines()
    df_results = pd.DataFrame(
        columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
    for pipeline in pipelines:
        logging.info(f"Train Domain {pipeline['train_domain']}...")
        df_results = pd.concat(
            [df_results, benchmark(pipeline, debug=DEBUG)], ignore_index=True)
    logging.info("Saving results...")
    df_results.to_json(os.path.join(CURRENT_PATH, 'out', 'n_grams.json'),
                       orient='records', indent=4, force_ascii=False)


if __name__ == "__main__":
    test()