from datetime import datetime

import torch
from datasets import load_dataset
from fastapi import APIRouter
from sklearn.metrics import accuracy_score
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, AutoModelForSequenceClassification

from .utils.evaluation import TextEvaluationRequest
from .utils.emissions import tracker, clean_emissions_data, get_space_info

router = APIRouter()

DESCRIPTION = "Fine-tuned BERT"
ROUTE = "/text"


@router.post(ROUTE, tags=["Text Task"], description=DESCRIPTION)
async def evaluate_text(request: TextEvaluationRequest):
    """
    Evaluate text classification for climate disinformation detection.

    Current model: fine-tuned BERT sequence classifier
    - Loads the fine-tuned checkpoint and tokenizer from the local model directory
    - Predicts one of the eight claim labels (0-7) for each quote
    """

    username, space_url = get_space_info()
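
    # Map the dataset's string labels to integer class ids (0-7)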
    LABEL_MAPPING = {
        "0_not_relevant": 0,
        "1_not_happening": 1,
        "2_not_human": 2,
        "3_not_bad": 3,
        "4_solutions_harmful_unnecessary": 4,
        "5_science_unreliable": 5,
        "6_proponents_biased": 6,
        "7_fossil_fuels_needed": 7,
    }
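
    # Load the dataset and convert its string labels to integer ids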
    dataset = load_dataset(request.dataset_name)
    dataset = dataset.map(lambda x: {"label": LABEL_MAPPING[x["label"]]})
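
    # Split out the held-out test set using the request's size and seed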
    train_test = dataset["train"].train_test_split(test_size=request.test_size, seed=request.test_seed)
    test_dataset = train_test["test"]
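
    # Start tracking emissions for the inference phase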
    tracker.start()
    tracker.start_task("inference")

    true_labels = test_dataset["label"]
    texts = test_dataset["quote"]
    labels = test_dataset["label"]
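
    # Load the fine-tuned tokenizer and model from the local model directory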
    model_dir = "./"
    tokenizer = AutoTokenizer.from_pretrained(model_dir)
    model = AutoModelForSequenceClassification.from_pretrained(model_dir)
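
    # Dataset wrapper that tokenizes each quote so a DataLoader can batch tensors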
    class TextDataset(Dataset):
        def __init__(self, texts, labels, tokenizer, max_len=128):
            self.texts = texts
            self.labels = labels
            self.tokenizer = tokenizer
            self.max_len = max_len

        def __len__(self):
            return len(self.texts)
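
        # Tokenize one quote and return model-ready tensors for a single example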
        def __getitem__(self, idx):
            text = self.texts[idx]
            label = self.labels[idx]
            encodings = self.tokenizer(
                text,
                max_length=self.max_len,
                padding="max_length",
                truncation=True,
                return_tensors="pt",
            )
            return {
                "input_ids": encodings["input_ids"].squeeze(0),
                "attention_mask": encodings["attention_mask"].squeeze(0),
                "labels": torch.tensor(label, dtype=torch.long),
            }

    eval_dataset = TextDataset(texts, labels, tokenizer)
    test_loader = DataLoader(eval_dataset, batch_size=16)
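
    # Run batched CPU inference; each batch is the dict of tensors from __getitem__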
    model.eval()
    predictions = []
    with torch.no_grad():
        for batch in test_loader:
            input_ids = batch["input_ids"]
            attention_mask = batch["attention_mask"]
            outputs = model(input_ids=input_ids, attention_mask=attention_mask)
            predicted = torch.argmax(outputs.logits, dim=1)
            predictions.extend(predicted.cpu().numpy())
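
    # Stop tracking emissions for the inference task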
    emissions_data = tracker.stop_task()
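
    # Score predictions against the ground-truth labels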
    accuracy = accuracy_score(true_labels, predictions)
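
    # Prepare the results payload; x1000 converts kWh -> Wh and kgCO2eq -> gCO2eq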
    results = {
        "username": username,
        "space_url": space_url,
        "submission_timestamp": datetime.now().isoformat(),
        "model_description": DESCRIPTION,
        "accuracy": float(accuracy),
        "energy_consumed_wh": emissions_data.energy_consumed * 1000,
        "emissions_gco2eq": emissions_data.emissions * 1000,
        "emissions_data": clean_emissions_data(emissions_data),
        "api_route": ROUTE,
        "dataset_config": {
            "dataset_name": request.dataset_name,
            "test_size": request.test_size,
            "test_seed": request.test_seed,
        },
    }

    return results