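"""Gradio demo that fine-tunes bert-base-cased on a 1,000-example slice of
the yelp_review_full dataset and returns the shape of the eval predictions.

Note: the text-box input is passed to greet() but is not used by the
pipeline, and training runs from scratch on every request.
"""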
import gradio as gr

def greet(name):
    import numpy as np
    from datasets import load_dataset

    dataset = load_dataset("yelp_review_full")
    print(dataset["train"][100])  # peek at one raw example

    # Tokenize the reviews with the matching BERT tokenizer
    from transformers import AutoTokenizer
    
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    
    
    def tokenize_function(examples):
        return tokenizer(examples["text"], padding="max_length", truncation=True)
    
    # Apply tokenization across every split
    tokenized_datasets = dataset.map(tokenize_function, batched=True)
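    # batched map adds input_ids, token_type_ids, and attention_mask columns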
    
    # Small train/eval subsets (1,000 examples each) for a quick run
    small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000))
    small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000))
    
    # Load the pretrained model with a 5-way head (Yelp reviews have 5 star ratings)
    from transformers import AutoModelForSequenceClassification
    
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)
    
    # Evaluation metric: the Trainer calls compute_metrics with (logits, labels);
    # accuracy is the metric used here
    import evaluate

    metric = evaluate.load("accuracy")

    def compute_metrics(eval_pred):
        logits, labels = eval_pred
        predictions = np.argmax(logits, axis=-1)
        return metric.compute(predictions=predictions, references=labels)
    
    # Training arguments and Trainer
    from transformers import TrainingArguments, Trainer
    
    training_args = TrainingArguments(output_dir="test_trainer", evaluation_strategy="epoch")
    
    # Wire model, data, and metrics into the Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=small_train_dataset,
        eval_dataset=small_eval_dataset,
        compute_metrics=compute_metrics,
    )
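    # With only output_dir and evaluation_strategy set, Trainer falls back to
    # its defaults (3 epochs, per-device batch size 8)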
    
    #Execute the training:
    trainer.train()
    
    # Predict on the eval subset and report logit/label shapes
    predictions = trainer.predict(small_eval_dataset)
    print(predictions.predictions.shape, predictions.label_ids.shape)
    # Gradio's "text" output expects a string, not a PredictionOutput object
    return str(predictions.predictions.shape)

demo = gr.Interface(fn=greet, inputs="text", outputs="text")

demo.launch()
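# Gradio also accepts demo.launch(share=True) to expose a temporary
# public URL instead of serving on localhost only.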