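"""Fine-tune a diffusion language model built on a pretrained ESM backbone.

Trains only the unfrozen ESM layers with AdamW under the Diffusion loss,
validates and checkpoints each epoch, and reports token-weighted perplexity
on the train, validation, and test splits. Hyperparameters come from `config`.
"""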
import torch
import config
import math
import sys
import os
from tqdm import tqdm
from torch.optim import AdamW
from transformers import AutoTokenizer
from diffusion import WrapESM, Diffusion
from data_loader import get_dataloaders

def save_hyperparams(ckpt_dir):
    """Write all upper-case config constants to a text file in the checkpoint dir."""
    hyperparams_txt_file = os.path.join(ckpt_dir, "hyperparameters.txt")
    with open(hyperparams_txt_file, 'w') as f:
        for k, v in vars(config).items():
            if k.isupper():
                f.write(f"{k}: {v}\n")

def train_and_validate(model, diffusion_model, optimizer, device, train_loader, val_loader, num_epochs, ckpt_dir):
    """Fine-tune `model` under the diffusion objective, validating and checkpointing every epoch."""
    best_val_loss = float('inf')
    avg_val_loss = val_perplexity = float('nan')  # Keep the return well-defined if no val_loader is given

    for epoch in range(num_epochs):
        model.train()

        print(f"EPOCH {epoch+1}/{num_epochs}")
        sys.stderr.flush()
        total_loss = 0.0
        train_tokens = 0
        weighted_total_train_loss = 0.0

        # tqdm advances once per iteration on its own; miniters just limits
        # screen refreshes to roughly four per epoch.
        with tqdm(train_loader, desc="Training batch", total=len(train_loader), leave=True,
                  position=0, ncols=100, miniters=max(1, len(train_loader) // 4)) as trainbar:
            for inputs in trainbar:
                inputs = {k: v.to(device) for k, v in inputs.items()}
                optimizer.zero_grad()
                # compute_loss performs its own forward pass through the wrapped
                # model, so no separate model(**inputs) call is needed here.
                train_loss = diffusion_model.compute_loss(inputs["input_ids"], inputs['attention_mask'],
                                                          val=False).loss
                train_loss.backward()
                optimizer.step()

                total_loss += train_loss.item()
                weighted_total_train_loss += train_loss.item() * inputs['input_ids'].shape[1] # Mean loss * sequence length
                train_tokens += inputs['input_ids'].shape[1]

            avg_train_loss = total_loss / len(train_loader)
            avg_train_neg_log_likelihood = weighted_total_train_loss / train_tokens
            train_perplexity = math.exp(avg_train_neg_log_likelihood)

        # Save model every epoch
        train_ckpt_path = os.path.join(ckpt_dir, f'epoch{epoch+1}')
        model.save_model(train_ckpt_path)
        save_hyperparams(train_ckpt_path)

        # Validate model
        if val_loader:
            model.eval()
            total_val_loss = 0.0
            weighted_total_val_loss = 0.0
            val_tokens = 0

            with torch.no_grad():
                with tqdm(val_loader, desc='Validation batch', total=len(val_loader), leave=True,
                          position=0, miniters=max(1, len(val_loader) // 4)) as valbar:
                    for inputs in valbar:
                        inputs = {k: v.to(device) for k, v in inputs.items()}
                        val_loss = diffusion_model.compute_loss(inputs['input_ids'], inputs['attention_mask'],
                                                                val=True).loss.item()

                        total_val_loss += val_loss
                        weighted_total_val_loss += val_loss * inputs['input_ids'].shape[1] # Mean loss * sequence length
                        val_tokens += inputs['input_ids'].shape[1]

                avg_val_loss = total_val_loss / len(val_loader)
                avg_val_neg_log_likelihood = weighted_total_val_loss / val_tokens
                val_perplexity = math.exp(avg_val_neg_log_likelihood)

        # Save the best model based on validation loss
        if val_loader and avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            val_ckpt_path = os.path.join(ckpt_dir, "best_model_epoch")
            model.save_model(val_ckpt_path)
            save_hyperparams(val_ckpt_path)


        print(f"Average train loss: {avg_train_loss}")
        print(f"Average train perplexity: {train_perplexity}\n")
        sys.stdout.flush()

        print(f"Average validation loss: {avg_val_loss}")
        print(f"Average validation perplexity: {val_perplexity}\n")
        sys.stdout.flush()
        

    return avg_train_loss, train_perplexity, avg_val_loss, val_perplexity
                            

def test(model, diffusion_model, test_loader, device):
    """Evaluate on the held-out test split; returns average loss and token-weighted perplexity."""
    model.to(device).eval()
    total_test_loss = 0.0
    weighted_total_test_loss = 0.0
    test_tokens = 0

    with torch.no_grad():
        for inputs in test_loader:
            inputs = {k: v.to(device) for k, v in inputs.items()}
            test_loss = diffusion_model.compute_loss(inputs['input_ids'], inputs['attention_mask'],
                                                     val=True).loss.item()

            total_test_loss += test_loss
            weighted_total_test_loss += test_loss * inputs['input_ids'].shape[1] # Mean loss * sequence length
            test_tokens += inputs['input_ids'].shape[1]
        
        avg_test_loss = total_test_loss / len(test_loader)
        avg_test_neg_log_likelihood = weighted_total_test_loss / test_tokens
        test_perplexity = math.exp(avg_test_neg_log_likelihood)

    return avg_test_loss, test_perplexity


if __name__ == "__main__":
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    tokenizer = AutoTokenizer.from_pretrained(config.MODEL_NAME)

    esm_model = WrapESM()
    diffusion_model = Diffusion(config, tokenizer=tokenizer)

    print(f'Trainable params before unfreezing: {sum(p.numel() for p in esm_model.parameters() if p.requires_grad)}')
    
    esm_model.to(device)
    diffusion_model.to(device)

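    # Parameter-efficient fine-tuning: freeze the full ESM backbone, then
    # re-enable gradients on a subset of layers via unfreeze_n_layers()
    # (assumed here to unfreeze the top n transformer blocks, per WrapESM).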
    esm_model.freeze_model()
    esm_model.unfreeze_n_layers()

    print(f'Trainable params after unfreezing: {sum(p.numel() for p in esm_model.parameters() if p.requires_grad)}')

    train_loader, val_loader, test_loader = get_dataloaders(config)
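    # Only parameters left trainable by unfreeze_n_layers() reach the optimizer;
    # frozen weights are filtered out via requires_grad.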
    optimizer = AdamW(filter(lambda p: p.requires_grad, esm_model.parameters()), lr=config.Optim.LR)
    
    # Train and test the model
    avg_train_loss, train_ppl, avg_val_loss, val_ppl = train_and_validate(esm_model, diffusion_model, optimizer, device, train_loader,
                                                                          val_loader, config.Training.NUM_EPOCHS, config.Eval.CHECKPOINT_PATH)
    avg_test_loss, test_ppl = test(esm_model, diffusion_model, test_loader, device)

    results_dict = {"Average train loss": avg_train_loss,
                    "Average train perplexity": train_ppl,
                    "Average val loss": avg_val_loss,
                    "Average val perplexity": val_ppl,
                    "Average test loss": avg_test_loss,
                    "Average test perplexity": test_ppl,
    }

    print("TRAIN AND TEST RESULTS")
    for k, v in results_dict.items():
        print(f"{k}: {v}\n")