diff --git "a/attnserver.run_attnserver.slurm.sh.343245.out.log" "b/attnserver.run_attnserver.slurm.sh.343245.out.log" --- "a/attnserver.run_attnserver.slurm.sh.343245.out.log" +++ "b/attnserver.run_attnserver.slurm.sh.343245.out.log" @@ -10460,3 +10460,5871 @@ batch tensor after cp: position_ids torch.Size([4, 24576]) Start exporting trace 1 Done exporting trace 1 [2025-06-21 21:59:32] iteration 2/ 10 | consumed samples: 2 | elapsed time per iteration (ms): 8143.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 2147483648.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 
98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +Start exporting trace 2 +Done exporting trace 2 + [2025-06-21 21:59:40] iteration 3/ 10 | consumed samples: 3 | elapsed time per iteration (ms): 8049.6 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 1073741824.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: 
attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +Start exporting trace 3 +Done exporting trace 3 + [2025-06-21 21:59:48] iteration 4/ 10 | consumed samples: 4 | elapsed time per iteration (ms): 8001.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 536870912.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask 
torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor 
after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +Start exporting trace 4 +Done exporting trace 4 + [2025-06-21 21:59:56] iteration 5/ 10 | consumed samples: 5 | elapsed time per iteration (ms): 7905.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 268435456.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels 
torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +Start exporting trace 5 +Done exporting trace 5 + [2025-06-21 22:00:05] iteration 6/ 10 | consumed samples: 6 | elapsed time per iteration (ms): 8923.1 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 134217728.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 
24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +Start exporting trace 6 +Done exporting trace 6 + [2025-06-21 22:00:13] iteration 7/ 10 | consumed samples: 7 | elapsed time per iteration (ms): 7931.2 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 67108864.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch 
tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: 
position_ids torch.Size([4, 24576]) +Start exporting trace 7 +Done exporting trace 7 + [2025-06-21 22:00:21] iteration 8/ 10 | consumed samples: 8 | elapsed time per iteration (ms): 7942.3 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 33554432.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: 
position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +Start exporting trace 8 +Done exporting trace 8 + [2025-06-21 22:00:29] iteration 9/ 10 | consumed samples: 9 | elapsed time per iteration (ms): 7917.7 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 16777216.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor 
after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +Start exporting trace 9 +Done exporting trace 9 + [2025-06-21 22:00:37] iteration 10/ 10 | consumed samples: 10 | elapsed time per iteration (ms): 7971.0 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 8388608.0 | number of skipped iterations: 1 | number of nan iterations: 0 | +[after training is done] datetime: 2025-06-21 22:00:37 +saving checkpoint at iteration 10 to gpt-checkpoint in torch_dist format +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.04561257362365723 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.04564857482910156 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.04565262794494629 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.04613518714904785 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.04616403579711914 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.04619407653808594 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.04644060134887695 to prepare state dict for ckpt +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.056025028228759766 to 
prepare state dict for ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(413138944), 0), (np.int64(424673280), 1), (np.int64(427819008), 2), (np.int64(411176960), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(413138944), 0), (np.int64(424673280), 1), (np.int64(427819008), 2), (np.int64(411176960), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(413138944), 0), (np.int64(424673280), 1), (np.int64(427819008), 2), (np.int64(411176960), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(413138944), 0), (np.int64(424673280), 1), (np.int64(427819008), 2), (np.int64(411176960), 3)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:Apply save parallelization +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(564133888), 0), (np.int64(578813952), 1), (np.int64(575668224), 2), (np.int64(562515968), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(564133888), 0), (np.int64(578813952), 1), (np.int64(575668224), 2), (np.int64(562515968), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(564133888), 0), (np.int64(578813952), 1), (np.int64(575668224), 2), (np.int64(562515968), 3)] +DEBUG:megatron.core.dist_checkpointing.exchange_utils:distribute_shards_to_ranks distribution: [(np.int64(564133888), 0), (np.int64(578813952), 1), (np.int64(575668224), 2), (np.int64(562515968), 3)] +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 2.8169000148773193 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 2.8168628215789795 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 2.8163509368896484 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 2.8172197341918945 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 2.816526174545288 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 2.8172411918640137 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 2.817450761795044 +DEBUG:megatron.core.dist_checkpointing.strategies.fully_parallel:parallel save sharding, time: 0.009677886962890625 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, starting state dict save 
+DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, starting state dict save +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:verifying reuse of global metadata +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:loaded global metadata reuse verification: no loaded plans passed +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 3, plan time: 0.005688905715942383 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 6, plan time: 0.004932403564453125 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 4, plan time: 0.006430149078369141 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 7, plan time: 0.005855083465576172 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543242.3079858 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 2, plan time: 0.006386756896972656 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543242.30799 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 5, plan time: 0.004456758499145508 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543242.3079956 
+DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543242.3080049 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 1, plan time: 0.0018155574798583984 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543242.3080192 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543242.3080251 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543242.3080523 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.937980651855469e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.915496826171875e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 0.00010204315185546875 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 7.724761962890625e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.036064147949219e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 6.29425048828125e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 9.465217590332031e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:rank: 0, plan time: 0.007209062576293945 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:thread_count: 2, time: 1750543242.312125 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:bucket_prep, time: 5.936622619628906e-05 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.045163631439208984 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543242.3535993 rank: 7, write(async) time: 0.045597076416015625 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.04550313949584961 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543242.3539362 rank: 3, write(async) time: 0.04594683647155762 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.049059152603149414 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543242.3575194 rank: 1, write(async) time: 0.049468040466308594 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.049854278564453125 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543242.3583205 rank: 6, write(async) time: 0.05032849311828613 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05008840560913086 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543242.358562 rank: 2, write(async) time: 0.05054616928100586 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05020761489868164 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543242.3586745 rank: 4, write(async) time: 0.050679683685302734 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.050520896911621094 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543242.3630664 rank: 0, write(async) time: 0.05094408988952637 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:D2H and push, time: 0.05852317810058594 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543242.3669863 rank: 5, 
write(async) time: 0.05896353721618652 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 2.4318695068359375e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, takes 0.0288240909576416 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, takes 0.029969215393066406 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 1.71661376953125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 282624, before: 1635717120, after: 1635999744 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, takes 0.030200719833374023 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 1.9073486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 1.5497207641601562e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, takes 0.026186466217041016 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 1.9550323486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, takes 0.037552833557128906 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, takes 0.02643609046936035 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 1.9550323486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 1.9550323486328125e-05 to finish D2H +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, takes 0.027039527893066406 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... 
+DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, takes 0.031565189361572266 to schedule async ckpt +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 0, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 1, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 3, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 7, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 2, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 5, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 4, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:rank: 6, joining self.process +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collecting worker results... +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 started +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 155566080, before: 1914781696, after: 2070347776 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 181293056, before: 1626681344, after: 1807974400 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 206708736, before: 1622102016, after: 1828810752 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 211714048, before: 1627856896, after: 1839570944 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 214089728, before: 1622085632, after: 1836175360 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 223186944, before: 1628082176, after: 1851269120 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 214794240, before: 1628073984, after: 1842868224 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 223248384, before: 1627856896, after: 1851105280 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543243.1629348, rank: 7, write(sync,parallel): 0.630255937576294 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543243.1737585, rank: 5, write(sync,parallel): 0.6171097755432129 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543243.1827302, rank: 3, write(sync,parallel): 0.6250243186950684 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.71s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async 
process join finished after 0.69s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.70s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 295759872, before: 1629749248, after: 1925509120 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 274182144, before: 1625473024, after: 1899655168 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1 consumed: 289927168, before: 1629749248, after: 1919676416 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 298848256, before: 1625473024, after: 1924321280 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 407842816, before: 1626689536, after: 2034532352 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 417767424, before: 1635717120, after: 2053484544 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543243.3934004, rank: 4, write(sync,parallel): 0.7756955623626709 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:0 consumed: 417566720, before: 1914781696, after: 2332348416 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543243.4126506, rank: 6, write(sync,parallel): 0.7899494171142578 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.84s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543243.4359648, rank: 2, write(sync,parallel): 0.9801404476165771 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543243.4363415, rank: 1, write(sync,parallel): 1.0347609519958496 +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:FileSystemWriterAsync: collected worker results successfully +DEBUG:megatron.core.dist_checkpointing.strategies.filesystem_async:1750543243.4428952, rank: 0, write(sync,parallel): 0.9484405517578125 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 0.87s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.10s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.06s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:TemporalAsyncCaller: Async process join finished after 1.02s from forking +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543243.4864204, 1, gather: 0.01407933235168457 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543243.486547, 2, gather: 0.006209135055541992 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543243.4865518, 4, gather: 0.05898118019104004 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543243.4866505, 6, 
gather: 0.030963659286499023 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543243.486751, 5, gather: 0.2680652141571045 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543243.4867768, 7, gather: 0.2749454975128174 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543243.486773, 3, gather: 0.2686605453491211 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543243.4879699, 0, gather: 0.0033025741577148438 +DEBUG:megatron.core.dist_checkpointing.strategies.state_dict_saver:1750543243.496697, metadata_write: 0.00859975814819336 +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0433s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0183s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0260s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2892s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0708s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.0139s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2800s +DEBUG:megatron.core.dist_checkpointing.strategies.async_utils:finalize took 0.2804s + successfully saved checkpoint from iteration 10 to gpt-checkpoint [ t 1/2, p 1/1 ] +DEBUG:megatron.training.checkpointing:rank: 1, takes 0.0026841163635253906 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 6, takes 0.002707958221435547 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 5, takes 0.0026688575744628906 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 4, takes 0.0026671886444091797 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 7, takes 0.0027217864990234375 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 2, takes 0.0026443004608154297 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 0, takes 0.0026667118072509766 to finalize ckpt save +DEBUG:megatron.training.checkpointing:rank: 3, takes 0.002665281295776367 to finalize ckpt save +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch 
tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +Start exporting trace 10 +Done exporting trace 10 +(min, max) time across ranks (ms): + evaluate .......................................: (8718.53, 8719.09) +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +---------------------------------------------------------------------------------------------------------------- + validation loss at iteration 10 on 
validation set | lm loss value: 9.644366E+00 | lm loss PPL: 1.543459E+04 | +---------------------------------------------------------------------------------------------------------------- +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Evaluating on 1 samples +Evaluating iter 1/1 +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: 
loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +batch tensor: tokens torch.Size([4, 98304]) +batch tensor: labels torch.Size([4, 98304]) +batch tensor: loss_mask torch.Size([4, 98304]) +batch tensor: attention_mask torch.Size([4, 1, 98304, 98304]) +batch tensor: position_ids torch.Size([4, 98304]) +batch tensor after cp: tokens torch.Size([4, 24576]) +batch tensor after cp: labels torch.Size([4, 24576]) +batch tensor after cp: loss_mask torch.Size([4, 24576]) +batch tensor after cp: attention_mask torch.Size([4, 1, 24576, 98304]) +batch tensor after cp: position_ids torch.Size([4, 24576]) +Start exporting trace 11 +Done exporting trace 11 +(min, max) time across ranks (ms): + evaluate .......................................: (5894.66, 5895.20) +---------------------------------------------------------------------------------------------------------- + validation loss at iteration 10 on test set | lm loss value: 9.644366E+00 | lm loss PPL: 1.543459E+04 | +---------------------------------------------------------------------------------------------------------- +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +WARNING:megatron.core.rerun_state_machine:Setting RerunStateMachine mode RerunMode.DISABLED +Running ctx_length=32768, TP_SIZE=2, CP_SIZE=4, BATCH_SIZE=4 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 32768 +TP_SIZE: 2 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 8, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: Nonetensor-model-parallel size: 2, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 
1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 4 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... 
True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 32768 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 2 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ 
False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 8 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 
1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 32768 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 
0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 
3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... 
disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 32768 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 2 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. 
True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 8 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... 
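The layout reported above follows directly from the arguments: with 8 GPUs split 2-way tensor-parallel and 4-way context-parallel, a single data-parallel replica remains, so the microbatch calculator has nothing to split. A minimal sketch of that arithmetic (plain Python written for this log, not Megatron source):

```python
# Not Megatron source; a sketch of the arithmetic implied by the arguments.
world_size = 8
tp, cp, pp = 2, 4, 1                      # tensor-/context-/pipeline-parallel
dp = world_size // (tp * cp * pp)
assert dp == 1                            # "data-parallel size: 1"

global_batch_size, micro_batch_size = 1, 1
num_microbatches = global_batch_size // (micro_batch_size * dp)
assert num_microbatches == 1              # "setting number of microbatches to constant 1"
```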
+INFO:megatron.training.initialize:Setting logging level to 0
+WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
+WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+ > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432)
+INFO:megatron.training.initialize:Setting logging level to 0
+WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED
+> initializing torch distributed ...
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+> initialized tensor model parallel with size 2
+> initialized pipeline model parallel with size 1
+> setting random seeds to 1234 ...
+> compiling dataset index builder ...
+make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+make: Nothing to be done for 'default'.
+make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+>>> done with dataset index builder. Compilation time: 0.044 seconds
+WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations.
+> compiling and loading fused kernels ...
+>>> done with compiling and loading fused kernels. Compilation time: 2.377 seconds
+time to initialize megatron (seconds): 8.004
+[after megatron is initialized] datetime: 2025-06-21 22:01:42
+building GPT model ...
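The `> padded vocab` line above pads the 50257-token GPT-2 BPE vocabulary so the word-embedding rows divide evenly across tensor-parallel ranks. A sketch of the rule, inferred from the logged numbers (round up to a multiple of make_vocab_size_divisible_by * TP = 128 * 2 = 256):

```python
# Inferred rounding rule, consistent with the numbers in the log above.
def padded_vocab_size(orig_size: int, divisible_by: int = 128, tp_size: int = 2) -> int:
    multiple = divisible_by * tp_size     # 256 here
    return ((orig_size + multiple - 1) // multiple) * multiple

new_size = padded_vocab_size(50257)
assert new_size == 50432                  # "new size: 50432"
assert new_size - 50257 == 175            # "175 dummy tokens"
```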
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 413743104
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 413743104
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 413743104
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 413743104
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 413743104
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 413743104
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 413743104
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 413743104
+INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
+INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
+Params for bucket 1 (413743104 elements, 413743104 padded size):
+ module.decoder.final_layernorm.bias
+ module.decoder.layers.1.mlp.linear_fc1.weight
+ module.decoder.layers.0.mlp.linear_fc1.weight
+ module.decoder.layers.1.mlp.linear_fc2.bias
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.weight
+ module.decoder.layers.0.self_attention.linear_proj.weight
+ module.decoder.final_layernorm.weight
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_proj.bias
+ module.decoder.layers.1.mlp.linear_fc1.bias
+ module.decoder.layers.0.mlp.linear_fc2.weight
+ module.decoder.layers.0.mlp.linear_fc1.bias
+ module.decoder.layers.1.self_attention.linear_qkv.weight
+ module.decoder.layers.1.self_attention.linear_proj.weight
+ module.decoder.layers.0.self_attention.linear_qkv.bias
+ module.decoder.layers.1.mlp.linear_fc2.weight
+ module.decoder.layers.1.self_attention.linear_proj.bias
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
+ module.embedding.position_embeddings.weight
+ module.embedding.word_embeddings.weight
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.1.self_attention.linear_qkv.bias
+ module.decoder.layers.0.mlp.linear_fc2.bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
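The per-rank count above (413743104 parameters on every (tensor, pipeline) rank) can be reproduced from the arguments, and the bucket listing shows exactly which tensors contribute. A back-of-the-envelope check, written for this log rather than taken from Megatron; it assumes column/row-parallel weights (and column-parallel biases) are split across TP while layernorms and position embeddings are replicated:

```python
# hidden 4096, ffn 16384, 2 layers, 64 heads / 16 query groups at 64 channels,
# 32768 learned positions, padded vocab 50432, TP = 2.
h, ffn, n_layers, tp = 4096, 16384, 2, 2
vocab, seq = 50432, 32768
qkv_out = (64 + 2 * 16) * 64              # Q heads + 2x K/V groups, 64 channels each

embeddings = (vocab // tp) * h + seq * h  # word (TP-split) + position (replicated)
qkv  = (qkv_out // tp) * h + qkv_out // tp   # linear_qkv w + b (column-parallel)
proj = h * (h // tp) + h                     # linear_proj w + b (row-parallel)
fc1  = (ffn // tp) * h + ffn // tp           # linear_fc1 w + b (column-parallel)
fc2  = h * (ffn // tp) + h                   # linear_fc2 w + b (row-parallel)
norms = 4 * h                                # qkv + fc1 pre-layernorm, weight and bias
per_layer = qkv + proj + fc1 + fc2 + norms

total = embeddings + n_layers * per_layer + 2 * h   # plus final layernorm w + b
assert total == 413_743_104
```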
+INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='')
+INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine
+WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt
+ will not load any checkpoints and will start from random
+(min, max) time across ranks (ms):
+ load-checkpoint ................................: (2.78, 3.02)
+[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:01:44
+> building train, validation, and test datasets ...
+ > datasets target sizes (minimum size):
+ train: 10
+ validation: 1
+ test: 1
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)]
+> building train, validation, and test datasets for GPT ...
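The split_matrix above is just the normalized cumulative form of split = 1,1,1. An illustrative reimplementation (function name ours):

```python
# Normalize split weights into cumulative [begin, end) corpus fractions.
def split_matrix(split: str) -> list[tuple[float, float]]:
    weights = [float(w) for w in split.split(",")]
    total, acc, bounds = sum(weights), 0.0, []
    for w in weights:
        bounds.append((acc / total, (acc + w) / total))
        acc += w
    return bounds

print(split_matrix("1,1,1"))
# [(0.0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666),
#  (0.6666666666666666, 1.0)]
```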
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=32768, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None)
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005300 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 2081
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001929 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 2080
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001615 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 2083
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+> finished creating GPT datasets ...
+[after dataloaders are built] datetime: 2025-06-21 22:01:44
+done with setup ...
+(min, max) time across ranks (ms):
+ model-and-optimizer-setup ......................: (1525.65, 1542.95)
+ train/valid/test-data-iterators-setup ..........: (27.82, 150.73)
+training ...
+Setting rerun_state_machine.current_iteration to 0...
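The `batch tensor` / `batch tensor after cp` lines below, and the out-of-memory failure that ends this run, come down to the size of the materialized attention mask (create_attention_mask_in_dataloader is True, and the later traceback points at a torch.ones call in pretrain_gpt_profile.py). Size arithmetic only, with shapes taken from the log; the slicing helper is a hypothetical illustration, since Megatron's causal context parallelism actually assigns each rank two non-adjacent chunks for load balancing:

```python
# Full mask [4, 1, 131072, 131072]; per-CP-rank mask [4, 1, 32768, 131072].
# torch.bool costs 1 byte per element.
b, s, cp = 4, 131072, 4

full_mask_bytes = b * 1 * s * s
assert full_mask_bytes == 64 * 2**30      # the 64.00 GiB allocation that OOMs

per_rank_bytes = b * 1 * (s // cp) * s
assert per_rank_bytes == 16 * 2**30       # 4x smaller after context-parallel slicing

def cp_rows(seq_len: int, cp_size: int, rank: int) -> slice:
    """Hypothetical contiguous query-row slice for one CP rank."""
    chunk = seq_len // cp_size            # 32768 rows per rank here
    return slice(rank * chunk, (rank + 1) * chunk)
```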
+[before the start of training step] datetime: 2025-06-21 22:01:44
+batch tensor: tokens torch.Size([4, 131072])
+batch tensor: labels torch.Size([4, 131072])
+batch tensor: loss_mask torch.Size([4, 131072])
+batch tensor: attention_mask torch.Size([4, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([4, 131072])
+batch tensor after cp: tokens torch.Size([4, 32768])
+batch tensor after cp: labels torch.Size([4, 32768])
+batch tensor after cp: loss_mask torch.Size([4, 32768])
+batch tensor after cp: attention_mask torch.Size([4, 1, 32768, 131072])
+batch tensor after cp: position_ids torch.Size([4, 32768])
+batch tensor: tokens torch.Size([4, 131072])
+batch tensor: labels torch.Size([4, 131072])
+batch tensor: loss_mask torch.Size([4, 131072])
+batch tensor: attention_mask torch.Size([4, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([4, 131072])
+batch tensor: tokens torch.Size([4, 131072])
+batch tensor: labels torch.Size([4, 131072])
+batch tensor: loss_mask torch.Size([4, 131072])
+batch tensor: attention_mask torch.Size([4, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([4, 131072])
+batch tensor after cp: tokens torch.Size([4, 32768])
+batch tensor after cp: labels torch.Size([4, 32768])
+batch tensor after cp: loss_mask torch.Size([4, 32768])
+batch tensor after cp: attention_mask torch.Size([4, 1, 32768, 131072])
+batch tensor after cp: position_ids torch.Size([4, 32768])
+batch tensor: tokens torch.Size([4, 131072])
+batch tensor: labels torch.Size([4, 131072])
+batch tensor: loss_mask torch.Size([4, 131072])
+batch tensor: attention_mask torch.Size([4, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([4, 131072])
+batch tensor after cp: tokens torch.Size([4, 32768])
+batch tensor after cp: labels torch.Size([4, 32768])
+batch tensor after cp: loss_mask torch.Size([4, 32768])
+batch tensor after cp: attention_mask torch.Size([4, 1, 32768, 131072])
+batch tensor after cp: position_ids torch.Size([4, 32768])
+batch tensor after cp: tokens torch.Size([4, 32768])
+batch tensor after cp: labels torch.Size([4, 32768])
+batch tensor after cp: loss_mask torch.Size([4, 32768])
+batch tensor after cp: attention_mask torch.Size([4, 1, 32768, 131072])
+batch tensor after cp: position_ids torch.Size([4, 32768])
+batch tensor: tokens torch.Size([4, 131072])
+batch tensor: labels torch.Size([4, 131072])
+batch tensor: loss_mask torch.Size([4, 131072])
+batch tensor: attention_mask torch.Size([4, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([4, 131072])
+batch tensor: tokens torch.Size([4, 131072])
+batch tensor: labels torch.Size([4, 131072])
+batch tensor: loss_mask torch.Size([4, 131072])
+batch tensor: attention_mask torch.Size([4, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([4, 131072])
+batch tensor: tokens torch.Size([4, 131072])
+batch tensor: labels torch.Size([4, 131072])
+batch tensor: loss_mask torch.Size([4, 131072])
+batch tensor: attention_mask torch.Size([4, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([4, 131072])
+batch tensor: tokens torch.Size([4, 131072])
+batch tensor: labels torch.Size([4, 131072])
+batch tensor: loss_mask torch.Size([4, 131072])
+batch tensor: attention_mask torch.Size([4, 1, 131072, 131072])
+batch tensor: position_ids torch.Size([4, 131072])
+batch tensor after cp: tokens torch.Size([4, 32768])
+batch tensor after cp: labels torch.Size([4, 32768])
+batch tensor after cp: loss_mask torch.Size([4, 32768])
+batch tensor after cp: attention_mask torch.Size([4, 1, 32768, 131072])
+batch tensor after cp: position_ids torch.Size([4, 32768])
+batch tensor after cp: tokens torch.Size([4, 32768])
+batch tensor after cp: labels torch.Size([4, 32768])
+batch tensor after cp: loss_mask torch.Size([4, 32768])
+batch tensor after cp: attention_mask torch.Size([4, 1, 32768, 131072])
+batch tensor after cp: position_ids torch.Size([4, 32768])
+batch tensor after cp: tokens torch.Size([4, 32768])
+batch tensor after cp: labels torch.Size([4, 32768])
+batch tensor after cp: loss_mask torch.Size([4, 32768])
+batch tensor after cp: attention_mask torch.Size([4, 1, 32768, 131072])
+batch tensor after cp: position_ids torch.Size([4, 32768])
+batch tensor after cp: tokens torch.Size([4, 32768])
+batch tensor after cp: labels torch.Size([4, 32768])
+batch tensor after cp: loss_mask torch.Size([4, 32768])
+batch tensor after cp: attention_mask torch.Size([4, 1, 32768, 131072])
+batch tensor after cp: position_ids torch.Size([4, 32768])
+Start exporting trace 0
+Done exporting trace 0
+ [2025-06-21 22:02:22] iteration 1/ 10 | consumed samples: 1 | elapsed time per iteration (ms): 38085.9 | learning rate: 0.000000E+00 | global batch size: 1 | loss scale: 4294967296.0 | number of skipped iterations: 1 | number of nan iterations: 0 |
+Number of parameters in transformer block in billions: 0.35
+Number of parameters in embedding layers in billions: 0.21
+Total number of parameters in billions: 0.56
+Number of parameters in most loaded shard in billions: 0.2795
+Theoretical memory footprints: weight and optimizer=4797.35 MB
+[Rank 1] (after 1 iterations) memory (MB) | allocated: 86938.26806640625 | max allocated: 127666.71728515625 | reserved: 131928.0 | max reserved: 131928.0
+[Rank 7] (after 1 iterations) memory (MB) | allocated: 86938.26806640625 | max allocated: 127666.71728515625 | reserved: 133776.0 | max reserved: 133776.0
+[Rank 2] (after 1 iterations) memory (MB) | allocated: 86938.26806640625 | max allocated: 127666.71728515625 | reserved: 132212.0 | max reserved: 132212.0
+[Rank 3] (after 1 iterations) memory (MB) | allocated: 86938.26806640625 | max allocated: 127666.71728515625 | reserved: 133236.0 | max reserved: 133236.0
+[Rank 5] (after 1 iterations) memory (MB) | allocated: 86938.26806640625 | max allocated: 127666.71728515625 | reserved: 133536.0 | max reserved: 133536.0
+[Rank 4] (after 1 iterations) memory (MB) | allocated: 86938.26806640625 | max allocated: 127666.71728515625 | reserved: 133024.0 | max reserved: 133024.0
+[Rank 6] (after 1 iterations) memory (MB) | allocated: 86938.26806640625 | max allocated: 127666.71728515625 | reserved: 132752.0 | max reserved: 132752.0
+[Rank 0] (after 1 iterations) memory (MB) | allocated: 86938.26806640625 | max allocated: 127666.71728515625 | reserved: 131928.0 | max reserved: 131928.0
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 64.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 52.11 GiB is free. Including non-PyTorch memory, this process has 87.69 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 70.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation.
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 64.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 52.11 GiB is free. Including non-PyTorch memory, this process has 87.69 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 70.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+Traceback (most recent call last):
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
+    (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
+    batch = next(global_batches)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
+    attention_mask = torch.ones(
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 64.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 52.11 GiB is free. Including non-PyTorch memory, this process has 87.69 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 70.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 64.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 52.09 GiB is free. Including non-PyTorch memory, this process has 87.69 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 70.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+Traceback (most recent call last):
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
+    (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
+    batch = next(global_batches)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
+    attention_mask = torch.ones(
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 64.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 52.09 GiB is free. Including non-PyTorch memory, this process has 87.69 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 70.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 64.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 51.99 GiB is free. Including non-PyTorch memory, this process has 87.79 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 170.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+Traceback (most recent call last):
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
+    (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
+    batch = next(global_batches)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
+    attention_mask = torch.ones(
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 64.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 51.99 GiB is free. Including non-PyTorch memory, this process has 87.79 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 170.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 64.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 51.80 GiB is free. Including non-PyTorch memory, this process has 88.00 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 394.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+Traceback (most recent call last):
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
+    (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
+    batch = next(global_batches)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
+    attention_mask = torch.ones(
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 64.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 51.80 GiB is free. Including non-PyTorch memory, this process has 88.00 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 394.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 64.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 52.02 GiB is free. Including non-PyTorch memory, this process has 87.79 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 170.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+Traceback (most recent call last):
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
+    (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
+    batch = next(global_batches)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
+    attention_mask = torch.ones(
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 64.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 52.02 GiB is free. Including non-PyTorch memory, this process has 87.79 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 170.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 64.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 51.77 GiB is free. Including non-PyTorch memory, this process has 88.00 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 394.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+Traceback (most recent call last):
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
+    (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
+    batch = next(global_batches)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
+    attention_mask = torch.ones(
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 64.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 51.77 GiB is free. Including non-PyTorch memory, this process has 88.00 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 394.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 64.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 52.09 GiB is free. Including non-PyTorch memory, this process has 87.69 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 70.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+Traceback (most recent call last):
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
+    (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
+    batch = next(global_batches)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
+    attention_mask = torch.ones(
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 64.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 52.09 GiB is free. Including non-PyTorch memory, this process has 87.69 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 70.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 64.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 52.11 GiB is free. Including non-PyTorch memory, this process has 87.69 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 70.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+Traceback (most recent call last):
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
+    (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
+    batch = next(global_batches)
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
+    attention_mask = torch.ones(
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 64.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 52.11 GiB is free. Including non-PyTorch memory, this process has 87.69 GiB memory in use. Of the allocated memory 83.36 GiB is allocated by PyTorch, and 70.04 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
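The failed 64.00 GiB allocation above is exactly the full, unsharded boolean attention mask for this run: the `torch.ones` call in the traceback builds a `[4, 1, 131072, 131072]` mask before the context-parallel slice, and `bool` tensors use one byte per element. A quick check of that arithmetic (note the `expandable_segments:True` hint in the message only mitigates fragmentation; it cannot satisfy a single 64 GiB request on a GPU with roughly 52 GiB free):

```python
# Why each rank requests exactly 64.00 GiB before context-parallel slicing.
batch, seq_len = 4, 131072
mask_elems = batch * 1 * seq_len * seq_len  # bool -> 1 byte per element
print(mask_elems / 2**30)                   # 64.0 (GiB), matching the OOM message
```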
+Running ctx_length=40960, TP_SIZE=2, CP_SIZE=4, BATCH_SIZE=4
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 40960
+TP_SIZE: 2
+CP_SIZE: 4
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
+WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+using world size: 8, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: None, tensor-model-parallel size: 2, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
+Number of virtual stages per pipeline stage: None
+WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
+using torch.float16 for parameters ...
+------------------------ arguments ------------------------
+ account_for_embedding_in_pipeline_split ......... False
+ account_for_loss_in_pipeline_split .............. False
+ accumulate_allreduce_grads_in_fp32 .............. False
+ adam_beta1 ...................................... 0.9
+ adam_beta2 ...................................... 0.999
+ adam_eps ........................................ 1e-08
+ add_bias_linear ................................. True
+ add_position_embedding .......................... True
+ add_qkv_bias .................................... True
+ adlr_autoresume ................................. False
+ adlr_autoresume_interval ........................ 1000
+ align_grad_reduce ............................... True
+ align_param_gather .............................. False
+ app_tag_run_name ................................ None
+ app_tag_run_version ............................. 0.0.0
+ apply_layernorm_1p .............................. False
+ apply_query_key_layer_scaling ................... False
+ apply_residual_connection_post_layernorm ........ False
+ apply_rope_fusion ............................... False
+ async_save ...................................... None
+ async_tensor_model_parallel_allreduce ........... True
+ attention_backend ............................... AttnBackend.auto
+ attention_dropout ............................... 0.1
+ attention_softmax_in_fp32 ....................... False
+ auto_detect_ckpt_format ......................... False
+ barrier_with_L1_time ............................ True
+ bert_binary_head ................................ True
+ bert_embedder_type .............................. megatron
+ bert_load ....................................... None
+ bf16 ............................................ False
+ bias_dropout_fusion ............................. True
+ bias_gelu_fusion ................................ True
+ bias_swiglu_fusion .............................. True
+ biencoder_projection_dim ........................ 0
+ biencoder_shared_query_context_model ............ False
+ block_data_path ................................. None
+ calc_ft_timeouts ................................ False
+ calculate_per_token_loss ........................ False
+ check_for_large_grads ........................... False
+ check_for_nan_in_loss_and_grad .................. False
+ check_for_spiky_loss ............................ False
+ check_weight_hash_across_dp_replicas_interval ... None
+ ckpt_assume_constant_structure .................. False
+ ckpt_convert_format ............................. None
+ ckpt_convert_save ............................... None
+ ckpt_convert_update_legacy_dist_opt_format ...... False
+ ckpt_format ..................................... torch_dist
+ ckpt_fully_parallel_load ........................ False
+ ckpt_fully_parallel_save ........................ True
+ ckpt_fully_parallel_save_deprecated ............. False
+ ckpt_step ....................................... None
+ classes_fraction ................................ 1.0
+ clip_grad ....................................... 1.0
+ clone_scatter_output_in_embedding ............... True
+ config_logger_dir ...............................
+ consumed_train_samples .......................... 0
+ consumed_valid_samples .......................... 0
+ context_parallel_size ........................... 4
+ cp_comm_type .................................... ['p2p']
+ create_attention_mask_in_dataloader ............. True
+ cross_entropy_fusion_impl ....................... native
+ cross_entropy_loss_fusion ....................... False
+ cuda_graph_scope ................................ full
+ cuda_graph_warmup_steps ......................... 3
+ data_args_path .................................. None
+ data_cache_path ................................. None
+ data_parallel_random_init ....................... False
+ data_parallel_sharding_strategy ................. no_shard
+ data_parallel_size .............................. 1
+ data_path ....................................... None
+ data_per_class_fraction ......................... 1.0
+ data_sharding ................................... True
+ dataloader_type ................................. single
+ ddp_average_in_collective ....................... False
+ ddp_bucket_size ................................. None
+ ddp_num_buckets ................................. None
+ ddp_pad_buckets_for_high_nccl_busbw ............. False
+ decoder_first_pipeline_num_layers ............... None
+ decoder_last_pipeline_num_layers ................ None
+ decoder_num_layers .............................. None
+ decoder_seq_length .............................. None
+ decoupled_lr .................................... None
+ decoupled_min_lr ................................ None
+ decrease_batch_size_if_needed ................... False
+ defer_embedding_wgrad_compute ................... False
+ deprecated_use_mcore_models ..................... False
+ deterministic_mode .............................. False
+ dino_bottleneck_size ............................ 256
+ dino_freeze_last_layer .......................... 1
+ dino_head_hidden_size ........................... 2048
+ dino_local_crops_number ......................... 10
+ dino_local_img_size ............................. 96
+ dino_norm_last_layer ............................ False
+ dino_teacher_temp ............................... 0.07
+ dino_warmup_teacher_temp ........................ 0.04
+ dino_warmup_teacher_temp_epochs ................. 30
+ disable_bf16_reduced_precision_matmul ........... False
+ disable_mamba_mem_eff_path ...................... False
+ disable_straggler_on_startup .................... False
+ dist_ckpt_format_deprecated ..................... None
+ dist_ckpt_strictness ............................ assume_ok_unexpected
+ distribute_saved_activations .................... False
+ distributed_backend ............................. nccl
+ distributed_timeout_minutes ..................... 10
+ embedding_path .................................. None
+ empty_unused_memory_level ....................... 0
+ enable_cuda_graph ............................... False
+ enable_ft_package ............................... False
+ enable_gloo_process_groups ...................... True
+ enable_msc ...................................... True
+ enable_one_logger ............................... True
+ encoder_num_layers .............................. 2
+ encoder_pipeline_model_parallel_size ............ 0
+ encoder_seq_length .............................. 40960
+ encoder_tensor_model_parallel_size .............. 0
+ end_weight_decay ................................ 0.1
+ eod_mask_loss ................................... False
+ error_injection_rate ............................ 0
+ error_injection_type ............................ transient_error
+ eval_interval ................................... 16
+ eval_iters ...................................... 1
+ evidence_data_path .............................. None
+ exit_duration_in_mins ........................... None
+ exit_interval ................................... None
+ exit_on_missing_checkpoint ...................... False
+ exit_signal_handler ............................. False
+ exp_avg_dtype ................................... torch.float32
+ exp_avg_sq_dtype ................................ torch.float32
+ expert_model_parallel_size ...................... 1
+ expert_tensor_parallel_size ..................... 2
+ external_cuda_graph ............................. False
+ ffn_hidden_size ................................. 16384
+ finetune ........................................ False
+ first_last_layers_bf16 .......................... False
+ flash_decode .................................... False
+ fp16 ............................................ True
+ fp16_lm_cross_entropy ........................... False
+ fp32_residual_connection ........................ False
+ fp8 ............................................. None
+ fp8_amax_compute_algo ........................... most_recent
+ fp8_amax_history_len ............................ 1
+ fp8_interval .................................... 1
+ fp8_margin ...................................... 0
+ fp8_param_gather ................................ False
+ fp8_recipe ...................................... delayed
+ fp8_wgrad ....................................... True
+ fsdp_double_buffer .............................. False
+ global_batch_size ............................... 1
+ grad_reduce_in_bf16 ............................. False
+ gradient_accumulation_fusion .................... True
+ gradient_reduce_div_fusion ...................... True
+ group_query_attention ........................... True
+ head_lr_mult .................................... 1.0
+ heterogeneous_layers_config_encoded_json ........ None
+ heterogeneous_layers_config_path ................ None
+ hidden_dropout .................................. 0.1
+ hidden_size ..................................... 4096
+ hierarchical_context_parallel_sizes ............. None
+ high_priority_stream_groups ..................... []
+ hybrid_attention_ratio .......................... 0.0
+ hybrid_mlp_ratio ................................ 0.0
+ hybrid_override_pattern ......................... None
+ hysteresis ...................................... 2
+ ict_head_size ................................... None
+ ict_load ........................................ None
+ img_h ........................................... 224
+ img_w ........................................... 224
+ indexer_batch_size .............................. 128
+ indexer_log_interval ............................ 1000
+ inference_batch_times_seqlen_threshold .......... -1
+ inference_dynamic_batching ...................... False
+ inference_dynamic_batching_buffer_guaranteed_fraction 0.2
+ inference_dynamic_batching_buffer_overflow_factor None
+ inference_dynamic_batching_buffer_size_gb ....... 40.0
+ inference_dynamic_batching_chunk_size ........... 256
+ inference_dynamic_batching_max_requests_override None
+ inference_dynamic_batching_max_tokens_override .. None
+ inference_max_batch_size ........................ 8
+ inference_max_seq_length ........................ 2560
+ inference_rng_tracker ........................... False
+ init_method_std ................................. 0.02
+ init_method_xavier_uniform ...................... False
+ init_model_with_meta_device ..................... False
+ initial_loss_scale .............................. 4294967296
+ inprocess_active_world_size ..................... 8
+ inprocess_barrier_timeout ....................... 120
+ inprocess_completion_timeout .................... 120
+ inprocess_empty_cuda_cache ...................... False
+ inprocess_granularity ........................... node
+ inprocess_hard_timeout .......................... 90
+ inprocess_heartbeat_interval .................... 30
+ inprocess_heartbeat_timeout ..................... 60
+ inprocess_last_call_wait ........................ 1
+ inprocess_max_iterations ........................ None
+ inprocess_monitor_process_interval .............. 1.0
+ inprocess_monitor_thread_interval ............... 1.0
+ inprocess_progress_watchdog_interval ............ 1.0
+ inprocess_restart ............................... False
+ inprocess_soft_timeout .......................... 60
+ inprocess_termination_grace_time ................ 1
+ is_hybrid_model ................................. False
+ iter_per_epoch .................................. 1250
+ iterations_to_skip .............................. []
+ keep_fp8_transpose_cache_when_using_custom_fsdp . False
+ kv_channels ..................................... 64
+ kv_lora_rank .................................... 32
+ lazy_mpu_init ................................... None
+ load ............................................ gpt-checkpoint
+ load_model_opt_format ........................... False
+ local_rank ...................................... 0
+ log_interval .................................... 1
+ log_loss_scale_to_tensorboard ................... True
+ log_memory_to_tensorboard ....................... False
+ log_num_zeros_in_grad ........................... False
+ log_params_norm ................................. False
+ log_progress .................................... False
+ log_straggler ................................... False
+ log_throughput .................................. False
+ log_timers_to_tensorboard ....................... False
+ log_validation_ppl_to_tensorboard ............... False
+ log_world_size_to_tensorboard ................... False
+ logging_level ................................... 0
+ loss_scale ...................................... None
+ loss_scale_window ............................... 1000
+ lr .............................................. 0.0005
+ lr_decay_iters .................................. 150000
+ lr_decay_samples ................................ None
+ lr_decay_style .................................. cosine
+ lr_warmup_fraction .............................. None
+ lr_warmup_init .................................. 0.0
+ lr_warmup_iters ................................. 2
+ lr_warmup_samples ............................... 0
+ lr_wsd_decay_iters .............................. None
+ lr_wsd_decay_samples ............................ None
+ lr_wsd_decay_style .............................. exponential
+ main_grads_dtype ................................ torch.float32
+ main_params_dtype ............................... torch.float32
+ make_vocab_size_divisible_by .................... 128
+ mamba_head_dim .................................. 64
+ mamba_num_groups ................................ 8
+ mamba_num_heads ................................. None
+ mamba_state_dim ................................. 128
+ manual_gc ....................................... False
+ manual_gc_eval .................................. True
+ manual_gc_interval .............................. 0
+ mask_factor ..................................... 1.0
+ mask_prob ....................................... 0.15
+ mask_type ....................................... random
+ masked_softmax_fusion ........................... True
+ max_position_embeddings ......................... 40960
+ max_tokens_to_oom ............................... 12000
+ memory_snapshot_path ............................ snapshot.pickle
+ merge_file ...................................... merges.txt
+ micro_batch_size ................................ 1
+ microbatch_group_size_per_vp_stage .............. None
+ mid_level_dataset_surplus ....................... 0.005
+ min_loss_scale .................................. 1.0
+ min_lr .......................................... 0.0
+ mlp_chunks_for_prefill .......................... 1
+ mmap_bin_files .................................. True
+ mock_data ....................................... True
+ moe_apply_probs_on_input ........................ False
+ moe_aux_loss_coeff .............................. 0.0
+ moe_enable_deepep ............................... False
+ moe_expert_capacity_factor ...................... None
+ moe_extended_tp ................................. False
+ moe_ffn_hidden_size ............................. None
+ moe_grouped_gemm ................................ False
+ moe_input_jitter_eps ............................ None
+ moe_layer_freq .................................. 1
+ moe_layer_recompute ............................. False
+ moe_pad_expert_input_to_capacity ................ False
+ moe_per_layer_logging ........................... False
+ moe_permute_fusion .............................. False
+ moe_router_bias_update_rate ..................... 0.001
+ moe_router_dtype ................................ None
+ moe_router_enable_expert_bias ................... False
+ moe_router_force_load_balancing ................. False
+ moe_router_group_topk ........................... None
+ moe_router_load_balancing_type .................. aux_loss
+ moe_router_num_groups ........................... None
+ moe_router_padding_for_fp8 ...................... False
+ moe_router_pre_softmax .......................... False
+ moe_router_score_function ....................... softmax
+ moe_router_topk ................................. 2
+ moe_router_topk_scaling_factor .................. None
+ moe_shared_expert_intermediate_size ............. None
+ moe_shared_expert_overlap ....................... False
+ moe_token_dispatcher_type ....................... allgather
+ moe_token_drop_policy ........................... probs
+ moe_use_legacy_grouped_gemm ..................... False
+ moe_use_upcycling ............................... False
+ moe_z_loss_coeff ................................ None
+ mrope_section ................................... None
+ mscale .......................................... 1.0
+ mscale_all_dim .................................. 1.0
+ mtp_loss_scaling_factor ......................... 0.1
+ mtp_num_layers .................................. None
+ multi_latent_attention .......................... False
+ nccl_all_reduce_for_prefill ..................... False
+ nccl_communicator_config_path ................... None
+ nccl_ub ......................................... False
+ no_load_optim ................................... None
+ no_load_rng ..................................... None
+ no_persist_layer_norm ........................... False
+ no_rope_freq .................................... None
+ no_save_optim ................................... None
+ no_save_rng ..................................... None
+ non_persistent_ckpt_type ........................ None
+ non_persistent_global_ckpt_dir .................. None
+ non_persistent_local_ckpt_algo .................. fully_parallel
+ non_persistent_local_ckpt_dir ................... None
+ non_persistent_save_interval .................... None
+ norm_epsilon .................................... 1e-05
+ normalization ................................... LayerNorm
+ num_attention_heads ............................. 64
+ num_channels .................................... 3
+ num_classes ..................................... 1000
+ num_dataset_builder_threads ..................... 1
+ num_distributed_optimizer_instances ............. 1
+ num_experts ..................................... None
+ num_layers ...................................... 2
+ num_layers_at_end_in_bf16 ....................... 1
+ num_layers_at_start_in_bf16 ..................... 1
+ num_layers_per_virtual_pipeline_stage ........... None
+ num_query_groups ................................ 16
+ num_virtual_stages_per_pipeline_rank ............ None
+ num_workers ..................................... 2
+ object_storage_cache_path ....................... None
+ one_logger_async ................................ False
+ one_logger_project .............................. megatron-lm
+ one_logger_run_name ............................. None
+ onnx_safe ....................................... None
+ openai_gelu ..................................... False
+ optimizer ....................................... adam
+ optimizer_cpu_offload ........................... False
+ optimizer_offload_fraction ...................... 1.0
+ output_bert_embeddings .......................... False
+ overlap_cpu_optimizer_d2h_h2d ................... False
+ overlap_grad_reduce ............................. False
+ overlap_p2p_comm ................................ False
+ overlap_p2p_comm_warmup_flush ................... False
+ overlap_param_gather ............................ False
+ overlap_param_gather_with_optimizer_step ........ False
+ override_opt_param_scheduler .................... False
+ params_dtype .................................... torch.float16
+ patch_dim ....................................... 16
+ per_split_data_args_path ........................ None
+ perform_initialization .......................... True
+ pin_cpu_grads ................................... True
+ pin_cpu_params .................................. True
+ pipeline_model_parallel_comm_backend ............ None
+ pipeline_model_parallel_size .................... 1
+ pipeline_model_parallel_split_rank .............. None
+ position_embedding_type ......................... learned_absolute
+ pretrained_checkpoint ........................... None
+ profile ......................................... False
+ profile_ranks ................................... [0]
+ profile_step_end ................................ 12
+ profile_step_start .............................. 10
+ q_lora_rank ..................................... None
+ qk_head_dim ..................................... 128
+ qk_l2_norm ...................................... False
+ qk_layernorm .................................... False
+ qk_pos_emb_head_dim ............................. 64
+ query_in_block_prob ............................. 0.1
+ rampup_batch_size ............................... None
+ rank ............................................ 0
+ recompute_granularity ........................... None
+ recompute_method ................................ None
+ recompute_modules ............................... None
+ recompute_num_layers ............................ None
+ record_memory_history ........................... False
+ relative_attention_max_distance ................. 128
+ relative_attention_num_buckets .................. 32
+ replication ..................................... False
+ replication_factor .............................. 2
+ replication_jump ................................ None
+ rerun_mode ...................................... disabled
+ reset_attention_mask ............................ False
+ reset_position_ids .............................. False
+ result_rejected_tracker_filename ................ None
+ retriever_report_topk_accuracies ................ []
+ retriever_score_scaling ......................... False
+ retriever_seq_length ............................ 256
+ retro_add_retriever ............................. False
+ retro_attention_gate ............................ 1
+ retro_cyclic_train_iters ........................ None
+ retro_encoder_attention_dropout ................. 0.1
+ retro_encoder_hidden_dropout .................... 0.1
+ retro_encoder_layers ............................ 2
+ retro_num_neighbors ............................. 2
+ retro_num_retrieved_chunks ...................... 2
+ retro_project_dir ............................... None
+ retro_verify_neighbor_count ..................... True
+ rope_scaling_factor ............................. 8.0
+ rotary_base ..................................... 10000
+ rotary_interleaved .............................. False
+ rotary_percent .................................. 1.0
+ rotary_scaling_factor ........................... 1.0
+ rotary_seq_len_interpolation_factor ............. None
+ run_workload_inspector_server ................... False
+ sample_rate ..................................... 1.0
+ save ............................................ gpt-checkpoint
+ save_interval ................................... 16
+ scatter_gather_tensors_in_pipeline .............. True
+ seed ............................................ 1234
+ seq_length ...................................... 40960
+ sequence_parallel ............................... False
+ sgd_momentum .................................... 0.9
+ short_seq_prob .................................. 0.1
+ skip_train ...................................... False
+ skipped_train_samples ........................... 0
+ spec ............................................ None
+ split ........................................... None
+ squared_relu .................................... False
+ start_weight_decay .............................. 0.1
+ straggler_ctrlr_port ............................ 65535
+ straggler_minmax_count .......................... 1
+ suggested_communication_unit_size ............... None
+ swiglu .......................................... False
+ swin_backbone_type .............................. tiny
+ symmetric_ar_type ............................... None
+ te_rng_tracker .................................. False
+ tensor_model_parallel_size ...................... 2
+ tensorboard_dir ................................. tensorboard-logs/
+ tensorboard_log_interval ........................ 1
+ tensorboard_queue_size .......................... 1000
+ test_data_path .................................. None
+ test_mode ....................................... False
+ tiktoken_num_special_tokens ..................... 1000
+ tiktoken_pattern ................................ None
+ tiktoken_special_tokens ......................... None
+ timing_log_level ................................ 0
+ timing_log_option ............................... minmax
+ titles_data_path ................................ None
+ tokenizer_model ................................. None
+ tokenizer_type .................................. GPT2BPETokenizer
+ torch_fsdp2_reshard_after_forward ............... True
+ tp_comm_bootstrap_backend ....................... nccl
+ tp_comm_bulk_dgrad .............................. True
+ tp_comm_bulk_wgrad .............................. True
+ tp_comm_overlap ................................. False
+ tp_comm_overlap_ag .............................. True
+ tp_comm_overlap_cfg ............................. None
+ tp_comm_overlap_rs .............................. True
+ tp_comm_overlap_rs_dgrad ........................ False
+ tp_comm_split_ag ................................ True
+ tp_comm_split_rs ................................ True
+ train_data_path ................................. None
+ train_iters ..................................... 10
+ train_samples ................................... None
+ train_sync_interval ............................. None
+ transformer_impl ................................ transformer_engine
+ transformer_pipeline_model_parallel_size ........ 1
+ untie_embeddings_and_output_weights ............. False
+ use_checkpoint_args ............................. False
+ use_checkpoint_opt_param_scheduler .............. False
+ use_cpu_initialization .......................... None
+ use_custom_fsdp ................................. False
+ use_dist_ckpt ................................... True
+ use_dist_ckpt_deprecated ........................ False
+ use_distributed_optimizer ....................... False
+ use_flash_attn .................................. False
+ use_legacy_models ............................... False
+ use_mp_args_from_checkpoint_args ................ False
+ use_one_sent_docs ............................... False
+ use_persistent_ckpt_worker ...................... False
+ use_precision_aware_optimizer ................... False
+ use_pytorch_profiler ............................ False
+ use_ring_exchange_p2p ........................... False
+ use_rope_scaling ................................ False
+ use_rotary_position_embeddings .................. False
+ use_sharp ....................................... False
+ use_tokenizer_model_from_checkpoint_args ........ True
+ use_torch_fsdp2 ................................. False
+ use_torch_optimizer_for_cpu_offload ............. False
+ use_tp_pp_dp_mapping ............................ False
+ v_head_dim ...................................... 128
+ valid_data_path ................................. None
+ variable_seq_lengths ............................ False
+ virtual_pipeline_model_parallel_size ............ None
+ vision_backbone_type ............................ vit
+ vision_pretraining .............................. False
+ vision_pretraining_type ......................... classify
+ vocab_extra_ids ................................. 0
+ vocab_file ...................................... vocab.json
+ vocab_size ...................................... None
+ wandb_exp_name ..................................
+ wandb_project ...................................
+ wandb_save_dir ..................................
+ weight_decay .................................... 0.1
+ weight_decay_incr_style ......................... constant
+ wgrad_deferral_limit ............................ 0
+ world_size ...................................... 8
+ yaml_cfg ........................................ None
+-------------------- end of arguments ---------------------
+INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1
+> building GPT2BPETokenizer tokenizer ...
+ > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432)
+INFO:megatron.training.initialize:Setting logging level to 0
+WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED
+> initializing torch distributed ...
+INFO:megatron.training.initialize:Setting logging level to 0
+> initialized tensor model parallel with size 2
+> initialized pipeline model parallel with size 1
+> setting random seeds to 1234 ...
+> compiling dataset index builder ...
+make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+make: Nothing to be done for 'default'.
+make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+>>> done with dataset index builder. Compilation time: 0.046 seconds
+WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations.
+> compiling and loading fused kernels ...
+>>> done with compiling and loading fused kernels. Compilation time: 2.212 seconds
+time to initialize megatron (seconds): 7.207
+[after megatron is initialized] datetime: 2025-06-21 22:03:05
+building GPT model ...
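The `padded vocab` line earlier in this startup block follows Megatron's padding rule: grow the vocabulary to the next multiple of `make-vocab-size-divisible-by × tensor-model-parallel-size` (128 × 2 = 256 here) so each TP rank gets an equal, aligned shard. A small re-derivation of the reported numbers:

```python
# Re-derivation of "> padded vocab (size: 50257) with 175 dummy tokens (new size: 50432)".
def padded_vocab_size(orig_vocab: int, divisible_by: int, tp_size: int) -> int:
    multiple = divisible_by * tp_size
    return -(-orig_vocab // multiple) * multiple  # ceiling to the next multiple

padded = padded_vocab_size(50257, 128, 2)
print(padded, padded - 50257)  # 50432 175
```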
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 447297536
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 447297536
+>>> embedding
+>>> decoder
+>>> output_layer
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 447297536
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 447297536
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 447297536
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 447297536
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 447297536
+INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
+INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
+Params for bucket 1 (447297536 elements, 447297536 padded size):
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
+ module.decoder.layers.1.mlp.linear_fc1.bias
+ module.decoder.layers.0.mlp.linear_fc1.bias
+ module.embedding.position_embeddings.weight
+ module.embedding.word_embeddings.weight
+ module.decoder.final_layernorm.bias
+ module.decoder.layers.1.self_attention.linear_qkv.weight
+ module.decoder.layers.1.self_attention.linear_proj.weight
+ module.decoder.layers.0.self_attention.linear_qkv.weight
+ module.decoder.layers.1.mlp.linear_fc2.weight
+ module.decoder.layers.1.self_attention.linear_proj.bias
+ module.decoder.final_layernorm.weight
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.0.mlp.linear_fc2.weight
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_proj.weight
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.1.self_attention.linear_qkv.bias
+ module.decoder.layers.0.mlp.linear_fc2.bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.bias
+ module.decoder.layers.0.self_attention.linear_proj.bias
+ module.decoder.layers.1.mlp.linear_fc1.weight
+ module.decoder.layers.0.mlp.linear_fc1.weight
+ module.decoder.layers.1.mlp.linear_fc2.bias
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
+>>> embedding
+>>> decoder
+>>> output_layer
+INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='')
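The per-rank count of 447297536 printed above is reproducible from the arguments (hidden 4096, ffn 16384, 2 layers, 64 heads with 16 query groups at 64 kv-channels, padded vocab 50432, sequence 40960, TP 2). A hedged back-of-envelope, assuming the standard Megatron GPT layout (column-parallel QKV/fc1 split on outputs, row-parallel proj/fc2 split on inputs, position embeddings replicated):

```python
# Back-of-envelope for "> number of parameters ... 447297536" (assumptions above).
tp, h, ffn, layers = 2, 4096, 16384, 2
heads, groups, kv_ch = 64, 16, 64
vocab_padded, seq = 50432, 40960

emb = vocab_padded // tp * h + seq * h            # word emb (TP-split) + position emb
qkv_out = (heads + 2 * groups) * kv_ch            # GQA: 6144 output rows before TP split
attn = qkv_out // tp * h + qkv_out // tp \
     + h * (heads * kv_ch // tp) + h              # linear_qkv (+bias), linear_proj (+bias)
mlp = ffn // tp * h + ffn // tp + h * (ffn // tp) + h  # linear_fc1 and linear_fc2 (+biases)
norms = 2 * h + 2 * h                             # two fused pre-layernorms (weight + bias)
total = emb + layers * (attn + mlp + norms) + 2 * h    # plus final layernorm
print(total)                                      # 447297536
```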
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 447297536
+INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine
+WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt
+    will not load any checkpoints and will start from random
+(min, max) time across ranks (ms):
+    load-checkpoint ................................: (8.04, 10.66)
+[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:03:07
+> building train, validation, and test datasets ...
+ > datasets target sizes (minimum size):
+    train:      10
+    validation: 1
+    test:       1
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)]
+> building train, validation, and test datasets for GPT ...
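The `split_matrix` reported above is just the cumulative normalization of the `1,1,1` split weights into half-open interval boundaries; for instance:

```python
# How a "1,1,1" split becomes the logged split_matrix.
weights = [1, 1, 1]
edges = [sum(weights[:i]) / sum(weights) for i in range(len(weights) + 1)]
print(list(zip(edges[:-1], edges[1:])))
# [(0.0, 0.333...), (0.333..., 0.666...), (0.666..., 1.0)]
```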
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=40960, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None)
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset:> time elapsed: 0.005023 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1664
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset:> time elapsed: 0.001837 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1664
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset:> time elapsed: 0.001578 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1667
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+> finished creating GPT datasets ...
+[after dataloaders are built] datetime: 2025-06-21 22:03:07
+done with setup ...
+(min, max) time across ranks (ms):
+    model-and-optimizer-setup ......................: (2032.71, 2033.21)
+    train/valid/test-data-iterators-setup ..........: (30.07, 153.35)
+training ...
+Setting rerun_state_machine.current_iteration to 0...
+[before the start of training step] datetime: 2025-06-21 22:03:07
+batch tensor: tokens torch.Size([4, 163840])
+batch tensor: labels torch.Size([4, 163840])
+batch tensor: loss_mask torch.Size([4, 163840])
+batch tensor: attention_mask torch.Size([4, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([4, 163840])
+batch tensor after cp: tokens torch.Size([4, 40960])
+batch tensor after cp: labels torch.Size([4, 40960])
+batch tensor after cp: loss_mask torch.Size([4, 40960])
+batch tensor after cp: attention_mask torch.Size([4, 1, 40960, 163840])
+batch tensor after cp: position_ids torch.Size([4, 40960])
+batch tensor: tokens torch.Size([4, 163840])
+batch tensor: labels torch.Size([4, 163840])
+batch tensor: loss_mask torch.Size([4, 163840])
+batch tensor: attention_mask torch.Size([4, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([4, 163840])
+batch tensor after cp: tokens torch.Size([4, 40960])
+batch tensor after cp: labels torch.Size([4, 40960])
+batch tensor after cp: loss_mask torch.Size([4, 40960])
+batch tensor after cp: attention_mask torch.Size([4, 1, 40960, 163840])
+batch tensor after cp: position_ids torch.Size([4, 40960])
+batch tensor: tokens torch.Size([4, 163840])
+batch tensor: labels torch.Size([4, 163840])
+batch tensor: loss_mask torch.Size([4, 163840])
+batch tensor: attention_mask torch.Size([4, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([4, 163840])
+batch tensor after cp: tokens torch.Size([4, 40960])
+batch tensor after cp: labels torch.Size([4, 40960])
+batch tensor after cp: loss_mask torch.Size([4, 40960])
+batch tensor after cp: attention_mask torch.Size([4, 1, 40960, 163840])
+batch tensor after cp: position_ids torch.Size([4, 40960])
+batch tensor: tokens torch.Size([4, 163840])
+batch tensor: labels torch.Size([4, 163840])
+batch tensor: loss_mask torch.Size([4, 163840])
+batch tensor: attention_mask torch.Size([4, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([4, 163840])
+batch tensor after cp: tokens torch.Size([4, 40960])
+batch tensor after cp: labels torch.Size([4, 40960])
+batch tensor after cp: loss_mask torch.Size([4, 40960])
+batch tensor after cp: attention_mask torch.Size([4, 1, 40960, 163840])
+batch tensor after cp: position_ids torch.Size([4, 40960])
+batch tensor: tokens torch.Size([4, 163840])
+batch tensor: labels torch.Size([4, 163840])
+batch tensor: loss_mask torch.Size([4, 163840])
+batch tensor: attention_mask torch.Size([4, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([4, 163840])
+batch tensor: tokens torch.Size([4, 163840])
+batch tensor: labels torch.Size([4, 163840])
+batch tensor: loss_mask torch.Size([4, 163840])
+batch tensor: attention_mask torch.Size([4, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([4, 163840])
+batch tensor after cp: tokens torch.Size([4, 40960])
+batch tensor after cp: labels torch.Size([4, 40960])
+batch tensor after cp: loss_mask torch.Size([4, 40960])
+batch tensor after cp: attention_mask torch.Size([4, 1, 40960, 163840])
+batch tensor after cp: position_ids torch.Size([4, 40960])
+batch tensor after cp: tokens torch.Size([4, 40960])
+batch tensor after cp: labels torch.Size([4, 40960])
+batch tensor after cp: loss_mask torch.Size([4, 40960])
+batch tensor after cp: attention_mask torch.Size([4, 1, 40960, 163840])
+batch tensor after cp: position_ids torch.Size([4, 40960])
+batch tensor: tokens torch.Size([4, 163840])
+batch tensor: labels torch.Size([4, 163840])
+batch tensor: loss_mask torch.Size([4, 163840])
+batch tensor: attention_mask torch.Size([4, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([4, 163840])
+batch tensor: tokens torch.Size([4, 163840])
+batch tensor: labels torch.Size([4, 163840])
+batch tensor: loss_mask torch.Size([4, 163840])
+batch tensor: attention_mask torch.Size([4, 1, 163840, 163840])
+batch tensor: position_ids torch.Size([4, 163840])
+batch tensor after cp: tokens torch.Size([4, 40960])
+batch tensor after cp: labels torch.Size([4, 40960])
+batch tensor after cp: loss_mask torch.Size([4, 40960])
+batch tensor after cp: attention_mask torch.Size([4, 1, 40960, 163840])
+batch tensor after cp: position_ids torch.Size([4, 40960])
+batch tensor after cp: tokens torch.Size([4, 40960])
+batch tensor after cp: labels torch.Size([4, 40960])
+batch tensor after cp: loss_mask torch.Size([4, 40960])
+batch tensor after cp: attention_mask torch.Size([4, 1, 40960, 163840])
+batch tensor after cp: position_ids torch.Size([4, 40960])
+Running ctx_length=49152, TP_SIZE=2, CP_SIZE=4, BATCH_SIZE=4
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 49152
+TP_SIZE: 2
+CP_SIZE: 4
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+using world size: 8, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: None, tensor-model-parallel size: 2, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
+Number of virtual stages per pipeline stage: None
+WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
+using torch.float16 for parameters ...
+------------------------ arguments ------------------------
+ account_for_embedding_in_pipeline_split ......... False
+ account_for_loss_in_pipeline_split .............. False
+ accumulate_allreduce_grads_in_fp32 .............. False
+ adam_beta1 ...................................... 0.9
+ adam_beta2 ...................................... 0.999
+ adam_eps ........................................ 1e-08
+ add_bias_linear ................................. True
+ add_position_embedding .......................... True
+ add_qkv_bias .................................... True
+ adlr_autoresume ................................. False
+ adlr_autoresume_interval ........................ 1000
+ align_grad_reduce ............................... True
+ align_param_gather .............................. False
+ app_tag_run_name ................................ None
+ app_tag_run_version ............................. 0.0.0
+ apply_layernorm_1p .............................. False
+ apply_query_key_layer_scaling ................... False
+ apply_residual_connection_post_layernorm ........ False
+ apply_rope_fusion ............................... False
+ async_save ...................................... None
+ async_tensor_model_parallel_allreduce ........... True
+ attention_backend ............................... AttnBackend.auto
+ attention_dropout ............................... 0.1
+ attention_softmax_in_fp32 ....................... False
+ auto_detect_ckpt_format ......................... False
+ barrier_with_L1_time ............................ True
+ bert_binary_head ................................ True
+ bert_embedder_type .............................. megatron
+ bert_load ....................................... None
None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ False + check_weight_hash_across_dp_replicas_interval ... None + ckpt_assume_constant_structure .................. False + ckpt_convert_format ............................. None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 4 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 
10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 49152 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 2 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... 
True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 8 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... 
False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 49152 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 
0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 
1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 
1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 49152 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 2 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. 
False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 8 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 2 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... 
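Two quick consistency checks on the numbers printed above. Megatron factorizes the world size as TP x PP x CP x DP, and pads the vocabulary to a multiple of make_vocab_size_divisible_by x TP; the values below are taken from this log:

# world size 8 with TP=2, PP=1, CP=4 leaves data-parallel size 1
world_size, tp, pp, cp = 8, 2, 1, 4
dp = world_size // (tp * pp * cp)
assert tp * pp * cp * dp == world_size
print(dp)  # 1, as reported above

# GPT-2 BPE vocab 50257 padded to a multiple of 128 * TP = 256
import math
padded = math.ceil(50257 / (128 * tp)) * (128 * tp)
print(padded, padded - 50257)  # 50432, 175 dummy tokens -- matching the tokenizer line above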
+make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.043 seconds +WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations. +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.163 seconds +time to initialize megatron (seconds): 7.576 +[after megatron is initialized] datetime: 2025-06-21 22:04:22 +building GPT model ... +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 480851968 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 480851968 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 480851968 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 480851968 +>>> embedding +>>> decoder +>>> output_layer +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 480851968 + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 480851968 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 480851968 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (480851968 elements, 480851968 padded size): + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.embedding.position_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + 
module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.embedding.word_embeddings.weight + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.final_layernorm.weight + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.bias +INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 480851968 +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (3.76, 4.05) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:04:25 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
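The per-rank parameter count of 480851968 reported during model build above can be reproduced by hand from the arguments (hidden 4096, ffn 16384, 2 layers, 64 heads with 16 KV groups of kv_channels 64, padded vocab 50432, 49152 positions, TP=2). The sketch below assumes the standard Megatron-LM layout: column-parallel QKV and fc1, row-parallel proj and fc2 with replicated biases, a replicated learned_absolute position table and norms, and the output layer tied to the word embedding:

V, P, H, F, L, TP = 50432, 49152, 4096, 16384, 2, 2
G, KV = 16, 64                                   # query groups, kv_channels
word_emb = V * H // TP                           # column-parallel word embedding
pos_emb  = P * H                                 # learned_absolute table, replicated
qkv   = (H * H + 2 * G * KV * H) // TP + (H + 2 * G * KV) // TP
proj  = H * H // TP + H                          # row-parallel: bias replicated
fc1   = H * F // TP + F // TP
fc2   = F * H // TP + H
norms = 4 * H                                    # two pre-norms (weight + bias) per layer
per_layer = qkv + proj + fc1 + fc2 + norms
total = word_emb + pos_emb + L * per_layer + 2 * H   # + final layernorm (weight + bias)
print(total)  # 480851968 -- matches every rank above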
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=49152, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None)
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.006778 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1387
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001742 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1386
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001549 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1389
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+> finished creating GPT datasets ...
+[after dataloaders are built] datetime: 2025-06-21 22:04:25
+done with setup ...
+(min, max) time across ranks (ms):
+    model-and-optimizer-setup ......................: (2212.80, 2234.82)
+    train/valid/test-data-iterators-setup ..........: (34.78, 143.42)
+training ...
+Setting rerun_state_machine.current_iteration to 0...
+[before the start of training step] datetime: 2025-06-21 22:04:25
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 144.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 134.55 GiB is free. Including non-PyTorch memory, this process has 5.25 GiB memory in use. Of the allocated memory 3.60 GiB is allocated by PyTorch, and 200.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+Traceback (most recent call last):
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step
+    (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)
+                                                                            ^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch
+    batch = next(global_batches)
+            ^^^^^^^^^^^^^^^^^^^^
+  File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches
+    attention_mask = torch.ones(
+                     ^^^^^^^^^^^
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 144.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 134.55 GiB is free. Including non-PyTorch memory, this process has 5.25 GiB memory in use. Of the allocated memory 3.60 GiB is allocated by PyTorch, and 200.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
[the same 144.00 GiB OutOfMemoryError, with an identical warning and traceback, was raised on every rank (GPUs 0-7); the seven duplicate reports are elided]
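The failed 144.00 GiB request is exactly the size of a dense float32 tensor of shape [4, 1, 98304, 98304], i.e. a full mask over an effective sequence of 2 x 49152; what setup_batches actually passes to torch.ones is an assumption here, since the call is truncated in the traceback. Whatever the precise shape, a dense [B, 1, S, S] mask grows quadratically in S, and no PYTORCH_CUDA_ALLOC_CONF setting can satisfy a single request larger than the 139.81 GiB device. A small sanity check of the arithmetic:

def mask_gib(b, s, itemsize):
    """Bytes of a dense [b, 1, s, s] attention mask, expressed in GiB."""
    return b * s * s * itemsize / 2**30

print(mask_gib(4, 49152, 1))   # bool at seq_length 49152:       9.0 GiB
print(mask_gib(4, 49152, 4))   # float32 at seq_length 49152:   36.0 GiB
print(mask_gib(4, 98304, 4))   # float32 at 2 * seq_length:    144.0 GiB  <- the failed request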
+Running ctx_length=65536, TP_SIZE=2, CP_SIZE=4, BATCH_SIZE=4
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 65536
+TP_SIZE: 2
+CP_SIZE: 4
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+using world size: 8, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: None, tensor-model-parallel size: 2, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
+Number of virtual stages per pipeline stage: None
+WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
+using torch.float16 for parameters ...
+------------------------ arguments ------------------------
+ account_for_embedding_in_pipeline_split ......... False
+ account_for_loss_in_pipeline_split .............. False
+ accumulate_allreduce_grads_in_fp32 .............. False
+ adam_beta1 ...................................... 0.9
+ adam_beta2 ...................................... 0.999
+ adam_eps ........................................ 1e-08
+ add_bias_linear ................................. True
+ add_position_embedding .......................... True
+ add_qkv_bias .................................... True
+ adlr_autoresume ................................. False
+ adlr_autoresume_interval ........................ 1000
+ align_grad_reduce ............................... True
+ align_param_gather .............................. False
+ app_tag_run_name ................................ None
+ app_tag_run_version ............................. 0.0.0
+ apply_layernorm_1p .............................. False
+ apply_query_key_layer_scaling ................... False
+ apply_residual_connection_post_layernorm ........ False
+ apply_rope_fusion ............................... False
+ async_save ...................................... None
+ async_tensor_model_parallel_allreduce ........... True
+ attention_backend ............................... AttnBackend.auto
+ attention_dropout ............................... 0.1
+ attention_softmax_in_fp32 ....................... False
+ auto_detect_ckpt_format ......................... False
+ barrier_with_L1_time ............................ True
+ bert_binary_head ................................ True
+ bert_embedder_type .............................. megatron
+ bert_load ....................................... None
+ bf16 ............................................ False
+ bias_dropout_fusion ............................. True
+ bias_gelu_fusion ................................ True
+ bias_swiglu_fusion .............................. True
+ biencoder_projection_dim ........................ 0
+ biencoder_shared_query_context_model ............ False
+ block_data_path ................................. None
+ calc_ft_timeouts ................................ False
+ calculate_per_token_loss ........................ False
+ check_for_large_grads ........................... False
+ check_for_nan_in_loss_and_grad .................. False
+ check_for_spiky_loss ............................ False
+ check_weight_hash_across_dp_replicas_interval ... None
+ ckpt_assume_constant_structure .................. False
+ ckpt_convert_format ............................. 
None + ckpt_convert_save ............................... None + ckpt_convert_update_legacy_dist_opt_format ...... False + ckpt_format ..................................... torch_dist + ckpt_fully_parallel_load ........................ False + ckpt_fully_parallel_save ........................ True + ckpt_fully_parallel_save_deprecated ............. False + ckpt_step ....................................... None + classes_fraction ................................ 1.0 + clip_grad ....................................... 1.0 + clone_scatter_output_in_embedding ............... True + config_logger_dir ............................... + consumed_train_samples .......................... 0 + consumed_valid_samples .......................... 0 + context_parallel_size ........................... 4 + cp_comm_type .................................... ['p2p'] + create_attention_mask_in_dataloader ............. True + cross_entropy_fusion_impl ....................... native + cross_entropy_loss_fusion ....................... False + cuda_graph_scope ................................ full + cuda_graph_warmup_steps ......................... 3 + data_args_path .................................. None + data_cache_path ................................. None + data_parallel_random_init ....................... False + data_parallel_sharding_strategy ................. no_shard + data_parallel_size .............................. 1 + data_path ....................................... None + data_per_class_fraction ......................... 1.0 + data_sharding ................................... True + dataloader_type ................................. single + ddp_average_in_collective ....................... False + ddp_bucket_size ................................. None + ddp_num_buckets ................................. None + ddp_pad_buckets_for_high_nccl_busbw ............. False + decoder_first_pipeline_num_layers ............... None + decoder_last_pipeline_num_layers ................ None + decoder_num_layers .............................. None + decoder_seq_length .............................. None + decoupled_lr .................................... None + decoupled_min_lr ................................ None + decrease_batch_size_if_needed ................... False + defer_embedding_wgrad_compute ................... False + deprecated_use_mcore_models ..................... False + deterministic_mode .............................. False + dino_bottleneck_size ............................ 256 + dino_freeze_last_layer .......................... 1 + dino_head_hidden_size ........................... 2048 + dino_local_crops_number ......................... 10 + dino_local_img_size ............................. 96 + dino_norm_last_layer ............................ False + dino_teacher_temp ............................... 0.07 + dino_warmup_teacher_temp ........................ 0.04 + dino_warmup_teacher_temp_epochs ................. 30 + disable_bf16_reduced_precision_matmul ........... False + disable_mamba_mem_eff_path ...................... False + disable_straggler_on_startup .................... False + dist_ckpt_format_deprecated ..................... None + dist_ckpt_strictness ............................ assume_ok_unexpected + distribute_saved_activations .................... False + distributed_backend ............................. nccl + distributed_timeout_minutes ..................... 10 + embedding_path .................................. None + empty_unused_memory_level ....................... 
0 + enable_cuda_graph ............................... False + enable_ft_package ............................... False + enable_gloo_process_groups ...................... True + enable_msc ...................................... True + enable_one_logger ............................... True + encoder_num_layers .............................. 2 + encoder_pipeline_model_parallel_size ............ 0 + encoder_seq_length .............................. 65536 + encoder_tensor_model_parallel_size .............. 0 + end_weight_decay ................................ 0.1 + eod_mask_loss ................................... False + error_injection_rate ............................ 0 + error_injection_type ............................ transient_error + eval_interval ................................... 16 + eval_iters ...................................... 1 + evidence_data_path .............................. None + exit_duration_in_mins ........................... None + exit_interval ................................... None + exit_on_missing_checkpoint ...................... False + exit_signal_handler ............................. False + exp_avg_dtype ................................... torch.float32 + exp_avg_sq_dtype ................................ torch.float32 + expert_model_parallel_size ...................... 1 + expert_tensor_parallel_size ..................... 2 + external_cuda_graph ............................. False + ffn_hidden_size ................................. 16384 + finetune ........................................ False + first_last_layers_bf16 .......................... False + flash_decode .................................... False + fp16 ............................................ True + fp16_lm_cross_entropy ........................... False + fp32_residual_connection ........................ False + fp8 ............................................. None + fp8_amax_compute_algo ........................... most_recent + fp8_amax_history_len ............................ 1 + fp8_interval .................................... 1 + fp8_margin ...................................... 0 + fp8_param_gather ................................ False + fp8_recipe ...................................... delayed + fp8_wgrad ....................................... True + fsdp_double_buffer .............................. False + global_batch_size ............................... 1 + grad_reduce_in_bf16 ............................. False + gradient_accumulation_fusion .................... True + gradient_reduce_div_fusion ...................... True + group_query_attention ........................... True + head_lr_mult .................................... 1.0 + heterogeneous_layers_config_encoded_json ........ None + heterogeneous_layers_config_path ................ None + hidden_dropout .................................. 0.1 + hidden_size ..................................... 4096 + hierarchical_context_parallel_sizes ............. None + high_priority_stream_groups ..................... [] + hybrid_attention_ratio .......................... 0.0 + hybrid_mlp_ratio ................................ 0.0 + hybrid_override_pattern ......................... None + hysteresis ...................................... 2 + ict_head_size ................................... None + ict_load ........................................ None + img_h ........................................... 224 + img_w ........................................... 224 + indexer_batch_size .............................. 
128 + indexer_log_interval ............................ 1000 + inference_batch_times_seqlen_threshold .......... -1 + inference_dynamic_batching ...................... False + inference_dynamic_batching_buffer_guaranteed_fraction 0.2 + inference_dynamic_batching_buffer_overflow_factor None + inference_dynamic_batching_buffer_size_gb ....... 40.0 + inference_dynamic_batching_chunk_size ........... 256 + inference_dynamic_batching_max_requests_override None + inference_dynamic_batching_max_tokens_override .. None + inference_max_batch_size ........................ 8 + inference_max_seq_length ........................ 2560 + inference_rng_tracker ........................... False + init_method_std ................................. 0.02 + init_method_xavier_uniform ...................... False + init_model_with_meta_device ..................... False + initial_loss_scale .............................. 4294967296 + inprocess_active_world_size ..................... 8 + inprocess_barrier_timeout ....................... 120 + inprocess_completion_timeout .................... 120 + inprocess_empty_cuda_cache ...................... False + inprocess_granularity ........................... node + inprocess_hard_timeout .......................... 90 + inprocess_heartbeat_interval .................... 30 + inprocess_heartbeat_timeout ..................... 60 + inprocess_last_call_wait ........................ 1 + inprocess_max_iterations ........................ None + inprocess_monitor_process_interval .............. 1.0 + inprocess_monitor_thread_interval ............... 1.0 + inprocess_progress_watchdog_interval ............ 1.0 + inprocess_restart ............................... False + inprocess_soft_timeout .......................... 60 + inprocess_termination_grace_time ................ 1 + is_hybrid_model ................................. False + iter_per_epoch .................................. 1250 + iterations_to_skip .............................. [] + keep_fp8_transpose_cache_when_using_custom_fsdp . False + kv_channels ..................................... 64 + kv_lora_rank .................................... 32 + lazy_mpu_init ................................... None + load ............................................ gpt-checkpoint + load_model_opt_format ........................... False + local_rank ...................................... 0 + log_interval .................................... 1 + log_loss_scale_to_tensorboard ................... True + log_memory_to_tensorboard ....................... False + log_num_zeros_in_grad ........................... False + log_params_norm ................................. False + log_progress .................................... False + log_straggler ................................... False + log_throughput .................................. False + log_timers_to_tensorboard ....................... False + log_validation_ppl_to_tensorboard ............... False + log_world_size_to_tensorboard ................... False + logging_level ................................... 0 + loss_scale ...................................... None + loss_scale_window ............................... 1000 + lr .............................................. 0.0005 + lr_decay_iters .................................. 150000 + lr_decay_samples ................................ None + lr_decay_style .................................. cosine + lr_warmup_fraction .............................. None + lr_warmup_init .................................. 
0.0 + lr_warmup_iters ................................. 2 + lr_warmup_samples ............................... 0 + lr_wsd_decay_iters .............................. None + lr_wsd_decay_samples ............................ None + lr_wsd_decay_style .............................. exponential + main_grads_dtype ................................ torch.float32 + main_params_dtype ............................... torch.float32 + make_vocab_size_divisible_by .................... 128 + mamba_head_dim .................................. 64 + mamba_num_groups ................................ 8 + mamba_num_heads ................................. None + mamba_state_dim ................................. 128 + manual_gc ....................................... False + manual_gc_eval .................................. True + manual_gc_interval .............................. 0 + mask_factor ..................................... 1.0 + mask_prob ....................................... 0.15 + mask_type ....................................... random + masked_softmax_fusion ........................... True + max_position_embeddings ......................... 65536 + max_tokens_to_oom ............................... 12000 + memory_snapshot_path ............................ snapshot.pickle + merge_file ...................................... merges.txt + micro_batch_size ................................ 1 + microbatch_group_size_per_vp_stage .............. None + mid_level_dataset_surplus ....................... 0.005 + min_loss_scale .................................. 1.0 + min_lr .......................................... 0.0 + mlp_chunks_for_prefill .......................... 1 + mmap_bin_files .................................. True + mock_data ....................................... True + moe_apply_probs_on_input ........................ False + moe_aux_loss_coeff .............................. 0.0 + moe_enable_deepep ............................... False + moe_expert_capacity_factor ...................... None + moe_extended_tp ................................. False + moe_ffn_hidden_size ............................. None + moe_grouped_gemm ................................ False + moe_input_jitter_eps ............................ None + moe_layer_freq .................................. 1 + moe_layer_recompute ............................. False + moe_pad_expert_input_to_capacity ................ False + moe_per_layer_logging ........................... False + moe_permute_fusion .............................. False + moe_router_bias_update_rate ..................... 0.001 + moe_router_dtype ................................ None + moe_router_enable_expert_bias ................... False + moe_router_force_load_balancing ................. False + moe_router_group_topk ........................... None + moe_router_load_balancing_type .................. aux_loss + moe_router_num_groups ........................... None + moe_router_padding_for_fp8 ...................... False + moe_router_pre_softmax .......................... False + moe_router_score_function ....................... softmax + moe_router_topk ................................. 2 + moe_router_topk_scaling_factor .................. None + moe_shared_expert_intermediate_size ............. None + moe_shared_expert_overlap ....................... False + moe_token_dispatcher_type ....................... allgather + moe_token_drop_policy ........................... probs + moe_use_legacy_grouped_gemm ..................... 
False + moe_use_upcycling ............................... False + moe_z_loss_coeff ................................ None + mrope_section ................................... None + mscale .......................................... 1.0 + mscale_all_dim .................................. 1.0 + mtp_loss_scaling_factor ......................... 0.1 + mtp_num_layers .................................. None + multi_latent_attention .......................... False + nccl_all_reduce_for_prefill ..................... False + nccl_communicator_config_path ................... None + nccl_ub ......................................... False + no_load_optim ................................... None + no_load_rng ..................................... None + no_persist_layer_norm ........................... False + no_rope_freq .................................... None + no_save_optim ................................... None + no_save_rng ..................................... None + non_persistent_ckpt_type ........................ None + non_persistent_global_ckpt_dir .................. None + non_persistent_local_ckpt_algo .................. fully_parallel + non_persistent_local_ckpt_dir ................... None + non_persistent_save_interval .................... None + norm_epsilon .................................... 1e-05 + normalization ................................... LayerNorm + num_attention_heads ............................. 64 + num_channels .................................... 3 + num_classes ..................................... 1000 + num_dataset_builder_threads ..................... 1 + num_distributed_optimizer_instances ............. 1 + num_experts ..................................... None + num_layers ...................................... 2 + num_layers_at_end_in_bf16 ....................... 1 + num_layers_at_start_in_bf16 ..................... 1 + num_layers_per_virtual_pipeline_stage ........... None + num_query_groups ................................ 16 + num_virtual_stages_per_pipeline_rank ............ None + num_workers ..................................... 2 + object_storage_cache_path ....................... None + one_logger_async ................................ False + one_logger_project .............................. megatron-lm + one_logger_run_name ............................. None + onnx_safe ....................................... None + openai_gelu ..................................... False + optimizer ....................................... adam + optimizer_cpu_offload ........................... False + optimizer_offload_fraction ...................... 1.0 + output_bert_embeddings .......................... False + overlap_cpu_optimizer_d2h_h2d ................... False + overlap_grad_reduce ............................. False + overlap_p2p_comm ................................ False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 
1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 65536 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... 
None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 2 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. 
False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 8 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... + > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +> initialized tensor model parallel with size 2 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.129 seconds +WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations. +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.180 seconds +time to initialize megatron (seconds): 7.564 +[after megatron is initialized] datetime: 2025-06-21 22:05:02 +building GPT model ... 
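Note: the padded vocab size above and the per-rank parameter counts printed below can both be reproduced from the arguments. A back-of-envelope sketch in Python, assuming Megatron's padding rule (round up to a multiple of make_vocab_size_divisible_by * tensor-model-parallel size, which the "175 dummy tokens" line confirms) and its usual TP sharding (column-parallel linear_qkv and linear_fc1, row-parallel linear_proj and linear_fc2, consistent with the gradient-bucket listing below); this is an editor's sketch, not the library's own accounting:

    TP = 2
    hidden, ffn, layers, seq = 4096, 16384, 2, 65536
    heads, kv_channels, groups = 64, 64, 16

    # Vocab padding: 50257 rounded up to a multiple of 128 * TP = 256.
    multiple = 128 * TP
    vocab = ((50257 + multiple - 1) // multiple) * multiple
    assert vocab == 50432  # 175 dummy tokens, as logged above

    # Per-layer parameters on one TP rank (weights sharded; biases per the sharding scheme).
    qkv_out = (heads + 2 * groups) * kv_channels          # 4096 query + 2048 key/value (GQA)
    per_layer = (
        2 * 2 * hidden                                    # two fused layernorms (weight + bias)
        + qkv_out * hidden // TP + qkv_out // TP          # column-parallel linear_qkv (+ sharded bias)
        + hidden * hidden // TP + hidden                  # row-parallel linear_proj (+ full bias)
        + hidden * ffn // TP + ffn // TP                  # column-parallel linear_fc1 (+ sharded bias)
        + ffn * hidden // TP + hidden                     # row-parallel linear_fc2 (+ full bias)
    )
    params = (
        vocab // TP * hidden        # sharded word embeddings
        + seq * hidden              # replicated learned position embeddings
        + layers * per_layer
        + 2 * hidden                # final layernorm
    )
    assert params == 547_960_832    # matches the per-rank count below

The learned position embeddings alone contribute 65536 x 4096 (about 268M) parameters, roughly half the per-rank total, which is why the count grows with ctx_length across these runs.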
+>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 547960832 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 547960832 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 547960832 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 547960832 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 547960832 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 547960832 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 547960832 +>>> embedding +>>> decoder +>>> output_layer + > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 547960832 +INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False) +INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1 +Params for bucket 1 (547960832 elements, 547960832 padded size): + module.decoder.final_layernorm.weight + module.decoder.layers.1.mlp.linear_fc1.bias + module.decoder.layers.0.mlp.linear_fc2.weight + module.decoder.layers.0.mlp.linear_fc1.bias + module.decoder.layers.1.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_proj.weight + module.decoder.layers.0.self_attention.linear_qkv.bias + module.decoder.layers.1.mlp.linear_fc2.weight + module.decoder.layers.1.self_attention.linear_proj.bias + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.weight + module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.1.self_attention.linear_qkv.bias + module.decoder.layers.0.mlp.linear_fc2.bias + module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias + module.decoder.layers.0.self_attention.linear_proj.bias + module.decoder.layers.1.mlp.linear_fc1.weight + module.decoder.layers.0.mlp.linear_fc1.weight + module.embedding.word_embeddings.weight + module.decoder.final_layernorm.bias + module.decoder.layers.1.mlp.linear_fc2.bias + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight + module.decoder.layers.0.self_attention.linear_qkv.weight + module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias + module.embedding.position_embeddings.weight +INFO:megatron.core.optimizer:Setting up optimizer with 
config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='') +INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine +WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt + will not load any checkpoints and will start from random +(min, max) time across ranks (ms): + load-checkpoint ................................: (2.89, 3.26) +[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:05:05 +> building train, validation, and test datasets ... + > datasets target sizes (minimum size): + train: 10 + validation: 1 + test: 1 +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True +INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)] +> building train, validation, and test datasets for GPT ... 
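Note: the OOM that kills this run (below, at the first training step) comes from setup_batches in pretrain_gpt_profile.py materializing the dense attention mask with torch.ones, per the traceback. A dense [batch, 1, seq, seq] mask scales quadratically with sequence length; a rough sketch of the arithmetic, noting that the exact shape behind the 256 GiB request is not printed before the failure, so the figures below are illustrative lower bounds:

    def dense_mask_gib(batch: int, seq: int, bytes_per_elem: int) -> float:
        # Memory, in GiB, for a dense [batch, 1, seq, seq] attention mask.
        return batch * seq * seq * bytes_per_elem / 2**30

    print(dense_mask_gib(4, 65536, 1))  # 16.0 GiB as bool
    print(dense_mask_gib(4, 65536, 4))  # 64.0 GiB as float32 (torch.ones' default dtype)

Whatever the precise shape, a single 256 GiB allocation cannot fit on a 139.81 GiB device, so the PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True hint in the message cannot help here; at these context lengths the usual direction is to avoid materializing the dense mask at all (the arguments above show create_attention_mask_in_dataloader is True).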
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=65536, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None) +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005574 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1040 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001706 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1040 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices +DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False +WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None +DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001506 seconds +INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 1041 +INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1 +> finished creating GPT datasets ... +[after dataloaders are built] datetime: 2025-06-21 22:05:05 +done with setup ... +(min, max) time across ranks (ms): + model-and-optimizer-setup ......................: (2985.26, 3006.56) + train/valid/test-data-iterators-setup ..........: (20.46, 142.95) +training ... +Setting rerun_state_machine.current_iteration to 0... +[before the start of training step] datetime: 2025-06-21 22:05:05 +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 256.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 256.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 256.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 256.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 256.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 256.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 256.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 256.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 256.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 256.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 256.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 256.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 256.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 256.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 256.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables) +['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 256.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 133.80 GiB is free. Including non-PyTorch memory, this process has 6.00 GiB memory in use. Of the allocated memory 4.10 GiB is allocated by PyTorch, and 451.54 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. 
See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n'] +Running ctx_length=81920, TP_SIZE=2, CP_SIZE=4, BATCH_SIZE=4 +Cleaning up checkpoint directory: gpt-checkpoint +-------------------------------- +CTX_LENGTH: 81920 +TP_SIZE: 2 +CP_SIZE: 4 +CHECKPOINT_PATH: gpt-checkpoint +PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron +-------------------------------- +/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +using world size: 8, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: None, tensor-model-parallel size: 2, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0 +Number of virtual stages per pipeline stage: None +WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used +using torch.float16 for parameters ... +------------------------ arguments ------------------------ + account_for_embedding_in_pipeline_split ......... False + account_for_loss_in_pipeline_split .............. False + accumulate_allreduce_grads_in_fp32 .............. False + adam_beta1 ...................................... 0.9 + adam_beta2 ...................................... 0.999 + adam_eps ........................................ 1e-08 + add_bias_linear ................................. True + add_position_embedding .......................... True + add_qkv_bias .................................... True + adlr_autoresume ................................. False + adlr_autoresume_interval ........................ 1000 + align_grad_reduce ............................... True + align_param_gather .............................. False + app_tag_run_name ................................ None + app_tag_run_version ............................. 0.0.0 + apply_layernorm_1p .............................. False + apply_query_key_layer_scaling ................... False + apply_residual_connection_post_layernorm ........ False + apply_rope_fusion ............................... False + async_save ...................................... None + async_tensor_model_parallel_allreduce ........... True + attention_backend ............................... AttnBackend.auto + attention_dropout ............................... 0.1 + attention_softmax_in_fp32 ....................... False + auto_detect_ckpt_format ......................... False + barrier_with_L1_time ............................ True + bert_binary_head ................................ True + bert_embedder_type .............................. megatron + bert_load ....................................... None + bf16 ............................................ False + bias_dropout_fusion ............................. True + bias_gelu_fusion ................................ True + bias_swiglu_fusion .............................. True + biencoder_projection_dim ........................ 0 + biencoder_shared_query_context_model ............ False + block_data_path ................................. None + calc_ft_timeouts ................................ False + calculate_per_token_loss ........................ False + check_for_large_grads ........................... False + check_for_nan_in_loss_and_grad .................. False + check_for_spiky_loss ............................ 
False
+ check_weight_hash_across_dp_replicas_interval ... None
+ ckpt_assume_constant_structure .................. False
+ ckpt_convert_format ............................. None
+ ckpt_convert_save ............................... None
+ ckpt_convert_update_legacy_dist_opt_format ...... False
+ ckpt_format ..................................... torch_dist
+ ckpt_fully_parallel_load ........................ False
+ ckpt_fully_parallel_save ........................ True
+ ckpt_fully_parallel_save_deprecated ............. False
+ ckpt_step ....................................... None
+ classes_fraction ................................ 1.0
+ clip_grad ....................................... 1.0
+ clone_scatter_output_in_embedding ............... True
+ config_logger_dir ...............................
+ consumed_train_samples .......................... 0
+ consumed_valid_samples .......................... 0
+ context_parallel_size ........................... 4
+ cp_comm_type .................................... ['p2p']
+ create_attention_mask_in_dataloader ............. True
+ cross_entropy_fusion_impl ....................... native
+ cross_entropy_loss_fusion ....................... False
+ cuda_graph_scope ................................ full
+ cuda_graph_warmup_steps ......................... 3
+ data_args_path .................................. None
+ data_cache_path ................................. None
+ data_parallel_random_init ....................... False
+ data_parallel_sharding_strategy ................. no_shard
+ data_parallel_size .............................. 1
+ data_path ....................................... None
+ data_per_class_fraction ......................... 1.0
+ data_sharding ................................... True
+ dataloader_type ................................. single
+ ddp_average_in_collective ....................... False
+ ddp_bucket_size ................................. None
+ ddp_num_buckets ................................. None
+ ddp_pad_buckets_for_high_nccl_busbw ............. False
+ decoder_first_pipeline_num_layers ............... None
+ decoder_last_pipeline_num_layers ................ None
+ decoder_num_layers .............................. None
+ decoder_seq_length .............................. None
+ decoupled_lr .................................... None
+ decoupled_min_lr ................................ None
+ decrease_batch_size_if_needed ................... False
+ defer_embedding_wgrad_compute ................... False
+ deprecated_use_mcore_models ..................... False
+ deterministic_mode .............................. False
+ dino_bottleneck_size ............................ 256
+ dino_freeze_last_layer .......................... 1
+ dino_head_hidden_size ........................... 2048
+ dino_local_crops_number ......................... 10
+ dino_local_img_size ............................. 96
+ dino_norm_last_layer ............................ False
+ dino_teacher_temp ............................... 0.07
+ dino_warmup_teacher_temp ........................ 0.04
+ dino_warmup_teacher_temp_epochs ................. 30
+ disable_bf16_reduced_precision_matmul ........... False
+ disable_mamba_mem_eff_path ...................... False
+ disable_straggler_on_startup .................... False
+ dist_ckpt_format_deprecated ..................... None
+ dist_ckpt_strictness ............................ assume_ok_unexpected
+ distribute_saved_activations .................... False
+ distributed_backend ............................. nccl
+ distributed_timeout_minutes ..................... 10
+ embedding_path .................................. None
+ empty_unused_memory_level ....................... 0
+ enable_cuda_graph ............................... False
+ enable_ft_package ............................... False
+ enable_gloo_process_groups ...................... True
+ enable_msc ...................................... True
+ enable_one_logger ............................... True
+ encoder_num_layers .............................. 2
+ encoder_pipeline_model_parallel_size ............ 0
+ encoder_seq_length .............................. 81920
+ encoder_tensor_model_parallel_size .............. 0
+ end_weight_decay ................................ 0.1
+ eod_mask_loss ................................... False
+ error_injection_rate ............................ 0
+ error_injection_type ............................ transient_error
+ eval_interval ................................... 16
+ eval_iters ...................................... 1
+ evidence_data_path .............................. None
+ exit_duration_in_mins ........................... None
+ exit_interval ................................... None
+ exit_on_missing_checkpoint ...................... False
+ exit_signal_handler ............................. False
+ exp_avg_dtype ................................... torch.float32
+ exp_avg_sq_dtype ................................ torch.float32
+ expert_model_parallel_size ...................... 1
+ expert_tensor_parallel_size ..................... 2
+ external_cuda_graph ............................. False
+ ffn_hidden_size ................................. 16384
+ finetune ........................................ False
+ first_last_layers_bf16 .......................... False
+ flash_decode .................................... False
+ fp16 ............................................ True
+ fp16_lm_cross_entropy ........................... False
+ fp32_residual_connection ........................ False
+ fp8 ............................................. None
+ fp8_amax_compute_algo ........................... most_recent
+ fp8_amax_history_len ............................ 1
+ fp8_interval .................................... 1
+ fp8_margin ...................................... 0
+ fp8_param_gather ................................ False
+ fp8_recipe ...................................... delayed
+ fp8_wgrad ....................................... True
+ fsdp_double_buffer .............................. False
+ global_batch_size ............................... 1
+ grad_reduce_in_bf16 ............................. False
+ gradient_accumulation_fusion .................... True
+ gradient_reduce_div_fusion ...................... True
+ group_query_attention ........................... True
+ head_lr_mult .................................... 1.0
+ heterogeneous_layers_config_encoded_json ........ None
+ heterogeneous_layers_config_path ................ None
+ hidden_dropout .................................. 0.1
+ hidden_size ..................................... 4096
+ hierarchical_context_parallel_sizes ............. None
+ high_priority_stream_groups ..................... []
+ hybrid_attention_ratio .......................... 0.0
+ hybrid_mlp_ratio ................................ 0.0
+ hybrid_override_pattern ......................... None
+ hysteresis ...................................... 2
+ ict_head_size ................................... None
+ ict_load ........................................ None
+ img_h ........................................... 224
+ img_w ........................................... 224
+ indexer_batch_size .............................. 128
+ indexer_log_interval ............................ 1000
+ inference_batch_times_seqlen_threshold .......... -1
+ inference_dynamic_batching ...................... False
+ inference_dynamic_batching_buffer_guaranteed_fraction 0.2
+ inference_dynamic_batching_buffer_overflow_factor None
+ inference_dynamic_batching_buffer_size_gb ....... 40.0
+ inference_dynamic_batching_chunk_size ........... 256
+ inference_dynamic_batching_max_requests_override None
+ inference_dynamic_batching_max_tokens_override .. None
+ inference_max_batch_size ........................ 8
+ inference_max_seq_length ........................ 2560
+ inference_rng_tracker ........................... False
+ init_method_std ................................. 0.02
+ init_method_xavier_uniform ...................... False
+ init_model_with_meta_device ..................... False
+ initial_loss_scale .............................. 4294967296
+ inprocess_active_world_size ..................... 8
+ inprocess_barrier_timeout ....................... 120
+ inprocess_completion_timeout .................... 120
+ inprocess_empty_cuda_cache ...................... False
+ inprocess_granularity ........................... node
+ inprocess_hard_timeout .......................... 90
+ inprocess_heartbeat_interval .................... 30
+ inprocess_heartbeat_timeout ..................... 60
+ inprocess_last_call_wait ........................ 1
+ inprocess_max_iterations ........................ None
+ inprocess_monitor_process_interval .............. 1.0
+ inprocess_monitor_thread_interval ............... 1.0
+ inprocess_progress_watchdog_interval ............ 1.0
+ inprocess_restart ............................... False
+ inprocess_soft_timeout .......................... 60
+ inprocess_termination_grace_time ................ 1
+ is_hybrid_model ................................. False
+ iter_per_epoch .................................. 1250
+ iterations_to_skip .............................. []
+ keep_fp8_transpose_cache_when_using_custom_fsdp . False
+ kv_channels ..................................... 64
+ kv_lora_rank .................................... 32
+ lazy_mpu_init ................................... None
+ load ............................................ gpt-checkpoint
+ load_model_opt_format ........................... False
+ local_rank ...................................... 0
+ log_interval .................................... 1
+ log_loss_scale_to_tensorboard ................... True
+ log_memory_to_tensorboard ....................... False
+ log_num_zeros_in_grad ........................... False
+ log_params_norm ................................. False
+ log_progress .................................... False
+ log_straggler ................................... False
+ log_throughput .................................. False
+ log_timers_to_tensorboard ....................... False
+ log_validation_ppl_to_tensorboard ............... False
+ log_world_size_to_tensorboard ................... False
+ logging_level ................................... 0
+ loss_scale ...................................... None
+ loss_scale_window ............................... 1000
+ lr .............................................. 0.0005
+ lr_decay_iters .................................. 150000
+ lr_decay_samples ................................ None
+ lr_decay_style .................................. cosine
+ lr_warmup_fraction .............................. None
+ lr_warmup_init .................................. 0.0
+ lr_warmup_iters ................................. 2
+ lr_warmup_samples ............................... 0
+ lr_wsd_decay_iters .............................. None
+ lr_wsd_decay_samples ............................ None
+ lr_wsd_decay_style .............................. exponential
+ main_grads_dtype ................................ torch.float32
+ main_params_dtype ............................... torch.float32
+ make_vocab_size_divisible_by .................... 128
+ mamba_head_dim .................................. 64
+ mamba_num_groups ................................ 8
+ mamba_num_heads ................................. None
+ mamba_state_dim ................................. 128
+ manual_gc ....................................... False
+ manual_gc_eval .................................. True
+ manual_gc_interval .............................. 0
+ mask_factor ..................................... 1.0
+ mask_prob ....................................... 0.15
+ mask_type ....................................... random
+ masked_softmax_fusion ........................... True
+ max_position_embeddings ......................... 81920
+ max_tokens_to_oom ............................... 12000
+ memory_snapshot_path ............................ snapshot.pickle
+ merge_file ...................................... merges.txt
+ micro_batch_size ................................ 1
+ microbatch_group_size_per_vp_stage .............. None
+ mid_level_dataset_surplus ....................... 0.005
+ min_loss_scale .................................. 1.0
+ min_lr .......................................... 0.0
+ mlp_chunks_for_prefill .......................... 1
+ mmap_bin_files .................................. True
+ mock_data ....................................... True
+ moe_apply_probs_on_input ........................ False
+ moe_aux_loss_coeff .............................. 0.0
+ moe_enable_deepep ............................... False
+ moe_expert_capacity_factor ...................... None
+ moe_extended_tp ................................. False
+ moe_ffn_hidden_size ............................. None
+ moe_grouped_gemm ................................ False
+ moe_input_jitter_eps ............................ None
+ moe_layer_freq .................................. 1
+ moe_layer_recompute ............................. False
+ moe_pad_expert_input_to_capacity ................ False
+ moe_per_layer_logging ........................... False
+ moe_permute_fusion .............................. False
+ moe_router_bias_update_rate ..................... 0.001
+ moe_router_dtype ................................ None
+ moe_router_enable_expert_bias ................... False
+ moe_router_force_load_balancing ................. False
+ moe_router_group_topk ........................... None
+ moe_router_load_balancing_type .................. aux_loss
+ moe_router_num_groups ........................... None
+ moe_router_padding_for_fp8 ...................... False
+ moe_router_pre_softmax .......................... False
+ moe_router_score_function ....................... softmax
+ moe_router_topk ................................. 2
+ moe_router_topk_scaling_factor .................. None
+ moe_shared_expert_intermediate_size ............. None
+ moe_shared_expert_overlap ....................... False
+ moe_token_dispatcher_type ....................... allgather
+ moe_token_drop_policy ........................... probs
+ moe_use_legacy_grouped_gemm ..................... False
+ moe_use_upcycling ............................... False
+ moe_z_loss_coeff ................................ None
+ mrope_section ................................... None
+ mscale .......................................... 1.0
+ mscale_all_dim .................................. 1.0
+ mtp_loss_scaling_factor ......................... 0.1
+ mtp_num_layers .................................. None
+ multi_latent_attention .......................... False
+ nccl_all_reduce_for_prefill ..................... False
+ nccl_communicator_config_path ................... None
+ nccl_ub ......................................... False
+ no_load_optim ................................... None
+ no_load_rng ..................................... None
+ no_persist_layer_norm ........................... False
+ no_rope_freq .................................... None
+ no_save_optim ................................... None
+ no_save_rng ..................................... None
+ non_persistent_ckpt_type ........................ None
+ non_persistent_global_ckpt_dir .................. None
+ non_persistent_local_ckpt_algo .................. fully_parallel
+ non_persistent_local_ckpt_dir ................... None
+ non_persistent_save_interval .................... None
+ norm_epsilon .................................... 1e-05
+ normalization ................................... LayerNorm
+ num_attention_heads ............................. 64
+ num_channels .................................... 3
+ num_classes ..................................... 1000
+ num_dataset_builder_threads ..................... 1
+ num_distributed_optimizer_instances ............. 1
+ num_experts ..................................... None
+ num_layers ...................................... 2
+ num_layers_at_end_in_bf16 ....................... 1
+ num_layers_at_start_in_bf16 ..................... 1
+ num_layers_per_virtual_pipeline_stage ........... None
+ num_query_groups ................................ 16
+ num_virtual_stages_per_pipeline_rank ............ None
+ num_workers ..................................... 2
+ object_storage_cache_path ....................... None
+ one_logger_async ................................ False
+ one_logger_project .............................. megatron-lm
+ one_logger_run_name ............................. None
+ onnx_safe ....................................... None
+ openai_gelu ..................................... False
+ optimizer ....................................... adam
+ optimizer_cpu_offload ........................... False
+ optimizer_offload_fraction ...................... 1.0
+ output_bert_embeddings .......................... False
+ overlap_cpu_optimizer_d2h_h2d ................... False
+ overlap_grad_reduce ............................. False
+ overlap_p2p_comm ................................ False
+ overlap_p2p_comm_warmup_flush ................... False
+ overlap_param_gather ............................ False
+ overlap_param_gather_with_optimizer_step ........ False
+ override_opt_param_scheduler .................... False
+ params_dtype .................................... torch.float16
+ patch_dim ....................................... 16
+ per_split_data_args_path ........................ None
+ perform_initialization .......................... True
+ pin_cpu_grads ................................... True
+ pin_cpu_params .................................. True
+ pipeline_model_parallel_comm_backend ............ None
+ pipeline_model_parallel_size .................... 1
+ pipeline_model_parallel_split_rank .............. None
+ position_embedding_type ......................... learned_absolute
+ pretrained_checkpoint ........................... None
+ profile ......................................... False
+ profile_ranks ................................... [0]
+ profile_step_end ................................ 12
+ profile_step_start .............................. 10
+ q_lora_rank ..................................... None
+ qk_head_dim ..................................... 128
+ qk_l2_norm ...................................... False
+ qk_layernorm .................................... False
+ qk_pos_emb_head_dim ............................. 64
+ query_in_block_prob ............................. 0.1
+ rampup_batch_size ............................... None
+ rank ............................................ 0
+ recompute_granularity ........................... None
+ recompute_method ................................ None
+ recompute_modules ............................... None
+ recompute_num_layers ............................ None
+ record_memory_history ........................... False
+ relative_attention_max_distance ................. 128
+ relative_attention_num_buckets .................. 32
+ replication ..................................... False
+ replication_factor .............................. 2
+ replication_jump ................................ None
+ rerun_mode ...................................... disabled
+ reset_attention_mask ............................ False
+ reset_position_ids .............................. False
+ result_rejected_tracker_filename ................ None
+ retriever_report_topk_accuracies ................ []
+ retriever_score_scaling ......................... False
+ retriever_seq_length ............................ 256
+ retro_add_retriever ............................. False
+ retro_attention_gate ............................ 1
+ retro_cyclic_train_iters ........................ None
+ retro_encoder_attention_dropout ................. 0.1
+ retro_encoder_hidden_dropout .................... 0.1
+ retro_encoder_layers ............................ 2
+ retro_num_neighbors ............................. 2
+ retro_num_retrieved_chunks ...................... 2
+ retro_project_dir ............................... None
+ retro_verify_neighbor_count ..................... True
+ rope_scaling_factor ............................. 8.0
+ rotary_base ..................................... 10000
+ rotary_interleaved .............................. False
+ rotary_percent .................................. 1.0
+ rotary_scaling_factor ........................... 1.0
+ rotary_seq_len_interpolation_factor ............. None
+ run_workload_inspector_server ................... False
+ sample_rate ..................................... 1.0
+ save ............................................ gpt-checkpoint
+ save_interval ................................... 16
+ scatter_gather_tensors_in_pipeline .............. True
+ seed ............................................ 1234
+ seq_length ...................................... 81920
+ sequence_parallel ............................... False
+ sgd_momentum .................................... 0.9
+ short_seq_prob .................................. 0.1
+ skip_train ...................................... False
+ skipped_train_samples ........................... 0
+ spec ............................................ None
+ split ........................................... None
+ squared_relu .................................... False
+ start_weight_decay .............................. 0.1
+ straggler_ctrlr_port ............................ 65535
+ straggler_minmax_count .......................... 1
+ suggested_communication_unit_size ............... None
+ swiglu .......................................... False
+ swin_backbone_type .............................. tiny
+ symmetric_ar_type ............................... None
+ te_rng_tracker .................................. False
+ tensor_model_parallel_size ...................... 2
+ tensorboard_dir ................................. tensorboard-logs/
+ tensorboard_log_interval ........................ 1
+ tensorboard_queue_size .......................... 1000
+ test_data_path .................................. None
+ test_mode ....................................... False
+ tiktoken_num_special_tokens ..................... 1000
+ tiktoken_pattern ................................ None
+ tiktoken_special_tokens ......................... None
+ timing_log_level ................................ 0
+ timing_log_option ............................... minmax
+ titles_data_path ................................ None
+ tokenizer_model ................................. None
+ tokenizer_type .................................. GPT2BPETokenizer
+ torch_fsdp2_reshard_after_forward ............... True
+ tp_comm_bootstrap_backend ....................... nccl
+ tp_comm_bulk_dgrad .............................. True
+ tp_comm_bulk_wgrad .............................. True
+ tp_comm_overlap ................................. False
+ tp_comm_overlap_ag .............................. True
+ tp_comm_overlap_cfg ............................. None
+ tp_comm_overlap_rs .............................. True
+ tp_comm_overlap_rs_dgrad ........................ False
+ tp_comm_split_ag ................................ True
+ tp_comm_split_rs ................................ True
+ train_data_path ................................. None
+ train_iters ..................................... 10
+ train_samples ................................... None
+ train_sync_interval ............................. None
+ transformer_impl ................................ transformer_engine
+ transformer_pipeline_model_parallel_size ........ 1
+ untie_embeddings_and_output_weights ............. False
+ use_checkpoint_args ............................. False
+ use_checkpoint_opt_param_scheduler .............. False
+ use_cpu_initialization .......................... None
+ use_custom_fsdp ................................. False
+ use_dist_ckpt ................................... True
+ use_dist_ckpt_deprecated ........................ False
+ use_distributed_optimizer ....................... False
+ use_flash_attn .................................. False
+ use_legacy_models ............................... False
+ use_mp_args_from_checkpoint_args ................ False
+ use_one_sent_docs ............................... False
+ use_persistent_ckpt_worker ...................... False
+ use_precision_aware_optimizer ................... False
+ use_pytorch_profiler ............................ False
+ use_ring_exchange_p2p ........................... False
+ use_rope_scaling ................................ False
+ use_rotary_position_embeddings .................. False
+ use_sharp ....................................... False
+ use_tokenizer_model_from_checkpoint_args ........ True
+ use_torch_fsdp2 ................................. False
+ use_torch_optimizer_for_cpu_offload ............. False
+ use_tp_pp_dp_mapping ............................ False
+ v_head_dim ...................................... 128
+ valid_data_path ................................. None
+ variable_seq_lengths ............................ False
+ virtual_pipeline_model_parallel_size ............ None
+ vision_backbone_type ............................ vit
+ vision_pretraining .............................. False
+ vision_pretraining_type ......................... classify
+ vocab_extra_ids ................................. 0
+ vocab_file ...................................... vocab.json
+ vocab_size ...................................... None
+ wandb_exp_name ..................................
+ wandb_project ...................................
+ wandb_save_dir ..................................
+ weight_decay .................................... 0.1
+ weight_decay_incr_style ......................... constant
+ wgrad_deferral_limit ............................ 0
+ world_size ...................................... 8
+ yaml_cfg ........................................ None
+-------------------- end of arguments ---------------------
+INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1
+> building GPT2BPETokenizer tokenizer ...
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
+WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+ > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432)
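The padded-vocab line follows directly from the arguments above: with make_vocab_size_divisible_by=128 and tensor_model_parallel_size=2, the GPT-2 BPE vocab is rounded up so each TP shard stays a multiple of 128. A minimal sketch of that rounding (the helper name is illustrative, not Megatron source):

```python
# Sketch: round the GPT-2 BPE vocab (50257) up so each of the TP=2 shards
# is a multiple of make_vocab_size_divisible_by (128).
import math

def padded_vocab_size(orig_size: int, divisible_by: int = 128, tp_size: int = 2) -> int:
    multiple = divisible_by * tp_size              # 128 * 2 = 256
    return math.ceil(orig_size / multiple) * multiple

print(padded_vocab_size(50257))                    # 50432
print(padded_vocab_size(50257) - 50257)            # 175 dummy tokens, as logged
```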
+INFO:megatron.training.initialize:Setting logging level to 0
+WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED
+> initializing torch distributed ...
+INFO:megatron.training.initialize:Setting logging level to 0
+> initialized tensor model parallel with size 2
+> initialized pipeline model parallel with size 1
+> setting random seeds to 1234 ...
+> compiling dataset index builder ...
+make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+make: Nothing to be done for 'default'.
+make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets'
+>>> done with dataset index builder. Compilation time: 0.055 seconds
+WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations.
+> compiling and loading fused kernels ...
+>>> done with compiling and loading fused kernels. Compilation time: 2.108 seconds
+time to initialize megatron (seconds): 7.322
+[after megatron is initialized] datetime: 2025-06-21 22:05:41
+building GPT model ...
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 615069696
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 615069696
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 615069696
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 615069696
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 615069696
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 615069696
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 615069696
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 615069696
+INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
+INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
+Params for bucket 1 (615069696 elements, 615069696 padded size):
+ module.decoder.layers.1.mlp.linear_fc2.bias
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
+ module.embedding.word_embeddings.weight
+ module.decoder.final_layernorm.bias
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.1.mlp.linear_fc1.bias
+ module.decoder.layers.0.mlp.linear_fc1.bias
+ module.embedding.position_embeddings.weight
+ module.decoder.final_layernorm.weight
+ module.decoder.layers.1.self_attention.linear_qkv.weight
+ module.decoder.layers.1.self_attention.linear_proj.weight
+ module.decoder.layers.0.self_attention.linear_qkv.weight
+ module.decoder.layers.0.self_attention.linear_proj.weight
+ module.decoder.layers.1.mlp.linear_fc2.weight
+ module.decoder.layers.1.self_attention.linear_proj.bias
+ module.decoder.layers.0.self_attention.linear_proj.bias
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.0.mlp.linear_fc2.weight
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.1.self_attention.linear_qkv.bias
+ module.decoder.layers.0.mlp.linear_fc2.bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.bias
+ module.decoder.layers.1.mlp.linear_fc1.weight
+ module.decoder.layers.0.mlp.linear_fc1.weight
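Every TP rank reports 615069696 parameters, and the bucket listing shows which tensors that covers: layernorms fused onto linear_qkv/linear_fc1, a final_layernorm, tied word embeddings, and replicated position embeddings. A back-of-envelope check that this count follows from the printed arguments, assuming the usual Megatron column/row-parallel sharding over TP=2 (a verification sketch, not Megatron code):

```python
# Reproduce the per-rank parameter count from the argument dump above.
H, FFN, LAYERS, VOCAB, POS, TP = 4096, 16384, 2, 50432, 81920, 2
HEADS, GROUPS, KV_CH = 64, 16, 64                  # GQA: 64 heads, 16 query groups

qkv_out = (HEADS + 2 * GROUPS) * KV_CH             # 6144 fused QKV output rows
per_layer = (
    2 * H                                          # linear_qkv layer_norm weight+bias
    + (H * qkv_out + qkv_out) // TP                # linear_qkv weight+bias (column-parallel)
    + (H * H) // TP + H                            # linear_proj weight (row-parallel) + bias
    + 2 * H                                        # linear_fc1 layer_norm weight+bias
    + (H * FFN + FFN) // TP                        # linear_fc1 weight+bias
    + (FFN * H) // TP + H                          # linear_fc2 weight+bias
)
total = (VOCAB * H) // TP                          # word embeddings, vocab-sharded
total += POS * H                                   # position embeddings, replicated
total += LAYERS * per_layer + 2 * H                # decoder layers + final_layernorm
print(total)                                       # 615069696, matching the log
```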
+INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='')
+INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine
+WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt
+ will not load any checkpoints and will start from random
+(min, max) time across ranks (ms):
+ load-checkpoint ................................: (8.88, 9.08)
+[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:05:44
+> building train, validation, and test datasets ...
+ > datasets target sizes (minimum size):
+ train: 10
+ validation: 1
+ test: 1
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)]
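The split_matrix printed above is just the "1,1,1" split weights normalized into cumulative (start, end) fractions of the dataset. A minimal sketch of that computation (the helper name is illustrative, not the library's):

```python
# Turn split weights like "1,1,1" into cumulative (start, end) bounds.
def to_split_matrix(weights):
    total = sum(weights)
    bounds, acc = [], 0
    for w in weights:
        bounds.append((acc / total, (acc + w) / total))
        acc += w
    return bounds

print(to_split_matrix([1, 1, 1]))
# [(0.0, 0.333...), (0.333..., 0.666...), (0.666..., 1.0)], as in the log
```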
+> building train, validation, and test datasets for GPT ...
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=81920, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None)
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.006123 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 832
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001822 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 832
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001519 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 833
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+> finished creating GPT datasets ...
+[after dataloaders are built] datetime: 2025-06-21 22:05:45
+done with setup ...
+(min, max) time across ranks (ms):
+ model-and-optimizer-setup ......................: (3626.64, 3655.94)
+ train/valid/test-data-iterators-setup ..........: (22.05, 151.21)
+training ...
+Setting rerun_state_machine.current_iteration to 0...
+[before the start of training step] datetime: 2025-06-21 22:05:45
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 400.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 400.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 400.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 400.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 400.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 400.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 400.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 400.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 400.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 400.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 400.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 400.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 400.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 400.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 400.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 400.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 133.50 GiB is free. Including non-PyTorch memory, this process has 6.31 GiB memory in use. Of the allocated memory 4.61 GiB is allocated by PyTorch, and 248.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
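Every rank dies at the same place: setup_batches materializing a dense attention mask with torch.ones. The failing shape and dtype are not printed, so the layout below is an assumption for illustration, but the arithmetic shows why a dense [*, S, S] mask cannot fit on a 139.81 GiB device at this run's seq_length of 81920:

```python
# Why the dense mask allocation fails at S = 81920 (this run's seq_length).
# Assumed fp32 [*, S, S] layout for illustration; the log omits the exact shape.
S = 81920
GiB = 1024 ** 3

per_slice_gib = S * S * 4 / GiB                          # one fp32 [S, S] mask slice
print(f"{per_slice_gib:.1f} GiB per slice")              # 25.0 GiB
print(f"{400 / per_slice_gib:.0f} slices in 400 GiB")    # the logged request = 16 slices
# A bool mask is 4x smaller (~6 GiB per slice) but still enormous; fused or
# flash attention kernels avoid materializing the S x S mask entirely.
```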
+Running ctx_length=98304, TP_SIZE=2, CP_SIZE=4, BATCH_SIZE=4
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 98304
+TP_SIZE: 2
+CP_SIZE: 4
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written.
+WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+INFO:megatron.training.initialize:Setting logging level to 0
+using world size: 8, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: None, tensor-model-parallel size: 2, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
+Number of virtual stages per pipeline stage: None
+WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
+using torch.float16 for parameters ...
+------------------------ arguments ------------------------
+ account_for_embedding_in_pipeline_split ......... False
+ account_for_loss_in_pipeline_split .............. False
+ accumulate_allreduce_grads_in_fp32 .............. False
+ adam_beta1 ...................................... 0.9
+ adam_beta2 ...................................... 0.999
+ adam_eps ........................................ 1e-08
+ add_bias_linear ................................. True
+ add_position_embedding .......................... True
+ add_qkv_bias .................................... True
+ adlr_autoresume ................................. False
+ adlr_autoresume_interval ........................ 1000
+ align_grad_reduce ............................... True
+ align_param_gather .............................. False
+ app_tag_run_name ................................ None
+ app_tag_run_version ............................. 0.0.0
+ apply_layernorm_1p .............................. False
+ apply_query_key_layer_scaling ................... False
+ apply_residual_connection_post_layernorm ........ False
+ apply_rope_fusion ............................... False
+ async_save ...................................... None
+ async_tensor_model_parallel_allreduce ........... True
+ attention_backend ............................... AttnBackend.auto
+ attention_dropout ............................... 0.1
+ attention_softmax_in_fp32 ....................... False
+ auto_detect_ckpt_format ......................... False
+ barrier_with_L1_time ............................ True
+ bert_binary_head ................................ True
+ bert_embedder_type .............................. megatron
+ bert_load ....................................... None
+ bf16 ............................................ False
+ bias_dropout_fusion ............................. True
+ bias_gelu_fusion ................................ True
+ bias_swiglu_fusion .............................. True
+ biencoder_projection_dim ........................ 0
+ biencoder_shared_query_context_model ............ False
+ block_data_path ................................. None
+ calc_ft_timeouts ................................ False
+ calculate_per_token_loss ........................ False
+ check_for_large_grads ........................... False
+ check_for_nan_in_loss_and_grad .................. False
+ check_for_spiky_loss ............................ False
+ check_weight_hash_across_dp_replicas_interval ... None
+ ckpt_assume_constant_structure .................. False
+ ckpt_convert_format ............................. None
+ ckpt_convert_save ............................... None
+ ckpt_convert_update_legacy_dist_opt_format ...... False
+ ckpt_format ..................................... torch_dist
+ ckpt_fully_parallel_load ........................ False
+ ckpt_fully_parallel_save ........................ True
+ ckpt_fully_parallel_save_deprecated ............. False
+ ckpt_step ....................................... None
+ classes_fraction ................................ 1.0
+ clip_grad ....................................... 1.0
+ clone_scatter_output_in_embedding ............... True
+ config_logger_dir ...............................
+ consumed_train_samples .......................... 0
+ consumed_valid_samples .......................... 0
+ context_parallel_size ........................... 4
+ cp_comm_type .................................... ['p2p']
+ create_attention_mask_in_dataloader ............. True
+ cross_entropy_fusion_impl ....................... native
+ cross_entropy_loss_fusion ....................... False
+ cuda_graph_scope ................................ full
+ cuda_graph_warmup_steps ......................... 3
+ data_args_path .................................. None
+ data_cache_path ................................. None
+ data_parallel_random_init ....................... False
+ data_parallel_sharding_strategy ................. no_shard
+ data_parallel_size .............................. 1
+ data_path ....................................... None
+ data_per_class_fraction ......................... 1.0
+ data_sharding ................................... True
+ dataloader_type ................................. single
+ ddp_average_in_collective ....................... False
+ ddp_bucket_size ................................. None
+ ddp_num_buckets ................................. None
+ ddp_pad_buckets_for_high_nccl_busbw ............. False
+ decoder_first_pipeline_num_layers ............... None
+ decoder_last_pipeline_num_layers ................ None
+ decoder_num_layers .............................. None
+ decoder_seq_length .............................. None
+ decoupled_lr .................................... None
+ decoupled_min_lr ................................ None
+ decrease_batch_size_if_needed ................... False
+ defer_embedding_wgrad_compute ................... False
+ deprecated_use_mcore_models ..................... False
+ deterministic_mode .............................. False
+ dino_bottleneck_size ............................ 256
+ dino_freeze_last_layer .......................... 1
+ dino_head_hidden_size ........................... 2048
+ dino_local_crops_number ......................... 10
+ dino_local_img_size ............................. 96
+ dino_norm_last_layer ............................ False
+ dino_teacher_temp ............................... 0.07
+ dino_warmup_teacher_temp ........................ 0.04
+ dino_warmup_teacher_temp_epochs ................. 30
+ disable_bf16_reduced_precision_matmul ........... False
+ disable_mamba_mem_eff_path ...................... False
+ disable_straggler_on_startup .................... False
+ dist_ckpt_format_deprecated ..................... None
+ dist_ckpt_strictness ............................ assume_ok_unexpected
+ distribute_saved_activations .................... False
+ distributed_backend ............................. nccl
+ distributed_timeout_minutes ..................... 10
+ embedding_path .................................. None
+ empty_unused_memory_level ....................... 0
+ enable_cuda_graph ............................... False
+ enable_ft_package ............................... False
+ enable_gloo_process_groups ...................... True
+ enable_msc ...................................... True
+ enable_one_logger ............................... True
+ encoder_num_layers .............................. 2
+ encoder_pipeline_model_parallel_size ............ 0
+ encoder_seq_length .............................. 98304
+ encoder_tensor_model_parallel_size .............. 0
+ end_weight_decay ................................ 0.1
+ eod_mask_loss ................................... False
+ error_injection_rate ............................ 0
+ error_injection_type ............................ transient_error
+ eval_interval ................................... 16
+ eval_iters ...................................... 1
+ evidence_data_path .............................. None
+ exit_duration_in_mins ........................... None
+ exit_interval ................................... None
+ exit_on_missing_checkpoint ...................... False
+ exit_signal_handler ............................. False
+ exp_avg_dtype ................................... torch.float32
+ exp_avg_sq_dtype ................................ torch.float32
+ expert_model_parallel_size ...................... 1
+ expert_tensor_parallel_size ..................... 2
+ external_cuda_graph ............................. False
+ ffn_hidden_size ................................. 16384
+ finetune ........................................ False
+ first_last_layers_bf16 .......................... False
+ flash_decode .................................... False
+ fp16 ............................................ True
+ fp16_lm_cross_entropy ........................... False
+ fp32_residual_connection ........................ False
+ fp8 ............................................. None
+ fp8_amax_compute_algo ........................... most_recent
+ fp8_amax_history_len ............................ 1
+ fp8_interval .................................... 1
+ fp8_margin ...................................... 0
+ fp8_param_gather ................................ False
+ fp8_recipe ...................................... delayed
+ fp8_wgrad ....................................... True
+ fsdp_double_buffer .............................. False
+ global_batch_size ............................... 1
+ grad_reduce_in_bf16 ............................. False
+ gradient_accumulation_fusion .................... True
+ gradient_reduce_div_fusion ...................... True
+ group_query_attention ........................... True
+ head_lr_mult .................................... 1.0
+ heterogeneous_layers_config_encoded_json ........ None
+ heterogeneous_layers_config_path ................ None
+ hidden_dropout .................................. 0.1
+ hidden_size ..................................... 4096
+ hierarchical_context_parallel_sizes ............. None
+ high_priority_stream_groups ..................... []
+ hybrid_attention_ratio .......................... 0.0
+ hybrid_mlp_ratio ................................ 0.0
+ hybrid_override_pattern ......................... None
+ hysteresis ...................................... 2
+ ict_head_size ................................... None
+ ict_load ........................................ None
+ img_h ........................................... 224
+ img_w ........................................... 224
+ indexer_batch_size .............................. 128
+ indexer_log_interval ............................ 1000
+ inference_batch_times_seqlen_threshold .......... -1
+ inference_dynamic_batching ...................... False
+ inference_dynamic_batching_buffer_guaranteed_fraction 0.2
+ inference_dynamic_batching_buffer_overflow_factor None
+ inference_dynamic_batching_buffer_size_gb ....... 40.0
+ inference_dynamic_batching_chunk_size ........... 256
+ inference_dynamic_batching_max_requests_override None
+ inference_dynamic_batching_max_tokens_override .. None
+ inference_max_batch_size ........................ 8
+ inference_max_seq_length ........................ 2560
+ inference_rng_tracker ........................... False
+ init_method_std ................................. 0.02
+ init_method_xavier_uniform ...................... False
+ init_model_with_meta_device ..................... False
+ initial_loss_scale .............................. 4294967296
+ inprocess_active_world_size ..................... 8
+ inprocess_barrier_timeout ....................... 120
+ inprocess_completion_timeout .................... 120
+ inprocess_empty_cuda_cache ...................... False
+ inprocess_granularity ........................... node
+ inprocess_hard_timeout .......................... 90
+ inprocess_heartbeat_interval .................... 30
+ inprocess_heartbeat_timeout ..................... 60
+ inprocess_last_call_wait ........................ 1
+ inprocess_max_iterations ........................ None
+ inprocess_monitor_process_interval .............. 1.0
+ inprocess_monitor_thread_interval ............... 1.0
+ inprocess_progress_watchdog_interval ............ 1.0
+ inprocess_restart ............................... False
+ inprocess_soft_timeout .......................... 60
+ inprocess_termination_grace_time ................ 1
+ is_hybrid_model ................................. False
+ iter_per_epoch .................................. 1250
+ iterations_to_skip .............................. []
+ keep_fp8_transpose_cache_when_using_custom_fsdp . False
+ kv_channels ..................................... 64
+ kv_lora_rank .................................... 32
+ lazy_mpu_init ................................... None
+ load ............................................ gpt-checkpoint
+ load_model_opt_format ........................... False
+ local_rank ...................................... 0
+ log_interval .................................... 1
+ log_loss_scale_to_tensorboard ................... True
+ log_memory_to_tensorboard ....................... False
+ log_num_zeros_in_grad ........................... False
+ log_params_norm ................................. False
+ log_progress .................................... False
+ log_straggler ................................... False
+ log_throughput .................................. False
+ log_timers_to_tensorboard ....................... False
+ log_validation_ppl_to_tensorboard ............... False
+ log_world_size_to_tensorboard ................... False
+ logging_level ................................... 0
+ loss_scale ...................................... None
+ loss_scale_window ............................... 1000
+ lr .............................................. 0.0005
+ lr_decay_iters .................................. 150000
+ lr_decay_samples ................................ None
+ lr_decay_style .................................. cosine
+ lr_warmup_fraction .............................. None
+ lr_warmup_init .................................. 0.0
+ lr_warmup_iters ................................. 2
+ lr_warmup_samples ............................... 0
+ lr_wsd_decay_iters .............................. None
+ lr_wsd_decay_samples ............................ None
+ lr_wsd_decay_style .............................. exponential
+ main_grads_dtype ................................ torch.float32
+ main_params_dtype ............................... torch.float32
+ make_vocab_size_divisible_by .................... 128
+ mamba_head_dim .................................. 64
+ mamba_num_groups ................................ 8
+ mamba_num_heads ................................. None
+ mamba_state_dim ................................. 128
+ manual_gc ....................................... False
+ manual_gc_eval .................................. True
+ manual_gc_interval .............................. 0
+ mask_factor ..................................... 1.0
+ mask_prob ....................................... 0.15
+ mask_type ....................................... random
+ masked_softmax_fusion ........................... True
+ max_position_embeddings ......................... 98304
+ max_tokens_to_oom ............................... 12000
+ memory_snapshot_path ............................ snapshot.pickle
+ merge_file ...................................... merges.txt
+ micro_batch_size ................................ 1
+ microbatch_group_size_per_vp_stage .............. None
+ mid_level_dataset_surplus ....................... 0.005
+ min_loss_scale .................................. 1.0
+ min_lr .......................................... 0.0
+ mlp_chunks_for_prefill .......................... 1
+ mmap_bin_files .................................. True
+ mock_data ....................................... True
+ moe_apply_probs_on_input ........................ False
+ moe_aux_loss_coeff .............................. 0.0
+ moe_enable_deepep ............................... False
+ moe_expert_capacity_factor ...................... None
+ moe_extended_tp ................................. False
+ moe_ffn_hidden_size ............................. None
+ moe_grouped_gemm ................................ False
+ moe_input_jitter_eps ............................ None
+ moe_layer_freq .................................. 1
+ moe_layer_recompute ............................. False
+ moe_pad_expert_input_to_capacity ................ False
+ moe_per_layer_logging ........................... False
+ moe_permute_fusion .............................. False
+ moe_router_bias_update_rate ..................... 0.001
+ moe_router_dtype ................................ None
+ moe_router_enable_expert_bias ................... False
+ moe_router_force_load_balancing ................. False
+ moe_router_group_topk ........................... None
+ moe_router_load_balancing_type .................. aux_loss
+ moe_router_num_groups ........................... None
+ moe_router_padding_for_fp8 ...................... False
+ moe_router_pre_softmax .......................... False
+ moe_router_score_function ....................... softmax
+ moe_router_topk ................................. 2
+ moe_router_topk_scaling_factor .................. None
+ moe_shared_expert_intermediate_size ............. None
+ moe_shared_expert_overlap ....................... False
+ moe_token_dispatcher_type ....................... allgather
+ moe_token_drop_policy ........................... probs
+ moe_use_legacy_grouped_gemm ..................... False
+ moe_use_upcycling ............................... False
+ moe_z_loss_coeff ................................ None
+ mrope_section ................................... None
+ mscale .......................................... 1.0
+ mscale_all_dim .................................. 1.0
+ mtp_loss_scaling_factor ......................... 0.1
+ mtp_num_layers .................................. None
+ multi_latent_attention .......................... False
+ nccl_all_reduce_for_prefill ..................... False
+ nccl_communicator_config_path ................... None
+ nccl_ub ......................................... False
+ no_load_optim ................................... None
+ no_load_rng ..................................... None
+ no_persist_layer_norm ........................... False
+ no_rope_freq .................................... None
+ no_save_optim ................................... None
+ no_save_rng ..................................... None
+ non_persistent_ckpt_type ........................ None
+ non_persistent_global_ckpt_dir .................. None
+ non_persistent_local_ckpt_algo .................. fully_parallel
+ non_persistent_local_ckpt_dir ................... None
+ non_persistent_save_interval .................... None
+ norm_epsilon .................................... 1e-05
+ normalization ................................... LayerNorm
+ num_attention_heads ............................. 64
+ num_channels .................................... 3
+ num_classes ..................................... 1000
+ num_dataset_builder_threads ..................... 1
+ num_distributed_optimizer_instances ............. 1
+ num_experts ..................................... None
+ num_layers ...................................... 2
+ num_layers_at_end_in_bf16 ....................... 1
+ num_layers_at_start_in_bf16 ..................... 1
+ num_layers_per_virtual_pipeline_stage ........... None
+ num_query_groups ................................ 16
+ num_virtual_stages_per_pipeline_rank ............ None
+ num_workers ..................................... 2
+ object_storage_cache_path ....................... None
+ one_logger_async ................................ False
+ one_logger_project .............................. megatron-lm
+ one_logger_run_name ............................. None
+ onnx_safe ....................................... None
+ openai_gelu ..................................... False
+ optimizer ....................................... adam
+ optimizer_cpu_offload ........................... False
+ optimizer_offload_fraction ...................... 1.0
+ output_bert_embeddings .......................... False
+ overlap_cpu_optimizer_d2h_h2d ................... False
+ overlap_grad_reduce ............................. False
+ overlap_p2p_comm ................................
False + overlap_p2p_comm_warmup_flush ................... False + overlap_param_gather ............................ False + overlap_param_gather_with_optimizer_step ........ False + override_opt_param_scheduler .................... False + params_dtype .................................... torch.float16 + patch_dim ....................................... 16 + per_split_data_args_path ........................ None + perform_initialization .......................... True + pin_cpu_grads ................................... True + pin_cpu_params .................................. True + pipeline_model_parallel_comm_backend ............ None + pipeline_model_parallel_size .................... 1 + pipeline_model_parallel_split_rank .............. None + position_embedding_type ......................... learned_absolute + pretrained_checkpoint ........................... None + profile ......................................... False + profile_ranks ................................... [0] + profile_step_end ................................ 12 + profile_step_start .............................. 10 + q_lora_rank ..................................... None + qk_head_dim ..................................... 128 + qk_l2_norm ...................................... False + qk_layernorm .................................... False + qk_pos_emb_head_dim ............................. 64 + query_in_block_prob ............................. 0.1 + rampup_batch_size ............................... None + rank ............................................ 0 + recompute_granularity ........................... None + recompute_method ................................ None + recompute_modules ............................... None + recompute_num_layers ............................ None + record_memory_history ........................... False + relative_attention_max_distance ................. 128 + relative_attention_num_buckets .................. 32 + replication ..................................... False + replication_factor .............................. 2 + replication_jump ................................ None + rerun_mode ...................................... disabled + reset_attention_mask ............................ False + reset_position_ids .............................. False + result_rejected_tracker_filename ................ None + retriever_report_topk_accuracies ................ [] + retriever_score_scaling ......................... False + retriever_seq_length ............................ 256 + retro_add_retriever ............................. False + retro_attention_gate ............................ 1 + retro_cyclic_train_iters ........................ None + retro_encoder_attention_dropout ................. 0.1 + retro_encoder_hidden_dropout .................... 0.1 + retro_encoder_layers ............................ 2 + retro_num_neighbors ............................. 2 + retro_num_retrieved_chunks ...................... 2 + retro_project_dir ............................... None + retro_verify_neighbor_count ..................... True + rope_scaling_factor ............................. 8.0 + rotary_base ..................................... 10000 + rotary_interleaved .............................. False + rotary_percent .................................. 1.0 + rotary_scaling_factor ........................... 1.0 + rotary_seq_len_interpolation_factor ............. None + run_workload_inspector_server ................... False + sample_rate ..................................... 
1.0 + save ............................................ gpt-checkpoint + save_interval ................................... 16 + scatter_gather_tensors_in_pipeline .............. True + seed ............................................ 1234 + seq_length ...................................... 98304 + sequence_parallel ............................... False + sgd_momentum .................................... 0.9 + short_seq_prob .................................. 0.1 + skip_train ...................................... False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 2 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. 
False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 8 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +> initialized tensor model parallel with size 2 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.045 seconds +WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations. +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.230 seconds +time to initialize megatron (seconds): 7.346 +[after megatron is initialized] datetime: 2025-06-21 22:06:21 +building GPT model ... 
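The "padded vocab" line above follows from make_vocab_size_divisible_by (128) and the tensor-parallel size (2): the vocabulary is rounded up to a multiple of their product. A minimal sketch of that rounding (the helper name is illustrative, not Megatron's):

    # Hypothetical helper; Megatron pads the vocab to a multiple of
    # make_vocab_size_divisible_by * tensor_model_parallel_size.
    def padded_vocab_size(orig: int, divisible_by: int = 128, tp: int = 2) -> int:
        multiple = divisible_by * tp
        return ((orig + multiple - 1) // multiple) * multiple

    print(padded_vocab_size(50257))  # 50432, i.e. 175 dummy tokens, matching the log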
+building GPT model ...
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 682178560
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 682178560
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 682178560
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 682178560
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 682178560
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 682178560
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 682178560
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 682178560
+INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
+INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
+Params for bucket 1 (682178560 elements, 682178560 padded size):
+ module.decoder.layers.1.mlp.linear_fc2.weight
+ module.decoder.layers.1.self_attention.linear_proj.bias
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.1.self_attention.linear_qkv.bias
+ module.decoder.layers.0.mlp.linear_fc2.bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.1.mlp.linear_fc1.weight
+ module.decoder.layers.0.mlp.linear_fc1.weight
+ module.decoder.final_layernorm.bias
+ module.decoder.layers.1.mlp.linear_fc2.bias
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.weight
+ module.decoder.layers.0.self_attention.linear_proj.weight
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_proj.bias
+ module.embedding.position_embeddings.weight
+ module.decoder.final_layernorm.weight
+ module.decoder.layers.1.mlp.linear_fc1.bias
+ module.decoder.layers.0.mlp.linear_fc2.weight
+ module.decoder.layers.0.mlp.linear_fc1.bias
+ module.embedding.word_embeddings.weight
+ module.decoder.layers.1.self_attention.linear_qkv.weight
+ module.decoder.layers.1.self_attention.linear_proj.weight
+ module.decoder.layers.0.self_attention.linear_qkv.bias
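The 682,178,560 parameters per (tensor, pipeline) rank can be sanity-checked from the arguments. A back-of-the-envelope sketch, assuming the layout implied by the bucket listing above (biased linears, learned absolute position embeddings replicated across TP ranks, GQA with 16 query groups of 64 kv_channels):

    hidden, ffn, layers, tp = 4096, 16384, 2, 2
    vocab_padded, max_pos = 50432, 98304
    qkv_rows = hidden + 2 * 16 * 64              # Q rows + K/V rows for 16 query groups
    word_emb = vocab_padded * hidden // tp       # word embeddings, column-parallel
    pos_emb = max_pos * hidden                   # position embeddings, not TP-sharded
    per_layer = (
        2 * hidden                               # qkv input layernorm (weight + bias)
        + qkv_rows * hidden // tp + qkv_rows // tp   # fused QKV weight + bias
        + hidden * hidden // tp + hidden         # attention output proj weight + bias
        + 2 * hidden                             # pre-MLP layernorm (weight + bias)
        + ffn * hidden // tp + ffn // tp         # linear_fc1 weight + bias
        + hidden * ffn // tp + hidden            # linear_fc2 weight + bias
    )
    total = word_emb + pos_emb + layers * per_layer + 2 * hidden  # + final layernorm
    print(total)  # 682178560, matching the log

Note how the replicated 98304 x 4096 position-embedding table alone contributes ~400M of the ~682M per-rank parameters at this context length.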
+INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='')
+INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine
+WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt
+ will not load any checkpoints and will start from random
+(min, max) time across ranks (ms):
+ load-checkpoint ................................: (2.96, 4.79)
+[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:06:27
+> building train, validation, and test datasets ...
+ > datasets target sizes (minimum size):
+ train: 10
+ validation: 1
+ test: 1
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)]
+> building train, validation, and test datasets for GPT ...
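The split_matrix logged above is just the cumulative form of the normalized split weights; a small sketch of the computation (modulo 0 vs 0.0 formatting):

    weights = [1, 1, 1]                # split = 1,1,1
    total = sum(weights)
    bounds, start = [], 0.0
    for w in weights:
        bounds.append((start, start + w / total))
        start += w / total
    print(bounds)  # [(0.0, 0.333...), (0.333..., 0.666...), (0.666..., 1.0)]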
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=98304, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None)
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.007009 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 693
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001711 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 693
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001384 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 694
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+> finished creating GPT datasets ...
+[after dataloaders are built] datetime: 2025-06-21 22:06:27
+done with setup ...
+(min, max) time across ranks (ms):
+ model-and-optimizer-setup ......................: (5741.55, 5742.63)
+ train/valid/test-data-iterators-setup ..........: (27.16, 172.79)
+training ...
+Setting rerun_state_machine.current_iteration to 0...
+[before the start of training step] datetime: 2025-06-21 22:06:27
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 576.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 576.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 576.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 576.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 576.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 576.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 576.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 576.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 576.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 576.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 576.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 576.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 576.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 576.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 576.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 576.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 132.94 GiB is free. Including non-PyTorch memory, this process has 6.86 GiB memory in use. Of the allocated memory 5.11 GiB is allocated by PyTorch, and 302.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
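All eight ranks fail on the same allocation: setup_batches in pretrain_gpt_profile.py materializes a dense attention mask before any context-parallel splitting. A rough, hedged size check (float32 is torch.ones's default dtype, and the factor of four buffered masks is an assumption made only to match the logged 576 GiB; the per-mask arithmetic is exact):

    batch, seq = 4, 98304                        # BATCH_SIZE=4 per the sweep; seq_length from the args
    one_mask_bytes = batch * 1 * seq * seq * 4   # float32 [batch, 1, seq, seq]
    print(one_mask_bytes / 2**30)                # 144.0 GiB for a single mask
    print(4 * one_mask_bytes / 2**30)            # 576.0 GiB, the allocation the log reports

Even a single such mask exceeds the 139.81 GiB per-GPU capacity, so with create_attention_mask_in_dataloader=True a dense mask is infeasible at this context length regardless of the multiplier.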
+Running ctx_length=131072, TP_SIZE=2, CP_SIZE=4, BATCH_SIZE=4
+Cleaning up checkpoint directory: gpt-checkpoint
+--------------------------------
+CTX_LENGTH: 131072
+TP_SIZE: 2
+CP_SIZE: 4
+CHECKPOINT_PATH: gpt-checkpoint
+PWD: /mnt/weka/home/hao.zhang/junda/attnserver-megatron
+--------------------------------
+/mnt/weka/home/hao.zhang/conda/miniconda/envs/junda-attnserver/bin/python3
+INFO:megatron.training.initialize:Setting logging level to 0
+using world size: 8, data-parallel size: 1, context-parallel size: 4, hierarchical context-parallel sizes: None
+tensor-model-parallel size: 2, encoder-tensor-model-parallel size: 0, pipeline-model-parallel size: 1, encoder-pipeline-model-parallel size: 0
+Number of virtual stages per pipeline stage: None
+WARNING: Setting args.check_for_nan_in_loss_and_grad to False since dynamic loss scaling is being used
+using torch.float16 for parameters ...
+------------------------ arguments ------------------------
+ account_for_embedding_in_pipeline_split ......... False
+ account_for_loss_in_pipeline_split .............. False
+ accumulate_allreduce_grads_in_fp32 .............. False
+ adam_beta1 ...................................... 0.9
+ adam_beta2 ...................................... 0.999
+ adam_eps ........................................ 1e-08
+ add_bias_linear ................................. True
+ add_position_embedding .......................... True
+ add_qkv_bias .................................... True
+ adlr_autoresume ................................. False
+ adlr_autoresume_interval ........................ 1000
+ align_grad_reduce ............................... True
+ align_param_gather .............................. False
+ app_tag_run_name ................................ None
+ app_tag_run_version ............................. 0.0.0
+ apply_layernorm_1p .............................. False
+ apply_query_key_layer_scaling ................... False
+ apply_residual_connection_post_layernorm ........ False
+ apply_rope_fusion ............................... False
+ async_save ...................................... None
+ async_tensor_model_parallel_allreduce ........... True
+ attention_backend ............................... AttnBackend.auto
+ attention_dropout ............................... 0.1
+ attention_softmax_in_fp32 ....................... False
+ auto_detect_ckpt_format ......................... False
+ barrier_with_L1_time ............................ True
+ bert_binary_head ................................ True
+ bert_embedder_type .............................. megatron
+ bert_load ....................................... None
+ bf16 ............................................ False
+ bias_dropout_fusion ............................. True
+ bias_gelu_fusion ................................ True
+ bias_swiglu_fusion .............................. True
+ biencoder_projection_dim ........................ 0
+ biencoder_shared_query_context_model ............ False
+ block_data_path ................................. None
+ calc_ft_timeouts ................................ False
+ calculate_per_token_loss ........................ False
+ check_for_large_grads ........................... False
+ check_for_nan_in_loss_and_grad .................. False
+ check_for_spiky_loss ............................ False
+ check_weight_hash_across_dp_replicas_interval ... None
+ ckpt_assume_constant_structure .................. False
+ ckpt_convert_format ............................. None
+ ckpt_convert_save ............................... None
+ ckpt_convert_update_legacy_dist_opt_format ...... False
+ ckpt_format ..................................... torch_dist
+ ckpt_fully_parallel_load ........................ False
+ ckpt_fully_parallel_save ........................ True
+ ckpt_fully_parallel_save_deprecated ............. False
+ ckpt_step ....................................... None
+ classes_fraction ................................ 1.0
+ clip_grad ....................................... 1.0
+ clone_scatter_output_in_embedding ............... True
+ config_logger_dir ...............................
+ consumed_train_samples .......................... 0
+ consumed_valid_samples .......................... 0
+ context_parallel_size ........................... 4
+ cp_comm_type .................................... ['p2p']
+ create_attention_mask_in_dataloader ............. True
+ cross_entropy_fusion_impl ....................... native
+ cross_entropy_loss_fusion ....................... False
+ cuda_graph_scope ................................ full
+ cuda_graph_warmup_steps ......................... 3
+ data_args_path .................................. None
+ data_cache_path ................................. None
+ data_parallel_random_init ....................... False
+ data_parallel_sharding_strategy ................. no_shard
+ data_parallel_size .............................. 1
+ data_path ....................................... None
+ data_per_class_fraction ......................... 1.0
+ data_sharding ................................... True
+ dataloader_type ................................. single
+ ddp_average_in_collective ....................... False
+ ddp_bucket_size ................................. None
+ ddp_num_buckets ................................. None
+ ddp_pad_buckets_for_high_nccl_busbw ............. False
+ decoder_first_pipeline_num_layers ............... None
+ decoder_last_pipeline_num_layers ................ None
+ decoder_num_layers .............................. None
+ decoder_seq_length .............................. None
+ decoupled_lr .................................... None
+ decoupled_min_lr ................................ None
+ decrease_batch_size_if_needed ................... False
+ defer_embedding_wgrad_compute ................... False
+ deprecated_use_mcore_models ..................... False
+ deterministic_mode .............................. False
+ dino_bottleneck_size ............................ 256
+ dino_freeze_last_layer .......................... 1
+ dino_head_hidden_size ........................... 2048
+ dino_local_crops_number ......................... 10
+ dino_local_img_size ............................. 96
+ dino_norm_last_layer ............................ False
+ dino_teacher_temp ............................... 0.07
+ dino_warmup_teacher_temp ........................ 0.04
+ dino_warmup_teacher_temp_epochs ................. 30
+ disable_bf16_reduced_precision_matmul ........... False
+ disable_mamba_mem_eff_path ...................... False
+ disable_straggler_on_startup .................... False
+ dist_ckpt_format_deprecated ..................... None
+ dist_ckpt_strictness ............................ assume_ok_unexpected
+ distribute_saved_activations .................... False
+ distributed_backend ............................. nccl
+ distributed_timeout_minutes ..................... 10
+ embedding_path .................................. None
+ empty_unused_memory_level ....................... 0
+ enable_cuda_graph ............................... False
+ enable_ft_package ............................... False
+ enable_gloo_process_groups ...................... True
+ enable_msc ...................................... True
+ enable_one_logger ............................... True
+ encoder_num_layers .............................. 2
+ encoder_pipeline_model_parallel_size ............ 0
+ encoder_seq_length .............................. 131072
+ encoder_tensor_model_parallel_size .............. 0
+ end_weight_decay ................................ 0.1
+ eod_mask_loss ................................... False
+ error_injection_rate ............................ 0
+ error_injection_type ............................ transient_error
+ eval_interval ................................... 16
+ eval_iters ...................................... 1
+ evidence_data_path .............................. None
+ exit_duration_in_mins ........................... None
+ exit_interval ................................... None
+ exit_on_missing_checkpoint ...................... False
+ exit_signal_handler ............................. False
+ exp_avg_dtype ................................... torch.float32
+ exp_avg_sq_dtype ................................ torch.float32
+ expert_model_parallel_size ...................... 1
+ expert_tensor_parallel_size ..................... 2
+ external_cuda_graph ............................. False
+ ffn_hidden_size ................................. 16384
+ finetune ........................................ False
+ first_last_layers_bf16 .......................... False
+ flash_decode .................................... False
+ fp16 ............................................ True
+ fp16_lm_cross_entropy ........................... False
+ fp32_residual_connection ........................ False
+ fp8 ............................................. None
+ fp8_amax_compute_algo ........................... most_recent
+ fp8_amax_history_len ............................ 1
+ fp8_interval .................................... 1
+ fp8_margin ...................................... 0
+ fp8_param_gather ................................ False
+ fp8_recipe ...................................... delayed
+ fp8_wgrad ....................................... True
+ fsdp_double_buffer .............................. False
+ global_batch_size ............................... 1
+ grad_reduce_in_bf16 ............................. False
+ gradient_accumulation_fusion .................... True
+ gradient_reduce_div_fusion ...................... True
+ group_query_attention ........................... True
+ head_lr_mult .................................... 1.0
+ heterogeneous_layers_config_encoded_json ........ None
+ heterogeneous_layers_config_path ................ None
+ hidden_dropout .................................. 0.1
+ hidden_size ..................................... 4096
+ hierarchical_context_parallel_sizes ............. None
+ high_priority_stream_groups ..................... []
+ hybrid_attention_ratio .......................... 0.0
+ hybrid_mlp_ratio ................................ 0.0
+ hybrid_override_pattern ......................... None
+ hysteresis ...................................... 2
+ ict_head_size ................................... None
+ ict_load ........................................ None
+ img_h ........................................... 224
+ img_w ........................................... 224
+ indexer_batch_size .............................. 128
+ indexer_log_interval ............................ 1000
+ inference_batch_times_seqlen_threshold .......... -1
+ inference_dynamic_batching ...................... False
+ inference_dynamic_batching_buffer_guaranteed_fraction 0.2
+ inference_dynamic_batching_buffer_overflow_factor None
+ inference_dynamic_batching_buffer_size_gb ....... 40.0
+ inference_dynamic_batching_chunk_size ........... 256
+ inference_dynamic_batching_max_requests_override None
+ inference_dynamic_batching_max_tokens_override .. None
+ inference_max_batch_size ........................ 8
+ inference_max_seq_length ........................ 2560
+ inference_rng_tracker ........................... False
+ init_method_std ................................. 0.02
+ init_method_xavier_uniform ...................... False
+ init_model_with_meta_device ..................... False
+ initial_loss_scale .............................. 4294967296
+ inprocess_active_world_size ..................... 8
+ inprocess_barrier_timeout ....................... 120
+ inprocess_completion_timeout .................... 120
+ inprocess_empty_cuda_cache ...................... False
+ inprocess_granularity ........................... node
+ inprocess_hard_timeout .......................... 90
+ inprocess_heartbeat_interval .................... 30
+ inprocess_heartbeat_timeout ..................... 60
+ inprocess_last_call_wait ........................ 1
+ inprocess_max_iterations ........................ None
+ inprocess_monitor_process_interval .............. 1.0
+ inprocess_monitor_thread_interval ............... 1.0
+ inprocess_progress_watchdog_interval ............ 1.0
+ inprocess_restart ............................... False
+ inprocess_soft_timeout .......................... 60
+ inprocess_termination_grace_time ................ 1
+ is_hybrid_model ................................. False
+ iter_per_epoch .................................. 1250
+ iterations_to_skip .............................. []
+ keep_fp8_transpose_cache_when_using_custom_fsdp . False
+ kv_channels ..................................... 64
+ kv_lora_rank .................................... 32
+ lazy_mpu_init ................................... None
+ load ............................................ gpt-checkpoint
+ load_model_opt_format ........................... False
+ local_rank ...................................... 0
+ log_interval .................................... 1
+ log_loss_scale_to_tensorboard ................... True
+ log_memory_to_tensorboard ....................... False
+ log_num_zeros_in_grad ........................... False
+ log_params_norm ................................. False
+ log_progress .................................... False
+ log_straggler ................................... False
+ log_throughput .................................. False
+ log_timers_to_tensorboard ....................... False
+ log_validation_ppl_to_tensorboard ............... False
+ log_world_size_to_tensorboard ................... False
+ logging_level ................................... 0
+ loss_scale ...................................... None
+ loss_scale_window ............................... 1000
+ lr .............................................. 0.0005
+ lr_decay_iters .................................. 150000
+ lr_decay_samples ................................ None
+ lr_decay_style .................................. cosine
+ lr_warmup_fraction .............................. None
+ lr_warmup_init .................................. 0.0
+ lr_warmup_iters ................................. 2
+ lr_warmup_samples ............................... 0
+ lr_wsd_decay_iters .............................. None
+ lr_wsd_decay_samples ............................ None
+ lr_wsd_decay_style .............................. exponential
+ main_grads_dtype ................................ torch.float32
+ main_params_dtype ............................... torch.float32
+ make_vocab_size_divisible_by .................... 128
+ mamba_head_dim .................................. 64
+ mamba_num_groups ................................ 8
+ mamba_num_heads ................................. None
+ mamba_state_dim ................................. 128
+ manual_gc ....................................... False
+ manual_gc_eval .................................. True
+ manual_gc_interval .............................. 0
+ mask_factor ..................................... 1.0
+ mask_prob ....................................... 0.15
+ mask_type ....................................... random
+ masked_softmax_fusion ........................... True
+ max_position_embeddings ......................... 131072
+ max_tokens_to_oom ............................... 12000
+ memory_snapshot_path ............................ snapshot.pickle
+ merge_file ...................................... merges.txt
+ micro_batch_size ................................ 1
+ microbatch_group_size_per_vp_stage .............. None
+ mid_level_dataset_surplus ....................... 0.005
+ min_loss_scale .................................. 1.0
+ min_lr .......................................... 0.0
+ mlp_chunks_for_prefill .......................... 1
+ mmap_bin_files .................................. True
+ mock_data ....................................... True
+ moe_apply_probs_on_input ........................ False
+ moe_aux_loss_coeff .............................. 0.0
+ moe_enable_deepep ............................... False
+ moe_expert_capacity_factor ...................... None
+ moe_extended_tp ................................. False
+ moe_ffn_hidden_size ............................. None
+ moe_grouped_gemm ................................ False
+ moe_input_jitter_eps ............................ None
+ moe_layer_freq .................................. 1
+ moe_layer_recompute ............................. False
+ moe_pad_expert_input_to_capacity ................ False
+ moe_per_layer_logging ........................... False
+ moe_permute_fusion .............................. False
+ moe_router_bias_update_rate ..................... 0.001
+ moe_router_dtype ................................ None
+ moe_router_enable_expert_bias ................... False
+ moe_router_force_load_balancing ................. False
+ moe_router_group_topk ........................... None
+ moe_router_load_balancing_type .................. aux_loss
+ moe_router_num_groups ........................... None
+ moe_router_padding_for_fp8 ...................... False
+ moe_router_pre_softmax .......................... False
+ moe_router_score_function ....................... softmax
+ moe_router_topk ................................. 2
+ moe_router_topk_scaling_factor .................. None
+ moe_shared_expert_intermediate_size ............. None
+ moe_shared_expert_overlap ....................... False
+ moe_token_dispatcher_type ....................... allgather
+ moe_token_drop_policy ........................... probs
+ moe_use_legacy_grouped_gemm ..................... False
+ moe_use_upcycling ............................... False
+ moe_z_loss_coeff ................................ None
+ mrope_section ................................... None
+ mscale .......................................... 1.0
+ mscale_all_dim .................................. 1.0
+ mtp_loss_scaling_factor ......................... 0.1
+ mtp_num_layers .................................. None
+ multi_latent_attention .......................... False
+ nccl_all_reduce_for_prefill ..................... False
+ nccl_communicator_config_path ................... None
+ nccl_ub ......................................... False
+ no_load_optim ................................... None
+ no_load_rng ..................................... None
+ no_persist_layer_norm ........................... False
+ no_rope_freq .................................... None
+ no_save_optim ................................... None
+ no_save_rng ..................................... None
+ non_persistent_ckpt_type ........................ None
+ non_persistent_global_ckpt_dir .................. None
+ non_persistent_local_ckpt_algo .................. fully_parallel
+ non_persistent_local_ckpt_dir ................... None
+ non_persistent_save_interval .................... None
+ norm_epsilon .................................... 1e-05
+ normalization ................................... LayerNorm
+ num_attention_heads ............................. 64
+ num_channels .................................... 3
+ num_classes ..................................... 1000
+ num_dataset_builder_threads ..................... 1
+ num_distributed_optimizer_instances ............. 1
+ num_experts ..................................... None
+ num_layers ...................................... 2
+ num_layers_at_end_in_bf16 ....................... 1
+ num_layers_at_start_in_bf16 ..................... 1
+ num_layers_per_virtual_pipeline_stage ........... None
+ num_query_groups ................................ 16
+ num_virtual_stages_per_pipeline_rank ............ None
+ num_workers ..................................... 2
+ object_storage_cache_path ....................... None
+ one_logger_async ................................ False
+ one_logger_project .............................. megatron-lm
+ one_logger_run_name ............................. None
+ onnx_safe ....................................... None
+ openai_gelu ..................................... False
+ optimizer ....................................... adam
+ optimizer_cpu_offload ........................... False
+ optimizer_offload_fraction ...................... 1.0
+ output_bert_embeddings .......................... False
+ overlap_cpu_optimizer_d2h_h2d ................... False
+ overlap_grad_reduce ............................. False
+ overlap_p2p_comm ................................ False
+ overlap_p2p_comm_warmup_flush ................... False
+ overlap_param_gather ............................ False
+ overlap_param_gather_with_optimizer_step ........ False
+ override_opt_param_scheduler .................... False
+ params_dtype .................................... torch.float16
+ patch_dim ....................................... 16
+ per_split_data_args_path ........................ None
+ perform_initialization .......................... True
+ pin_cpu_grads ................................... True
+ pin_cpu_params .................................. True
+ pipeline_model_parallel_comm_backend ............ None
+ pipeline_model_parallel_size .................... 1
+ pipeline_model_parallel_split_rank .............. None
+ position_embedding_type ......................... learned_absolute
+ pretrained_checkpoint ........................... None
+ profile ......................................... False
+ profile_ranks ................................... [0]
+ profile_step_end ................................ 12
+ profile_step_start .............................. 10
+ q_lora_rank ..................................... None
+ qk_head_dim ..................................... 128
+ qk_l2_norm ...................................... False
+ qk_layernorm .................................... False
+ qk_pos_emb_head_dim ............................. 64
+ query_in_block_prob ............................. 0.1
+ rampup_batch_size ............................... None
+ rank ............................................ 0
+ recompute_granularity ........................... None
+ recompute_method ................................ None
+ recompute_modules ............................... None
+ recompute_num_layers ............................ None
+ record_memory_history ........................... False
+ relative_attention_max_distance ................. 128
+ relative_attention_num_buckets .................. 32
+ replication ..................................... False
+ replication_factor .............................. 2
+ replication_jump ................................ None
+ rerun_mode ...................................... disabled
+ reset_attention_mask ............................ False
+ reset_position_ids .............................. False
+ result_rejected_tracker_filename ................ None
+ retriever_report_topk_accuracies ................ []
+ retriever_score_scaling ......................... False
+ retriever_seq_length ............................ 256
+ retro_add_retriever ............................. False
+ retro_attention_gate ............................ 1
+ retro_cyclic_train_iters ........................ None
+ retro_encoder_attention_dropout ................. 0.1
+ retro_encoder_hidden_dropout .................... 0.1
+ retro_encoder_layers ............................ 2
+ retro_num_neighbors ............................. 2
+ retro_num_retrieved_chunks ...................... 2
+ retro_project_dir ............................... None
+ retro_verify_neighbor_count ..................... True
+ rope_scaling_factor ............................. 8.0
+ rotary_base ..................................... 10000
+ rotary_interleaved .............................. False
+ rotary_percent .................................. 1.0
+ rotary_scaling_factor ........................... 1.0
+ rotary_seq_len_interpolation_factor ............. None
+ run_workload_inspector_server ................... False
+ sample_rate ..................................... 1.0
+ save ............................................ gpt-checkpoint
+ save_interval ................................... 16
+ scatter_gather_tensors_in_pipeline .............. True
+ seed ............................................ 1234
+ seq_length ...................................... 131072
+ sequence_parallel ............................... False
+ sgd_momentum .................................... 0.9
+ short_seq_prob .................................. 0.1
+ skip_train ......................................
False + skipped_train_samples ........................... 0 + spec ............................................ None + split ........................................... None + squared_relu .................................... False + start_weight_decay .............................. 0.1 + straggler_ctrlr_port ............................ 65535 + straggler_minmax_count .......................... 1 + suggested_communication_unit_size ............... None + swiglu .......................................... False + swin_backbone_type .............................. tiny + symmetric_ar_type ............................... None + te_rng_tracker .................................. False + tensor_model_parallel_size ...................... 2 + tensorboard_dir ................................. tensorboard-logs/ + tensorboard_log_interval ........................ 1 + tensorboard_queue_size .......................... 1000 + test_data_path .................................. None + test_mode ....................................... False + tiktoken_num_special_tokens ..................... 1000 + tiktoken_pattern ................................ None + tiktoken_special_tokens ......................... None + timing_log_level ................................ 0 + timing_log_option ............................... minmax + titles_data_path ................................ None + tokenizer_model ................................. None + tokenizer_type .................................. GPT2BPETokenizer + torch_fsdp2_reshard_after_forward ............... True + tp_comm_bootstrap_backend ....................... nccl + tp_comm_bulk_dgrad .............................. True + tp_comm_bulk_wgrad .............................. True + tp_comm_overlap ................................. False + tp_comm_overlap_ag .............................. True + tp_comm_overlap_cfg ............................. None + tp_comm_overlap_rs .............................. True + tp_comm_overlap_rs_dgrad ........................ False + tp_comm_split_ag ................................ True + tp_comm_split_rs ................................ True + train_data_path ................................. None + train_iters ..................................... 10 + train_samples ................................... None + train_sync_interval ............................. None + transformer_impl ................................ transformer_engine + transformer_pipeline_model_parallel_size ........ 1 + untie_embeddings_and_output_weights ............. False + use_checkpoint_args ............................. False + use_checkpoint_opt_param_scheduler .............. False + use_cpu_initialization .......................... None + use_custom_fsdp ................................. False + use_dist_ckpt ................................... True + use_dist_ckpt_deprecated ........................ False + use_distributed_optimizer ....................... False + use_flash_attn .................................. False + use_legacy_models ............................... False + use_mp_args_from_checkpoint_args ................ False + use_one_sent_docs ............................... False + use_persistent_ckpt_worker ...................... False + use_precision_aware_optimizer ................... False + use_pytorch_profiler ............................ False + use_ring_exchange_p2p ........................... False + use_rope_scaling ................................ False + use_rotary_position_embeddings .................. 
False + use_sharp ....................................... False + use_tokenizer_model_from_checkpoint_args ........ True + use_torch_fsdp2 ................................. False + use_torch_optimizer_for_cpu_offload ............. False + use_tp_pp_dp_mapping ............................ False + v_head_dim ...................................... 128 + valid_data_path ................................. None + variable_seq_lengths ............................ False + virtual_pipeline_model_parallel_size ............ None + vision_backbone_type ............................ vit + vision_pretraining .............................. False + vision_pretraining_type ......................... classify + vocab_extra_ids ................................. 0 + vocab_file ...................................... vocab.json + vocab_size ...................................... None + wandb_exp_name .................................. + wandb_project ................................... + wandb_save_dir .................................. + weight_decay .................................... 0.1 + weight_decay_incr_style ......................... constant + wgrad_deferral_limit ............................ 0 + world_size ...................................... 8 + yaml_cfg ........................................ None +-------------------- end of arguments --------------------- +INFO:megatron.core.num_microbatches_calculator:setting number of microbatches to constant 1 +> building GPT2BPETokenizer tokenizer ... +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING: TensorBoard writing requested but is not available (are you using PyTorch 1.1.0 or later?), no TensorBoard logs will be written. +WARNING: one_logger package is required to enable e2e metrics tracking. please go to https://confluence.nvidia.com/display/MLWFO/Package+Repositories for details to install it +INFO:megatron.training.initialize:Setting logging level to 0 + > padded vocab (size: 50257) with 175 dummy tokens (new size: 50432) +INFO:megatron.training.initialize:Setting logging level to 0 +WARNING:megatron.core.rerun_state_machine:RerunStateMachine initialized in mode RerunMode.DISABLED +> initializing torch distributed ... +> initialized tensor model parallel with size 2 +> initialized pipeline model parallel with size 1 +> setting random seeds to 1234 ... +> compiling dataset index builder ... +make: Entering directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +make: Nothing to be done for 'default'. +make: Leaving directory '/mnt/weka/home/hao.zhang/junda/attnserver-megatron/megatron/core/datasets' +>>> done with dataset index builder. Compilation time: 0.049 seconds +WARNING: constraints for invoking optimized fused softmax kernel are not met. We default back to unfused kernel invocations. +> compiling and loading fused kernels ... +>>> done with compiling and loading fused kernels. Compilation time: 2.172 seconds +time to initialize megatron (seconds): 7.444 +[after megatron is initialized] datetime: 2025-06-21 22:07:04 +building GPT model ... 
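The "padded vocab (size: 50257) with 175 dummy tokens (new size: 50432)" line above follows from rounding the tokenizer vocab up to a multiple of make_vocab_size_divisible_by (128) times tensor_model_parallel_size (2), so the embedding table shards evenly across the two tensor-parallel ranks. A minimal Python sketch of that rule, assuming the standard Megatron padding logic (pad_vocab_size is an illustrative name, not the actual function):

    import math

    def pad_vocab_size(orig_vocab_size: int,
                       make_vocab_size_divisible_by: int,
                       tensor_model_parallel_size: int) -> int:
        # Round up so each tensor-parallel rank gets an equal, aligned
        # shard of the embedding table. Illustrative reconstruction only.
        multiple = make_vocab_size_divisible_by * tensor_model_parallel_size
        return math.ceil(orig_vocab_size / multiple) * multiple

    # Matches the log: 50257 -> 50432, i.e. 175 dummy tokens.
    assert pad_vocab_size(50257, 128, 2) == 50432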
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 816396288
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 816396288
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 816396288
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 816396288
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 816396288
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 816396288
+INFO:megatron.core.distributed.distributed_data_parallel:Setting up DistributedDataParallel with config DistributedDataParallelConfig(grad_reduce_in_fp32=False, overlap_grad_reduce=False, overlap_param_gather=False, align_param_gather=False, use_distributed_optimizer=False, num_distributed_optimizer_instances=1, check_for_nan_in_grad=False, check_for_large_grads=False, bucket_size=None, pad_buckets_for_high_nccl_busbw=False, average_in_collective=False, fp8_param_gather=False, use_custom_fsdp=False, data_parallel_sharding_strategy='no_shard', gradient_reduce_div_fusion=True, suggested_communication_unit_size=None, preserve_fp32_weights=True, keep_fp8_transpose_cache_when_using_custom_fsdp=False, nccl_ub=False, fsdp_double_buffer=False)
+INFO:megatron.core.distributed.param_and_grad_buffer:Number of buckets for gradient all-reduce / reduce-scatter: 1
+Params for bucket 1 (816396288 elements, 816396288 padded size):
+ module.decoder.layers.1.self_attention.linear_qkv.weight
+ module.decoder.layers.1.self_attention.linear_proj.weight
+ module.decoder.layers.0.mlp.linear_fc2.bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.bias
+ module.decoder.final_layernorm.bias
+ module.decoder.layers.1.mlp.linear_fc2.weight
+ module.decoder.layers.1.self_attention.linear_proj.bias
+ module.decoder.layers.0.mlp.linear_fc1.weight
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_weight
+ module.embedding.word_embeddings.weight
+ module.decoder.final_layernorm.weight
+ module.decoder.layers.1.mlp.linear_fc1.layer_norm_weight
+ module.decoder.layers.1.self_attention.linear_qkv.bias
+ module.decoder.layers.0.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.1.mlp.linear_fc1.weight
+ module.decoder.layers.0.mlp.linear_fc1.bias
+ module.embedding.position_embeddings.weight
+ module.decoder.layers.1.mlp.linear_fc2.bias
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_weight
+ module.decoder.layers.0.self_attention.linear_qkv.weight
+ module.decoder.layers.0.self_attention.linear_proj.weight
+ module.decoder.layers.1.self_attention.linear_qkv.layer_norm_bias
+ module.decoder.layers.0.self_attention.linear_proj.bias
+ module.decoder.layers.0.mlp.linear_fc1.layer_norm_bias
+ module.decoder.layers.1.mlp.linear_fc1.bias
+ module.decoder.layers.0.mlp.linear_fc2.weight
+INFO:megatron.core.optimizer:Setting up optimizer with config OptimizerConfig(optimizer='adam', lr=0.0005, min_lr=0.0, decoupled_lr=None, decoupled_min_lr=None, weight_decay=0.1, fp16=True, bf16=False, params_dtype=torch.float16, use_precision_aware_optimizer=False, store_param_remainders=True, main_grads_dtype=torch.float32, main_params_dtype=torch.float32, exp_avg_dtype=torch.float32, exp_avg_sq_dtype=torch.float32, loss_scale=None, initial_loss_scale=4294967296, min_loss_scale=1.0, loss_scale_window=1000, hysteresis=2, adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-08, sgd_momentum=0.9, use_distributed_optimizer=False, overlap_param_gather_with_optimizer_step=False, optimizer_cpu_offload=False, optimizer_offload_fraction=1.0, use_torch_optimizer_for_cpu_offload=False, overlap_cpu_optimizer_d2h_h2d=False, pin_cpu_grads=True, pin_cpu_params=True, clip_grad=1.0, log_num_zeros_in_grad=False, barrier_with_L1_time=True, timers=, config_logger_dir='')
+INFO:megatron.core.optimizer_param_scheduler:> learning rate decay style: cosine
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (1, 0): 816396288
+>>> embedding
+>>> decoder
+>>> output_layer
+ > number of parameters on (tensor, pipeline) model parallel rank (0, 0): 816396288
+WARNING: could not find the metadata file gpt-checkpoint/latest_checkpointed_iteration.txt
+ will not load any checkpoints and will start from random
+(min, max) time across ranks (ms):
+ load-checkpoint ................................: (2.86, 3.32)
+[after model, optimizer, and learning rate scheduler are built] datetime: 2025-06-21 22:07:10
+> building train, validation, and test datasets ...
+ > datasets target sizes (minimum size):
+ train: 10
+ validation: 1
+ test: 1
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let mock = True, as both blend and blend_per_split are None
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split = 1,1,1, an arbitrarily even split, as mock is True
+INFO:megatron.core.datasets.blended_megatron_dataset_config:Let split_matrix = [(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)]
+> building train, validation, and test datasets for GPT ...
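The repeated per-rank count of 816396288 parameters is consistent with this configuration. The following back-of-the-envelope check assumes hidden_size = 4096 (64 attention heads x kv_channels 64) and ffn_hidden_size = 4 * 4096 = 16384; neither value appears in this excerpt, so both are assumptions, and the sharding rules are an illustrative reconstruction rather than Megatron's actual code:

    # Per-rank parameter count with tensor parallel size 2, 2 layers,
    # padded vocab 50432, and learned absolute position embeddings.
    h, ffn, layers, tp = 4096, 16384, 2, 2
    vocab_padded, seq = 50432, 131072
    groups, kv = 16, 64                        # num_query_groups, kv_channels

    word_emb = vocab_padded * h // tp          # column-parallel embedding shard
    pos_emb = seq * h                          # position table, replicated per rank
    qkv_out = h + 2 * groups * kv              # Q plus grouped K/V projections
    per_layer = (
        qkv_out * h // tp + qkv_out // tp      # linear_qkv weight + bias (sharded)
        + h * h // tp + h                      # linear_proj weight (sharded) + bias
        + ffn * h // tp + ffn // tp            # linear_fc1 weight + bias (sharded)
        + h * ffn // tp + h                    # linear_fc2 weight (sharded) + bias
        + 4 * h                                # two fused layernorms (weight + bias)
    )
    total = word_emb + pos_emb + layers * per_layer + 2 * h  # + final layernorm
    print(total)  # 816396288, matching the log

Under these assumptions the figure matches exactly; note that most of the per-rank total is the 131072 x 4096 position-embedding table, which is not split across tensor-parallel ranks.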
+INFO:megatron.core.datasets.blended_megatron_dataset_builder:Building MockGPTDataset splits with sizes=(10, 1, 1) and config=GPTDatasetConfig(random_seed=1234, sequence_length=131072, blend=None, blend_per_split=None, split='1,1,1', split_matrix=[(0, 0.3333333333333333), (0.3333333333333333, 0.6666666666666666), (0.6666666666666666, 1.0)], num_dataset_builder_threads=1, path_to_cache=None, mmap_bin_files=True, mock=True, tokenizer=, mid_level_dataset_surplus=0.005, reset_position_ids=False, reset_attention_mask=False, eod_mask_loss=False, create_attention_mask=True, drop_last_partial_validation_sequence=True, add_extra_token_to_sequence=True, object_storage_cache_path=None)
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset train indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.005867 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 520
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset valid indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001699 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 520
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+INFO:megatron.core.datasets.gpt_dataset:Build and save the MockGPTDataset test indices
+DEBUG:megatron.core.datasets.gpt_dataset:> separate_final_epoch: False
+WARNING:megatron.core.datasets.gpt_dataset:Unable to save MockGPTDataset indexes because path_to_cache is None
+DEBUG:megatron.core.datasets.gpt_dataset: > time elapsed: 0.001503 seconds
+INFO:megatron.core.datasets.gpt_dataset:> total number of samples: 520
+INFO:megatron.core.datasets.gpt_dataset:> total number of epochs: 1
+> finished creating GPT datasets ...
+[after dataloaders are built] datetime: 2025-06-21 22:07:10
+done with setup ...
+(min, max) time across ranks (ms):
+ model-and-optimizer-setup ......................: (6695.20, 6695.51)
+ train/valid/test-data-iterators-setup ..........: (18.56, 139.32)
+training ...
+Setting rerun_state_machine.current_iteration to 0...
+[before the start of training step] datetime: 2025-06-21 22:07:10
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 7 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 0 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 5 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 4 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 3 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 1 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 2 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
+WARNING:megatron.core.utils:CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+['Traceback (most recent call last):\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 446, in forward_step\n (tokens, labels, loss_mask, attention_mask, position_ids), token_lens = get_batch(data_iterator)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 284, in get_batch\n batch = next(global_batches)\n ^^^^^^^^^^^^^^^^^^^^\n', ' File "/mnt/weka/home/hao.zhang/junda/attnserver-megatron/./pretrain_gpt_profile.py", line 226, in setup_batches\n attention_mask = torch.ones(\n ^^^^^^^^^^^\n', 'torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 1024.00 GiB. GPU 6 has a total capacity of 139.81 GiB of which 131.44 GiB is free. Including non-PyTorch memory, this process has 8.36 GiB memory in use. Of the allocated memory 6.12 GiB is allocated by PyTorch, and 804.39 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)\n']
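All eight ranks fail identically in setup_batches: per the traceback, torch.ones(...) is building a dense attention mask before any context-parallel split, and a dense [b, 1, s, s] mask scales quadratically with sequence length. The arithmetic below is illustrative only: the mask dtype and exact shape are not visible in the truncated torch.ones call, so treat these figures as order-of-magnitude against the logged 1024.00 GiB request.

    # Quadratic memory cost of a dense [b, 1, s, s] attention mask.
    s = 131072                   # seq_length from the arguments above
    b = 4                        # leading dim seen in the batch tensor logs
    for name, bytes_per_elem in (("bool", 1), ("fp16", 2), ("fp32", 4)):
        gib = b * s * s * bytes_per_elem / 2**30
        print(f"{name}: {gib:,.0f} GiB")   # 64 / 128 / 256 GiB

Even the smallest variant cannot fit alongside model and optimizer state on a 139.81 GiB GPU, so the OOM is expected at seq_length 131072 with create_attention_mask=True; the PYTORCH_CUDA_ALLOC_CONF hint in the warning addresses fragmentation, not an allocation of this magnitude.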