ggml_init_cublas: found 1 CUDA devices:
Device 0: NVIDIA A10G, compute capability 8.6
llama_model_loader: loaded meta data with 16 key-value pairs and 291 tensors from llama-2-7b-chat-q5_k_m.gguf (version GGUF V2 (latest))
llama_model_loader: - tensor 0: token_embd.weight q5_K [ 4096, 32000, 1, 1 ]
llama_model_loader: - tensor 1: output_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 2: output.weight q6_K [ 4096, 32000, 1, 1 ]
llama_model_loader: - tensor 3: blk.0.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 4: blk.0.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 5: blk.0.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 6: blk.0.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 7: blk.0.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 8: blk.0.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 9: blk.0.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 10: blk.0.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 11: blk.0.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 12: blk.1.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 13: blk.1.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 14: blk.1.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 15: blk.1.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 16: blk.1.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 17: blk.1.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 18: blk.1.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 19: blk.1.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 20: blk.1.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 21: blk.2.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 22: blk.2.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 23: blk.2.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 24: blk.2.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 25: blk.2.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 26: blk.2.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 27: blk.2.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 28: blk.2.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 29: blk.2.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 30: blk.3.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 31: blk.3.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 32: blk.3.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 33: blk.3.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 34: blk.3.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 35: blk.3.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 36: blk.3.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 37: blk.3.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 38: blk.3.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 39: blk.4.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 40: blk.4.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 41: blk.4.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 42: blk.4.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 43: blk.4.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 44: blk.4.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 45: blk.4.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 46: blk.4.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 47: blk.4.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 48: blk.5.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 49: blk.5.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 50: blk.5.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 51: blk.5.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 52: blk.5.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 53: blk.5.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 54: blk.5.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 55: blk.5.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 56: blk.5.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 57: blk.6.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 58: blk.6.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 59: blk.6.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 60: blk.6.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 61: blk.6.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 62: blk.6.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 63: blk.6.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 64: blk.6.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 65: blk.6.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 66: blk.7.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 67: blk.7.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 68: blk.7.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 69: blk.7.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 70: blk.7.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 71: blk.7.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 72: blk.7.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 73: blk.7.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 74: blk.7.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 75: blk.8.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 76: blk.8.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 77: blk.8.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 78: blk.8.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 79: blk.8.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 80: blk.8.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 81: blk.8.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 82: blk.8.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 83: blk.8.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 84: blk.9.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 85: blk.9.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 86: blk.9.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 87: blk.9.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 88: blk.9.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 89: blk.9.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 90: blk.9.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 91: blk.9.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 92: blk.9.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 93: blk.10.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 94: blk.10.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 95: blk.10.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 96: blk.10.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 97: blk.10.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 98: blk.10.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 99: blk.10.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 100: blk.10.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 101: blk.10.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 102: blk.11.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 103: blk.11.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 104: blk.11.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 105: blk.11.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 106: blk.11.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 107: blk.11.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 108: blk.11.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 109: blk.11.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 110: blk.11.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 111: blk.12.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 112: blk.12.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 113: blk.12.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 114: blk.12.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 115: blk.12.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 116: blk.12.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 117: blk.12.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 118: blk.12.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 119: blk.12.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 120: blk.13.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 121: blk.13.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 122: blk.13.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 123: blk.13.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 124: blk.13.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 125: blk.13.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 126: blk.13.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 127: blk.13.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 128: blk.13.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 129: blk.14.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 130: blk.14.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 131: blk.14.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 132: blk.14.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 133: blk.14.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 134: blk.14.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 135: blk.14.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 136: blk.14.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 137: blk.14.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 138: blk.15.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 139: blk.15.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 140: blk.15.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 141: blk.15.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 142: blk.15.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 143: blk.15.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 144: blk.15.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 145: blk.15.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 146: blk.15.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 147: blk.16.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 148: blk.16.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 149: blk.16.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 150: blk.16.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 151: blk.16.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 152: blk.16.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 153: blk.16.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 154: blk.16.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 155: blk.16.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 156: blk.17.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 157: blk.17.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 158: blk.17.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 159: blk.17.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 160: blk.17.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 161: blk.17.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 162: blk.17.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 163: blk.17.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 164: blk.17.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 165: blk.18.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 166: blk.18.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 167: blk.18.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 168: blk.18.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 169: blk.18.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 170: blk.18.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 171: blk.18.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 172: blk.18.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 173: blk.18.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 174: blk.19.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 175: blk.19.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 176: blk.19.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 177: blk.19.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 178: blk.19.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 179: blk.19.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 180: blk.19.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 181: blk.19.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 182: blk.19.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 183: blk.20.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 184: blk.20.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 185: blk.20.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 186: blk.20.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 187: blk.20.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 188: blk.20.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 189: blk.20.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 190: blk.20.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 191: blk.20.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 192: blk.21.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 193: blk.21.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 194: blk.21.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 195: blk.21.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 196: blk.21.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 197: blk.21.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 198: blk.21.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 199: blk.21.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 200: blk.21.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 201: blk.22.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 202: blk.22.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 203: blk.22.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 204: blk.22.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 205: blk.22.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 206: blk.22.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 207: blk.22.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 208: blk.22.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 209: blk.22.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 210: blk.23.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 211: blk.23.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 212: blk.23.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 213: blk.23.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 214: blk.23.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 215: blk.23.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 216: blk.23.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 217: blk.23.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 218: blk.23.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 219: blk.24.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 220: blk.24.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 221: blk.24.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 222: blk.24.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 223: blk.24.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 224: blk.24.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 225: blk.24.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 226: blk.24.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 227: blk.24.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 228: blk.25.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 229: blk.25.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 230: blk.25.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 231: blk.25.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 232: blk.25.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 233: blk.25.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 234: blk.25.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 235: blk.25.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 236: blk.25.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 237: blk.26.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 238: blk.26.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 239: blk.26.attn_v.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 240: blk.26.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 241: blk.26.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 242: blk.26.ffn_down.weight q5_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 243: blk.26.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 244: blk.26.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 245: blk.26.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 246: blk.27.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 247: blk.27.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 248: blk.27.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 249: blk.27.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 250: blk.27.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 251: blk.27.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 252: blk.27.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 253: blk.27.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 254: blk.27.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 255: blk.28.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 256: blk.28.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 257: blk.28.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 258: blk.28.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 259: blk.28.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 260: blk.28.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 261: blk.28.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 262: blk.28.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 263: blk.28.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 264: blk.29.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 265: blk.29.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 266: blk.29.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 267: blk.29.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 268: blk.29.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 269: blk.29.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 270: blk.29.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 271: blk.29.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 272: blk.29.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 273: blk.30.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 274: blk.30.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 275: blk.30.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 276: blk.30.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 277: blk.30.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 278: blk.30.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 279: blk.30.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 280: blk.30.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 281: blk.30.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 282: blk.31.attn_q.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 283: blk.31.attn_k.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 284: blk.31.attn_v.weight q6_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 285: blk.31.attn_output.weight q5_K [ 4096, 4096, 1, 1 ]
llama_model_loader: - tensor 286: blk.31.ffn_gate.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 287: blk.31.ffn_down.weight q6_K [ 11008, 4096, 1, 1 ]
llama_model_loader: - tensor 288: blk.31.ffn_up.weight q5_K [ 4096, 11008, 1, 1 ]
llama_model_loader: - tensor 289: blk.31.attn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - tensor 290: blk.31.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]
llama_model_loader: - kv 0: general.architecture str
llama_model_loader: - kv 1: general.name str
llama_model_loader: - kv 2: llama.context_length u32
llama_model_loader: - kv 3: llama.embedding_length u32
llama_model_loader: - kv 4: llama.block_count u32
llama_model_loader: - kv 5: llama.feed_forward_length u32
llama_model_loader: - kv 6: llama.rope.dimension_count u32
llama_model_loader: - kv 7: llama.attention.head_count u32
llama_model_loader: - kv 8: llama.attention.head_count_kv u32
llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32
llama_model_loader: - kv 10: general.file_type u32
llama_model_loader: - kv 11: tokenizer.ggml.model str
llama_model_loader: - kv 12: tokenizer.ggml.tokens arr
llama_model_loader: - kv 13: tokenizer.ggml.scores arr
llama_model_loader: - kv 14: tokenizer.ggml.token_type arr
llama_model_loader: - kv 15: general.quantization_version u32
llama_model_loader: - type f32: 65 tensors
llama_model_loader: - type q5_K: 193 tensors
llama_model_loader: - type q6_K: 33 tensors
llm_load_print_meta: format = GGUF V2 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 32000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: n_ctx_train = 4096
llm_load_print_meta: n_embd = 4096
llm_load_print_meta: n_head = 32
llm_load_print_meta: n_head_kv = 32
llm_load_print_meta: n_layer = 32
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_gqa = 1
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: n_ff = 11008
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: model type = 7B
llm_load_print_meta: model ftype = mostly Q5_K - Medium
llm_load_print_meta: model params = 6.74 B
llm_load_print_meta: model size = 4.45 GiB (5.68 BPW)
llm_load_print_meta: general.name = LLaMA v2
llm_load_print_meta: BOS token = 1 '<s>'
llm_load_print_meta: EOS token = 2 '</s>'
llm_load_print_meta: UNK token = 0 '<unk>'
llm_load_print_meta: LF token = 13 '<0x0A>'
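
A quick sanity check on the metadata above: the reported 5.68 BPW is just the model size divided by the parameter count. A minimal sketch (both printed figures are rounded, so the result lands a hair below 5.68):

```python
# Sanity-check the reported bits-per-weight from the rounded figures above.
size_bits = 4.45 * 2**30 * 8   # "model size = 4.45 GiB", in bits
params    = 6.74e9             # "model params = 6.74 B"
print(f"{size_bits / params:.2f} BPW")  # ~5.67, vs. the reported 5.68
```
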
llm_load_tensors: ggml ctx size = 0.10 MB
llm_load_tensors: using CUDA for GPU acceleration
llm_load_tensors: mem required = 4560.96 MB
llm_load_tensors: offloading 0 repeating layers to GPU
llm_load_tensors: offloaded 0/35 layers to GPU
llm_load_tensors: VRAM used: 0.00 MB
..................................................................................................
llama_new_context_with_model: n_ctx = 512
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_new_context_with_model: kv self size = 256.00 MB
llama_new_context_with_model: compute buffer total size = 76.63 MB
llama_new_context_with_model: VRAM scratch buffer: 70.50 MB
llama_new_context_with_model: total VRAM used: 70.50 MB (model: 0.00 MB, context: 70.50 MB)
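
The 256.00 MB KV cache also follows from the hyperparameters printed earlier: K and V tensors for every layer, at f16 precision, over the 512-token context. A minimal sketch:

```python
# kv self size = 2 tensors (K and V) x n_layer x n_ctx x n_embd x sizeof(f16)
n_layer, n_ctx, n_embd, f16_bytes = 32, 512, 4096, 2
kv_bytes = 2 * n_layer * n_ctx * n_embd * f16_bytes
print(kv_bytes / 2**20, "MB")  # 256.0, matching the line above
```
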
main: seed: 1700419101
main: model base = 'llama-2-7b-chat-q5_k_m.gguf'
main: init model
print_params: n_vocab: 32000
print_params: n_ctx: 128
print_params: n_embd: 4096
print_params: n_ff: 11008
print_params: n_head: 32
print_params: n_head_kv: 32
print_params: n_layer: 32
print_params: norm_rms_eps : 0.000010
print_params: rope_freq_base : 10000.000000
print_params: rope_freq_scale : 1.000000
print_lora_params: n_rank_attention_norm : 1
print_lora_params: n_rank_wq : 4
print_lora_params: n_rank_wk : 4
print_lora_params: n_rank_wv : 4
print_lora_params: n_rank_wo : 4
print_lora_params: n_rank_ffn_norm : 1
print_lora_params: n_rank_w1 : 4
print_lora_params: n_rank_w2 : 4
print_lora_params: n_rank_w3 : 4
print_lora_params: n_rank_tok_embeddings : 4
print_lora_params: n_rank_norm : 1
print_lora_params: n_rank_output : 4
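
The rank settings above fix how many trainable parameters the adapter holds: a rank-r LoRA on an n_out x n_in weight adds r*(n_in + n_out) parameters for its A and B factors. A rough sketch under these settings; note that the reported lora_size below (80.9 MB) is about twice the raw f32 parameter data, presumably because finetune's accounting includes additional per-tensor state, so treat this as an order-of-magnitude check:

```python
# Rough count of trainable LoRA parameters implied by the ranks above.
n_embd, n_ff, n_vocab, n_layer, r = 4096, 11008, 32000, 32, 4

def lora(n_out, n_in, rank=r):
    # A rank-`rank` adapter on an (n_out x n_in) weight stores the
    # low-rank factors A (rank x n_in) and B (n_out x rank).
    return rank * (n_in + n_out)

per_layer = (4 * lora(n_embd, n_embd)        # wq, wk, wv, wo
             + 3 * lora(n_ff, n_embd)        # w1 (gate), w2 (down), w3 (up)
             + 2 * lora(1, n_embd, rank=1))  # attn_norm, ffn_norm
total = (n_layer * per_layer
         + 2 * lora(n_vocab, n_embd)         # token embeddings + output head
         + lora(1, n_embd, rank=1))          # final norm
print(f"{total:,} params, ~{total * 4 / 2**20:.0f} MB as f32")  # ~40 MB
```
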
main: total train_iterations 0
main: seen train_samples 0
main: seen train_tokens 0
main: completed train_epochs 0
main: lora_size = 84807904 bytes (80.9 MB)
main: opt_size = 126592864 bytes (120.7 MB)
main: opt iter 0
main: input_size = 131076128 bytes (125.0 MB)
main: compute_size = 14064566880 bytes (13413.0 MB)
main: evaluation order = RIGHT_TO_LEFT
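
input_size pins down the batch size, which the log never states directly. The "sample=" counter below advances by 8 per iteration, and n_batch = 8 reproduces the figure almost exactly: f32 target probabilities over the vocabulary plus the i32 input tokens. A minimal sketch:

```python
# Reconstruct input_size assuming n_batch = 8 (the per-iteration stride
# of the "sample=" counter in the training log below).
n_ctx, n_vocab, n_batch = 128, 32000, 8
target_probs = n_vocab * n_ctx * n_batch * 4  # f32 targets
tokens_input = n_ctx * n_batch * 4            # i32 token ids
print(target_probs + tokens_input)  # 131_076_096 vs. reported 131_076_128;
# the 32-byte remainder is presumably tensor/allocator bookkeeping.
```
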
main: tokenize training data
tokenize_file: warning: found 2 samples (max length 197) that exceed context length of 128. samples will be cut off.
tokenize_file: warning: found 190 samples (min length 22) that are shorter than context length of 128.
tokenize_file: total number of samples: 192
main: number of training tokens: 8099
main: number of unique tokens: 948
main: train data seems to have changed. restarting shuffled epoch.
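
Those 192 samples also set the epoch cadence: at a batch of 8, one epoch is 192 / 8 = 24 optimizer iterations, which is exactly when the reshuffle lines appear below. A minimal check:

```python
# Epoch length in iterations, given 192 samples and an assumed batch of 8.
samples, n_batch = 192, 8
iters_per_epoch = samples // n_batch  # 24
print([e * iters_per_epoch - 1 for e in range(1, 7)])
# [23, 47, 71, 95, 119, 143] -- the last iteration before each reshuffle
```
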
main: begin training
main: work_size = 1024512 bytes (1.0 MB)
train_opt_callback: iter= 0 sample=1/192 sched=0.000000 loss=0.000000 |->
train_opt_callback: iter= 1 sample=9/192 sched=0.010000 loss=7.639391 dt=00:32:55 eta=5d 19:55:06 |->
train_opt_callback: iter= 2 sample=17/192 sched=0.020000 loss=8.123040 dt=00:32:58 eta=5d 19:35:29 |>
train_opt_callback: iter= 3 sample=25/192 sched=0.030000 loss=6.609226 dt=00:32:58 eta=5d 19:01:49 |----------->
train_opt_callback: iter= 4 sample=33/192 sched=0.040000 loss=7.152853 dt=00:32:56 eta=5d 18:21:48 |------>
train_opt_callback: iter= 5 sample=41/192 sched=0.050000 loss=6.951632 dt=00:32:51 eta=5d 17:29:11 |-------->
train_opt_callback: iter= 6 sample=49/192 sched=0.060000 loss=5.657753 dt=00:32:48 eta=5d 16:43:22 |--------------------->
train_opt_callback: iter= 7 sample=57/192 sched=0.070000 loss=4.757537 dt=00:32:51 eta=5d 16:23:32 |------------------------------>
train_opt_callback: iter= 8 sample=65/192 sched=0.080000 loss=4.564727 dt=00:32:43 eta=5d 15:14:19 |-------------------------------->
train_opt_callback: iter= 9 sample=73/192 sched=0.090000 loss=3.958102 dt=00:32:42 eta=5d 14:37:57 |-------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-10.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 10 sample=81/192 sched=0.100000 loss=3.537075 dt=00:32:55 eta=5d 14:57:33 |------------------------------------------>
train_opt_callback: iter= 11 sample=89/192 sched=0.110000 loss=2.946956 dt=00:33:00 eta=5d 14:45:12 |------------------------------------------------>
train_opt_callback: iter= 12 sample=97/192 sched=0.120000 loss=2.738868 dt=00:33:02 eta=5d 14:22:33 |-------------------------------------------------->
train_opt_callback: iter= 13 sample=105/192 sched=0.130000 loss=2.126371 dt=00:33:00 eta=5d 13:40:51 |-------------------------------------------------------->
train_opt_callback: iter= 14 sample=113/192 sched=0.140000 loss=1.546756 dt=00:32:58 eta=5d 13:01:56 |-------------------------------------------------------------->
train_opt_callback: iter= 15 sample=121/192 sched=0.150000 loss=1.432991 dt=00:33:00 eta=5d 12:36:59 |--------------------------------------------------------------->
train_opt_callback: iter= 16 sample=129/192 sched=0.160000 loss=1.533808 dt=00:32:54 eta=5d 11:36:32 |-------------------------------------------------------------->
train_opt_callback: iter= 17 sample=137/192 sched=0.170000 loss=1.147277 dt=00:33:00 eta=5d 11:27:14 |------------------------------------------------------------------>
train_opt_callback: iter= 18 sample=145/192 sched=0.180000 loss=1.295468 dt=00:33:12 eta=5d 11:42:18 |---------------------------------------------------------------->
train_opt_callback: iter= 19 sample=153/192 sched=0.190000 loss=1.036252 dt=00:33:05 eta=5d 10:43:54 |------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-20.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 20 sample=161/192 sched=0.200000 loss=1.117745 dt=00:33:03 eta=5d 10:03:17 |------------------------------------------------------------------>
train_opt_callback: iter= 21 sample=169/192 sched=0.210000 loss=0.919692 dt=00:33:04 eta=5d 09:31:31 |-------------------------------------------------------------------->
train_opt_callback: iter= 22 sample=177/192 sched=0.220000 loss=0.958725 dt=00:32:52 eta=5d 08:11:11 |-------------------------------------------------------------------->
train_opt_callback: iter= 23 sample=185/192 sched=0.230000 loss=0.677074 dt=00:33:00 eta=5d 08:12:24 |----------------------------------------------------------------------->
train_opt_callback: reshuffle samples. completed epochs: 1
train_opt_callback: iter= 24 sample=1/192 sched=0.240000 loss=0.648157 dt=00:33:16 eta=5d 08:39:19 |----------------------------------------------------------------------->
train_opt_callback: iter= 25 sample=9/192 sched=0.250000 loss=0.820849 dt=00:33:14 eta=5d 07:57:04 |--------------------------------------------------------------------->
train_opt_callback: iter= 26 sample=17/192 sched=0.260000 loss=0.679941 dt=00:33:12 eta=5d 07:17:18 |----------------------------------------------------------------------->
train_opt_callback: iter= 27 sample=25/192 sched=0.270000 loss=0.553361 dt=00:33:02 eta=5d 06:07:10 |------------------------------------------------------------------------>
train_opt_callback: iter= 28 sample=33/192 sched=0.280000 loss=0.656925 dt=00:32:59 eta=5d 05:22:14 |----------------------------------------------------------------------->
train_opt_callback: iter= 29 sample=41/192 sched=0.290000 loss=0.681380 dt=00:33:11 eta=5d 05:34:39 |----------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-30.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 30 sample=49/192 sched=0.300000 loss=0.628787 dt=00:33:11 eta=5d 05:02:11 |----------------------------------------------------------------------->
train_opt_callback: iter= 31 sample=57/192 sched=0.310000 loss=0.453469 dt=00:33:25 eta=5d 05:20:01 |------------------------------------------------------------------------->
train_opt_callback: iter= 32 sample=65/192 sched=0.320000 loss=0.359532 dt=00:33:01 eta=5d 03:18:35 |-------------------------------------------------------------------------->
train_opt_callback: iter= 33 sample=73/192 sched=0.330000 loss=0.424839 dt=00:32:54 eta=5d 02:17:13 |------------------------------------------------------------------------->
train_opt_callback: iter= 34 sample=81/192 sched=0.340000 loss=0.433118 dt=00:33:01 eta=5d 02:10:01 |------------------------------------------------------------------------->
train_opt_callback: iter= 35 sample=89/192 sched=0.350000 loss=0.473854 dt=00:32:59 eta=5d 01:31:52 |------------------------------------------------------------------------->
train_opt_callback: iter= 36 sample=97/192 sched=0.360000 loss=0.399268 dt=00:33:03 eta=5d 01:12:33 |------------------------------------------------------------------------->
train_opt_callback: iter= 37 sample=105/192 sched=0.370000 loss=0.560971 dt=00:31:58 eta=4d 20:41:25 |------------------------------------------------------------------------>
train_opt_callback: iter= 38 sample=113/192 sched=0.380000 loss=0.401658 dt=00:20:32 eta=3d 02:37:22 |------------------------------------------------------------------------->
train_opt_callback: iter= 39 sample=121/192 sched=0.390000 loss=0.358174 dt=00:17:02 eta=2d 13:38:29 |-------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-40.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 40 sample=129/192 sched=0.400000 loss=0.378025 dt=00:15:54 eta=2d 09:17:30 |-------------------------------------------------------------------------->
train_opt_callback: iter= 41 sample=137/192 sched=0.410000 loss=0.428651 dt=00:15:37 eta=2d 07:59:48 |------------------------------------------------------------------------->
train_opt_callback: iter= 42 sample=145/192 sched=0.420000 loss=0.323726 dt=00:15:40 eta=2d 07:55:10 |-------------------------------------------------------------------------->
train_opt_callback: iter= 43 sample=153/192 sched=0.430000 loss=0.296268 dt=00:15:40 eta=2d 07:37:30 |-------------------------------------------------------------------------->
train_opt_callback: iter= 44 sample=161/192 sched=0.440000 loss=0.285518 dt=00:15:32 eta=2d 06:55:17 |--------------------------------------------------------------------------->
train_opt_callback: iter= 45 sample=169/192 sched=0.450000 loss=0.342957 dt=00:15:33 eta=2d 06:43:16 |-------------------------------------------------------------------------->
train_opt_callback: iter= 46 sample=177/192 sched=0.460000 loss=0.194749 dt=00:15:46 eta=2d 07:11:14 |--------------------------------------------------------------------------->
train_opt_callback: iter= 47 sample=185/192 sched=0.470000 loss=0.244491 dt=00:15:39 eta=2d 06:34:14 |--------------------------------------------------------------------------->
train_opt_callback: reshuffle samples. completed epochs: 2
train_opt_callback: iter= 48 sample=1/192 sched=0.480000 loss=0.222005 dt=00:15:37 eta=2d 06:10:32 |--------------------------------------------------------------------------->
train_opt_callback: iter= 49 sample=9/192 sched=0.490000 loss=0.342179 dt=00:15:37 eta=2d 05:54:36 |-------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-50.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 50 sample=17/192 sched=0.500000 loss=0.334579 dt=00:15:37 eta=2d 05:38:32 |-------------------------------------------------------------------------->
train_opt_callback: iter= 51 sample=25/192 sched=0.510000 loss=0.361630 dt=00:15:33 eta=2d 05:09:21 |-------------------------------------------------------------------------->
train_opt_callback: iter= 52 sample=33/192 sched=0.520000 loss=0.188962 dt=00:15:34 eta=2d 04:56:45 |---------------------------------------------------------------------------->
train_opt_callback: iter= 53 sample=41/192 sched=0.530000 loss=0.238500 dt=00:15:38 eta=2d 04:54:09 |--------------------------------------------------------------------------->
train_opt_callback: iter= 54 sample=49/192 sched=0.540000 loss=0.263577 dt=00:15:34 eta=2d 04:26:19 |--------------------------------------------------------------------------->
train_opt_callback: iter= 55 sample=57/192 sched=0.550000 loss=0.194148 dt=00:15:36 eta=2d 04:16:57 |--------------------------------------------------------------------------->
train_opt_callback: iter= 56 sample=65/192 sched=0.560000 loss=0.216232 dt=00:15:29 eta=2d 03:38:30 |--------------------------------------------------------------------------->
train_opt_callback: iter= 57 sample=73/192 sched=0.570000 loss=0.113424 dt=00:15:31 eta=2d 03:30:22 |---------------------------------------------------------------------------->
train_opt_callback: iter= 58 sample=81/192 sched=0.580000 loss=0.191995 dt=00:15:37 eta=2d 03:32:15 |--------------------------------------------------------------------------->
train_opt_callback: iter= 59 sample=89/192 sched=0.590000 loss=0.183454 dt=00:15:31 eta=2d 02:59:52 |---------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-60.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 60 sample=97/192 sched=0.600000 loss=0.155663 dt=00:15:34 eta=2d 02:53:06 |---------------------------------------------------------------------------->
train_opt_callback: iter= 61 sample=105/192 sched=0.610000 loss=0.198579 dt=00:15:32 eta=2d 02:29:39 |--------------------------------------------------------------------------->
train_opt_callback: iter= 62 sample=113/192 sched=0.620000 loss=0.209679 dt=00:15:27 eta=2d 02:00:01 |--------------------------------------------------------------------------->
train_opt_callback: iter= 63 sample=121/192 sched=0.630000 loss=0.134847 dt=00:15:33 eta=2d 02:02:22 |---------------------------------------------------------------------------->
train_opt_callback: iter= 64 sample=129/192 sched=0.640000 loss=0.284779 dt=00:15:36 eta=2d 01:57:23 |--------------------------------------------------------------------------->
train_opt_callback: iter= 65 sample=137/192 sched=0.650000 loss=0.155553 dt=00:15:35 eta=2d 01:38:03 |---------------------------------------------------------------------------->
train_opt_callback: iter= 66 sample=145/192 sched=0.660000 loss=0.231996 dt=00:15:34 eta=2d 01:17:50 |--------------------------------------------------------------------------->
train_opt_callback: iter= 67 sample=153/192 sched=0.670000 loss=0.237264 dt=00:15:29 eta=2d 00:47:50 |--------------------------------------------------------------------------->
train_opt_callback: iter= 68 sample=161/192 sched=0.680000 loss=0.176222 dt=00:15:34 eta=2d 00:49:20 |---------------------------------------------------------------------------->
train_opt_callback: iter= 69 sample=169/192 sched=0.690000 loss=0.126854 dt=00:15:32 eta=2d 00:25:54 |---------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-70.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 70 sample=177/192 sched=0.700000 loss=0.163200 dt=00:15:30 eta=2d 00:05:29 |---------------------------------------------------------------------------->
train_opt_callback: iter= 71 sample=185/192 sched=0.710000 loss=0.382751 dt=00:15:29 eta=1d 23:46:04 |-------------------------------------------------------------------------->
train_opt_callback: reshuffle samples. completed epochs: 3
train_opt_callback: iter= 72 sample=1/192 sched=0.720000 loss=0.249731 dt=00:15:30 eta=1d 23:35:02 |--------------------------------------------------------------------------->
train_opt_callback: iter= 73 sample=9/192 sched=0.730000 loss=0.240528 dt=00:15:35 eta=1d 23:34:26 |--------------------------------------------------------------------------->
train_opt_callback: iter= 74 sample=17/192 sched=0.740000 loss=0.125279 dt=00:15:29 eta=1d 23:00:17 |---------------------------------------------------------------------------->
train_opt_callback: iter= 75 sample=25/192 sched=0.750000 loss=0.230016 dt=00:15:34 eta=1d 22:57:46 |--------------------------------------------------------------------------->
train_opt_callback: iter= 76 sample=33/192 sched=0.760000 loss=0.187424 dt=00:15:32 eta=1d 22:36:58 |---------------------------------------------------------------------------->
train_opt_callback: iter= 77 sample=41/192 sched=0.770000 loss=0.198255 dt=00:15:37 eta=1d 22:37:52 |--------------------------------------------------------------------------->
train_opt_callback: iter= 78 sample=49/192 sched=0.780000 loss=0.058203 dt=00:15:31 eta=1d 22:03:32 |----------------------------------------------------------------------------->
train_opt_callback: iter= 79 sample=57/192 sched=0.790000 loss=0.110901 dt=00:15:27 eta=1d 21:36:39 |---------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-80.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 80 sample=65/192 sched=0.800000 loss=0.198940 dt=00:15:41 eta=1d 22:00:42 |--------------------------------------------------------------------------->
train_opt_callback: iter= 81 sample=73/192 sched=0.810000 loss=0.101694 dt=00:15:39 eta=1d 21:38:50 |---------------------------------------------------------------------------->
train_opt_callback: iter= 82 sample=81/192 sched=0.820000 loss=0.140884 dt=00:15:35 eta=1d 21:14:18 |---------------------------------------------------------------------------->
train_opt_callback: iter= 83 sample=89/192 sched=0.830000 loss=0.160913 dt=00:15:30 eta=1d 20:42:40 |---------------------------------------------------------------------------->
train_opt_callback: iter= 84 sample=97/192 sched=0.840000 loss=0.147126 dt=00:15:34 eta=1d 20:39:51 |---------------------------------------------------------------------------->
train_opt_callback: iter= 85 sample=105/192 sched=0.850000 loss=0.104718 dt=00:15:35 eta=1d 20:24:55 |---------------------------------------------------------------------------->
train_opt_callback: iter= 86 sample=113/192 sched=0.860000 loss=0.143592 dt=00:15:31 eta=1d 19:58:35 |---------------------------------------------------------------------------->
train_opt_callback: iter= 87 sample=121/192 sched=0.870000 loss=0.101460 dt=00:15:29 eta=1d 19:39:18 |---------------------------------------------------------------------------->
train_opt_callback: iter= 88 sample=129/192 sched=0.880000 loss=0.172694 dt=00:15:31 eta=1d 19:26:54 |---------------------------------------------------------------------------->
train_opt_callback: iter= 89 sample=137/192 sched=0.890000 loss=0.097297 dt=00:15:29 eta=1d 19:07:34 |---------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-90.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 90 sample=145/192 sched=0.900000 loss=0.203012 dt=00:15:27 eta=1d 18:47:01 |--------------------------------------------------------------------------->
train_opt_callback: iter= 91 sample=153/192 sched=0.910000 loss=0.061470 dt=00:15:31 eta=1d 18:41:13 |----------------------------------------------------------------------------->
train_opt_callback: iter= 92 sample=161/192 sched=0.920000 loss=0.186582 dt=00:15:32 eta=1d 18:29:30 |---------------------------------------------------------------------------->
train_opt_callback: iter= 93 sample=169/192 sched=0.930000 loss=0.136616 dt=00:15:30 eta=1d 18:08:52 |---------------------------------------------------------------------------->
train_opt_callback: iter= 94 sample=177/192 sched=0.940000 loss=0.107690 dt=00:15:26 eta=1d 17:41:06 |---------------------------------------------------------------------------->
train_opt_callback: iter= 95 sample=185/192 sched=0.950000 loss=0.125770 dt=00:15:33 eta=1d 17:45:06 |---------------------------------------------------------------------------->
train_opt_callback: reshuffle samples. completed epochs: 4
train_opt_callback: iter= 96 sample=1/192 sched=0.960000 loss=0.147633 dt=00:15:31 eta=1d 17:23:56 |---------------------------------------------------------------------------->
train_opt_callback: iter= 97 sample=9/192 sched=0.970000 loss=0.112660 dt=00:15:27 eta=1d 16:59:02 |---------------------------------------------------------------------------->
train_opt_callback: iter= 98 sample=17/192 sched=0.980000 loss=0.071028 dt=00:15:34 eta=1d 17:00:34 |----------------------------------------------------------------------------->
train_opt_callback: iter= 99 sample=25/192 sched=0.990000 loss=0.148655 dt=00:15:27 eta=1d 16:25:43 |---------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-100.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 100 sample=33/192 sched=0.977975 loss=0.122147 dt=00:16:26 eta=1d 18:46:11 |---------------------------------------------------------------------------->
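
The sched column is the learning-rate multiplier: it climbs linearly by 0.01 per iteration through iteration 99, then switches to a slow decay starting at iteration 100 (0.977975, 0.977536, ...). A hedged sketch of that shape as generic linear warmup plus cosine decay; the decay horizon and floor here are illustrative assumptions, so this reproduces the shape of the column rather than its exact digits:

```python
import math

# Linear warmup followed by cosine decay -- the shape of the "sched"
# column. The 100-iteration warmup matches the log; decay_steps and
# floor are assumed for illustration.
def sched(it, warmup=100, decay_steps=1000, floor=0.1):
    if it < warmup:
        return it / warmup  # 0.00, 0.01, ..., 0.99
    t = min(1.0, (it - warmup) / decay_steps)
    return floor + (1.0 - floor) * 0.5 * (1.0 + math.cos(math.pi * t))

print(sched(50), sched(99), sched(100), sched(150))
```
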
train_opt_callback: iter= 101 sample=41/192 sched=0.977536 loss=0.075263 dt=00:15:59 eta=1d 17:17:59 |----------------------------------------------------------------------------->
train_opt_callback: iter= 102 sample=49/192 sched=0.977093 loss=0.098616 dt=00:15:43 eta=1d 16:21:33 |---------------------------------------------------------------------------->
train_opt_callback: iter= 103 sample=57/192 sched=0.976646 loss=0.071327 dt=00:15:40 eta=1d 15:59:30 |----------------------------------------------------------------------------->
train_opt_callback: iter= 104 sample=65/192 sched=0.976194 loss=0.079295 dt=00:15:36 eta=1d 15:33:37 |----------------------------------------------------------------------------->
train_opt_callback: iter= 105 sample=73/192 sched=0.975738 loss=0.073060 dt=00:15:35 eta=1d 15:13:33 |----------------------------------------------------------------------------->
train_opt_callback: iter= 106 sample=81/192 sched=0.975278 loss=0.126725 dt=00:15:42 eta=1d 15:15:25 |---------------------------------------------------------------------------->
train_opt_callback: iter= 107 sample=89/192 sched=0.974814 loss=0.099712 dt=00:15:35 eta=1d 14:42:32 |---------------------------------------------------------------------------->
train_opt_callback: iter= 108 sample=97/192 sched=0.974346 loss=0.069941 dt=00:15:30 eta=1d 14:14:28 |----------------------------------------------------------------------------->
train_opt_callback: iter= 109 sample=105/192 sched=0.973873 loss=0.098124 dt=00:15:34 eta=1d 14:09:41 |---------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-110.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 110 sample=113/192 sched=0.973396 loss=0.098537 dt=00:15:34 eta=1d 13:54:34 |---------------------------------------------------------------------------->
train_opt_callback: iter= 111 sample=121/192 sched=0.972915 loss=0.126159 dt=00:15:31 eta=1d 13:31:34 |---------------------------------------------------------------------------->
train_opt_callback: iter= 112 sample=129/192 sched=0.972430 loss=0.077748 dt=00:15:29 eta=1d 13:10:55 |----------------------------------------------------------------------------->
train_opt_callback: iter= 113 sample=137/192 sched=0.971941 loss=0.102149 dt=00:15:31 eta=1d 13:00:02 |---------------------------------------------------------------------------->
train_opt_callback: iter= 114 sample=145/192 sched=0.971447 loss=0.071937 dt=00:15:38 eta=1d 13:01:24 |----------------------------------------------------------------------------->
train_opt_callback: iter= 115 sample=153/192 sched=0.970950 loss=0.077037 dt=00:15:28 eta=1d 12:21:31 |----------------------------------------------------------------------------->
train_opt_callback: iter= 116 sample=161/192 sched=0.970448 loss=0.082243 dt=00:15:39 eta=1d 12:32:09 |----------------------------------------------------------------------------->
train_opt_callback: iter= 117 sample=169/192 sched=0.969942 loss=0.084385 dt=00:15:46 eta=1d 12:31:43 |----------------------------------------------------------------------------->
train_opt_callback: iter= 118 sample=177/192 sched=0.969432 loss=0.133562 dt=00:15:46 eta=1d 12:16:08 |---------------------------------------------------------------------------->
train_opt_callback: iter= 119 sample=185/192 sched=0.968918 loss=0.109072 dt=00:15:35 eta=1d 11:35:59 |---------------------------------------------------------------------------->
train_opt_callback: reshuffle samples. completed epochs: 5
save_checkpoint_lora_file: saving to checkpoint-120.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 120 sample=1/192 sched=0.968399 loss=0.113454 dt=00:15:34 eta=1d 11:17:49 |---------------------------------------------------------------------------->
train_opt_callback: iter= 121 sample=9/192 sched=0.967877 loss=0.077776 dt=00:15:33 eta=1d 11:00:28 |----------------------------------------------------------------------------->
train_opt_callback: iter= 122 sample=17/192 sched=0.967350 loss=0.061315 dt=00:15:30 eta=1d 10:39:02 |----------------------------------------------------------------------------->
train_opt_callback: iter= 123 sample=25/192 sched=0.966820 loss=0.072449 dt=00:15:36 eta=1d 10:35:18 |----------------------------------------------------------------------------->
train_opt_callback: iter= 124 sample=33/192 sched=0.966285 loss=0.060652 dt=00:15:34 eta=1d 10:14:59 |----------------------------------------------------------------------------->
train_opt_callback: iter= 125 sample=41/192 sched=0.965746 loss=0.072381 dt=00:15:32 eta=1d 09:54:52 |----------------------------------------------------------------------------->
train_opt_callback: iter= 126 sample=49/192 sched=0.965203 loss=0.058497 dt=00:15:35 eta=1d 09:45:58 |----------------------------------------------------------------------------->
train_opt_callback: iter= 127 sample=57/192 sched=0.964656 loss=0.066129 dt=00:15:40 eta=1d 09:42:49 |----------------------------------------------------------------------------->
train_opt_callback: iter= 128 sample=65/192 sched=0.964104 loss=0.048283 dt=00:15:45 eta=1d 09:36:16 |----------------------------------------------------------------------------->
train_opt_callback: iter= 129 sample=73/192 sched=0.963549 loss=0.070143 dt=00:15:37 eta=1d 09:04:45 |----------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-130.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 130 sample=81/192 sched=0.962990 loss=0.071810 dt=00:15:37 eta=1d 08:49:34 |----------------------------------------------------------------------------->
train_opt_callback: iter= 131 sample=89/192 sched=0.962426 loss=0.073223 dt=00:15:33 eta=1d 08:24:44 |----------------------------------------------------------------------------->
train_opt_callback: iter= 132 sample=97/192 sched=0.961859 loss=0.073529 dt=00:15:35 eta=1d 08:12:35 |----------------------------------------------------------------------------->
train_opt_callback: iter= 133 sample=105/192 sched=0.961287 loss=0.075146 dt=00:15:38 eta=1d 08:04:55 |----------------------------------------------------------------------------->
train_opt_callback: iter= 134 sample=113/192 sched=0.960711 loss=0.068796 dt=00:15:34 eta=1d 07:39:44 |----------------------------------------------------------------------------->
train_opt_callback: iter= 135 sample=121/192 sched=0.960131 loss=0.076424 dt=00:15:30 eta=1d 07:15:46 |----------------------------------------------------------------------------->
train_opt_callback: iter= 136 sample=129/192 sched=0.959548 loss=0.081089 dt=00:15:36 eta=1d 07:12:43 |----------------------------------------------------------------------------->
train_opt_callback: iter= 137 sample=137/192 sched=0.958960 loss=0.059550 dt=00:15:37 eta=1d 06:58:43 |----------------------------------------------------------------------------->
train_opt_callback: iter= 138 sample=145/192 sched=0.958368 loss=0.066975 dt=00:15:34 eta=1d 06:38:48 |----------------------------------------------------------------------------->
train_opt_callback: iter= 139 sample=153/192 sched=0.957772 loss=0.048607 dt=00:15:39 eta=1d 06:31:42 |----------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-140.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 140 sample=161/192 sched=0.957172 loss=0.068270 dt=00:15:34 eta=1d 06:05:57 |----------------------------------------------------------------------------->
train_opt_callback: iter= 141 sample=169/192 sched=0.956568 loss=0.066943 dt=00:15:34 eta=1d 05:50:37 |----------------------------------------------------------------------------->
train_opt_callback: iter= 142 sample=177/192 sched=0.955960 loss=0.083575 dt=00:15:40 eta=1d 05:46:20 |----------------------------------------------------------------------------->
train_opt_callback: iter= 143 sample=185/192 sched=0.955348 loss=0.090246 dt=00:15:37 eta=1d 05:25:46 |---------------------------------------------------------------------------->
train_opt_callback: reshuffle samples. completed epochs: 6
train_opt_callback: iter= 144 sample=1/192 sched=0.954732 loss=0.085501 dt=00:15:31 eta=1d 04:59:31 |----------------------------------------------------------------------------->
train_opt_callback: iter= 145 sample=9/192 sched=0.954112 loss=0.061576 dt=00:15:32 eta=1d 04:45:20 |----------------------------------------------------------------------------->
train_opt_callback: iter= 146 sample=17/192 sched=0.953488 loss=0.056798 dt=00:15:38 eta=1d 04:40:07 |----------------------------------------------------------------------------->
train_opt_callback: iter= 147 sample=25/192 sched=0.952861 loss=0.049612 dt=00:15:51 eta=1d 04:49:02 |----------------------------------------------------------------------------->
train_opt_callback: iter= 148 sample=33/192 sched=0.952229 loss=0.052921 dt=00:15:52 eta=1d 04:33:53 |----------------------------------------------------------------------------->
train_opt_callback: iter= 149 sample=41/192 sched=0.951593 loss=0.060486 dt=00:15:43 eta=1d 04:01:47 |----------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-150.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 150 sample=49/192 sched=0.950953 loss=0.048820 dt=00:15:39 eta=1d 03:39:18 |----------------------------------------------------------------------------->
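
The sched column is the learning-rate multiplier. It decays smoothly, sitting just above 0.95 at iteration 150 and drifting down toward 0.86 by the end of the run, which is consistent with the cosine decay that llama.cpp's training examples apply after warmup (controlled by options such as --cos-decay-steps); the actual warmup and decay parameters of this run cannot be recovered from the log. A generic cosine-decay sketch with placeholder parameters, shown only for the shape of the curve:

    # Illustrative cosine decay; warmup length, decay span, and floor are
    # assumptions, not values read from this log.
    import math

    def cosine_sched(step, warmup=100, decay_steps=1000, floor=0.1):
        if step < warmup:
            return step / warmup  # linear warmup
        t = min((step - warmup) / decay_steps, 1.0)
        return floor + (1.0 - floor) * 0.5 * (1.0 + math.cos(math.pi * t))

    # Monotonically decreasing after warmup, like the sched column above.
    print(cosine_sched(150), cosine_sched(250))
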
train_opt_callback: iter= 151 sample=57/192 sched=0.950309 loss=0.049720 dt=00:15:39 eta=1d 03:23:54 |----------------------------------------------------------------------------->
train_opt_callback: iter= 152 sample=65/192 sched=0.949661 loss=0.067127 dt=00:15:36 eta=1d 03:02:40 |----------------------------------------------------------------------------->
train_opt_callback: iter= 153 sample=73/192 sched=0.949010 loss=0.060491 dt=00:15:38 eta=1d 02:50:57 |----------------------------------------------------------------------------->
train_opt_callback: iter= 154 sample=81/192 sched=0.948354 loss=0.069643 dt=00:15:39 eta=1d 02:36:36 |----------------------------------------------------------------------------->
train_opt_callback: iter= 155 sample=89/192 sched=0.947695 loss=0.063830 dt=00:15:37 eta=1d 02:17:37 |----------------------------------------------------------------------------->
train_opt_callback: iter= 156 sample=97/192 sched=0.947031 loss=0.065827 dt=00:15:41 eta=1d 02:08:59 |----------------------------------------------------------------------------->
train_opt_callback: iter= 157 sample=105/192 sched=0.946364 loss=0.069183 dt=00:15:44 eta=1d 01:57:36 |----------------------------------------------------------------------------->
train_opt_callback: iter= 158 sample=113/192 sched=0.945692 loss=0.102524 dt=00:15:50 eta=1d 01:51:46 |---------------------------------------------------------------------------->
train_opt_callback: iter= 159 sample=121/192 sched=0.945017 loss=0.057947 dt=00:15:47 eta=1d 01:32:19 |----------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-160.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 160 sample=129/192 sched=0.944338 loss=0.050483 dt=00:15:44 eta=1d 01:11:31 |----------------------------------------------------------------------------->
train_opt_callback: iter= 161 sample=137/192 sched=0.943655 loss=0.065197 dt=00:15:40 eta=1d 00:48:28 |----------------------------------------------------------------------------->
train_opt_callback: iter= 162 sample=145/192 sched=0.942968 loss=0.060740 dt=00:15:37 eta=1d 00:28:16 |----------------------------------------------------------------------------->
train_opt_callback: iter= 163 sample=153/192 sched=0.942277 loss=0.052192 dt=00:15:34 eta=1d 00:09:01 |----------------------------------------------------------------------------->
train_opt_callback: iter= 164 sample=161/192 sched=0.941583 loss=0.053813 dt=00:15:38 eta=23:59:39 |----------------------------------------------------------------------------->
train_opt_callback: iter= 165 sample=169/192 sched=0.940884 loss=0.059665 dt=00:15:39 eta=23:45:11 |----------------------------------------------------------------------------->
train_opt_callback: iter= 166 sample=177/192 sched=0.940182 loss=0.058576 dt=00:15:32 eta=23:18:44 |----------------------------------------------------------------------------->
train_opt_callback: iter= 167 sample=185/192 sched=0.939476 loss=0.053657 dt=00:15:29 eta=22:59:26 |----------------------------------------------------------------------------->
train_opt_callback: reshuffle samples. completed epochs: 7
train_opt_callback: iter= 168 sample=1/192 sched=0.938765 loss=0.050923 dt=00:15:33 eta=22:48:52 |----------------------------------------------------------------------------->
train_opt_callback: iter= 169 sample=9/192 sched=0.938052 loss=0.044524 dt=00:15:37 eta=22:39:28 |----------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-170.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 170 sample=17/192 sched=0.937334 loss=0.046028 dt=00:15:24 eta=22:05:24 |----------------------------------------------------------------------------->
train_opt_callback: iter= 171 sample=25/192 sched=0.936612 loss=0.049158 dt=00:15:23 eta=21:48:21 |----------------------------------------------------------------------------->
train_opt_callback: iter= 172 sample=33/192 sched=0.935887 loss=0.053132 dt=00:15:26 eta=21:36:52 |----------------------------------------------------------------------------->
train_opt_callback: iter= 173 sample=41/192 sched=0.935158 loss=0.046411 dt=00:15:28 eta=21:24:48 |----------------------------------------------------------------------------->
train_opt_callback: iter= 174 sample=49/192 sched=0.934425 loss=0.048739 dt=00:15:25 eta=21:04:53 |----------------------------------------------------------------------------->
train_opt_callback: iter= 175 sample=57/192 sched=0.933688 loss=0.056599 dt=00:15:24 eta=20:48:33 |----------------------------------------------------------------------------->
train_opt_callback: iter= 176 sample=65/192 sched=0.932948 loss=0.056154 dt=00:15:30 eta=20:41:11 |----------------------------------------------------------------------------->
train_opt_callback: iter= 177 sample=73/192 sched=0.932203 loss=0.053577 dt=00:15:29 eta=20:23:34 |----------------------------------------------------------------------------->
train_opt_callback: iter= 178 sample=81/192 sched=0.931455 loss=0.056811 dt=00:15:22 eta=19:58:57 |----------------------------------------------------------------------------->
train_opt_callback: iter= 179 sample=89/192 sched=0.930703 loss=0.047785 dt=00:15:28 eta=19:51:41 |----------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-180.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 180 sample=97/192 sched=0.929948 loss=0.065201 dt=00:15:31 eta=19:40:29 |----------------------------------------------------------------------------->
train_opt_callback: iter= 181 sample=105/192 sched=0.929188 loss=0.064345 dt=00:15:29 eta=19:22:17 |----------------------------------------------------------------------------->
train_opt_callback: iter= 182 sample=113/192 sched=0.928425 loss=0.047728 dt=00:15:28 eta=19:05:23 |----------------------------------------------------------------------------->
train_opt_callback: iter= 183 sample=121/192 sched=0.927658 loss=0.058445 dt=00:15:25 eta=18:45:29 |----------------------------------------------------------------------------->
train_opt_callback: iter= 184 sample=129/192 sched=0.926888 loss=0.055892 dt=00:15:26 eta=18:31:55 |----------------------------------------------------------------------------->
train_opt_callback: iter= 185 sample=137/192 sched=0.926113 loss=0.053026 dt=00:15:27 eta=18:17:25 |----------------------------------------------------------------------------->
train_opt_callback: iter= 186 sample=145/192 sched=0.925335 loss=0.046620 dt=00:15:29 eta=18:04:11 |----------------------------------------------------------------------------->
train_opt_callback: iter= 187 sample=153/192 sched=0.924554 loss=0.061322 dt=00:15:30 eta=17:50:35 |----------------------------------------------------------------------------->
train_opt_callback: iter= 188 sample=161/192 sched=0.923768 loss=0.047851 dt=00:15:37 eta=17:42:26 |----------------------------------------------------------------------------->
train_opt_callback: iter= 189 sample=169/192 sched=0.922979 loss=0.048963 dt=00:15:35 eta=17:25:03 |----------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-190.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 190 sample=177/192 sched=0.922186 loss=0.047204 dt=00:15:36 eta=17:09:36 |----------------------------------------------------------------------------->
train_opt_callback: iter= 191 sample=185/192 sched=0.921390 loss=0.055146 dt=00:15:33 eta=16:50:51 |----------------------------------------------------------------------------->
train_opt_callback: reshuffle samples. completed epochs: 8
train_opt_callback: iter= 192 sample=1/192 sched=0.920590 loss=0.065108 dt=00:15:31 eta=16:33:42 |----------------------------------------------------------------------------->
train_opt_callback: iter= 193 sample=9/192 sched=0.919786 loss=0.045205 dt=00:15:29 eta=16:16:11 |----------------------------------------------------------------------------->
train_opt_callback: iter= 194 sample=17/192 sched=0.918978 loss=0.049374 dt=00:15:33 eta=16:05:03 |----------------------------------------------------------------------------->
train_opt_callback: iter= 195 sample=25/192 sched=0.918167 loss=0.047677 dt=00:15:41 eta=15:56:52 |----------------------------------------------------------------------------->
train_opt_callback: iter= 196 sample=33/192 sched=0.917353 loss=0.044926 dt=00:15:31 eta=15:31:04 |----------------------------------------------------------------------------->
train_opt_callback: iter= 197 sample=41/192 sched=0.916534 loss=0.046525 dt=00:15:10 eta=14:55:20 |----------------------------------------------------------------------------->
train_opt_callback: iter= 198 sample=49/192 sched=0.915712 loss=0.045030 dt=00:15:05 eta=14:35:23 |----------------------------------------------------------------------------->
train_opt_callback: iter= 199 sample=57/192 sched=0.914887 loss=0.045530 dt=00:15:00 eta=14:15:47 |----------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-200.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 200 sample=65/192 sched=0.914058 loss=0.046049 dt=00:15:00 eta=14:00:14 |----------------------------------------------------------------------------->
train_opt_callback: iter= 201 sample=73/192 sched=0.913225 loss=0.053930 dt=00:14:58 eta=13:43:57 |----------------------------------------------------------------------------->
train_opt_callback: iter= 202 sample=81/192 sched=0.912389 loss=0.051443 dt=00:14:58 eta=13:28:28 |----------------------------------------------------------------------------->
train_opt_callback: iter= 203 sample=89/192 sched=0.911549 loss=0.046394 dt=00:14:58 eta=13:13:19 |----------------------------------------------------------------------------->
train_opt_callback: iter= 204 sample=97/192 sched=0.910705 loss=0.046575 dt=00:15:00 eta=13:00:36 |----------------------------------------------------------------------------->
train_opt_callback: iter= 205 sample=105/192 sched=0.909858 loss=0.049402 dt=00:15:03 eta=12:48:18 |----------------------------------------------------------------------------->
train_opt_callback: iter= 206 sample=113/192 sched=0.909007 loss=0.043352 dt=00:15:01 eta=12:31:19 |----------------------------------------------------------------------------->
train_opt_callback: iter= 207 sample=121/192 sched=0.908153 loss=0.053521 dt=00:14:58 eta=12:14:09 |----------------------------------------------------------------------------->
train_opt_callback: iter= 208 sample=129/192 sched=0.907296 loss=0.049153 dt=00:15:01 eta=12:00:50 |----------------------------------------------------------------------------->
train_opt_callback: iter= 209 sample=137/192 sched=0.906434 loss=0.050224 dt=00:15:01 eta=11:46:09 |----------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-210.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 210 sample=145/192 sched=0.905570 loss=0.051670 dt=00:15:05 eta=11:33:58 |----------------------------------------------------------------------------->
train_opt_callback: iter= 211 sample=153/192 sched=0.904702 loss=0.051875 dt=00:15:06 eta=11:19:33 |----------------------------------------------------------------------------->
train_opt_callback: iter= 212 sample=161/192 sched=0.903830 loss=0.057988 dt=00:15:01 eta=11:00:52 |----------------------------------------------------------------------------->
train_opt_callback: iter= 213 sample=169/192 sched=0.902955 loss=0.048556 dt=00:15:04 eta=10:48:32 |----------------------------------------------------------------------------->
train_opt_callback: iter= 214 sample=177/192 sched=0.902076 loss=0.052551 dt=00:14:59 eta=10:29:59 |----------------------------------------------------------------------------->
train_opt_callback: iter= 215 sample=185/192 sched=0.901194 loss=0.057498 dt=00:15:05 eta=10:18:53 |----------------------------------------------------------------------------->
train_opt_callback: reshuffle samples. completed epochs: 9
train_opt_callback: iter= 216 sample=1/192 sched=0.900308 loss=0.051821 dt=00:15:01 eta=10:01:08 |----------------------------------------------------------------------------->
train_opt_callback: iter= 217 sample=9/192 sched=0.899419 loss=0.042146 dt=00:15:01 eta=09:45:51 |----------------------------------------------------------------------------->
train_opt_callback: iter= 218 sample=17/192 sched=0.898526 loss=0.048716 dt=00:15:01 eta=09:30:57 |----------------------------------------------------------------------------->
train_opt_callback: iter= 219 sample=25/192 sched=0.897630 loss=0.079733 dt=00:15:01 eta=09:16:01 |----------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-220.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 220 sample=33/192 sched=0.896731 loss=0.045738 dt=00:14:57 eta=08:58:40 |----------------------------------------------------------------------------->
train_opt_callback: iter= 221 sample=41/192 sched=0.895828 loss=0.049312 dt=00:15:01 eta=08:46:03 |----------------------------------------------------------------------------->
train_opt_callback: iter= 222 sample=49/192 sched=0.894922 loss=0.044266 dt=00:15:04 eta=08:32:43 |----------------------------------------------------------------------------->
train_opt_callback: iter= 223 sample=57/192 sched=0.894012 loss=0.043147 dt=00:15:01 eta=08:15:58 |----------------------------------------------------------------------------->
train_opt_callback: iter= 224 sample=65/192 sched=0.893099 loss=0.055512 dt=00:15:28 eta=08:15:06 |----------------------------------------------------------------------------->
train_opt_callback: iter= 225 sample=73/192 sched=0.892183 loss=0.049956 dt=00:15:25 eta=07:58:08 |----------------------------------------------------------------------------->
train_opt_callback: iter= 226 sample=81/192 sched=0.891263 loss=0.050909 dt=00:15:33 eta=07:46:37 |----------------------------------------------------------------------------->
train_opt_callback: iter= 227 sample=89/192 sched=0.890340 loss=0.050016 dt=00:15:38 eta=07:33:34 |----------------------------------------------------------------------------->
train_opt_callback: iter= 228 sample=97/192 sched=0.889413 loss=0.050583 dt=00:15:41 eta=07:19:09 |----------------------------------------------------------------------------->
train_opt_callback: iter= 229 sample=105/192 sched=0.888483 loss=0.043942 dt=00:15:42 eta=07:03:57 |----------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-230.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 230 sample=113/192 sched=0.887550 loss=0.048901 dt=00:15:41 eta=06:48:05 |----------------------------------------------------------------------------->
train_opt_callback: iter= 231 sample=121/192 sched=0.886613 loss=0.048936 dt=00:15:39 eta=06:31:31 |----------------------------------------------------------------------------->
train_opt_callback: iter= 232 sample=129/192 sched=0.885674 loss=0.047928 dt=00:15:42 eta=06:16:52 |----------------------------------------------------------------------------->
train_opt_callback: iter= 233 sample=137/192 sched=0.884730 loss=0.046294 dt=00:15:32 eta=05:57:30 |----------------------------------------------------------------------------->
train_opt_callback: iter= 234 sample=145/192 sched=0.883784 loss=0.047876 dt=00:15:36 eta=05:43:17 |----------------------------------------------------------------------------->
train_opt_callback: iter= 235 sample=153/192 sched=0.882834 loss=0.048621 dt=00:15:35 eta=05:27:27 |----------------------------------------------------------------------------->
train_opt_callback: iter= 236 sample=161/192 sched=0.881881 loss=0.046712 dt=00:15:30 eta=05:10:09 |----------------------------------------------------------------------------->
train_opt_callback: iter= 237 sample=169/192 sched=0.880924 loss=0.048654 dt=00:15:26 eta=04:53:31 |----------------------------------------------------------------------------->
train_opt_callback: iter= 238 sample=177/192 sched=0.879965 loss=0.044374 dt=00:15:39 eta=04:41:55 |----------------------------------------------------------------------------->
train_opt_callback: iter= 239 sample=185/192 sched=0.879002 loss=0.045983 dt=00:15:30 eta=04:23:39 |----------------------------------------------------------------------------->
train_opt_callback: reshuffle samples. completed epochs: 10
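
By the tenth reshuffle the loss has flattened: most of epochs 9 and 10 sit in the 0.043-0.057 range, down from roughly 0.07 at the start of this excerpt, with only occasional transient spikes (0.1025 at iteration 158, 0.0797 at iteration 219). For inspecting that trajectory, a small scraper that pulls the iter/loss series out of a log like this one (the regex assumes exactly the line format shown above):

    # Minimal scraper for train_opt_callback lines in a finetune log.
    import re

    LINE = re.compile(r"train_opt_callback: iter=\s*(\d+).*?loss=([0-9.]+)")

    def loss_series(path):
        with open(path) as f:
            for line in f:
                m = LINE.search(line)
                if m:
                    yield int(m.group(1)), float(m.group(2))

    # Usage: for it, loss in loss_series("finetune.log"): print(it, loss)
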
save_checkpoint_lora_file: saving to checkpoint-240.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 240 sample=1/192 sched=0.878036 loss=0.048949 dt=00:15:33 eta=04:08:52 |----------------------------------------------------------------------------->
train_opt_callback: iter= 241 sample=9/192 sched=0.877066 loss=0.044107 dt=00:15:26 eta=03:51:38 |----------------------------------------------------------------------------->
train_opt_callback: iter= 242 sample=17/192 sched=0.876094 loss=0.042922 dt=00:15:33 eta=03:37:55 |----------------------------------------------------------------------------->
train_opt_callback: iter= 243 sample=25/192 sched=0.875118 loss=0.048055 dt=00:15:30 eta=03:21:30 |----------------------------------------------------------------------------->
train_opt_callback: iter= 244 sample=33/192 sched=0.874139 loss=0.052699 dt=00:15:31 eta=03:06:22 |----------------------------------------------------------------------------->
train_opt_callback: iter= 245 sample=41/192 sched=0.873157 loss=0.056998 dt=00:15:33 eta=02:51:13 |----------------------------------------------------------------------------->
train_opt_callback: iter= 246 sample=49/192 sched=0.872171 loss=0.048870 dt=00:15:31 eta=02:35:19 |----------------------------------------------------------------------------->
train_opt_callback: iter= 247 sample=57/192 sched=0.871183 loss=0.043034 dt=00:15:32 eta=02:19:51 |----------------------------------------------------------------------------->
train_opt_callback: iter= 248 sample=65/192 sched=0.870191 loss=0.051389 dt=00:15:33 eta=02:04:27 |----------------------------------------------------------------------------->
train_opt_callback: iter= 249 sample=73/192 sched=0.869196 loss=0.046855 dt=00:15:38 eta=01:49:30 |----------------------------------------------------------------------------->
save_checkpoint_lora_file: saving to checkpoint-250.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
train_opt_callback: iter= 250 sample=81/192 sched=0.868198 loss=0.045374 dt=00:15:34 eta=01:33:28 |----------------------------------------------------------------------------->
train_opt_callback: iter= 251 sample=89/192 sched=0.867197 loss=0.046577 dt=00:15:37 eta=01:18:09 |----------------------------------------------------------------------------->
train_opt_callback: iter= 252 sample=97/192 sched=0.866192 loss=0.044559 dt=00:15:29 eta=01:01:59 |----------------------------------------------------------------------------->
train_opt_callback: iter= 253 sample=105/192 sched=0.865185 loss=0.042789 dt=00:15:34 eta=00:46:42 |----------------------------------------------------------------------------->
train_opt_callback: iter= 254 sample=113/192 sched=0.864174 loss=0.048775 dt=00:15:35 eta=00:31:10 |----------------------------------------------------------------------------->
train_opt_callback: iter= 255 sample=121/192 sched=0.863161 loss=0.052028 dt=00:15:28 eta=00:15:28 |----------------------------------------------------------------------------->
train_opt_callback: iter= 256 sample=129/192 sched=0.862144 loss=0.048282 dt=00:15:34 eta=0.0ms |----------------------------------------------------------------------------->
main: total training time: 3d 05:13:40
save_checkpoint_lora_file: saving to checkpoint-256.gguf
save_checkpoint_lora_file: saving to checkpoint-LATEST.gguf
save_as_llama_lora: saving to lora.bin
save_as_llama_lora: saving to lora.bin
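
The run closes out after 256 iterations (about 10.7 epochs at 24 iterations per epoch) and 3d 05:13:40 of wall-clock time, writing one last checkpoint and adapter. checkpoint-LATEST.gguf is the file to pass back to finetune (via its --checkpoint-in option) to resume training, and lora.bin is the adapter to apply at inference, e.g. with the main binary's --lora flag as in "main -m llama-2-7b-chat-q5_k_m.gguf --lora lora.bin"; when the base model is quantized, as here, the examples also offer a --lora-base option pointing at a higher-precision base. Flag names are per the llama.cpp examples of this vintage, so verify them against your build's --help.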