nielsbantilan committed
Commit fccfca7
1 parent: a371d23

Upload folder using huggingface_hub
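The commit message indicates this snapshot was pushed with the huggingface_hub client. As a minimal sketch of how such a commit is typically produced (the folder path and repo id below are placeholders, not taken from this commit):

from huggingface_hub import HfApi

api = HfApi()
# Upload every file under a local folder as a single commit;
# files matched by the .gitattributes LFS rules are stored via Git LFS.
api.upload_folder(
    folder_path="./model-dir",   # hypothetical local path
    repo_id="user/model",        # hypothetical repo id
    commit_message="Upload folder using huggingface_hub",
)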
.gitattributes CHANGED
@@ -49,3 +49,11 @@ flyteraquk0cj/local_flytekit/776069c6405df68fd2755ce257e952ba/00000 filter=lfs diff=lfs merge=lfs -text
 flyterpqo54fv/local_flytekit/fd49b76dd3b1ffbc62b1efcef00fd674/00000 filter=lfs diff=lfs merge=lfs -text
 flyteyao8jgm7/local_flytekit/67696dba0a579df645b5b2f987a9e4b9/00000 filter=lfs diff=lfs merge=lfs -text
 flyteyfv3rs04/local_flytekit/65aa521dee1e8da3c795348937da23ed/00000 filter=lfs diff=lfs merge=lfs -text
+flyte19klvulo/local_flytekit/cf21fa2a2d01e72e3915dc8f3c6e1b32/00000 filter=lfs diff=lfs merge=lfs -text
+flyte7htre9gj/local_flytekit/50076b0566bf12946e1eb1d4be15e838/00000 filter=lfs diff=lfs merge=lfs -text
+flytegwz7gead/local_flytekit/b377c904d20fc45d4ae0fe507ae85019/00000 filter=lfs diff=lfs merge=lfs -text
+flyteh3d_ydsb/local_flytekit/3656a2bdc0dbe9fdb5a43b6df6e8db08/00000 filter=lfs diff=lfs merge=lfs -text
+flytehp3ce2w5/local_flytekit/fbce013b721d69eb9b098cdac3d5c001/00000 filter=lfs diff=lfs merge=lfs -text
+flytemk5qri4f/local_flytekit/9836b038dd943755068a02464454db5a/00000 filter=lfs diff=lfs merge=lfs -text
+flytepxngzev1/local_flytekit/41e697b21bf163ce6a04ce166aca0549/00000 filter=lfs diff=lfs merge=lfs -text
+flytex31ghl6k/local_flytekit/92c9a08444022269ca292e8c396bdd23/00000 filter=lfs diff=lfs merge=lfs -text
flyte19klvulo/local_flytekit/cf21fa2a2d01e72e3915dc8f3c6e1b32/00000 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:067772915d011157436dc1ea88cb38756555e25be2d07616d1ee97dfac6e6535
+size 133886409
flyte7htre9gj/local_flytekit/50076b0566bf12946e1eb1d4be15e838/00000 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:067772915d011157436dc1ea88cb38756555e25be2d07616d1ee97dfac6e6535
+size 133886409
flytegwz7gead/local_flytekit/b377c904d20fc45d4ae0fe507ae85019/00000 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:067772915d011157436dc1ea88cb38756555e25be2d07616d1ee97dfac6e6535
+size 133886409
flyteh3d_ydsb/local_flytekit/3656a2bdc0dbe9fdb5a43b6df6e8db08/00000 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:067772915d011157436dc1ea88cb38756555e25be2d07616d1ee97dfac6e6535
+size 133886409
flytehp3ce2w5/local_flytekit/fbce013b721d69eb9b098cdac3d5c001/00000 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:067772915d011157436dc1ea88cb38756555e25be2d07616d1ee97dfac6e6535
+size 133886409
flytemk5qri4f/local_flytekit/9836b038dd943755068a02464454db5a/00000 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:067772915d011157436dc1ea88cb38756555e25be2d07616d1ee97dfac6e6535
+size 133886409
flytepxngzev1/local_flytekit/41e697b21bf163ce6a04ce166aca0549/00000 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:067772915d011157436dc1ea88cb38756555e25be2d07616d1ee97dfac6e6535
+size 133886409
flytex31ghl6k/local_flytekit/92c9a08444022269ca292e8c396bdd23/00000 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:067772915d011157436dc1ea88cb38756555e25be2d07616d1ee97dfac6e6535
+size 133886409
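Each file added above is a Git LFS pointer rather than the payload itself: a three-line stub recording the spec version, the sha256 oid of the real blob, and its byte size. All eight stubs here share one oid, so they all point at the same 133886409-byte blob. A minimal sketch for parsing such a pointer, assuming the standard key-value layout shown above:

from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    # Each pointer line is "<key> <value>"; size is the only numeric field.
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    fields["size"] = int(fields["size"])
    return fields

# parse_lfs_pointer("flyte19klvulo/local_flytekit/cf21fa2a2d01e72e3915dc8f3c6e1b32/00000")
# -> {"version": "https://git-lfs.github.com/spec/v1",
#     "oid": "sha256:067772915d011157436dc1ea88cb38756555e25be2d07616d1ee97dfac6e6535",
#     "size": 133886409}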
pytorch_model-00001-of-00003.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0edb9f1a102ad6501ee570b17824779e345dad58fa4c0fee69b413296923668b
+oid sha256:48c13c72d53fa11536598e333f28f673f7e03707ec8a1cc409d323c7c766973b
 size 9877982386
pytorch_model-00002-of-00003.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e0210300fa233e838ff00964ab3e48ed9d867c21001e52949992e0ec55ed3cff
+oid sha256:0b5853196ea334e2c41c5c5ce5b6113886062a88420ef7c26fe49f1a00db3a66
 size 9894793766
pytorch_model-00003-of-00003.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b9608bedcb9fd77131a6d81629f8caddaa607b616ac9440b5f1b515bb1a705db
+oid sha256:5adfd4a60606ecf168e9ca0effc61162f1c4c7eade22e01a3fcf8704eacbf578
 size 7180985861
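For all three checkpoint shards only the oid line changes while the size stays fixed, which suggests the tensors kept their layout while their values changed between training runs. A small sketch for checking a locally downloaded shard against the sha256 recorded in its pointer (the expected digest below is the new oid of shard 1 from the diff above):

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so multi-GB shards do not need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
    return h.hexdigest()

assert sha256_of("pytorch_model-00001-of-00003.bin") == (
    "48c13c72d53fa11536598e333f28f673f7e03707ec8a1cc409d323c7c766973b"
)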
tmp1irzl5w5/_remote_module_non_scriptable.py ADDED
@@ -0,0 +1,81 @@
+from typing import *
+
+import torch
+import torch.distributed.rpc as rpc
+from torch import Tensor
+from torch._jit_internal import Future
+from torch.distributed.rpc import RRef
+from typing import Tuple  # pyre-ignore: unused import
+
+
+module_interface_cls = None
+
+
+def forward_async(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    return rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+
+
+def forward(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    ret_fut = rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+    return ret_fut.wait()
+
+
+_generated_methods = [
+    forward_async,
+    forward,
+]
+
+
+
+
+def _remote_forward(
+    module_rref: RRef[module_interface_cls], device: str, is_device_map_set: bool, *args, **kwargs):
+    module = module_rref.local_value()
+    device = torch.device(device)
+
+    if device.type != "cuda":
+        return module.forward(*args, **kwargs)
+
+    # If the module is on a cuda device,
+    # move any CPU tensor in args or kwargs to the same cuda device.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.to(device) if isinstance(i, Tensor) else i for i in *args)``.
+    args = (*args,)
+    out_args: Tuple[()] = ()
+    for arg in args:
+        arg = (arg.to(device),) if isinstance(arg, Tensor) else (arg,)
+        out_args = out_args + arg
+
+    kwargs = {**kwargs}
+    for k, v in kwargs.items():
+        if isinstance(v, Tensor):
+            kwargs[k] = kwargs[k].to(device)
+
+    if is_device_map_set:
+        return module.forward(*out_args, **kwargs)
+
+    # If the device map is empty, then only CPU tensors are allowed to send over wire,
+    # so have to move any GPU tensor to CPU in the output.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.cpu() if isinstance(i, Tensor) else i for i in module.forward(*out_args, **kwargs))``.
+    ret: Tuple[()] = ()
+    for i in module.forward(*out_args, **kwargs):
+        i = (i.cpu(),) if isinstance(i, Tensor) else (i,)
+        ret = ret + i
+    return ret
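This file appears to be PyTorch's template-generated RPC glue for RemoteModule; the same module is instantiated once per temporary directory below. Its one notable idiom: because TorchScript does not support generator expressions, tensors are moved across devices by rebuilding tuples through concatenation. A standalone sketch of that idiom, outside the RPC machinery:

import torch
from torch import Tensor

def move_args(device: torch.device, args: tuple) -> tuple:
    # TorchScript-friendly stand-in for
    # tuple(a.to(device) if isinstance(a, Tensor) else a for a in args)
    out: tuple = ()
    for a in args:
        out = out + ((a.to(device),) if isinstance(a, Tensor) else (a,))
    return out

print(move_args(torch.device("cpu"), (torch.ones(2), "label", 3)))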
tmpcwd32mn0/_remote_module_non_scriptable.py ADDED
@@ -0,0 +1,81 @@
+from typing import *
+
+import torch
+import torch.distributed.rpc as rpc
+from torch import Tensor
+from torch._jit_internal import Future
+from torch.distributed.rpc import RRef
+from typing import Tuple  # pyre-ignore: unused import
+
+
+module_interface_cls = None
+
+
+def forward_async(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    return rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+
+
+def forward(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    ret_fut = rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+    return ret_fut.wait()
+
+
+_generated_methods = [
+    forward_async,
+    forward,
+]
+
+
+
+
+def _remote_forward(
+    module_rref: RRef[module_interface_cls], device: str, is_device_map_set: bool, *args, **kwargs):
+    module = module_rref.local_value()
+    device = torch.device(device)
+
+    if device.type != "cuda":
+        return module.forward(*args, **kwargs)
+
+    # If the module is on a cuda device,
+    # move any CPU tensor in args or kwargs to the same cuda device.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.to(device) if isinstance(i, Tensor) else i for i in *args)``.
+    args = (*args,)
+    out_args: Tuple[()] = ()
+    for arg in args:
+        arg = (arg.to(device),) if isinstance(arg, Tensor) else (arg,)
+        out_args = out_args + arg
+
+    kwargs = {**kwargs}
+    for k, v in kwargs.items():
+        if isinstance(v, Tensor):
+            kwargs[k] = kwargs[k].to(device)
+
+    if is_device_map_set:
+        return module.forward(*out_args, **kwargs)
+
+    # If the device map is empty, then only CPU tensors are allowed to send over wire,
+    # so have to move any GPU tensor to CPU in the output.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.cpu() if isinstance(i, Tensor) else i for i in module.forward(*out_args, **kwargs))``.
+    ret: Tuple[()] = ()
+    for i in module.forward(*out_args, **kwargs):
+        i = (i.cpu(),) if isinstance(i, Tensor) else (i,)
+        ret = ret + i
+    return ret
tmpefjtsdm5/_remote_module_non_scriptable.py ADDED
@@ -0,0 +1,81 @@
+from typing import *
+
+import torch
+import torch.distributed.rpc as rpc
+from torch import Tensor
+from torch._jit_internal import Future
+from torch.distributed.rpc import RRef
+from typing import Tuple  # pyre-ignore: unused import
+
+
+module_interface_cls = None
+
+
+def forward_async(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    return rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+
+
+def forward(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    ret_fut = rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+    return ret_fut.wait()
+
+
+_generated_methods = [
+    forward_async,
+    forward,
+]
+
+
+
+
+def _remote_forward(
+    module_rref: RRef[module_interface_cls], device: str, is_device_map_set: bool, *args, **kwargs):
+    module = module_rref.local_value()
+    device = torch.device(device)
+
+    if device.type != "cuda":
+        return module.forward(*args, **kwargs)
+
+    # If the module is on a cuda device,
+    # move any CPU tensor in args or kwargs to the same cuda device.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.to(device) if isinstance(i, Tensor) else i for i in *args)``.
+    args = (*args,)
+    out_args: Tuple[()] = ()
+    for arg in args:
+        arg = (arg.to(device),) if isinstance(arg, Tensor) else (arg,)
+        out_args = out_args + arg
+
+    kwargs = {**kwargs}
+    for k, v in kwargs.items():
+        if isinstance(v, Tensor):
+            kwargs[k] = kwargs[k].to(device)
+
+    if is_device_map_set:
+        return module.forward(*out_args, **kwargs)
+
+    # If the device map is empty, then only CPU tensors are allowed to send over wire,
+    # so have to move any GPU tensor to CPU in the output.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.cpu() if isinstance(i, Tensor) else i for i in module.forward(*out_args, **kwargs))``.
+    ret: Tuple[()] = ()
+    for i in module.forward(*out_args, **kwargs):
+        i = (i.cpu(),) if isinstance(i, Tensor) else (i,)
+        ret = ret + i
+    return ret
tmpftz082d8/__pycache__/_remote_module_non_scriptable.cpython-310.pyc ADDED
Binary file (1.5 kB).
tmpftz082d8/_remote_module_non_scriptable.py ADDED
@@ -0,0 +1,81 @@
+from typing import *
+
+import torch
+import torch.distributed.rpc as rpc
+from torch import Tensor
+from torch._jit_internal import Future
+from torch.distributed.rpc import RRef
+from typing import Tuple  # pyre-ignore: unused import
+
+
+module_interface_cls = None
+
+
+def forward_async(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    return rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+
+
+def forward(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    ret_fut = rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+    return ret_fut.wait()
+
+
+_generated_methods = [
+    forward_async,
+    forward,
+]
+
+
+
+
+def _remote_forward(
+    module_rref: RRef[module_interface_cls], device: str, is_device_map_set: bool, *args, **kwargs):
+    module = module_rref.local_value()
+    device = torch.device(device)
+
+    if device.type != "cuda":
+        return module.forward(*args, **kwargs)
+
+    # If the module is on a cuda device,
+    # move any CPU tensor in args or kwargs to the same cuda device.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.to(device) if isinstance(i, Tensor) else i for i in *args)``.
+    args = (*args,)
+    out_args: Tuple[()] = ()
+    for arg in args:
+        arg = (arg.to(device),) if isinstance(arg, Tensor) else (arg,)
+        out_args = out_args + arg
+
+    kwargs = {**kwargs}
+    for k, v in kwargs.items():
+        if isinstance(v, Tensor):
+            kwargs[k] = kwargs[k].to(device)
+
+    if is_device_map_set:
+        return module.forward(*out_args, **kwargs)
+
+    # If the device map is empty, then only CPU tensors are allowed to send over wire,
+    # so have to move any GPU tensor to CPU in the output.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.cpu() if isinstance(i, Tensor) else i for i in module.forward(*out_args, **kwargs))``.
+    ret: Tuple[()] = ()
+    for i in module.forward(*out_args, **kwargs):
+        i = (i.cpu(),) if isinstance(i, Tensor) else (i,)
+        ret = ret + i
+    return ret
tmpj0u3x6ea/_remote_module_non_scriptable.py ADDED
@@ -0,0 +1,81 @@
+from typing import *
+
+import torch
+import torch.distributed.rpc as rpc
+from torch import Tensor
+from torch._jit_internal import Future
+from torch.distributed.rpc import RRef
+from typing import Tuple  # pyre-ignore: unused import
+
+
+module_interface_cls = None
+
+
+def forward_async(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    return rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+
+
+def forward(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    ret_fut = rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+    return ret_fut.wait()
+
+
+_generated_methods = [
+    forward_async,
+    forward,
+]
+
+
+
+
+def _remote_forward(
+    module_rref: RRef[module_interface_cls], device: str, is_device_map_set: bool, *args, **kwargs):
+    module = module_rref.local_value()
+    device = torch.device(device)
+
+    if device.type != "cuda":
+        return module.forward(*args, **kwargs)
+
+    # If the module is on a cuda device,
+    # move any CPU tensor in args or kwargs to the same cuda device.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.to(device) if isinstance(i, Tensor) else i for i in *args)``.
+    args = (*args,)
+    out_args: Tuple[()] = ()
+    for arg in args:
+        arg = (arg.to(device),) if isinstance(arg, Tensor) else (arg,)
+        out_args = out_args + arg
+
+    kwargs = {**kwargs}
+    for k, v in kwargs.items():
+        if isinstance(v, Tensor):
+            kwargs[k] = kwargs[k].to(device)
+
+    if is_device_map_set:
+        return module.forward(*out_args, **kwargs)
+
+    # If the device map is empty, then only CPU tensors are allowed to send over wire,
+    # so have to move any GPU tensor to CPU in the output.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.cpu() if isinstance(i, Tensor) else i for i in module.forward(*out_args, **kwargs))``.
+    ret: Tuple[()] = ()
+    for i in module.forward(*out_args, **kwargs):
+        i = (i.cpu(),) if isinstance(i, Tensor) else (i,)
+        ret = ret + i
+    return ret
tmpnmkfim5e/_remote_module_non_scriptable.py ADDED
@@ -0,0 +1,81 @@
+from typing import *
+
+import torch
+import torch.distributed.rpc as rpc
+from torch import Tensor
+from torch._jit_internal import Future
+from torch.distributed.rpc import RRef
+from typing import Tuple  # pyre-ignore: unused import
+
+
+module_interface_cls = None
+
+
+def forward_async(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    return rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+
+
+def forward(self, *args, **kwargs):
+    args = (self.module_rref, self.device, self.is_device_map_set, *args)
+    kwargs = {**kwargs}
+    ret_fut = rpc.rpc_async(
+        self.module_rref.owner(),
+        _remote_forward,
+        args,
+        kwargs,
+    )
+    return ret_fut.wait()
+
+
+_generated_methods = [
+    forward_async,
+    forward,
+]
+
+
+
+
+def _remote_forward(
+    module_rref: RRef[module_interface_cls], device: str, is_device_map_set: bool, *args, **kwargs):
+    module = module_rref.local_value()
+    device = torch.device(device)
+
+    if device.type != "cuda":
+        return module.forward(*args, **kwargs)
+
+    # If the module is on a cuda device,
+    # move any CPU tensor in args or kwargs to the same cuda device.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.to(device) if isinstance(i, Tensor) else i for i in *args)``.
+    args = (*args,)
+    out_args: Tuple[()] = ()
+    for arg in args:
+        arg = (arg.to(device),) if isinstance(arg, Tensor) else (arg,)
+        out_args = out_args + arg
+
+    kwargs = {**kwargs}
+    for k, v in kwargs.items():
+        if isinstance(v, Tensor):
+            kwargs[k] = kwargs[k].to(device)
+
+    if is_device_map_set:
+        return module.forward(*out_args, **kwargs)
+
+    # If the device map is empty, then only CPU tensors are allowed to send over wire,
+    # so have to move any GPU tensor to CPU in the output.
+    # Since torch script does not support generator expression,
+    # have to use concatenation instead of
+    # ``tuple(i.cpu() if isinstance(i, Tensor) else i for i in module.forward(*out_args, **kwargs))``.
+    ret: Tuple[()] = ()
+    for i in module.forward(*out_args, **kwargs):
+        i = (i.cpu(),) if isinstance(i, Tensor) else (i,)
+        ret = ret + i
+    return ret
trainer_state.json CHANGED
@@ -11,610 +11,610 @@
     {
       "epoch": 0.44,
       "learning_rate": 0,
-      "loss": 1.7341,
+      "loss": 1.6723,
       "step": 1
     },
     {
       "epoch": 0.89,
       "learning_rate": 0,
-      "loss": 1.7223,
+      "loss": 1.7539,
       "step": 2
     },
     {
       "epoch": 1.33,
       "learning_rate": 0,
-      "loss": 1.7608,
+      "loss": 1.7347,
       "step": 3
     },
     {
       "epoch": 1.78,
       "learning_rate": 0,
-      "loss": 1.7115,
+      "loss": 1.709,
       "step": 4
     },
     {
       "epoch": 2.22,
       "learning_rate": 0,
-      "loss": 1.7181,
+      "loss": 1.7275,
       "step": 5
     },
     {
       "epoch": 2.67,
       "learning_rate": 0,
-      "loss": 1.7022,
+      "loss": 1.7085,
       "step": 6
     },
     {
       "epoch": 3.11,
       "learning_rate": 0,
-      "loss": 1.7242,
+      "loss": 1.7304,
       "step": 7
     },
     {
       "epoch": 3.56,
       "learning_rate": 0,
-      "loss": 1.7352,
+      "loss": 1.7121,
       "step": 8
     },
     {
       "epoch": 4.0,
       "learning_rate": 0,
-      "loss": 1.7181,
+      "loss": 1.719,
       "step": 9
     },
     {
       "epoch": 4.44,
       "learning_rate": 0,
-      "loss": 1.7213,
+      "loss": 1.7356,
       "step": 10
     },
     {
       "epoch": 4.89,
       "learning_rate": 0,
-      "loss": 1.6694,
+      "loss": 1.7842,
       "step": 11
     },
     {
       "epoch": 5.33,
       "learning_rate": 0,
-      "loss": 1.7046,
+      "loss": 1.7527,
       "step": 12
     },
     {
       "epoch": 5.78,
       "learning_rate": 0,
-      "loss": 1.7109,
+      "loss": 1.6973,
       "step": 13
     },
     {
       "epoch": 6.22,
       "learning_rate": 0,
-      "loss": 1.6948,
+      "loss": 1.7233,
       "step": 14
     },
     {
       "epoch": 6.67,
       "learning_rate": 0,
-      "loss": 1.6816,
+      "loss": 1.7313,
       "step": 15
     },
     {
       "epoch": 7.11,
-      "learning_rate": 0.0,
-      "loss": 1.6851,
+      "learning_rate": 0,
+      "loss": 1.6788,
       "step": 16
     },
     {
       "epoch": 7.56,
-      "learning_rate": 1.2618595071429148e-05,
-      "loss": 1.6041,
+      "learning_rate": 0.0,
+      "loss": 1.7022,
       "step": 17
     },
     {
       "epoch": 8.0,
-      "learning_rate": 2e-05,
-      "loss": 1.5208,
+      "learning_rate": 1.2618595071429148e-05,
+      "loss": 1.6138,
       "step": 18
     },
     {
       "epoch": 8.44,
       "learning_rate": 2e-05,
-      "loss": 1.4946,
+      "loss": 1.5552,
       "step": 19
     },
     {
       "epoch": 8.89,
       "learning_rate": 2e-05,
-      "loss": 1.492,
+      "loss": 1.457,
       "step": 20
     },
     {
       "epoch": 9.33,
       "learning_rate": 2e-05,
-      "loss": 1.4501,
+      "loss": 1.3525,
       "step": 21
     },
     {
       "epoch": 9.78,
       "learning_rate": 2e-05,
-      "loss": 1.1894,
+      "loss": 1.249,
       "step": 22
     },
     {
       "epoch": 10.22,
       "learning_rate": 2e-05,
-      "loss": 1.1437,
+      "loss": 1.148,
      "step": 23
     },
     {
       "epoch": 10.67,
       "learning_rate": 2e-05,
-      "loss": 1.02,
+      "loss": 0.9726,
       "step": 24
     },
     {
       "epoch": 11.11,
       "learning_rate": 2e-05,
-      "loss": 0.926,
+      "loss": 0.879,
       "step": 25
     },
     {
       "epoch": 11.56,
       "learning_rate": 2e-05,
-      "loss": 0.7794,
+      "loss": 0.761,
       "step": 26
     },
     {
       "epoch": 12.0,
       "learning_rate": 2e-05,
-      "loss": 0.7719,
+      "loss": 0.7408,
       "step": 27
     },
     {
       "epoch": 12.44,
       "learning_rate": 2e-05,
-      "loss": 0.6107,
+      "loss": 0.6326,
       "step": 28
     },
     {
       "epoch": 12.89,
       "learning_rate": 2e-05,
-      "loss": 0.633,
+      "loss": 0.5798,
       "step": 29
     },
     {
       "epoch": 13.33,
       "learning_rate": 2e-05,
-      "loss": 0.4781,
+      "loss": 0.5512,
       "step": 30
     },
     {
       "epoch": 13.78,
       "learning_rate": 2e-05,
-      "loss": 0.4379,
+      "loss": 0.4236,
       "step": 31
     },
     {
       "epoch": 14.22,
       "learning_rate": 2e-05,
-      "loss": 0.3391,
+      "loss": 0.3581,
       "step": 32
     },
     {
       "epoch": 14.67,
       "learning_rate": 2e-05,
-      "loss": 0.2928,
+      "loss": 0.3329,
       "step": 33
     },
     {
       "epoch": 15.11,
       "learning_rate": 2e-05,
-      "loss": 0.2631,
+      "loss": 0.2962,
       "step": 34
     },
     {
       "epoch": 15.56,
       "learning_rate": 2e-05,
-      "loss": 0.2399,
+      "loss": 0.2572,
       "step": 35
     },
     {
       "epoch": 16.0,
       "learning_rate": 2e-05,
-      "loss": 0.2075,
+      "loss": 0.2429,
       "step": 36
     },
     {
       "epoch": 16.44,
       "learning_rate": 2e-05,
-      "loss": 0.186,
+      "loss": 0.191,
       "step": 37
     },
     {
       "epoch": 16.89,
       "learning_rate": 2e-05,
-      "loss": 0.1782,
+      "loss": 0.174,
       "step": 38
     },
     {
       "epoch": 17.33,
       "learning_rate": 2e-05,
-      "loss": 0.144,
+      "loss": 0.1721,
       "step": 39
     },
     {
       "epoch": 17.78,
       "learning_rate": 2e-05,
-      "loss": 0.1317,
+      "loss": 0.1645,
       "step": 40
     },
     {
       "epoch": 18.22,
       "learning_rate": 2e-05,
-      "loss": 0.1144,
+      "loss": 0.1313,
       "step": 41
     },
     {
       "epoch": 18.67,
       "learning_rate": 2e-05,
-      "loss": 0.1193,
+      "loss": 0.1186,
       "step": 42
     },
     {
       "epoch": 19.11,
       "learning_rate": 2e-05,
-      "loss": 0.1161,
+      "loss": 0.1309,
       "step": 43
     },
     {
       "epoch": 19.56,
       "learning_rate": 2e-05,
-      "loss": 0.0993,
+      "loss": 0.1077,
       "step": 44
     },
     {
       "epoch": 20.0,
       "learning_rate": 2e-05,
-      "loss": 0.1083,
+      "loss": 0.1156,
       "step": 45
     },
     {
       "epoch": 20.44,
       "learning_rate": 2e-05,
-      "loss": 0.101,
+      "loss": 0.1101,
       "step": 46
     },
     {
       "epoch": 20.89,
       "learning_rate": 2e-05,
-      "loss": 0.1013,
+      "loss": 0.0979,
       "step": 47
     },
     {
       "epoch": 21.33,
       "learning_rate": 2e-05,
-      "loss": 0.1066,
+      "loss": 0.101,
       "step": 48
     },
     {
       "epoch": 21.78,
       "learning_rate": 2e-05,
-      "loss": 0.1005,
+      "loss": 0.1001,
       "step": 49
     },
     {
       "epoch": 22.22,
       "learning_rate": 2e-05,
-      "loss": 0.0882,
+      "loss": 0.0894,
       "step": 50
     },
     {
       "epoch": 22.67,
       "learning_rate": 2e-05,
-      "loss": 0.1067,
+      "loss": 0.0948,
       "step": 51
     },
     {
       "epoch": 23.11,
       "learning_rate": 2e-05,
-      "loss": 0.0797,
+      "loss": 0.0861,
       "step": 52
     },
     {
       "epoch": 23.56,
       "learning_rate": 2e-05,
-      "loss": 0.0943,
+      "loss": 0.0895,
       "step": 53
     },
     {
       "epoch": 24.0,
       "learning_rate": 2e-05,
-      "loss": 0.0769,
+      "loss": 0.0918,
       "step": 54
     },
     {
       "epoch": 24.44,
       "learning_rate": 2e-05,
-      "loss": 0.0855,
+      "loss": 0.0841,
       "step": 55
     },
     {
       "epoch": 24.89,
       "learning_rate": 2e-05,
-      "loss": 0.0735,
+      "loss": 0.0756,
       "step": 56
     },
     {
       "epoch": 25.33,
       "learning_rate": 2e-05,
-      "loss": 0.0833,
+      "loss": 0.0913,
       "step": 57
     },
     {
       "epoch": 25.78,
       "learning_rate": 2e-05,
-      "loss": 0.0811,
+      "loss": 0.0796,
       "step": 58
     },
     {
       "epoch": 26.22,
       "learning_rate": 2e-05,
-      "loss": 0.0772,
+      "loss": 0.0816,
       "step": 59
     },
     {
       "epoch": 26.67,
       "learning_rate": 2e-05,
-      "loss": 0.0721,
+      "loss": 0.0728,
       "step": 60
     },
     {
       "epoch": 27.11,
       "learning_rate": 2e-05,
-      "loss": 0.0825,
+      "loss": 0.0823,
       "step": 61
     },
     {
       "epoch": 27.56,
       "learning_rate": 2e-05,
-      "loss": 0.0758,
+      "loss": 0.0798,
       "step": 62
     },
     {
       "epoch": 28.0,
       "learning_rate": 2e-05,
-      "loss": 0.0725,
+      "loss": 0.0693,
       "step": 63
     },
     {
       "epoch": 28.44,
       "learning_rate": 2e-05,
-      "loss": 0.077,
+      "loss": 0.0805,
       "step": 64
     },
     {
       "epoch": 28.89,
       "learning_rate": 2e-05,
-      "loss": 0.0654,
+      "loss": 0.0685,
       "step": 65
     },
     {
       "epoch": 29.33,
       "learning_rate": 2e-05,
-      "loss": 0.0675,
+      "loss": 0.07,
       "step": 66
     },
     {
       "epoch": 29.78,
       "learning_rate": 2e-05,
-      "loss": 0.0772,
+      "loss": 0.0779,
       "step": 67
     },
     {
       "epoch": 30.22,
       "learning_rate": 2e-05,
-      "loss": 0.0718,
+      "loss": 0.0773,
       "step": 68
     },
     {
       "epoch": 30.67,
       "learning_rate": 2e-05,
-      "loss": 0.0625,
+      "loss": 0.0631,
       "step": 69
     },
     {
       "epoch": 31.11,
       "learning_rate": 2e-05,
-      "loss": 0.0616,
+      "loss": 0.0656,
       "step": 70
     },
     {
       "epoch": 31.56,
       "learning_rate": 2e-05,
-      "loss": 0.071,
+      "loss": 0.074,
       "step": 71
     },
     {
       "epoch": 32.0,
       "learning_rate": 2e-05,
-      "loss": 0.0655,
+      "loss": 0.0651,
       "step": 72
     },
     {
       "epoch": 32.44,
       "learning_rate": 2e-05,
-      "loss": 0.0591,
+      "loss": 0.0646,
       "step": 73
     },
     {
       "epoch": 32.89,
       "learning_rate": 2e-05,
-      "loss": 0.0669,
+      "loss": 0.0699,
       "step": 74
     },
     {
       "epoch": 33.33,
       "learning_rate": 2e-05,
-      "loss": 0.0653,
+      "loss": 0.0578,
       "step": 75
     },
     {
       "epoch": 33.78,
       "learning_rate": 2e-05,
-      "loss": 0.0662,
+      "loss": 0.0763,
       "step": 76
     },
     {
       "epoch": 34.22,
       "learning_rate": 2e-05,
-      "loss": 0.0688,
+      "loss": 0.0651,
       "step": 77
     },
     {
       "epoch": 34.67,
       "learning_rate": 2e-05,
-      "loss": 0.0498,
+      "loss": 0.0565,
       "step": 78
     },
     {
       "epoch": 35.11,
       "learning_rate": 2e-05,
-      "loss": 0.0576,
+      "loss": 0.0585,
       "step": 79
     },
     {
       "epoch": 35.56,
       "learning_rate": 2e-05,
-      "loss": 0.0737,
+      "loss": 0.069,
       "step": 80
     },
     {
       "epoch": 36.0,
       "learning_rate": 2e-05,
-      "loss": 0.0609,
+      "loss": 0.0571,
       "step": 81
     },
     {
       "epoch": 36.44,
       "learning_rate": 2e-05,
-      "loss": 0.0594,
+      "loss": 0.0599,
       "step": 82
     },
     {
       "epoch": 36.89,
       "learning_rate": 2e-05,
-      "loss": 0.0725,
+      "loss": 0.0639,
       "step": 83
     },
     {
       "epoch": 37.33,
       "learning_rate": 2e-05,
-      "loss": 0.0598,
+      "loss": 0.0625,
       "step": 84
     },
     {
       "epoch": 37.78,
       "learning_rate": 2e-05,
-      "loss": 0.0652,
+      "loss": 0.0631,
       "step": 85
     },
     {
       "epoch": 38.22,
       "learning_rate": 2e-05,
-      "loss": 0.0588,
+      "loss": 0.0552,
       "step": 86
     },
     {
       "epoch": 38.67,
       "learning_rate": 2e-05,
-      "loss": 0.0671,
+      "loss": 0.0681,
       "step": 87
     },
     {
       "epoch": 39.11,
       "learning_rate": 2e-05,
-      "loss": 0.0596,
+      "loss": 0.0566,
       "step": 88
     },
     {
       "epoch": 39.56,
       "learning_rate": 2e-05,
-      "loss": 0.0518,
+      "loss": 0.0594,
       "step": 89
     },
     {
       "epoch": 40.0,
       "learning_rate": 2e-05,
-      "loss": 0.0612,
+      "loss": 0.0661,
       "step": 90
     },
     {
       "epoch": 40.44,
       "learning_rate": 2e-05,
-      "loss": 0.0593,
+      "loss": 0.0632,
       "step": 91
     },
     {
       "epoch": 40.89,
       "learning_rate": 2e-05,
-      "loss": 0.0521,
+      "loss": 0.0529,
       "step": 92
     },
     {
       "epoch": 41.33,
       "learning_rate": 2e-05,
-      "loss": 0.0536,
+      "loss": 0.0574,
       "step": 93
     },
     {
       "epoch": 41.78,
       "learning_rate": 2e-05,
-      "loss": 0.0548,
+      "loss": 0.055,
       "step": 94
     },
     {
       "epoch": 42.22,
       "learning_rate": 2e-05,
-      "loss": 0.0507,
+      "loss": 0.0525,
       "step": 95
     },
     {
       "epoch": 42.67,
       "learning_rate": 2e-05,
-      "loss": 0.0588,
+      "loss": 0.0625,
       "step": 96
     },
     {
       "epoch": 43.11,
       "learning_rate": 2e-05,
-      "loss": 0.0506,
+      "loss": 0.0462,
       "step": 97
     },
     {
       "epoch": 43.56,
       "learning_rate": 2e-05,
-      "loss": 0.055,
+      "loss": 0.0615,
       "step": 98
     },
     {
       "epoch": 44.0,
       "learning_rate": 2e-05,
-      "loss": 0.0503,
+      "loss": 0.0486,
       "step": 99
     },
     {
       "epoch": 44.44,
       "learning_rate": 2e-05,
-      "loss": 0.054,
+      "loss": 0.0539,
       "step": 100
     },
     {
       "epoch": 44.44,
       "step": 100,
-      "total_flos": 7478779576320.0,
-      "train_loss": 0.49326207719743254,
-      "train_runtime": 9902.4306,
-      "train_samples_per_second": 0.969,
+      "total_flos": 7439520890880.0,
+      "train_loss": 0.49884208038449285,
+      "train_runtime": 9870.4378,
+      "train_samples_per_second": 0.973,
       "train_steps_per_second": 0.01
     }
   ],
@@ -622,7 +622,7 @@
   "max_steps": 100,
   "num_train_epochs": 50,
   "save_steps": 200,
-  "total_flos": 7478779576320.0,
+  "total_flos": 7439520890880.0,
   "trial_name": null,
   "trial_params": null
 }
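Between the two revisions only the logged metrics differ: every per-step loss, the learning-rate values around steps 16-18 (the warmup shifted by one logging step), and the run summary (total_flos, train_loss, train_runtime, train_samples_per_second). A short sketch for pulling the loss curve out of a trainer_state.json like this one, assuming the standard Hugging Face Trainer layout where these entries live under the log_history key:

import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Each log entry carries epoch / learning_rate / loss / step, as in the diff above.
losses = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
print(losses[:3])  # e.g. [(1, 1.6723), (2, 1.7539), (3, 1.7347)]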
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:135858efd4811a09b43593532cb735b8e0bee8450cc74446fdba3f0ec24a504a
+oid sha256:d82a4c57a79a807d2d8164061827388f4a5ee4587c5aa26830fa7c388af4e898
 size 6523
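training_args.bin is a pickled TrainingArguments object, so its hash changes whenever any argument changes, even at an identical 6523-byte size. A sketch for inspecting it (assumptions: transformers is importable, the file comes from a trusted source since it is a pickle, and weights_only=False is needed on newer PyTorch):

import torch

# training_args.bin holds a pickled transformers.TrainingArguments instance.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.max_steps, args.save_steps)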