jbetker committed
Commit 29b2f36 (1 parent: 14617f8)

Remove entmax dep

requirements.txt CHANGED
@@ -6,6 +6,5 @@ inflect
 progressbar
 einops
 unidecode
-entmax
 scipy
 librosa
setup.py CHANGED
@@ -21,7 +21,6 @@ setuptools.setup(
         'progressbar',
         'einops',
         'unidecode',
-        'entmax',
         'scipy',
         'librosa',
         'transformers',
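
The dependency is dropped from both places it was declared (requirements.txt and the install_requires list in setup.py), so nothing pulls entmax in anymore. A quick smoke test one could run (hypothetical snippet, not part of the repo, assuming the package is installed in an environment where entmax has been uninstalled):

    # Hypothetical check: after this commit the transformer module should
    # import cleanly even when entmax is absent from the environment.
    import importlib.util

    # Confirm entmax is genuinely missing.
    assert importlib.util.find_spec("entmax") is None, "entmax is still installed"

    # Before this commit, this import raised ModuleNotFoundError without
    # entmax; now it succeeds because the module no longer imports entmax15.
    from tortoise.models.xtransformers import Attention
    print("xtransformers imports fine without entmax")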
tortoise/models/xtransformers.py CHANGED
@@ -10,7 +10,6 @@ from collections import namedtuple
 from einops import rearrange, repeat, reduce
 from einops.layers.torch import Rearrange
 
-from entmax import entmax15
 from torch.utils.checkpoint import checkpoint
 
 DEFAULT_DIM_HEAD = 64
@@ -556,7 +555,7 @@ class Attention(nn.Module):
         self.sparse_topk = sparse_topk
 
         # entmax
-        self.attn_fn = entmax15 if use_entmax15 else F.softmax
+        self.attn_fn = F.softmax
 
         # add memory key / values
         self.num_mem_kv = num_mem_kv
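
In the model itself, the only functional change is that attention weights are now always computed with softmax; entmax15 (a sparse alternative that can assign exactly zero weight to some keys) was previously selectable via use_entmax15. A minimal sketch of the call site's behavior, with assumed tensor shapes:

    import torch
    import torch.nn.functional as F

    # Assumed attention-logit shape: (batch, heads, query_len, key_len).
    dots = torch.randn(2, 8, 16, 16)

    # After this commit: attn_fn is always F.softmax over the key axis.
    attn = F.softmax(dots, dim=-1)
    # Each row sums to 1 and every key gets a strictly positive weight.
    assert torch.allclose(attn.sum(dim=-1), torch.ones(2, 8, 16))

    # Before this commit, use_entmax15=True swapped in entmax.entmax15, which
    # has the same (input, dim) signature but yields sparse distributions:
    #   from entmax import entmax15
    #   attn = entmax15(dots, dim=-1)  # many entries become exactly 0

Since F.softmax and entmax15 share the (input, dim) calling convention, assigning F.softmax directly to self.attn_fn is a drop-in replacement and no downstream call sites need to change.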