Skip to content

Commit 6d563af

Browse files
committed
chore: remove obsolete code for torch<2
Minimum torch version is 2.1 now.
1 parent 865a481 commit 6d563af

File tree

2 files changed

+1
-9
lines changed

2 files changed

+1
-9
lines changed

TTS/tts/layers/glow_tts/glow.py

+1-5
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
 import torch
-from packaging.version import Version
 from torch import nn
 from torch.nn import functional as F

@@ -90,10 +89,7 @@ def __init__(self, channels, num_splits=4, no_jacobian=False, **kwargs):  # pylint: disable=unused-argument
         self.no_jacobian = no_jacobian
         self.weight_inv = None

-        if Version(torch.__version__) < Version("1.9"):
-            w_init = torch.qr(torch.FloatTensor(self.num_splits, self.num_splits).normal_())[0]
-        else:
-            w_init = torch.linalg.qr(torch.FloatTensor(self.num_splits, self.num_splits).normal_(), "complete")[0]
+        w_init = torch.linalg.qr(torch.FloatTensor(self.num_splits, self.num_splits).normal_(), "complete")[0]

         if torch.det(w_init) < 0:
             w_init[:, 0] = -1 * w_init[:, 0]

TTS/tts/layers/xtts/perceiver_encoder.py

-4
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
 import torch.nn.functional as F
 from einops import rearrange, repeat
 from einops.layers.torch import Rearrange
-from packaging import version
 from torch import einsum, nn


@@ -44,9 +43,6 @@ def __init__(self, dropout=0.0, causal=False, use_flash=False):
         self.register_buffer("mask", None, persistent=False)

         self.use_flash = use_flash
-        assert not (
-            use_flash and version.parse(torch.__version__) < version.parse("2.0.0")
-        ), "in order to use flash attention, you must be using pytorch 2.0 or above"

         # determine efficient attention configs for cuda and cpu
         self.config = namedtuple("EfficientAttentionConfig", ["enable_flash", "enable_math", "enable_mem_efficient"])

0 commit comments

Comments (0)