-
Notifications
You must be signed in to change notification settings - Fork 13
[Transforms] Transform Registry Support #274
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 1 commit
1adfa30
ab6101e
1e1760b
749420b
7ecb1b0
2988aba
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -22,12 +22,14 @@ | |
|
||
@Transforms.register("hadamard") | ||
class Hadamard(Transforms): | ||
def __new__( | ||
cls, | ||
def __init__( | ||
self, | ||
size: int, | ||
empty: Optional[bool] = False, | ||
device: Optional[Union[str, torch.device]] = "cuda", | ||
dtype: Optional[torch.dtype] = torch.bfloat16, | ||
*args, | ||
**kwargs, | ||
): | ||
""" | ||
Produces a hadamard matrix with dims (size, size), with values | ||
|
@@ -50,11 +52,23 @@ def __new__( | |
else: | ||
transform = torch.empty((size, size)) | ||
|
||
return super().__new__(cls, transform=transform, device=device, dtype=dtype) | ||
super().__init__(transform=transform, dtype=dtype, device=device) | ||
|
||
def apply( | ||
self, | ||
input_tensor: torch.Tensor, | ||
transpose: bool = False, | ||
first: bool = True, | ||
) -> torch.Tensor: | ||
return apply_matrix_transform( | ||
transform=self.transform, | ||
input_tensor=input_tensor, | ||
transpose=transpose, | ||
first=first, | ||
) | ||
|
||
@staticmethod | ||
def inverse_apply( | ||
transform: torch.Tensor, | ||
self, | ||
input_tensor: torch.Tensor, | ||
transpose: bool = False, | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I think having — There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Afaict, transpose is only used when applying an inverse Hadamard, and therefore should not be controllable by the user, to avoid footgunning |
||
first: bool = True, | ||
|
@@ -73,7 +87,7 @@ def inverse_apply( | |
# need to normalize before sending back | ||
return ( | ||
apply_matrix_transform( | ||
transform=transform, | ||
transform=self.transform, | ||
input_tensor=input_tensor, | ||
transpose=transpose, | ||
first=first, | ||
|
Uh oh!
There was an error while loading. Please reload this page.