Implemented SRE, SAM visual perturbation metrics #82

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: vision-perturbation
Choose a base branch
from
Open
code_soup/common/vision/perturbations.py (103 changes: 91 additions & 12 deletions)
@@ -9,7 +9,10 @@

class VisualPerturbation(Perturbation):
    """
-    Docstring for VisualPerturbations
+    An abstract base class for visual perturbation metrics.
+    Methods
+    __init__(self, original: Union[np.ndarray, torch.Tensor], perturbed: Union[np.ndarray, torch.Tensor])
+        - casts both inputs to float64 torch tensors
    """

    def __init__(
@@ -21,19 +24,95 @@ def __init__(
        """
        Store the original and perturbed images as float64 tensors.
        NumPy inputs are automatically cast using torch.from_numpy().
        """
-        raise NotImplementedError
+        self.original = self.__totensor(original)
+        self.perturbed = self.__totensor(perturbed)
+        self.original = self.original.type(dtype=torch.float64)
+        self.perturbed = self.perturbed.type(dtype=torch.float64)

-    def calculate_LPNorm(self, p: Union[int, str]):
-        raise NotImplementedError
+    def __flatten(self, atensor: torch.Tensor) -> torch.Tensor:
+        """
+        Flatten the input tensor into one dimension.
+        """
+        return torch.flatten(atensor)

+    def __totensor(self, anarray: Union[np.ndarray, torch.Tensor]) -> torch.Tensor:
+        """
+        Convert the input to a torch tensor; tensors are returned unchanged.
+        """
+        if isinstance(anarray, torch.Tensor):
+            return anarray
+        else:
+            return torch.from_numpy(anarray)

+    def sam(self, convert_to_degree=True):
+        """
+        Spectral Angle Mapper (SAM) defines spectral similarity as the angle
+        between the spectra of corresponding image pixels.
+        Expects the images to be in (C, H, W) format.
+
+        Parameters
+        ----------
+        convert_to_degree
+            If True (default), return the spectral angle in degrees rather than radians.
+        """
+        original_img = self.original
+        new_img = self.perturbed

-    def calculate_PSNR(self):
-        raise NotImplementedError
+        assert (
+            original_img.size() == new_img.size()
+        ), "Inputs to the SAM metric must have the same size"

-    def calculate_RMSE(self):
-        raise NotImplementedError
+        # assuming the image is in (C, H, W) format
+        numerator = torch.sum(torch.mul(new_img, original_img), dim=0)
+        denominator = torch.linalg.norm(original_img, dim=0) * torch.linalg.norm(
+            new_img, dim=0
+        )
+        val = torch.clip(numerator / denominator, -1, 1)
+        sam_angles = torch.arccos(val)
+        if convert_to_degree:
+            sam_angles = sam_angles * 180.0 / np.pi

+        # The original paper states that SAM values are expressed as radians,
+        # while e.g. Lanaras et al. (2018) use degrees. We therefore made this
+        # configurable, with degree the default.
+        return torch.mean(torch.nan_to_num(sam_angles)).item()

+    def sre(self):
+        """
+        Signal to Reconstruction Error ratio (SRE), in decibels.
+        Expects the images to be in (C, H, W) format.
+        """
+        original_img = self.original
+        new_img = self.perturbed

-    def calculate_SAM(self):
-        raise NotImplementedError
+        assert (
+            original_img.size() == new_img.size()
+        ), "Inputs to the SRE metric must have the same size"

-    def calculate_SRE(self):
-        raise NotImplementedError
+        sre_final = []
+        for i in range(original_img.shape[0]):
+            # squared mean of the original channel
+            numerator = torch.square(torch.mean(original_img[i]))
+            # norm of the channel error, divided by the number of pixels
+            denominator = torch.linalg.norm(original_img[i] - new_img[i]) / (
+                original_img.shape[2] * original_img.shape[1]
+            )
+            sre_final.append(numerator / denominator)
+        sre_final = torch.stack(sre_final)
+        return (10 * torch.log10(torch.mean(sre_final))).item()
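
For reviewers who want to try the metrics locally, a minimal usage sketch; the import path comes from the diff above, while the random example images are made up for illustration:

import numpy as np

from code_soup.common.vision.perturbations import VisualPerturbation

# Made-up example data: a random (C, H, W) image and a slightly noisy copy.
original = np.random.rand(3, 32, 32)
perturbed = original + 0.01 * np.random.randn(3, 32, 32)

metric = VisualPerturbation(original, perturbed)
print(metric.sam())                         # mean spectral angle in degrees
print(metric.sam(convert_to_degree=False))  # the same angle in radians
print(metric.sre())                         # signal-to-reconstruction-error in dB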