# Script_203_test_128_MSELoss_pretrained_w_clean_best.py
## Restormer: Efficient Transformer for High-Resolution Image Restoration
## Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, and Ming-Hsuan Yang
## https://arxiv.org/abs/2111.09881
import numpy as np
import os
import argparse
from tqdm import tqdm
import torch.nn as nn
import torch
import torch.nn.functional as F
import utils
from natsort import natsorted
from glob import glob
from basicsr.models.archs.restormer_arch import Restormer
from skimage import img_as_ubyte
from pdb import set_trace as stx
parser = argparse.ArgumentParser(description='Single Image Motion Deblurring using Restormer')
parser.add_argument('--input_dir', default='./Datasets/', type=str, help='Directory of validation images')
parser.add_argument('--result_dir', default='../ResultMoco/results_mask_pretrained_w_clean_on_test_128_best/', type=str, help='Directory for results')
parser.add_argument('--weights', default='../experiments/Deblurring_1200k_with_overlap_downstream_last_128_clean/models/net_g_1168000.pth', type=str, help='Path to weights')
parser.add_argument('--dataset', default='moco_exp_20_s8', type=str, help='Test Dataset') # ['GoPro', 'HIDE', 'RealBlur_J', 'RealBlur_R']
args = parser.parse_args()
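
# Example invocation (a sketch; the paths and dataset name simply mirror the argparse
# defaults above and the directory layout assumed below, adjust to your local setup):
#   python Script_203_test_128_MSELoss_pretrained_w_clean_best.py \
#       --input_dir ./Datasets/ \
#       --result_dir ../ResultMoco/results_mask_pretrained_w_clean_on_test_128_best/ \
#       --weights ../experiments/Deblurring_1200k_with_overlap_downstream_last_128_clean/models/net_g_1168000.pth \
#       --dataset moco_exp_20_s8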
####### Load yaml #######
yaml_file = 'Options/Deblurring_1200k_with_overlap_downstream_last_128_clean.yml'
import yaml

try:
    from yaml import CLoader as Loader  # faster C-based loader when available
except ImportError:
    from yaml import Loader

with open(yaml_file, mode='r') as f:
    x = yaml.load(f, Loader=Loader)

# Drop the 'type' entry so the remaining keys can be passed as constructor kwargs.
x['network_g'].pop('type')
##########################
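
# For reference, x['network_g'] is expected to hold the Restormer constructor arguments.
# A minimal sketch of what the 'network_g' block of the Options YAML typically looks like
# (values shown are the stock Restormer defaults, for illustration only; the actual
# experiment config may use different settings):
#
#   network_g:
#     type: Restormer
#     inp_channels: 3
#     out_channels: 3
#     dim: 48
#     num_blocks: [4, 6, 6, 8]
#     num_refinement_blocks: 4
#     heads: [1, 2, 4, 8]
#     ffn_expansion_factor: 2.66
#     bias: False
#     LayerNorm_type: 'WithBias'
#     dual_pixel_task: False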
# Instantiate Restormer from the YAML config and load the trained weights.
model_restoration = Restormer(**x['network_g'])
checkpoint = torch.load(args.weights)
model_restoration.load_state_dict(checkpoint['params'])
print("===> Testing using weights:", args.weights)

model_restoration.cuda()
model_restoration = nn.DataParallel(model_restoration)
model_restoration.eval()
factor = 8  # inputs are padded so height and width are multiples of 8

dataset = args.dataset
result_dir = os.path.join(args.result_dir, dataset)
os.makedirs(result_dir, exist_ok=True)

#inp_dir = os.path.join(args.input_dir, 'test', dataset, 'input')
inp_dir = os.path.join(args.input_dir, 'test', dataset, 'corrupt_image_mean_norm')
print('Input dir is ', inp_dir)

files = natsorted(glob(os.path.join(inp_dir, '*.png')) + glob(os.path.join(inp_dir, '*.jpg')))
print('Found {} input images'.format(len(files)))
with torch.no_grad():
    for file_ in tqdm(files):
        torch.cuda.ipc_collect()
        torch.cuda.empty_cache()

        # Load image, scale to [0, 1], and move to NCHW layout on the GPU.
        img = np.float32(utils.load_img(file_)) / 255.
        img = torch.from_numpy(img).permute(2, 0, 1)
        input_ = img.unsqueeze(0).cuda()

        # Pad with reflection in case images are not multiples of `factor`.
        h, w = input_.shape[2], input_.shape[3]
        H, W = ((h + factor) // factor) * factor, ((w + factor) // factor) * factor
        padh = H - h if h % factor != 0 else 0
        padw = W - w if w % factor != 0 else 0
        input_ = F.pad(input_, (0, padw, 0, padh), 'reflect')

        restored = model_restoration(input_)

        # Unpad to the original dimensions, clamp to [0, 1], and convert to an HWC numpy array.
        restored = restored[:, :, :h, :w]
        restored = torch.clamp(restored, 0, 1).cpu().detach().permute(0, 2, 3, 1).squeeze(0).numpy()

        # Save the restored image as PNG under the result directory, keeping the input file name.
        utils.save_img(os.path.join(result_dir, os.path.splitext(os.path.split(file_)[-1])[0] + '.png'), img_as_ubyte(restored))