calculate_fid.py
"""Calculate the FID of a folder of restored images against pre-computed
Inception-v3 statistics of a reference dataset (e.g. FFHQ 512x512)."""
import argparse
import math
import os
from pathlib import Path

import numpy as np
import torch
from torch.utils.data import DataLoader

from basicsr.data import build_dataset
from basicsr.metrics.fid import calculate_fid, extract_inception_features, load_patched_inception_v3


def calculate_fid_folder(args):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # inception model
    inception = load_patched_inception_v3(device)

    print(f'Number of files in {args.restored_folder}: {len(os.listdir(args.restored_folder))}')

    # create dataset
    opt = {}
    opt['name'] = 'SingleImageDataset'
    opt['type'] = 'SingleImageDataset'
    opt['dataroot_lq'] = args.restored_folder
    opt['io_backend'] = dict(type=args.backend)
    opt['mean'] = [0.5, 0.5, 0.5]
    opt['std'] = [0.5, 0.5, 0.5]
    dataset = build_dataset(opt)

    # create dataloader
    data_loader = DataLoader(
        dataset=dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        sampler=None,
        drop_last=False)
    args.num_sample = min(args.num_sample, len(dataset))
    total_batch = math.ceil(args.num_sample / args.batch_size)

    def data_generator(data_loader, total_batch):
        for idx, data in enumerate(data_loader):
            if idx >= total_batch:
                break
            else:
                yield data['lq']

    # extract Inception features and compute the sample statistics
    features = extract_inception_features(data_generator(data_loader, total_batch), inception, total_batch, device)
    features = features.numpy()
    total_len = features.shape[0]
    features = features[:args.num_sample]
    print(f'Extracted {total_len} features, use the first {features.shape[0]} features to calculate stats.')
    sample_mean = np.mean(features, 0)
    sample_cov = np.cov(features, rowvar=False)

    # load the reference dataset stats
    stats = torch.load(args.fid_stats)
    print('fid stats:', args.fid_stats)
    real_mean = stats['mean']
    real_cov = stats['cov']

    # calculate FID metric
    fid = calculate_fid(sample_mean, sample_cov, real_mean, real_cov)
    print(args.restored_folder)
    print('fid:', fid)

    if args.out_path is not None:
        output_text_file = Path(args.out_path) / 'fid.txt'
        with open(output_text_file, 'a') as f:
            f.write('FID \n')
            f.write(f'stats file = {args.fid_stats}\n')
            f.write(f'fid = {fid}\n')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-restored_folder', type=str, help='Path to the folder of restored images.', required=True)
    parser.add_argument(
        '--fid_stats',
        type=str,
        help='Path to the dataset FID statistics.',
        default='weights/metrics/inception_FFHQ_512.pth')
    parser.add_argument(
        '--out_path',
        type=str,
        help='Folder in which to append the FID result to fid.txt.',
        default=None)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--num_sample', type=int, default=3000)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--backend', type=str, default='disk', help='IO backend for the dataset. Options: disk, lmdb')
    args = parser.parse_args()
    calculate_fid_folder(args)
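
# --- Programmatic usage sketch -------------------------------------------------
# calculate_fid_folder can also be driven from other Python code by passing an
# argparse.Namespace (or any object with the same attributes). The folder path
# below is an illustrative placeholder; the stats path is the script's default.
#
# from argparse import Namespace
#
# fid_args = Namespace(
#     restored_folder='path/to/restored_images',  # hypothetical folder of restored images
#     fid_stats='weights/metrics/inception_FFHQ_512.pth',
#     out_path=None,       # set to a folder to also append the result to fid.txt
#     batch_size=64,
#     num_sample=3000,
#     num_workers=4,
#     backend='disk')
# calculate_fid_folder(fid_args)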