import fastai
from fastai import *
from fastai.vision import *
from fastai.callbacks import *
from torchvision.models import vgg16_bn
import PIL, random

path = untar_data(URLs.PETS)
path_hr = path/'images'   # original high-resolution images
path_lr = path/'crappy'   # low-quality copies generated by `crappify`

def crappify(fn, i):
    "Save a downsized, heavily JPEG-compressed copy of `fn` under `path_lr`."
    dest = path_lr/fn.relative_to(path_hr)
    dest.parent.mkdir(parents=True, exist_ok=True)
    img = PIL.Image.open(fn)
    targ_sz = resize_to(img, 96, use_min=True)
    img = img.resize(targ_sz, resample=PIL.Image.BILINEAR).convert('RGB')
    img.save(dest, quality=random.randint(10,70))

# Uncomment to generate the crappified dataset (only needed once):
#il = ImageItemList.from_folder(path_hr)
#parallel(crappify, il.items)

#bs,size = 32,128
bs,size = 8,256
arch = models.resnet34

# Data for the critic: classify images as crappified vs. original
classes = ['crappy', 'images']
src = ImageItemList.from_folder(path, include=classes).random_split_by_pct(0.1, seed=42)
ll = src.label_from_folder(classes=classes)
data_crit = (ll.transform(get_transforms(max_zoom=2.), size=size)
               .databunch(bs=bs).normalize(imagenet_stats))
data_crit.c = 3
data_crit.show_batch(rows=4, ds_type=DatasetType.Valid)

def conv(ni:int, nf:int, ks:int=3, stride:int=1, **kwargs):
    "Conv layer with spectral norm and leaky ReLU, used throughout the critic."
    return conv_layer(ni, nf, ks=ks, stride=stride, leaky=0.2,
                      norm_type=NormType.Spectral, **kwargs)

class DenseBlock(nn.Module):
    "Two conv layers whose output is concatenated with the input (doubles the channels)."
    def __init__(self, nf):
        super().__init__()
        self.convs = nn.Sequential(conv(nf, nf), conv(nf, nf))

    def forward(self, x):
        y = self.convs(x)
        return torch.cat([x,y], 1)

def critic(n_channels:int=3, nf:int=128, n_blocks:int=3, p:float=0.05):
    "Build the critic: strided convs with dropout, a dense block and self-attention."
    layers = [
        conv(n_channels, nf, ks=4, stride=2),
        nn.Dropout2d(p/2),
        DenseBlock(nf)]
    nf *= 2
    for i in range(n_blocks):
        layers += [
            nn.Dropout2d(p),
            conv(nf, nf*2, ks=4, stride=2, self_attention=(i==0))]
        nf *= 2
    layers += [
        conv(nf, 1, ks=4, bias=False, padding=0, use_activ=False),
        #nn.AdaptiveMaxPool2d(1),
        Flatten()]
    return nn.Sequential(*layers)

class AdaptiveLoss(nn.Module):
    "Expand the target to the critic's output shape before applying `crit`."
    def __init__(self, crit):
        super().__init__()
        self.crit = crit

    def forward(self, output, target):
        return self.crit(output, target[:,None].expand_as(output).float())

def accuracy_thresh_expand(y_pred:Tensor, y_true:Tensor, thresh:float=0.5,
                           sigmoid:bool=True)->Rank0Tensor:
    "Compute accuracy after expanding `y_true` to the size of `y_pred`."
    if sigmoid: y_pred = y_pred.sigmoid()
    return ((y_pred>thresh)==y_true[:,None].expand_as(y_pred).byte()).float().mean()

# Pretrain the critic on crappy vs. original images
learn_critic = Learner(data_crit, critic(), metrics=accuracy_thresh_expand,
                       loss_func=AdaptiveLoss(nn.BCEWithLogitsLoss()))
learn_critic.fit_one_cycle(8, 1e-3)
learn_critic.save('critic-pre')

# Data for the generator: crappified image in, original image out
arch = models.resnet34
src = ImageImageList.from_folder(path_lr).random_split_by_pct(0.1, seed=42)

def get_data(bs, size):
    data = (src.label_from_func(lambda x: path_hr/x.name)
               .transform(get_transforms(max_zoom=2.), size=size, tfm_y=True)
               .databunch(bs=bs).normalize(imagenet_stats, do_y=True))
    data.c = 3
    return data

data_gen = get_data(bs, size)

# Pretrain the generator (a U-Net) with a plain pixel MSE loss
wd = 1e-3
learn_gen = unet_learner(data_gen, arch, wd=wd, blur=True, norm_type=NormType.Spectral,
                         self_attention=True, loss_func=MSELossFlat())
learn_gen.fit_one_cycle(2, pct_start=0.8)
learn_gen.unfreeze()
learn_gen.fit_one_cycle(2, slice(1e-6,1e-3))
learn_gen.show_results(rows=8)
learn_gen.save('gen-pre')

# GAN training: combine the pretrained generator and critic
from fastai.vision.gan import *

loss_critic = AdaptiveLoss(nn.BCEWithLogitsLoss())
loss_gen = MSELossFlat()
learn_crit = Learner(data_crit, critic(), loss_func=loss_critic).load('critic-pre')
learn_gen = unet_learner(data_gen, arch, wd=wd, blur=True, norm_type=NormType.Spectral,
                         self_attention=True, loss_func=loss_gen).load('gen-pre')

switcher = partial(AdaptiveGANSwitcher, critic_thresh=0.5)
learn = GANLearner.from_learners(learn_gen, learn_crit, weights_gen=(1.,50.), show_img=False,
                                 switcher=switcher, opt_func=partial(optim.Adam, betas=(0,0.99)), wd=0)

#Adaptive schedule 0.5, 50 epochs, lr=1e-4 from size 256 dense block
learn.fit(50, 1e-4)
learn.fit(20, 1e-5)
learn.show_results()

#Adaptive schedule 0.5, 60 epochs, lr=1e-4 from size 256
learn.fit(80, 5e-5)
learn.show_results()
learn.fit(20, 1e-5)

#Adaptive schedule 0.5, 60 epochs, lr=1e-4 + fine-tuned 20 epochs 1e-5
learn.show_results()
#Adaptive schedule 0.5, 60 epochs, lr=1e-4
learn.show_results()
#Fixed schedule 1 to 5, 60 epochs, lr=1e-4
learn.show_results()

# Save model and optimizer states for the two runs
learn.save('size256f')
torch.save(learn.gan_trainer.opt_critic.state_dict(), path/'models'/'opt_crit_256f.pth')
torch.save(learn.gan_trainer.opt_gen.state_dict(), path/'models'/'opt_gen_256f.pth')

learn.save('size256')
torch.save(learn.gan_trainer.opt_critic.state_dict(), path/'models'/'opt_crit_256.pth')
torch.save(learn.gan_trainer.opt_gen.state_dict(), path/'models'/'opt_gen_256.pth')
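
# --- Inference sketch (not part of the original notebook) ---------------------
# A minimal, hedged example of running the trained generator on a single image.
# It assumes training finished in this session: `GANLearner.from_learners` wraps
# `learn_gen.model` as the generator, so that module already holds the trained
# weights and the plain U-Net learner can be used for prediction. The file taken
# from the generator's validation set is an arbitrary choice for illustration.
fn = data_gen.valid_ds.x.items[0]        # path to a crappified validation image
img = open_image(fn)
pred_img, _, _ = learn_gen.predict(img)  # predicted restored image
img.show(figsize=(6,6))
pred_img.show(figsize=(6,6))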