%matplotlib inline
%reload_ext autoreload
%autoreload 2
import argparse
import os
import shutil
import time
from fastai.transforms import *
from fastai.dataset import *
from fastai.fp16 import *
from fastai.conv_learner import *
from pathlib import *
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
import models.cifar10 as cifar10models
from distributed import DistributedDataParallel as DDP
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
cifar10_names = sorted(name for name in cifar10models.__dict__
if name.islower() and not name.startswith("__")
and callable(cifar10models.__dict__[name]))
model_names = cifar10_names + model_names
#print(models.cifar10.__dict__)
#print(model_names)
# Example usage: python run_fastai.py /home/paperspace/ILSVRC/Data/CLS-LOC/ -a resnext_50_32x4d --epochs 1 -j 4 -b 64 --fp16
parser = argparse.ArgumentParser(description='PyTorch Cifar10 Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--save-dir', type=str, default=Path.home()/'imagenet_training',
help='Directory to save logs and models.')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet56',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet56)')
parser.add_argument('-j', '--workers', default=7, type=int, metavar='N',
                    help='number of data loading workers (default: 7)')
parser.add_argument('--epochs', default=1, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--cycle-len', default=95, type=float, metavar='N',
help='Length of cycle to run')
# parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
# help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=512, type=int,
                    metavar='N', help='mini-batch size (default: 512)')
parser.add_argument('--lr', '--learning-rate', default=0.8, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
# parser.add_argument('--print-freq', '-p', default=10, type=int,
# metavar='N', help='print frequency (default: 10)')
# parser.add_argument('--resume', default='', type=str, metavar='PATH',
# help='path to latest checkpoint (default: none)')
# parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
# help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model')
parser.add_argument('--fp16', action='store_true', help='Run model fp16 mode.')
parser.add_argument('--use-tta', default=True, type=bool,  # note: argparse's type=bool treats any non-empty string as True
                    help='Validate model with TTA at the end of training.')
parser.add_argument('--train-half', action='store_true', help='Train model on half images. TODO: allow custom epochs and LR')
parser.add_argument('--sz', default=32, type=int, help='Size of transformed image.')
# parser.add_argument('--decay-int', default=30, type=int, help='Decay LR by 10 every decay-int epochs')
parser.add_argument('--use-clr', default='10,13.68,0.95,0.85', type=str,
help='div,pct,max_mom,min_mom. Pass in a string delimited by commas. Ex: "20,2,0.95,0.85"')
parser.add_argument('--loss-scale', type=float, default=128,
help='Loss scaling, positive power of 2 values can improve fp16 convergence.')
parser.add_argument('--prof', dest='prof', action='store_true', help='Only run a few iters for profiling.')
parser.add_argument('--dist-url', default='file://sync.file', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend')
parser.add_argument('--world-size', default=1, type=int,
help='Number of GPUs to use. Can either be manually set ' +
'or automatically set by using \'python -m multiproc\'.')
parser.add_argument('--rank', default=0, type=int,
help='Used for multi-process training. Can either be manually set ' +
'or automatically set by using \'python -m multiproc\'.')
class TorchModelData(ModelData):
def __init__(self, path, trn_dl, val_dl, aug_dl=None):
super().__init__(path, trn_dl, val_dl)
self.aug_dl = aug_dl
def torch_loader(data_path, size):
# Data loading code
traindir = os.path.join(data_path, 'train')
valdir = os.path.join(data_path, 'test')
    normalize = transforms.Normalize(mean=[0.4914, 0.48216, 0.44653], std=[0.24703, 0.24349, 0.26159])
scale_size = 40
padding = int((scale_size - size) / 2)
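    # With the defaults (scale_size=40, size=32) this gives padding=4, i.e. the standard
    # CIFAR-10 augmentation: pad each 32x32 image by 4px, then take a random 32x32 crop.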
train_tfms = transforms.Compose([
transforms.RandomCrop(size, padding=padding),
transforms.ColorJitter(.25,.25,.25),
transforms.RandomRotation(2),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
train_dataset = datasets.ImageFolder(traindir, train_tfms)
train_sampler = (torch.utils.data.distributed.DistributedSampler(train_dataset)
if args.distributed else None)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_tfms = transforms.Compose([
# transforms.Resize(int(size*1.14)),
# transforms.CenterCrop(size),
transforms.ToTensor(),
normalize,
])
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, val_tfms),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
aug_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, train_tfms),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
train_loader = DataPrefetcher(train_loader)
val_loader = DataPrefetcher(val_loader)
aug_loader = DataPrefetcher(aug_loader)
if args.prof:
train_loader.stop_after = 200
val_loader.stop_after = 0
data = TorchModelData(data_path, train_loader, val_loader, aug_loader)
return data, train_sampler
# Seems to speed up training by ~2%
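# It issues the next batch's host-to-device copies on a side CUDA stream while the current
# batch is being consumed, so the copy overlaps with compute instead of blocking the training loop.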
class DataPrefetcher():
def __init__(self, loader, stop_after=None):
self.loader = loader
self.dataset = loader.dataset
self.stream = torch.cuda.Stream()
self.stop_after = stop_after
self.next_input = None
self.next_target = None
def __len__(self):
return len(self.loader)
def preload(self):
try:
self.next_input, self.next_target = next(self.loaditer)
except StopIteration:
self.next_input = None
self.next_target = None
return
with torch.cuda.stream(self.stream):
            self.next_input = self.next_input.cuda(non_blocking=True)
            self.next_target = self.next_target.cuda(non_blocking=True)
def __iter__(self):
count = 0
self.loaditer = iter(self.loader)
self.preload()
while self.next_input is not None:
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
self.preload()
count += 1
yield input, target
if type(self.stop_after) is int and (count > self.stop_after):
break
def top5(output, target):
"""Computes the precision@k for the specified values of k"""
top5 = 5
batch_size = target.size(0)
_, pred = output.topk(top5, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
correct_k = correct[:top5].view(-1).float().sum(0, keepdim=True)
return correct_k.mul_(1.0 / batch_size)
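# Hypothetical sanity check of top5 (names and shapes are illustrative only):
# out = torch.randn(8, 10)                # 8 samples, 10 classes
# tgt = torch.randint(0, 10, (8,))
# top5(out, tgt)  # fraction of samples whose target appears among the 5 largest logits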
class ImagenetLoggingCallback(Callback):
def __init__(self, save_path, print_every=50):
super().__init__()
self.save_path=save_path
self.print_every=print_every
def on_train_begin(self):
self.batch = 0
self.epoch = 0
self.f = open(self.save_path, "a", 1)
self.log("\ton_train_begin")
def on_epoch_end(self, metrics):
log_str = f'\tEpoch:{self.epoch}\ttrn_loss:{self.last_loss}'
for (k,v) in zip(['val_loss', 'acc', 'top5', ''], metrics): log_str += f'\t{k}:{v}'
self.log(log_str)
self.epoch += 1
def on_batch_end(self, metrics):
self.last_loss = metrics
self.batch += 1
if self.batch % self.print_every == 0:
self.log(f'Epoch: {self.epoch} Batch: {self.batch} Metrics: {metrics}')
def on_train_end(self):
self.log("\ton_train_end")
self.f.close()
def log(self, string):
self.f.write(time.strftime("%Y-%m-%dT%H:%M:%S")+"\t"+string+"\n")
class DisableTransformCallback(Callback):
    def __init__(self, dataset, disable_at=120):
        super().__init__()
        self.dataset = dataset
        self.disable_at = disable_at
        self.epoch = 0
    def on_epoch_end(self, metrics):
        self.epoch += 1
        if self.epoch > self.disable_at:
            print('Disabling dataset transforms')
            self.dataset.transform = None
# Logging + saving models
def save_args(name, save_dir):
if (args.rank != 0) or not args.save_dir: return {}
log_dir = f'{save_dir}/training_logs'
os.makedirs(log_dir, exist_ok=True)
return {
'best_save_name': f'{name}_best_model',
'cycle_save_name': f'{name}',
'callbacks': [
ImagenetLoggingCallback(f'{log_dir}/{name}_log.txt')
]
}
def save_sched(sched, save_dir):
if (args.rank != 0) or not args.save_dir: return {}
log_dir = f'{save_dir}/training_logs'
sched.save_path = log_dir
sched.plot_loss()
sched.plot_lr()
def update_model_dir(learner, base_dir):
learner.tmp_path = f'{base_dir}/tmp'
os.makedirs(learner.tmp_path, exist_ok=True)
learner.models_path = f'{base_dir}/models'
os.makedirs(learner.models_path, exist_ok=True)
args_input = [
'/home/ubuntu/data/cifar10',
'--save-dir', '/home/ubuntu/data/cf_train_save/65e_pre18_clr30_rollout20_lr12_wd_2e4_ls256',
'-a', 'preact_resnet18',
# '-j', '6',
# '--prof',
'-b', '512',
# '--sz', '32',
'--loss-scale', '256',
'--fp16',
'--cycle-len', '65',
# '--epochs', '1',
'--use-clr', '30, 20, 0.95, 0.85',
'--wd', '2e-4',
'--lr', '1.2',
# '--train-half' # With fp16, iterations are so fast this doesn't matter
]
# This is important for speed: with fixed input sizes, cudnn.benchmark lets cuDNN
# autotune and cache the fastest convolution algorithms.
cudnn.benchmark = True
global args
args = parser.parse_args(args_input); args
if args.cycle_len > 1: args.cycle_len = int(args.cycle_len)
args.distributed = args.world_size > 1
args.gpu = 0
if args.distributed:
args.gpu = args.rank % torch.cuda.device_count()
if args.distributed:
torch.cuda.set_device(args.gpu)
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size)
if args.fp16:
assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
# create model
model = cifar10models.__dict__[args.arch] if args.arch in cifar10_names else models.__dict__[args.arch]
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = model(pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = model()
=> creating model 'preact_resnet18'
model = model.cuda()
if args.distributed:
model = DDP(model)
if args.train_half:
data, train_sampler = torch_loader(args.data, 16)
else:
data, train_sampler = torch_loader(args.data, args.sz)
learner = Learner.from_model_data(model, data)
# learner.crit = F.nll_loss
learner.crit = F.cross_entropy
learner.metrics = [accuracy]
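# Rough sketch of what fp16 training does here (fastai's fp16 utilities handle the details):
# the model runs in half precision while an fp32 master copy of the weights is kept for the
# optimizer step, and the loss is multiplied by --loss-scale before backward (gradients are
# divided by the same factor afterwards) so small fp16 gradients don't underflow.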
if args.fp16: learner.half()
if args.prof:
args.epochs = 1
args.cycle_len=.01
if args.use_clr:
args.use_clr = tuple(map(float, args.use_clr.split(',')))
# x,y = next(iter(data.trn_dl))
# plt.imshow(np.transpose(x[50], (1, 2, 0)))
# %pdb off
learner.lr_find()
81%|████████ | 79/98 [00:08<00:02, 9.44it/s, loss=12.8]
learner.sched.plot(n_skip=0)
# Half-size warm-up (--train-half)
if args.train_half:
save_dir = args.save_dir+'/128'
update_model_dir(learner, save_dir)
sargs = save_args('first_run_128', save_dir)
    sargs['callbacks'] += [DisableTransformCallback(data.trn_ds, 5)]
learner.fit(args.lr,args.epochs, cycle_len=45,
train_sampler=train_sampler,
wds=args.weight_decay,
use_clr_beta=args.use_clr,
loss_scale=args.loss_scale,
**sargs
)
save_sched(learner.sched, save_dir)
    data, train_sampler = torch_loader(args.data, args.sz)
learner.set_data(data)
# Full size
update_model_dir(learner, args.save_dir)
sargs = save_args('first_run', args.save_dir)
learner.fit(args.lr,args.epochs, cycle_len=args.cycle_len,
sampler=train_sampler,
wds=args.weight_decay,
use_clr_beta=args.use_clr,
loss_scale=args.loss_scale,
**sargs
)
save_sched(learner.sched, args.save_dir)
print('Finished!')
/home/ubuntu/git/imagenet-fast/cifar10/fastai/core.py:31: UserWarning: volatile was removed and now has no effect. Use `with torch.no_grad():` instead.
  x = Variable(T(x), volatile=volatile, requires_grad=requires_grad)
epoch trn_loss val_loss accuracy
0 1.597727 1.542461 0.4633
1 1.258801 1.103473 0.6208
2 0.972911 0.935949 0.6776
3 0.792725 0.751558 0.7408
4 0.682035 0.813925 0.7252
5 0.601548 0.701359 0.759
6 0.549059 0.65844 0.7854
7 0.519294 0.619365 0.7902
8 0.496927 0.621373 0.7929
9 0.480748 0.589091 0.801
10 0.474157 0.525024 0.8264
11 0.466476 0.742185 0.7579
12 0.460254 0.696792 0.7754
13 0.449546 1.378255 0.6583
14 0.447112 0.599804 0.8026
15 0.435825 0.904703 0.7283
16 0.436086 0.604347 0.7967
17 0.438422 0.789051 0.7612
18 0.422497 0.502078 0.8328
19 0.419885 0.685179 0.7851
20 0.44072 0.521533 0.8191
21 0.419834 0.604293 0.8046
22 0.416162 0.557601 0.8197
23 0.427043 0.571396 0.8209
24 0.428038 0.621226 0.8058
25 0.407386 0.566577 0.8201
26 0.444162 0.67172 0.7917
27 0.40936 0.660644 0.8053
28 0.414098 0.492572 0.8376
29 0.40194 0.693754 0.7975
30 0.39849 0.901087 0.7494
31 0.371218 0.663193 0.8005
32 0.383486 0.608334 0.8029
33 0.348831 0.881796 0.7536
34 0.334356 0.555002 0.8299
35 0.34575 0.880541 0.7458
36 0.33493 0.560533 0.8208
37 0.322747 0.74743 0.7853
38 0.315978 0.555727 0.8309
39 0.312028 0.56871 0.8131
40 0.297595 0.433099 0.8556
41 0.286247 0.429109 0.864
42 0.262504 0.474889 0.8534
43 0.255202 0.378953 0.8759
44 0.238911 0.38378 0.8698
45 0.22766 0.354805 0.8869
46 0.211996 0.370351 0.8824
47 0.190193 0.330143 0.8917
48 0.16702 0.343959 0.8917
49 0.141143 0.321683 0.9015
50 0.106302 0.252937 0.9213
51 0.072987 0.228383 0.9333
52 0.054684 0.221551 0.9334
53 0.044502 0.22194 0.9377
54 0.035976 0.229909 0.9376
55 0.032249 0.224786 0.9371
56 0.028004 0.228738 0.9378
57 0.024184 0.225704 0.938
58 0.021198 0.232919 0.9411
59 0.017807 0.234266 0.938
60 0.016931 0.233138 0.9413
61 0.015856 0.23176 0.9401
62 0.013712 0.231135 0.9407
63 0.012768 0.230449 0.9414
64 0.011091 0.231521 0.9417
Finished!
learner.save('cifar10-resnext-aug-preact')
learner.sched.plot()
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-19-d5621b0b5f81> in <module>()
----> 1 learner.sched.plot()

AttributeError: 'CircularLR_beta' object has no attribute 'plot'
learner.lr_find()
learner.sched.plot()
learner.fig
learner.fit(1e-5,1, cycle_len=15,
wds=args.weight_decay,
loss_scale=args.loss_scale,
**sargs
)
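# Test-time augmentation: learner.TTA() returns log-predictions over several augmented copies
# of each validation image; averaging the exponentiated predictions usually gives a small
# accuracy boost over a single plain pass.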
if args.use_tta:
log_preds,y = learner.TTA()
preds = np.mean(np.exp(log_preds),0)
acc = accuracy(torch.FloatTensor(preds),torch.LongTensor(y))
print('TTA acc:', acc)
with open(args.save_dir+'/tta_accuracy.txt', "a", 1) as f:
        f.write(time.strftime("%Y-%m-%dT%H:%M:%S")+f"\tTTA accuracy: {acc}\n")