%matplotlib inline
%reload_ext autoreload
%autoreload 2
import argparse
import os
import shutil
import time
from fastai.transforms import *
from fastai.dataset import *
from fastai.fp16 import *
from fastai.conv_learner import *
from pathlib import *
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
import models.cifar10 as cifar10models
from distributed import DistributedDataParallel as DDP
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
cifar10_names = sorted(name for name in cifar10models.__dict__
if name.islower() and not name.startswith("__")
and callable(cifar10models.__dict__[name]))
model_names = cifar10_names + model_names
#print(models.cifar10.__dict__)
#print(model_names)
# Example usage: python run_fastai.py /home/paperspace/ILSVRC/Data/CLS-LOC/ -a resnext_50_32x4d --epochs 1 -j 4 -b 64 --fp16
parser = argparse.ArgumentParser(description='PyTorch Cifar10 Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--save-dir', type=str, default=Path.home()/'imagenet_training',
help='Directory to save logs and models.')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet56',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet56)')
parser.add_argument('-j', '--workers', default=7, type=int, metavar='N',
                    help='number of data loading workers (default: 7)')
parser.add_argument('--epochs', default=1, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--cycle-len', default=95, type=float, metavar='N',
help='Length of cycle to run')
# parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
# help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=512, type=int,
                    metavar='N', help='mini-batch size (default: 512)')
parser.add_argument('--lr', '--learning-rate', default=0.8, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
# parser.add_argument('--print-freq', '-p', default=10, type=int,
# metavar='N', help='print frequency (default: 10)')
# parser.add_argument('--resume', default='', type=str, metavar='PATH',
# help='path to latest checkpoint (default: none)')
# parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
# help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model')
parser.add_argument('--fp16', action='store_true', help='Run model fp16 mode.')
parser.add_argument('--use-tta', default=True, type=bool, help='Validate model with TTA at the end of training.')
parser.add_argument('--train-half', action='store_true', help='Train model on half images. TODO: allow custom epochs and LR')
parser.add_argument('--sz', default=32, type=int, help='Size of transformed image.')
# parser.add_argument('--decay-int', default=30, type=int, help='Decay LR by 10 every decay-int epochs')
parser.add_argument('--use-clr', default='10,13.68,0.95,0.85', type=str,
help='div,pct,max_mom,min_mom. Pass in a string delimited by commas. Ex: "20,2,0.95,0.85"')
parser.add_argument('--loss-scale', type=float, default=128,
help='Loss scaling, positive power of 2 values can improve fp16 convergence.')
parser.add_argument('--prof', dest='prof', action='store_true', help='Only run a few iters for profiling.')
parser.add_argument('--dist-url', default='file://sync.file', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend')
parser.add_argument('--world-size', default=1, type=int,
help='Number of GPUs to use. Can either be manually set ' +
'or automatically set by using \'python -m multiproc\'.')
parser.add_argument('--rank', default=0, type=int,
help='Used for multi-process training. Can either be manually set ' +
'or automatically set by using \'python -m multiproc\'.')
class TorchModelData(ModelData):
def __init__(self, path, trn_dl, val_dl, aug_dl=None):
super().__init__(path, trn_dl, val_dl)
self.aug_dl = aug_dl
def torch_loader(data_path, size):
# Data loading code
traindir = os.path.join(data_path, 'train')
valdir = os.path.join(data_path, 'test')
    normalize = transforms.Normalize(mean=[0.4914, 0.48216, 0.44653], std=[0.24703, 0.24349, 0.26159])
scale_size = 40
padding = int((scale_size - size) / 2)
train_tfms = transforms.Compose([
transforms.RandomCrop(size, padding=padding),
transforms.ColorJitter(.25,.25,.25),
transforms.RandomRotation(2),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
train_dataset = datasets.ImageFolder(traindir, train_tfms)
train_sampler = (torch.utils.data.distributed.DistributedSampler(train_dataset)
if args.distributed else None)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_tfms = transforms.Compose([
# transforms.Resize(int(size*1.14)),
# transforms.CenterCrop(size),
transforms.ToTensor(),
normalize,
])
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, val_tfms),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
aug_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, train_tfms),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
train_loader = DataPrefetcher(train_loader)
val_loader = DataPrefetcher(val_loader)
aug_loader = DataPrefetcher(aug_loader)
if args.prof:
train_loader.stop_after = 200
val_loader.stop_after = 0
data = TorchModelData(data_path, train_loader, val_loader, aug_loader)
return data, train_sampler
# Seems to speed up training by ~2%
class DataPrefetcher():
def __init__(self, loader, stop_after=None):
self.loader = loader
self.dataset = loader.dataset
self.stream = torch.cuda.Stream()
self.stop_after = stop_after
self.next_input = None
self.next_target = None
def __len__(self):
return len(self.loader)
def preload(self):
try:
self.next_input, self.next_target = next(self.loaditer)
except StopIteration:
self.next_input = None
self.next_target = None
return
with torch.cuda.stream(self.stream):
            # async= is a reserved keyword in Python 3.7+; non_blocking= is the equivalent kwarg
            self.next_input = self.next_input.cuda(non_blocking=True)
            self.next_target = self.next_target.cuda(non_blocking=True)
def __iter__(self):
count = 0
self.loaditer = iter(self.loader)
self.preload()
while self.next_input is not None:
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
self.preload()
count += 1
yield input, target
if type(self.stop_after) is int and (count > self.stop_after):
break
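# Illustrative standalone usage of DataPrefetcher (some_dataset is a placeholder; torch_loader
# above wraps its DataLoaders the same way):
# prefetched = DataPrefetcher(torch.utils.data.DataLoader(some_dataset, batch_size=64))
# for x, y in prefetched:
#     ...  # x and y are already on the GPU, copied ahead of time on a side CUDA stream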
def top5(output, target):
    """Computes the top-5 accuracy for a batch of predictions."""
    top5 = 5
batch_size = target.size(0)
_, pred = output.topk(top5, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
correct_k = correct[:top5].view(-1).float().sum(0, keepdim=True)
return correct_k.mul_(1.0 / batch_size)
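# Quick sanity check of top5 on toy data (illustrative only; out and tgt are made-up tensors):
# with 10 classes, a target counts as correct if it appears anywhere in the 5 highest logits.
# out = torch.randn(8, 10)          # batch of 8 predictions over 10 classes
# tgt = torch.randint(0, 10, (8,))  # 8 random targets
# print(top5(out, tgt))             # fraction of targets found in the top 5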
class ImagenetLoggingCallback(Callback):
def __init__(self, save_path, print_every=50):
super().__init__()
self.save_path=save_path
self.print_every=print_every
def on_train_begin(self):
self.batch = 0
self.epoch = 0
self.f = open(self.save_path, "a", 1)
self.log("\ton_train_begin")
def on_epoch_end(self, metrics):
log_str = f'\tEpoch:{self.epoch}\ttrn_loss:{self.last_loss}'
for (k,v) in zip(['val_loss', 'acc', 'top5', ''], metrics): log_str += f'\t{k}:{v}'
self.log(log_str)
self.epoch += 1
def on_batch_end(self, metrics):
self.last_loss = metrics
self.batch += 1
if self.batch % self.print_every == 0:
self.log(f'Epoch: {self.epoch} Batch: {self.batch} Metrics: {metrics}')
def on_train_end(self):
self.log("\ton_train_end")
self.f.close()
def log(self, string):
self.f.write(time.strftime("%Y-%m-%dT%H:%M:%S")+"\t"+string+"\n")
class DisableTransformCallback(Callback):
    def __init__(self, dataset, disable_at=120):
        super().__init__()
        self.dataset = dataset
        self.disable_at = disable_at
        self.epoch = 0
    def on_epoch_end(self, metrics):
        self.epoch += 1
        if self.epoch > self.disable_at and self.dataset.transform is not None:
            print('Disabling dataset transforms')
            self.dataset.transform = None
# Logging + saving models
def save_args(name, save_dir):
if (args.rank != 0) or not args.save_dir: return {}
log_dir = f'{save_dir}/training_logs'
os.makedirs(log_dir, exist_ok=True)
return {
'best_save_name': f'{name}_best_model',
'cycle_save_name': f'{name}',
'callbacks': [
ImagenetLoggingCallback(f'{log_dir}/{name}_log.txt')
]
}
def save_sched(sched, save_dir):
if (args.rank != 0) or not args.save_dir: return {}
log_dir = f'{save_dir}/training_logs'
sched.save_path = log_dir
sched.plot_loss()
sched.plot_lr()
def update_model_dir(learner, base_dir):
learner.tmp_path = f'{base_dir}/tmp'
os.makedirs(learner.tmp_path, exist_ok=True)
learner.models_path = f'{base_dir}/models'
os.makedirs(learner.models_path, exist_ok=True)
args_input = [
'/home/paperspace/data/cifar10',
'--save-dir', '/home/paperspace/data/cifar_training/65e_pre18_clr30_rollout20_lr12_wd_2e4_ls256',
'-a', 'preact_resnet18',
# '-j', '6',
# '--prof',
'-b', '512',
# '--sz', '32',
'--loss-scale', '256',
'--fp16',
'--cycle-len', '65',
# '--epochs', '1',
'--use-clr', '30, 20, 0.95, 0.85',
'--wd', '2e-4',
'--lr', '1.2',
# '--train-half' # With fp16, iterations are so fast this doesn't matter
]
# This is important for speed
cudnn.benchmark = True
global args
args = parser.parse_args(args_input); args
if args.cycle_len > 1: args.cycle_len = int(args.cycle_len)
args.distributed = args.world_size > 1
args.gpu = 0
if args.distributed:
args.gpu = args.rank % torch.cuda.device_count()
if args.distributed:
torch.cuda.set_device(args.gpu)
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size)
if args.fp16:
assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
# create model
model = cifar10models.__dict__[args.arch] if args.arch in cifar10_names else models.__dict__[args.arch]
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = model(pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = model()
=> creating model 'preact_resnet18'
model = model.cuda()
if args.distributed:
model = DDP(model)
if args.train_half:
data, train_sampler = torch_loader(args.data, 16)
else:
data, train_sampler = torch_loader(args.data, args.sz)
learner = Learner.from_model_data(model, data)
# learner.crit = F.nll_loss
learner.crit = F.cross_entropy
learner.metrics = [accuracy]
if args.fp16: learner.half()
if args.prof:
args.epochs = 1
args.cycle_len=.01
if args.use_clr:
args.use_clr = tuple(map(float, args.use_clr.split(',')))
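# For the default args above, '30, 20, 0.95, 0.85' parses to
# (div, pct, max_mom, min_mom) = (30.0, 20.0, 0.95, 0.85).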
# x,y = next(iter(data.trn_dl))
# plt.imshow(np.transpose(x[50], (1, 2, 0)))
# %pdb off
learner.lr_find()
83%|████████▎ | 81/98 [00:09<00:01, 8.91it/s, loss=14.3]
learner.sched.plot(n_skip=0)
# 128x128
if args.train_half:
save_dir = args.save_dir+'/128'
update_model_dir(learner, save_dir)
sargs = save_args('first_run_128', save_dir)
    sargs['callbacks'] += [DisableTransformCallback(data.trn_dl.dataset, 5)]
learner.fit(args.lr,args.epochs, cycle_len=45,
train_sampler=train_sampler,
wds=args.weight_decay,
use_clr_beta=args.use_clr,
loss_scale=args.loss_scale,
**sargs
)
save_sched(learner.sched, save_dir)
    data, train_sampler = torch_loader(args.data, args.sz)
learner.set_data(data)
# Full size
update_model_dir(learner, args.save_dir)
sargs = save_args('first_run', args.save_dir)
learner.fit(args.lr,args.epochs, cycle_len=args.cycle_len,
sampler=train_sampler,
wds=args.weight_decay,
use_clr_beta=args.use_clr,
loss_scale=args.loss_scale,
**sargs
)
save_sched(learner.sched, args.save_dir)
print('Finished!')
/home/paperspace/imagenet-fast/cifar10/fastai/core.py:30: UserWarning: volatile was removed and now has no effect. Use `with torch.no_grad():` instead.
  x = Variable(T(x), volatile=volatile, requires_grad=requires_grad)
epoch trn_loss val_loss accuracy
0 1.661849 1.469639 0.4651
1 1.34131 1.210569 0.5532
2 1.041136 1.016662 0.6513
3 0.846846 0.787478 0.7239
4 0.700724 0.964983 0.6916
5 0.612886 0.63153 0.7725
6 0.578661 0.71027 0.7559
7 0.550861 0.656968 0.7773
8 0.505063 0.594844 0.7973
9 0.490741 0.613804 0.793
10 0.488837 1.197419 0.6402
11 0.471137 0.611046 0.7974
12 0.454262 0.658435 0.7856
13 0.443972 1.153738 0.6889
14 0.44017 0.641931 0.7936
15 0.457579 0.844746 0.7143
16 0.440278 0.664006 0.7853
17 0.446422 0.963016 0.7048
18 0.44191 0.699741 0.7564
19 0.417722 0.601077 0.8129
20 0.427053 0.716967 0.7698
21 0.425452 0.64302 0.7892
22 0.416377 0.648804 0.7901
23 0.416044 0.579311 0.8139
24 0.437921 0.787166 0.7687
25 0.429666 0.904709 0.7246
26 0.420549 0.529497 0.8317
27 0.41769 0.601814 0.8111
28 0.408542 1.103057 0.6798
29 0.409291 0.856053 0.7504
30 0.3872 0.600037 0.8025
31 0.370547 0.717882 0.7698
32 0.363282 0.846418 0.7516
33 0.36301 0.602078 0.8207
34 0.355719 0.655262 0.8023
35 0.350565 0.579733 0.8201
36 0.33239 0.539564 0.8233
37 0.337597 0.554243 0.8301
38 0.321028 0.530299 0.8364
39 0.303657 0.45167 0.8537
40 0.291951 0.434836 0.858
41 0.279641 0.479449 0.8465
42 0.269081 0.42819 0.8614
43 0.252817 0.41228 0.868
44 0.243981 0.418044 0.8631
45 0.228508 0.43718 0.8616
46 0.204227 0.376154 0.8773
47 0.192266 0.366968 0.8845
48 0.170918 0.36947 0.8873
49 0.138047 0.282216 0.9124
50 0.104735 0.255416 0.9231
51 0.073477 0.231352 0.9331
52 0.05243 0.218864 0.938
53 0.044043 0.214278 0.9395
54 0.035945 0.219941 0.9381
55 0.030622 0.221848 0.9394
56 0.027717 0.221275 0.9402
57 0.023291 0.221015 0.9419
58 0.021033 0.229324 0.9388
59 0.018059 0.229549 0.9404
60 0.015811 0.233008 0.9409
61 0.014571 0.228178 0.9429
62 0.013953 0.227541 0.9431
63 0.012732 0.229159 0.9428
64 0.011158 0.228971 0.9431
Finished!
learner.save('cifar10-resnext-aug-preact')
learner.sched.plot()
---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-19-d5621b0b5f81> in <module>()
----> 1 learner.sched.plot()

AttributeError: 'CircularLR_beta' object has no attribute 'plot'
learner.lr_find()
learner.sched.plot()
learner.fig
learner.fit(1e-5,1, cycle_len=15,
wds=args.weight_decay,
loss_scale=args.loss_scale,
**sargs
)
if args.use_tta:
log_preds,y = learner.TTA()
preds = np.mean(np.exp(log_preds),0)
acc = accuracy(torch.FloatTensor(preds),torch.LongTensor(y))
print('TTA acc:', acc)
with open(args.save_dir+'/tta_accuracy.txt', "a", 1) as f:
        f.write(time.strftime("%Y-%m-%dT%H:%M:%S")+f"\tTTA accuracy: {acc}\n")