%reload_ext autoreload
%autoreload 2
%matplotlib inline
#export
from fastai import *
from fastai.vision import *
import fastai   # so we can set fastai.defaults.device below
tfms = get_transforms()   # default data augmentation transforms
bs = 64                   # batch size
path = untar_data(URLs.PETS)/'images'   # download the Oxford-IIIT Pet dataset
data = (ImageFileList.from_folder(path)
        .label_from_re(r'^(.*)_\d+.jpg$')
        .random_split_by_pct(0.2)
        .datasets(ImageClassificationDataset)
        .transform(tfms, size=224)
        .databunch(bs=bs)
        .normalize(imagenet_stats))
img = open_image(get_image_files(path)[0])   # grab a single image to test predictions on
img.show()
data.show_batch(3)   # sanity-check a few transformed training images
learn = create_cnn(data, models.resnet34, metrics=accuracy)
# Run these two lines once to train and save the model
# learn.fit_one_cycle(1)
# learn.save('one-epoch')
learn.load('one-epoch')
pred_class,pred_idx,outputs = learn.predict(img)
pred_class
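# `outputs` holds one probability per class. A minimal sketch of reading off the
# top 3 predictions with their class names (assumes `data.classes` is still the
# label list the model was trained with):
top_probs, top_idxs = outputs.topk(3)
[(data.classes[int(i)], float(p)) for p, i in zip(top_probs, top_idxs)]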
fastai.defaults.device = torch.device('cpu')   # run inference on the CPU from here on
# In production you wouldn't use data.classes - instead you'd load your class
# list from somewhere else (see the sketch below)
data2 = ImageDataBunch.single_from_classes(
    path, data.classes, tfms=tfms, size=224).normalize(imagenet_stats)
learn = create_cnn(data2, models.resnet34)
learn.load('one-epoch')   # load the weights saved earlier into the CPU learner
pred_class,pred_idx,outputs = learn.predict(img)
pred_class
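# The comment above suggests not relying on data.classes in production; a minimal
# sketch of persisting the class list with the standard library instead (the file
# name 'classes.json' is illustrative):
import json
# at training time, next to the saved weights:
(path/'classes.json').write_text(json.dumps(data.classes))
# at inference time, on the production machine:
classes = json.loads((path/'classes.json').read_text())
data2 = ImageDataBunch.single_from_classes(
    path, classes, tfms=tfms, size=224).normalize(imagenet_stats)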