# Generated from the fastai docs notebooks. Each section builds a DataBunch,
# serializes its blueprint with `data.export()`, then reloads it with
# `*DataBunch.load_empty` so a saved model can run inference without the
# original training data. Covers vision (classification, multi-label,
# points, segmentation), text (LM + classifier) and tabular applications.

from fastai import *
from fastai.gen_doc.nbdoc import *

jekyll_note("""As usual, this page is generated from a notebook that you can find in the docs_src folder of the [fastai repo](https://github.com/fastai/fastai). We use the saved models from [this tutorial](/tutorial.data.html) to have this notebook run fast. """)

# ---------------------------------------------------------------------------
# Vision: single-label classification (MNIST_TINY)
# ---------------------------------------------------------------------------
from fastai import *
from fastai.vision import *

mnist = untar_data(URLs.MNIST_TINY)
tfms = get_transforms(do_flip=False)
data = (ImageItemList.from_folder(mnist)
        .split_by_folder()
        .label_from_folder()
        .transform(tfms, size=32)
        .databunch()
        .normalize(imagenet_stats))
data.export()  # saves the data blueprint (classes, transforms, normalization)

# Rebuild an empty databunch from the export; validation transforms only.
empty_data = ImageDataBunch.load_empty(mnist, tfms=tfms[1], size=32).normalize(imagenet_stats)
learn = create_cnn(empty_data, models.resnet18)
learn.load('mini_train')
img = data.train_ds[0][0]
learn.predict(img)

# ---------------------------------------------------------------------------
# Vision: multi-label classification (PLANET_TINY)
# ---------------------------------------------------------------------------
planet = untar_data(URLs.PLANET_TINY)
planet_tfms = get_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.)
data = (ImageItemList.from_csv(planet, 'labels.csv', folder='train', suffix='.jpg')
        .random_split_by_pct()
        .label_from_df(sep=' ')  # space-separated tags -> multi-label
        .transform(planet_tfms, size=128)
        .databunch()
        .normalize(imagenet_stats))
data.export()

# BUGFIX: the original reused the MNIST `tfms` and size=32 here; inference
# must use the same transforms/size this databunch was built with.
empty_data = ImageDataBunch.load_empty(planet, tfms=planet_tfms[1], size=128).normalize(imagenet_stats)
learn = create_cnn(empty_data, models.resnet18)
learn.load('mini_train')
img = data.train_ds[0][0]
learn.predict(img)
learn.predict(img, thresh=0.3)  # custom decision threshold for multi-label

# ---------------------------------------------------------------------------
# Vision: points regression (BIWI head-pose sample)
# ---------------------------------------------------------------------------
biwi = untar_data(URLs.BIWI_SAMPLE)
# Filename -> head-center coordinates, used as the regression target.
with open(biwi/'centers.pkl', 'rb') as f:
    fn2ctr = pickle.load(f)
data = (ImageItemList.from_folder(biwi)
        .random_split_by_pct()
        .label_from_func(lambda o: fn2ctr[o.name], label_cls=PointsItemList)
        .transform(get_transforms(), tfm_y=True, size=(120,160))
        .databunch()
        .normalize(imagenet_stats))
data.export()

# BUGFIX: size was (120,60) in the original; the databunch above uses (120,160).
empty_data = ImageDataBunch.load_empty(biwi, tfms=get_transforms()[1], tfm_y=True,
                                       size=(120,160)).normalize(imagenet_stats)
learn = create_cnn(empty_data, models.resnet18)
learn.load('mini_train')
img = data.train_ds[0][0]
learn.predict(img)
img.show(y=learn.predict(img)[0])

# ---------------------------------------------------------------------------
# Vision: segmentation (CAMVID_TINY)
# ---------------------------------------------------------------------------
camvid = untar_data(URLs.CAMVID_TINY)
path_lbl = camvid/'labels'
path_img = camvid/'images'
codes = np.loadtxt(camvid/'codes.txt', dtype=str)
get_y_fn = lambda x: path_lbl/f'{x.stem}_P{x.suffix}'  # image file -> mask file
data = (SegmentationItemList.from_folder(path_img)
        .random_split_by_pct()
        .label_from_func(get_y_fn, classes=codes)
        .transform(get_transforms(), tfm_y=True, size=128)
        .databunch(bs=16, path=camvid)
        .normalize(imagenet_stats))
data.export()

empty_data = ImageDataBunch.load_empty(camvid, tfms=get_transforms()[1], tfm_y=True,
                                       size=128).normalize(imagenet_stats)
learn = Learner.create_unet(empty_data, models.resnet18)
learn.load('mini_train')
img = data.train_ds[0][0]
learn.predict(img)
img.show(y=learn.predict(img)[0])

# ---------------------------------------------------------------------------
# Text: language model and classifier (IMDB_SAMPLE)
# ---------------------------------------------------------------------------
from fastai import *
from fastai.text import *

imdb = untar_data(URLs.IMDB_SAMPLE)
# Reuse the vocabulary the pretrained models were built with.
with open(imdb/'tmp'/'itos.pkl', 'rb') as f:
    vocab = Vocab(pickle.load(f))
data_lm = (TextList.from_csv(imdb, 'texts.csv', cols='text', vocab=vocab)
           .random_split_by_pct()
           .label_for_lm()  # next-token prediction targets
           .databunch())
data_lm.export()

empty_data = TextLMDataBunch.load_empty(imdb)
learn = language_model_learner(empty_data)
learn.load('mini_train_lm')
learn.predict('This is a simple test of', n_words=20)

data_clas = (TextList.from_csv(imdb, 'texts.csv', cols='text', vocab=vocab)
             .split_from_df(col='is_valid')
             .label_from_df(cols='label')
             .databunch(bs=42))
data_clas.export()

empty_data = TextClasDataBunch.load_empty(imdb)
learn = text_classifier_learner(empty_data)
learn.load('mini_train_clas')
learn.predict('I really loved that movie!')

# ---------------------------------------------------------------------------
# Tabular (ADULT_SAMPLE): train a small model, then reload for inference
# ---------------------------------------------------------------------------
from fastai import *
from fastai.tabular import *

adult = untar_data(URLs.ADULT_SAMPLE)
df = pd.read_csv(adult/'adult.csv')
dep_var = '>=50k'
cat_names = ['workclass', 'education', 'marital-status', 'occupation',
             'relationship', 'race', 'sex', 'native-country']
cont_names = ['education-num', 'hours-per-week', 'age', 'capital-loss',
              'fnlwgt', 'capital-gain']
procs = [FillMissing, Categorify, Normalize]  # applied to train, replayed at inference
data = (TabularList.from_df(df, path=adult, cat_names=cat_names,
                            cont_names=cont_names, procs=procs)
        .split_by_idx(valid_idx=range(800,1000))
        .label_from_df(cols=dep_var)
        .databunch())
learn = tabular_learner(data, layers=[200,100], metrics=accuracy)
learn.fit(1, 1e-2)
learn.save('mini_train')
data.export()

data = TabularDataBunch.load_empty(adult)
learn = tabular_learner(data, layers=[200,100])
learn.load('mini_train')
learn.predict(df.iloc[0])