# fastai v1 quick-start tutorial script (generated from a docs notebook).
# Runs one tiny end-to-end example per application: vision classification,
# multi-label classification, point regression, segmentation, text language
# modelling + classification, and tabular classification. Each section uses a
# *_TINY / *_SAMPLE dataset and very short training so it runs fast.
# NOTE(review): the original file had every statement fused onto three
# physical lines (invalid as a .py script); reformatted here — execution
# order and behavior are unchanged.

from fastai.basics import *
from fastai.gen_doc.nbdoc import *

jekyll_note("""As usual, this page is generated from a notebook that you can find in the docs_src folder of the fastai repo. The examples are all designed to run fast, which is why we use samples of the dataset, a resnet18 as a backbone and don't train for very long. You can change all of those parameters to run your own experiments! """)

from fastai.vision import *

# --- Vision: single-label classification on MNIST_TINY -----------------------
mnist = untar_data(URLs.MNIST_TINY)
tfms = get_transforms(do_flip=False)  # flipping digits would change their meaning
data = (ImageList.from_folder(mnist)
        .split_by_folder()            # train/valid come from folder names
        .label_from_folder()          # class label = parent folder name
        .transform(tfms, size=32)
        .databunch()
        .normalize(imagenet_stats))
data.show_batch()
data.show_batch(rows=3, figsize=(4,4))
learn = cnn_learner(data, models.resnet18, metrics=accuracy)
learn.fit_one_cycle(1,1e-2)
learn.save('mini_train')
learn.show_results()
learn.show_results(ds_type=DatasetType.Train, rows=4, figsize=(8,10))

# --- Vision: multi-label classification on PLANET_TINY ------------------------
planet = untar_data(URLs.PLANET_TINY)
planet_tfms = get_transforms(flip_vert=True, max_lighting=0.1, max_zoom=1.05, max_warp=0.)
data = (ImageList.from_csv(planet, 'labels.csv', folder='train', suffix='.jpg')
        .split_by_rand_pct()
        .label_from_df(label_delim=' ')  # space-separated tags -> multi-label targets
        .transform(planet_tfms, size=128)
        .databunch()
        .normalize(imagenet_stats))
data.show_batch(rows=2, figsize=(9,7))
learn = cnn_learner(data, models.resnet18)
learn.fit_one_cycle(5,1e-2)
learn.save('mini_train')
learn.show_results(rows=3, figsize=(12,15))

# --- Vision: point regression (head centers) on BIWI_SAMPLE -------------------
biwi = untar_data(URLs.BIWI_SAMPLE)
# fn2ctr maps an image filename to its target point; use a context manager so
# the pickle file is closed promptly (the original leaked the file handle).
with open(biwi/'centers.pkl', 'rb') as f:
    fn2ctr = pickle.load(f)
data = (PointsItemList.from_folder(biwi)
        .split_by_rand_pct(seed=42)
        .label_from_func(lambda o:fn2ctr[o.name])
        .transform(get_transforms(), tfm_y=True, size=(120,160))  # tfm_y: transform targets with the image
        .databunch()
        .normalize(imagenet_stats))
data.show_batch(rows=3, figsize=(9,6))
learn = cnn_learner(data, models.resnet18, lin_ftrs=[100], ps=0.05)
learn.fit_one_cycle(5, 5e-2)
learn.save('mini_train')
learn.show_results(rows=3)

# --- Vision: semantic segmentation on CAMVID_TINY -----------------------------
camvid = untar_data(URLs.CAMVID_TINY)
path_lbl = camvid/'labels'
path_img = camvid/'images'
codes = np.loadtxt(camvid/'codes.txt', dtype=str)         # class names, one per line
get_y_fn = lambda x: path_lbl/f'{x.stem}_P{x.suffix}'     # image path -> mask path
data = (SegmentationItemList.from_folder(path_img)
        .split_by_rand_pct()
        .label_from_func(get_y_fn, classes=codes)
        .transform(get_transforms(), tfm_y=True, size=128)
        .databunch(bs=16, path=camvid)
        .normalize(imagenet_stats))
data.show_batch(rows=2, figsize=(7,5))
jekyll_warn("This training is fairly unstable, you should use more epochs and the full dataset to get better results.")
learn = unet_learner(data, models.resnet18)
learn.fit_one_cycle(3,1e-2)
learn.save('mini_train')
learn.show_results()

from fastai.text import *

# --- Text: language model on IMDB_SAMPLE --------------------------------------
imdb = untar_data(URLs.IMDB_SAMPLE)
data_lm = (TextList.from_csv(imdb, 'texts.csv', cols='text')
           .split_by_rand_pct()
           .label_for_lm()   # target is the next token (language modelling)
           .databunch())
data_lm.save()
data_lm.show_batch()
learn = language_model_learner(data_lm, AWD_LSTM)
learn.fit_one_cycle(2, 1e-2)
learn.save('mini_train_lm')
learn.save_encoder('mini_train_encoder')  # encoder is reused by the classifier below
learn.show_results()

# --- Text: classifier reusing the LM vocab and fine-tuned encoder -------------
data_clas = (TextList.from_csv(imdb, 'texts.csv', cols='text', vocab=data_lm.vocab)
             .split_from_df(col='is_valid')
             .label_from_df(cols='label')
             .databunch(bs=42))
data_clas.show_batch()
learn = text_classifier_learner(data_clas, AWD_LSTM)
learn.load_encoder('mini_train_encoder')
learn.fit_one_cycle(2, slice(1e-3,1e-2))  # discriminative learning rates
learn.save('mini_train_clas')
learn.show_results()

from fastai.tabular import *

# --- Tabular: salary classification on ADULT_SAMPLE ---------------------------
adult = untar_data(URLs.ADULT_SAMPLE)
df = pd.read_csv(adult/'adult.csv')
dep_var = 'salary'
cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country']
cont_names = ['education-num', 'hours-per-week', 'age', 'capital-loss', 'fnlwgt', 'capital-gain']
procs = [FillMissing, Categorify, Normalize]  # applied train-first, then to valid
data = (TabularList.from_df(df, path=adult, cat_names=cat_names, cont_names=cont_names, procs=procs)
        .split_by_idx(valid_idx=range(800,1000))
        .label_from_df(cols=dep_var)
        .databunch())
data.show_batch()
learn = tabular_learner(data, layers=[200,100], metrics=accuracy)
learn.fit(5, 1e-2)
learn.save('mini_train')
learn.show_results()