#export
from nb_006b import *
from concurrent.futures import ProcessPoolExecutor
PATH = Path('data/camvid_orig')
PATH_X = PATH/'701_StillsRaw_full'
PATH_Y = PATH/'LabeledApproved_full'
PATH_Y_PROCESSED = PATH/'LabelProcessed'
label_csv = PATH/'label_colors.txt'
PATH_Y_PROCESSED.mkdir(exist_ok=True)
list(PATH_Y.iterdir())[0]
def get_y_fn(x_fn): return PATH_Y/f'{x_fn.name[:-4]}_L.png'
def get_y_proc_fn(y_fn): return PATH_Y_PROCESSED/f'{y_fn.name[:-6]}_P.png'
x_fns = get_image_files(PATH_X)
y_fns = [get_y_fn(o) for o in x_fns]
y_proc_fns = [get_y_proc_fn(o) for o in y_fns]
x_fns[:3],y_fns[:3],y_proc_fns[:3]
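# Quick sanity check: every raw still should have a matching colour label file
# before any conversion work starts.
missing = [fn for fn in y_fns if not fn.exists()]
assert not missing, f'{len(missing)} label files missing'
len(x_fns), len(y_fns)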
def parse_code(l):
    "Parse one line of label_colors.txt into an (R,G,B) tuple and a class name."
    a,b = [c for c in l.strip().split("\t") if c]
    return tuple(int(o) for o in a.split(' ')), b
label_codes,label_names = zip(*[parse_code(l) for l in open(label_csv)])
label_t = tensor(label_codes)
n_labels = len(label_codes)
label_codes[:5],label_names[:5], n_labels
for o in label_names: print(o)
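# parse_code splits each line on tabs and the first field on spaces, so a line in
# the file's format (hypothetical example) parses into an RGB tuple and a name:
parse_code("64 128 64\tAnimal")   # -> ((64, 128, 64), 'Animal')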
name2code = dict(zip(label_names, label_codes))
name2id = {v:k for k,v in enumerate(label_names)}
void_code = name2id['Void']
code2id = ByteTensor(256,256,256).zero_()+void_code
for i,code in enumerate(label_codes):
    if i != void_code: code2id[code]=i
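# Sanity check on the lookup table: indexing code2id with each label's RGB tuple
# should give back that label's id (Void keeps the default value).
assert all(int(code2id[c]) == i for i,c in enumerate(label_codes))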
def colors_to_codes(color_data):
    "Convert a (3,h,w) RGB mask tensor into an (h,w) tensor of class ids."
    n = len(color_data)
    idxs = tuple(color_data.reshape(n,-1).long())
    return code2id[idxs].view(color_data.shape[1:])
i = 0
x_img = open_image(x_fns[i])
y_img_mask = open_mask(y_fns[i])
y_img = Image(y_img_mask.data.int())
y_code = colors_to_codes(y_img.data)
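# The encoded mask is 2-D with one integer class id per pixel; a quick check that
# every id is a valid label index:
assert int(y_code.max()) < n_labels
y_code.shape, y_code.dtype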
def codes_to_colors(label_data):
    "Convert an (h,w) tensor of class ids back into an RGB Image via label_t."
    h,w = label_data.shape
    idxs = label_data.flatten().long()
    return Image(label_t.index_select(0, idxs).reshape(h,w,3).permute(2,0,1))
y_img2 = codes_to_colors(y_code)
y_img.show(), y_img2.show()
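# Round-trip check: colours -> codes -> colours -> codes should reproduce the
# original code mask exactly (Image.data gives the pixel tensor, as used above).
assert (colors_to_codes(y_img2.data) == y_code).all()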
def process_file(fns):
    "Convert one RGB label file into a single-channel class-id PNG, skipping existing output."
    yfn, pfn = fns
    if not pfn.exists():
        y_data = open_mask(yfn).px.long()
        proc_data = colors_to_codes(y_data)
        img = PIL.Image.fromarray(proc_data.numpy())
        img.save(pfn)
    return pfn
def process_label_files(y_fns, y_proc_fns):
    ex = ProcessPoolExecutor(16)
    for pfn in ex.map(process_file, zip(y_fns, y_proc_fns)):
        pass
%time process_label_files(y_fns, y_proc_fns)
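# Spot-check one processed file with PIL directly: it should load as a 2-D array
# of ids all below n_labels (assuming numpy is available as np via the star import).
check = np.array(PIL.Image.open(y_proc_fns[0]))
assert check.ndim == 2 and int(check.max()) < n_labels
check.shape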
def get_datasets(path, valid_pct=0.2):
    x_fns = get_image_files(path)
    y_fns = [get_y_fn(o) for o in x_fns]
    y_proc_fns = [get_y_proc_fn(o) for o in y_fns]
    train, valid = random_split(valid_pct, x_fns, y_proc_fns)
    return (MatchedImageDataset(*train), MatchedImageDataset(*valid))
def get_tfm_datasets(size):
    datasets = get_datasets(PATH_X)
    tfms = get_transforms(do_flip=True, max_rotate=4, max_lighting=0.2)
    return transform_datasets(*datasets, tfms=tfms, tfm_y=True, size=size)
default_norm,default_denorm = normalize_funcs(*imagenet_stats)
bs = 8
size = 512
tfms = get_transforms(do_flip=True, max_rotate=4, max_lighting=0.2)
def get_data(size, bs):
    return DataBunch.create(*get_tfm_datasets(size), bs=bs, tfms=default_norm)
data = get_data(size, bs)
x, y = data.train_ds[0]
x.shape, y.shape, y.data.dtype
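# One full training batch, mirroring the valid_dl inspection further down
# (assuming DataBunch exposes train_dl the same way it exposes valid_dl):
xb,yb = next(iter(data.train_dl))
xb.shape, yb.shape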
def accuracy_no_void(input, target):
    "Pixel accuracy over all pixels that are not labelled Void."
    target = target.squeeze()
    mask = target != void_code
    return (input.argmax(dim=1)[mask]==target[mask]).float().mean()
metrics=[accuracy_no_void]
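# Tiny synthetic check of the metric, in plain PyTorch (assuming torch came in via
# the star import, and that void_code is neither 0 nor 1): two items of three
# pixels each, one pixel labelled Void, one genuine mistake -> 4/5 correct.
_logits = torch.tensor([[[.9,.1,.9],[.1,.9,.1]],    # item 0 predicts classes 0,1,0
                        [[.9,.1,.1],[.1,.9,.9]]])   # item 1 predicts classes 0,1,1
_target = torch.tensor([[[0,1,1]],                  # item 0: last pixel is wrong
                        [[0,1,void_code]]])         # item 1: last pixel is Void -> ignored
accuracy_no_void(_logits, _target)                  # -> tensor(0.8000)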
lr = 1e-3
body = create_body(tvm.resnet34(True), 2)
model = DynamicUnet(body, n_classes=len(label_codes)).cuda()
learn = Learner(data, model, metrics=metrics, loss_fn=CrossEntropyFlat())
learn.split([model[0][6], model[1]])
learn.freeze()
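# With the body frozen, only the U-Net head (plus batchnorm, depending on the
# fastai version) should still be trainable; parameter counts confirm that cheaply.
n_train = sum(p.numel() for p in model.parameters() if p.requires_grad)
n_total = sum(p.numel() for p in model.parameters())
n_train, n_total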
lr_find(learn)
learn.recorder.plot()
lr = 1e-2
learn.fit_one_cycle(6, slice(lr), pct_start=0.05)
learn.save('u0')
learn.load('u0')
x,y = next(iter(learn.data.valid_dl))
py = learn.model(x).detach()
py = py.softmax(dim=1).max(dim=1, keepdim=True)[1]
x,y,py = x.cpu(),y.cpu(),py.cpu()
x = default_denorm(x)
n = 4
fig, axs = plt.subplots(n,3,figsize=(10,10), sharey=True)
for i in range(n):
    Image(x[i]).show(ax=axs[i][0])
    codes_to_colors(y[i].squeeze()).show(ax=axs[i][1])
    codes_to_colors(py[i].squeeze()).show(ax=axs[i][2])
learn.unfreeze()
lr=1e-2
learn.fit_one_cycle(6, slice(lr/100,lr), pct_start=0.05)
size=640
bs = 4
learn.data = get_data(size, bs)
#learn.freeze()
learn.fit_one_cycle(6, slice(lr), pct_start=0.05)
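# A manual pass over the validation set with the same metric, independent of the
# Learner's own reporting (model and batches are already on the GPU, as above).
learn.model.eval()
accs = []
with torch.no_grad():
    for xb,yb in learn.data.valid_dl:
        accs.append(accuracy_no_void(learn.model(xb), yb).item())
sum(accs)/len(accs)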