---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-10-e85309452eba> in <module>
----> 1 learn.fit_one_cycle(1,1e-2)
/mnt/nvme1/fast.ai-1/br/fastai/master/fastai/train.py in fit_one_cycle(learn, cyc_len, max_lr, moms, div_factor, pct_start, wd, callbacks, **kwargs)
20 callbacks.append(OneCycleScheduler(learn, max_lr, moms=moms, div_factor=div_factor,
21 pct_start=pct_start, **kwargs))
---> 22 learn.fit(cyc_len, max_lr, wd=wd, callbacks=callbacks)
23
24 def lr_find(learn:Learner, start_lr:Floats=1e-7, end_lr:Floats=10, num_it:int=100, stop_div:bool=True, **kwargs:Any):
/mnt/nvme1/fast.ai-1/br/fastai/master/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
174 callbacks = [cb(self) for cb in self.callback_fns] + listify(callbacks)
175 fit(epochs, self.model, self.loss_func, opt=self.opt, data=self.data, metrics=self.metrics,
--> 176 callbacks=self.callbacks+callbacks)
177
178 def create_opt(self, lr:Floats, wd:Floats=0.)->None:
/mnt/nvme1/fast.ai-1/br/fastai/master/fastai/utils/mem.py in wrapper(*args, **kwargs)
68 except:
69 type, val, tb = get_ref_free_exc_info() # must!
---> 70 raise type(val).with_traceback(tb) from None
71 return wrapper
/mnt/nvme1/fast.ai-1/br/fastai/master/fastai/utils/mem.py in wrapper(*args, **kwargs)
65 def wrapper(*args, **kwargs):
66 try:
---> 67 return func(*args, **kwargs)
68 except:
69 type, val, tb = get_ref_free_exc_info() # must!
/mnt/nvme1/fast.ai-1/br/fastai/master/fastai/basic_train.py in fit(***failed resolving arguments***)
96 except Exception as e:
97 exception = e
---> 98 raise e
99 finally: cb_handler.on_train_end(exception)
100
/mnt/nvme1/fast.ai-1/br/fastai/master/fastai/basic_train.py in fit(***failed resolving arguments***)
86 for xb,yb in progress_bar(data.train_dl, parent=pbar):
87 xb, yb = cb_handler.on_batch_begin(xb, yb)
---> 88 loss = loss_batch(model, xb, yb, loss_func, opt, cb_handler)
89 if cb_handler.on_batch_end(loss): break
90
/mnt/nvme1/fast.ai-1/br/fastai/master/fastai/basic_train.py in loss_batch(***failed resolving arguments***)
18 if not is_listy(xb): xb = [xb]
19 if not is_listy(yb): yb = [yb]
---> 20 out = model(*xb)
21 out = cb_handler.on_loss_begin(out)
22
~/anaconda3/envs/fastai/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(***failed resolving arguments***)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)
~/anaconda3/envs/fastai/lib/python3.7/site-packages/torch/nn/modules/container.py in forward(***failed resolving arguments***)
90 def forward(self, input):
91 for module in self._modules.values():
---> 92 input = module(input)
93 return input
94
~/anaconda3/envs/fastai/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(***failed resolving arguments***)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)
~/anaconda3/envs/fastai/lib/python3.7/site-packages/torch/nn/modules/container.py in forward(***failed resolving arguments***)
90 def forward(self, input):
91 for module in self._modules.values():
---> 92 input = module(input)
93 return input
94
~/anaconda3/envs/fastai/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(***failed resolving arguments***)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)
~/anaconda3/envs/fastai/lib/python3.7/site-packages/torch/nn/modules/container.py in forward(***failed resolving arguments***)
90 def forward(self, input):
91 for module in self._modules.values():
---> 92 input = module(input)
93 return input
94
~/anaconda3/envs/fastai/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(***failed resolving arguments***)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)
~/anaconda3/envs/fastai/lib/python3.7/site-packages/torchvision/models/resnet.py in forward(***failed resolving arguments***)
47
48 if self.downsample is not None:
---> 49 residual = self.downsample(x)
50
51 out += residual
~/anaconda3/envs/fastai/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(***failed resolving arguments***)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)
~/anaconda3/envs/fastai/lib/python3.7/site-packages/torch/nn/modules/container.py in forward(***failed resolving arguments***)
90 def forward(self, input):
91 for module in self._modules.values():
---> 92 input = module(input)
93 return input
94
~/anaconda3/envs/fastai/lib/python3.7/site-packages/torch/nn/modules/module.py in __call__(***failed resolving arguments***)
487 result = self._slow_forward(*input, **kwargs)
488 else:
--> 489 result = self.forward(*input, **kwargs)
490 for hook in self._forward_hooks.values():
491 hook_result = hook(self, input, result)
~/anaconda3/envs/fastai/lib/python3.7/site-packages/torch/nn/modules/batchnorm.py in forward(***failed resolving arguments***)
74 input, self.running_mean, self.running_var, self.weight, self.bias,
75 self.training or not self.track_running_stats,
---> 76 exponential_average_factor, self.eps)
77
78 def extra_repr(self):
~/anaconda3/envs/fastai/lib/python3.7/site-packages/torch/nn/functional.py in batch_norm(***failed resolving arguments***)
1621 return torch.batch_norm(
1622 input, weight, bias, running_mean, running_var,
-> 1623 training, momentum, eps, torch.backends.cudnn.enabled
1624 )
1625
RuntimeError: CUDA out of memory. Tried to allocate 6.12 MiB (GPU 0; 7.93 GiB total capacity; 7.34 GiB already allocated; 2.56 MiB free; 3.15 MiB cached)
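
The line above is the real failure: a BatchNorm layer inside a ResNet downsample block asks for only 6.12 MiB, but the 7.93 GiB card already has 7.34 GiB allocated, so even a tiny request fails. The usual remedies are to make each batch cheaper (smaller batch size, smaller input images, or a smaller backbone) and to release whatever the failed run still holds before retrying. Below is a minimal sketch in fastai v1 style; the dataset path, batch size, and image size are illustrative placeholders, not values from the original run.

    from fastai.vision import *   # fastai v1 star import, as in the course notebooks
    import gc
    import torch

    # 1. Release what the failed attempt still holds on the GPU.
    del learn                     # assumes the old Learner from the failed cell is still in scope
    gc.collect()
    torch.cuda.empty_cache()

    # 2. Rebuild the data with a cheaper batch: a smaller bs and/or smaller images
    #    shrink the activation maps that were filling the 7.93 GiB card.
    path = 'data/images'          # hypothetical dataset location, not from the original run
    data = ImageDataBunch.from_folder(
        path,
        ds_tfms=get_transforms(),
        size=224,                 # try 224 -> 160 -> 128 if it still does not fit
        bs=16,                    # halve the batch size until the forward pass fits
    ).normalize(imagenet_stats)

    # 3. Recreate the learner (create_cnn in this era of fastai v1; later renamed
    #    cnn_learner) and retry the same schedule.
    learn = create_cnn(data, models.resnet34, metrics=accuracy)
    learn.fit_one_cycle(1, 1e-2)

If neither bs nor size can be reduced further, a smaller backbone such as models.resnet18 or mixed precision via learn.to_fp16() are the other common trades in fastai v1, and fastai.utils.mem, the module already wrapping fit in the traceback above, provides helpers for inspecting free GPU memory between attempts.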