import pickle,gzip,math,os,time,shutil,torch,matplotlib as mpl, numpy as np
from pathlib import Path
from torch import tensor
mpl.rcParams['image.cmap'] = 'gray'
torch.set_printoptions(precision=2, linewidth=140, sci_mode=False)
np.set_printoptions(precision=2, linewidth=140)
path_data = Path('data')
path_gz = path_data/'mnist.pkl.gz'
with gzip.open(path_gz, 'rb') as f: ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding='latin-1')
x_train, y_train, x_valid, y_valid = map(tensor, [x_train, y_train, x_valid, y_valid])
n,m = x_train.shape
c = y_train.max()+1
n,m,c
(50000, 784, tensor(10))
# num hidden
nh = 50
w1 = torch.randn(m,nh)
b1 = torch.zeros(nh)
w2 = torch.randn(nh,1)
b2 = torch.zeros(1)
def lin(x, w, b): return x@w + b
t = lin(x_valid, w1, b1)
t.shape
torch.Size([10000, 50])
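As an aside (not in the original notebook): with unscaled randn weights, each activation is a sum of 784 products, so the activations come out with a standard deviation well above 1. That's part of why the mse loss below is so large.
t.mean(),t.std()  # std well above 1: unscaled randn init inflates the activations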
def relu(x): return x.clamp_min(0.)
t = relu(lin(x_valid, w1, b1))
t
tensor([[ 0.66, 0.00, 0.00, ..., 0.00, 0.00, 0.00],
[ 0.00, 10.93, 0.00, ..., 0.08, 0.00, 0.00],
[ 5.45, 3.59, 0.00, ..., 10.75, 8.27, 0.00],
...,
[ 0.00, 3.35, 5.53, ..., 0.00, 0.00, 0.00],
[ 0.00, 4.37, 5.65, ..., 0.97, 9.48, 0.00],
[ 5.59, 0.00, 3.30, ..., 0.00, 0.00, 0.00]])
def model(xb):
l1 = lin(xb, w1, b1)
l2 = relu(l1)
return lin(l2, w2, b2)
res = model(x_valid)
res.shape
torch.Size([10000, 1])
We need to get rid of that trailing (,1) axis in order to use mse.
res[:,0].shape
torch.Size([10000])
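To see why, here's a quick illustration (an aside, using throwaway tensors): subtracting a shape [n] target from a shape [n,1] output broadcasts to [n,n], silently computing every pairwise difference.
(torch.randn(5,1) - torch.randn(5)).shape  # torch.Size([5, 5]): every pairwise difference, not what we want!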
(Of course, mse is not a suitable loss function for multi-class classification; we'll switch to a better one soon. For now we'll use mse to keep things simple.)
def mse(output, targ): return (output[:,0]-targ).pow(2).mean()
y_train,y_valid = y_train.float(),y_valid.float()
preds = model(x_train)
preds.shape
torch.Size([50000, 1])
mse(preds, y_train)
tensor(1869.67)
from sympy import symbols,diff
x,y = symbols('x y')
diff(x**2, x)
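2*x
The same trick confirms the derivative we'll need for the backward pass below; for instance (an illustrative aside), the gradient of the squared error:
diff((x-y)**2, x)  # 2*x - 2*y, i.e. 2*(x - y): the mse gradient, before averaging over the batch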
def lin_grad(inp, out, w, b):
    # grad of matmul with respect to input
    inp.g = out.g @ w.t()
    # grad with respect to weights: outer product of each input row and output-grad row, summed over the batch
    w.g = (inp.unsqueeze(-1) * out.g.unsqueeze(1)).sum(0)
    # grad with respect to bias: output grads summed over the batch
    b.g = out.g.sum(0)
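A quick sanity check (an aside, with throwaway tensors ti and tg) that the broadcast-and-sum above is just a matrix product, which is how we'll write it in the class-based refactor later:
ti,tg = torch.randn(4,3),torch.randn(4,2)
assert torch.allclose((ti.unsqueeze(-1) * tg.unsqueeze(1)).sum(0), ti.t() @ tg)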
def forward_and_backward(inp, targ):
    # forward pass:
    l1 = inp @ w1 + b1
    l2 = relu(l1)
    out = l2 @ w2 + b2
    diff = out[:,0]-targ
    loss = diff.pow(2).mean()  # we don't actually need the loss for the backward pass!

    # backward pass:
    out.g = 2.*diff[:,None] / inp.shape[0]
    lin_grad(l2, out, w2, b2)
    l1.g = (l1>0).float() * l2.g  # grad of relu: pass the gradient through where the input was positive
    lin_grad(inp, l1, w1, b1)
forward_and_backward(x_train, y_train)
# Save for testing against later
w1g = w1.g.clone()
w2g = w2.g.clone()
b1g = b1.g.clone()
b2g = b2.g.clone()
ig = x_train.g.clone()
We cheat a little bit and use PyTorch autograd to check our results.
xt2 = x_train.clone().requires_grad_(True)
w12 = w1.clone().requires_grad_(True)
w22 = w2.clone().requires_grad_(True)
b12 = b1.clone().requires_grad_(True)
b22 = b2.clone().requires_grad_(True)
def forward(inp, targ):
l1 = inp @ w12 + b12
l2 = relu(l1)
out = l2 @ w22 + b22
return mse(out, targ)
loss = forward(xt2, y_train)
loss.backward()
from fastcore.test import test_close
test_close(w22.grad, w2g, eps=0.01)
test_close(b22.grad, b2g, eps=0.01)
test_close(w12.grad, w1g, eps=0.01)
test_close(b12.grad, b1g, eps=0.01)
test_close(xt2.grad, ig , eps=0.01)
class Relu():
def __call__(self, inp):
self.inp = inp
self.out = inp.clamp_min(0.)
return self.out
def backward(self): self.inp.g = (self.inp>0).float() * self.out.g
class Lin():
def __init__(self, w, b): self.w,self.b = w,b
def __call__(self, inp):
self.inp = inp
self.out = inp@self.w + self.b
return self.out
def backward(self):
self.inp.g = self.out.g @ self.w.t()
self.w.g = self.inp.t() @ self.out.g
self.b.g = self.out.g.sum(0)
class Mse():
def __call__(self, inp, targ):
self.inp = inp
self.targ = targ
self.out = (inp.squeeze() - targ).pow(2).mean()
return self.out
def backward(self):
self.inp.g = 2. * (self.inp.squeeze() - self.targ).unsqueeze(-1) / self.targ.shape[0]
class Model():
def __init__(self, w1, b1, w2, b2):
self.layers = [Lin(w1,b1), Relu(), Lin(w2,b2)]
self.loss = Mse()
def __call__(self, x, targ):
for l in self.layers: x = l(x)
return self.loss(x, targ)
def backward(self):
self.loss.backward()
for l in reversed(self.layers): l.backward()
model = Model(w1, b1, w2, b2)
%time loss = model(x_train, y_train)
CPU times: user 677 ms, sys: 42.1 ms, total: 719 ms
Wall time: 22.5 ms
%time model.backward()
CPU times: user 1.97 s, sys: 157 ms, total: 2.12 s
Wall time: 66.4 ms
test_close(w2g, w2.g, eps=0.01)
test_close(b2g, b2.g, eps=0.01)
test_close(w1g, w1.g, eps=0.01)
test_close(b1g, b1.g, eps=0.01)
test_close(ig, x_train.g, eps=0.01)
class Module():
    def __call__(self, *args):
        self.args = args                # stash the inputs for the backward pass
        self.out = self.forward(*args)  # subclasses implement forward...
        return self.out
    def forward(self): raise Exception('not implemented')
    def bwd(self): raise Exception('not implemented')
    def backward(self): self.bwd(self.out, *self.args)  # ...and bwd, which receives the stashed tensors
class Relu(Module):
def forward(self, inp): return inp.clamp_min(0.)
def bwd(self, out, inp): inp.g = (inp>0).float() * out.g
class Lin(Module):
def __init__(self, w, b): self.w,self.b = w,b
def forward(self, inp): return inp@self.w + self.b
    def bwd(self, out, inp):
        inp.g = out.g @ self.w.t()
        self.w.g = inp.t() @ out.g
        self.b.g = out.g.sum(0)
class Mse(Module):
def forward (self, inp, targ): return (inp.squeeze() - targ).pow(2).mean()
def bwd(self, out, inp, targ): inp.g = 2*(inp.squeeze()-targ).unsqueeze(-1) / targ.shape[0]
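Any new op now only needs forward and bwd. As a hypothetical example (not in the original notebook), here's a squaring layer and a standalone check of its gradient:
class Square(Module):
    def forward(self, inp): return inp*inp
    def bwd(self, out, inp): inp.g = 2.*inp * out.g
sq = Square()
sq_in = tensor([1., 2., 3.])
sq_out = sq(sq_in)
sq_out.g = torch.ones_like(sq_out)  # pretend upstream gradient of ones
sq.backward()
sq_in.g  # tensor([2., 4., 6.])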
model = Model(w1, b1, w2, b2)
%time loss = model(x_train, y_train)
CPU times: user 725 ms, sys: 0 ns, total: 725 ms
Wall time: 22.6 ms
%time model.backward()
CPU times: user 2 s, sys: 154 ms, total: 2.15 s
Wall time: 67.2 ms
test_close(w2g, w2.g, eps=0.01)
test_close(b2g, b2.g, eps=0.01)
test_close(w1g, w1.g, eps=0.01)
test_close(b1g, b1.g, eps=0.01)
test_close(ig, x_train.g, eps=0.01)
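With the gradients verified, a full training step would just be gradient descent on each parameter's .g; a minimal sketch (the learning rate lr is an assumed value, for illustration only):
lr = 0.5  # assumed learning rate, illustration only
for p in (w1, b1, w2, b2): p -= lr * p.g  # step each parameter against its gradient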
from torch import nn
import torch.nn.functional as F
class Linear(nn.Module):
def __init__(self, n_in, n_out):
super().__init__()
self.w = torch.randn(n_in,n_out).requires_grad_()
self.b = torch.zeros(n_out).requires_grad_()
def forward(self, inp): return inp@self.w + self.b
class Model(nn.Module):
def __init__(self, n_in, nh, n_out):
super().__init__()
self.layers = [Linear(n_in,nh), nn.ReLU(), Linear(nh,n_out)]
def __call__(self, x, targ):
for l in self.layers: x = l(x)
return F.mse_loss(x, targ[:,None])
model = Model(m, nh, 1)
loss = model(x_train, y_train)
loss.backward()
l0 = model.layers[0]
l0.b.grad
tensor([ -0.18, 5.17, -13.02, 3.17, -3.52, 8.76, -1.48, 11.78, -0.38, -1.35, -11.37, -2.88, 11.22, 0.80, 2.72, -2.02,
-5.94, 3.50, 0.52, -8.99, -0.72, 2.58, 0.65, 19.21, 0.83, 1.28, 1.27, -3.17, 16.17, 12.27, 1.43, 0.62,
24.14, 10.15, 8.03, -2.05, 3.81, 1.89, -2.60, 3.19, 8.60, -0.33, -0.06, 5.92, 47.10, -5.18, 3.15, 5.82,
-0.95, -1.54])
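One caveat worth noting (an aside): since self.layers is a plain Python list and w/b are plain tensors rather than nn.Parameters, nothing gets registered with nn.Module, so model.parameters() comes back empty. Autograd still works here only because we called requires_grad_ on each tensor ourselves.
len(list(model.parameters()))  # 0: nothing was registered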