# Notebook setup: install the workshop library directly from GitHub, then pull
# in its PyTorch helpers and the CIFAR-100 utilities (wildcard imports are the
# source of getCIFAR100/torchCIFAR100/BasicTrainableClassifier used below).
!pip install git+https://github.com/netbrainml/nbml.git
from nbml.pytorch import *
from nbml.workshops.cifar100.utils import *
from IPython.display import clear_output
clear_output()  # hide the noisy pip-install output in the notebook
# Fetch CIFAR-100 as train/test arrays, then wrap them into training and
# validation DataLoaders with batch size 128.
x_train, x_test, y_train, y_test = getCIFAR100()
tdl, vdl = torchCIFAR100(x_train, x_test, y_train, y_test, bs=128)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from fastai.torch_core import Module
class noop(Module):
    "Identity layer: returns its input unchanged; used as an inert placeholder."
    # Fix: define forward() rather than overriding __call__. fastai's Module is
    # an nn.Module, and overriding __call__ bypasses nn.Module's call machinery
    # (forward/backward hooks etc.); forward keeps it intact with identical output.
    def forward(self, x): return x
class Flatten(Module):
    "Collapse all dims except the batch dim: (N, C, H, W) -> (N, C*H*W)."
    # Fix: define forward() rather than overriding __call__, so nn.Module's
    # call machinery (hooks) still runs; the computed result is unchanged.
    def forward(self, x): return torch.flatten(x, start_dim=1)
class resblock(Module):
    "Simplified residual block: 3x3 conv plus shortcut, summed, ReLU, optional 2x2 max-pool."
    def __init__(self, ni, nf, mp=False):
        # Main branch: 3x3 conv, padding=1 keeps the spatial size.
        self.res = nn.Conv2d(ni, nf, 3, padding=1)
        # Shortcut branch: 1x1 projection only when channel counts differ.
        self.id = noop() if ni == nf else nn.Conv2d(ni, nf, 1)
        self.act = nn.ReLU(inplace=True)
        # Optional downsampling stage; noop keeps the Sequential-style call uniform.
        self.mp = noop() if not mp else nn.MaxPool2d(2, 2)

    def forward(self, x):
        summed = self.res(x) + self.id(x)
        return self.mp(self.act(summed))
class ResConv(BasicTrainableClassifier):
    "CIFAR-100 classifier built from resblocks with stepwise widening (64->128->256)."
    def __init__(self, ni, nc):
        super().__init__()
        # Backbone: pairs of resblocks, the second of each pair halving H/W.
        body = [
            resblock(ni, 64),
            resblock(64, 64, mp=True),
            resblock(64, 128),
            resblock(128, 128, mp=True),
            resblock(128, 256),
            resblock(256, 256, mp=True),
        ]
        # Head: global max-pool to 1x1, flatten, then a linear classifier
        # over the final 256 feature channels.
        head = [
            nn.AdaptiveMaxPool2d(1),
            Flatten(),
            nn.Linear(256, nc),
        ]
        self.model = nn.Sequential(*body, *head)

    def forward(self, x):
        return self.model(x)
# Train the residual-style model: 3 input channels (RGB), 100 CIFAR-100 classes.
# NOTE(review): .cuda() assumes a GPU runtime is available.
resnet = ResConv(3,100).cuda()
resnet.fit(tdl, vdl, cbs=True, epochs=10)
class denseblock(Module):
    "Concat block: a 3x3-conv branch and a (possibly 1x1-projected) shortcut branch, concatenated."
    def __init__(self, ni, nf, mp=False):
        # Main branch: 3x3 conv, padding=1 keeps the spatial size.
        self.res = nn.Conv2d(ni, nf, 3, padding=1)
        # Shortcut branch: 1x1 projection only when channel counts differ.
        self.id = noop() if ni == nf else nn.Conv2d(ni, nf, 1)
        self.act = nn.ReLU(inplace=True)
        self.mp = noop() if not mp else nn.MaxPool2d(2, 2)

    def forward(self, x):
        # Channel-wise concat: output has 2*nf channels (nf + nf, or nf + ni when ni == nf).
        merged = torch.cat([self.res(x), self.id(x)], dim=1)
        return self.mp(self.act(merged))
class DenseConv(BasicTrainableClassifier):
    "CIFAR-100 classifier built from denseblocks; each block doubles its channels via concat."
    def __init__(self, ni, nc):
        super().__init__()
        # The widths below track the concat-doubling of each denseblock.
        body = [
            denseblock(ni, 64),             # concat -> 128 channels
            nn.Conv2d(128, 128, 3),
            nn.ReLU(inplace=True),
            denseblock(128, 128, mp=True),  # concat -> 256 channels
            nn.Conv2d(256, 256, 3),
            nn.ReLU(inplace=True),
            denseblock(256, 512, mp=True),  # concat -> 1024 channels
        ]
        # Head: global max-pool to 1x1, flatten, linear classifier over 1024 features.
        head = [
            nn.AdaptiveMaxPool2d(1),
            Flatten(),
            nn.Linear(1024, nc),
        ]
        self.model = nn.Sequential(*body, *head)

    def forward(self, x):
        return self.model(x)
# Train the concat-style model under the same settings as the others (10 epochs, GPU).
densenet = DenseConv(3,100).cuda()
densenet.fit(tdl, vdl, cbs=True, epochs=10)
def conv(nc, nf, ks=3, stride=1, pad=0, mp=False):
    """Conv2d -> ReLU [-> MaxPool2d(2,2)] building block.

    Args:
        nc: input channels.
        nf: output channels.
        ks, stride, pad: kernel size, stride and padding for the Conv2d.
        mp: when True, append a 2x2 max-pool that halves spatial dims.
    Returns:
        nn.Sequential implementing the block.
    """
    # nn.Identity replaces the hand-rolled `noop` placeholder: same behavior,
    # standard torch building block, and this helper no longer depends on a
    # locally defined class.
    return nn.Sequential(nn.Conv2d(nc, nf, ks, stride, pad),
                         nn.ReLU(inplace=True),
                         nn.MaxPool2d(2, 2) if mp else nn.Identity())
class ConvNet(BasicTrainableClassifier):
    "Plain convolutional baseline (no skip connections) mirroring ResConv's widths."
    def __init__(self, ni, nc):
        super().__init__()
        # Backbone: conv pairs with stepwise widening; every second block max-pools.
        body = [
            conv(ni, 64),
            conv(64, 64, mp=True),
            conv(64, 128),
            conv(128, 128, mp=True),
            conv(128, 256, pad=1),
            conv(256, 256, mp=True),
        ]
        # Head: global max-pool to 1x1, flatten, linear classifier over 256 features.
        head = [
            nn.AdaptiveMaxPool2d(1),
            Flatten(),
            nn.Linear(256, nc),
        ]
        self.model = nn.Sequential(*body, *head)

    def forward(self, x):
        return self.model(x)
# Train the no-skip baseline under the same settings for comparison.
noskip = ConvNet(3,100).cuda()
noskip.fit(tdl, vdl, cbs=True, epochs=10)
# Display training curves for each model (bare expression displays in a notebook).
# NOTE(review): this only renders if `plot` is a property on
# BasicTrainableClassifier; if it is a method, these lines are no-ops and
# need `()` — confirm against nbml.
noskip.plot
resnet.plot
densenet.plot