This notebook defines layers like those in the Swift for TensorFlow Deep Learning Library, plus some experimental extra features for the fast.ai course: most notably, each layer carries a `LayerDelegate` that is notified of every activation the layer produces.
%install '.package(path: "$cwd/FastaiNotebook_00_load_data")' FastaiNotebook_00_load_data
Installing packages:
	.package(path: "/home/ubuntu/fastai_docs/dev_swift/FastaiNotebook_00_load_data")
		FastaiNotebook_00_load_data
With SwiftPM flags: []
Working in: /tmp/tmpdhnj0dux/swift-install
Fetching https://github.com/mxcl/Path.swift
Fetching https://github.com/JustHTTP/Just
Completed resolution in 1.22s
Cloning https://github.com/JustHTTP/Just
Resolving https://github.com/JustHTTP/Just at 0.7.1
Cloning https://github.com/mxcl/Path.swift
Resolving https://github.com/mxcl/Path.swift at 0.16.2
Compile Swift Module 'Path' (9 sources)
Compile Swift Module 'Just' (1 sources)
Compile Swift Module 'FastaiNotebook_00_load_data' (1 sources)
Compile Swift Module 'jupyterInstalledPackages' (1 sources)
Linking ./.build/x86_64-unknown-linux/debug/libjupyterInstalledPackages.so
Initializing Swift...
Installation complete!
//export
import TensorFlow
public protocol FALayer: Layer {
    var delegate: LayerDelegate<Output> { get set }

    @differentiable
    func forward(_ input: Input) -> Output
}
// TODO: This default implementation doesn't actually work yet, so for now we
// paste it into every layer definition.
// extension FALayer {
//     @differentiable
//     public func applied(to input: Input) -> Output {
//         let activation = forward(input)
//         delegate.didProduceActivation(activation)
//         return activation
//     }
// }
open class LayerDelegate<Output> {
    public init() {}

    open func didProduceActivation(_ activation: Output) {}
}
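Subclassing `LayerDelegate` is how you hook into a layer's activations without touching the layer itself. As an illustrative sketch (the `ActivationStats` name is ours, not part of the exported code), a delegate that records the mean of every activation a layer produces might look like this:
// Illustrative only: record the mean of each activation, e.g. to monitor
// layer statistics during training.
class ActivationStats: LayerDelegate<Tensor<Float>> {
    var means: [Float] = []
    override func didProduceActivation(_ activation: Tensor<Float>) {
        means.append(activation.mean().scalarized())
    }
}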
//export
@_fixed_layout
public struct FADense<Scalar: TensorFlowFloatingPoint>: FALayer {
    public var weight: Tensor<Scalar>
    public var bias: Tensor<Scalar>
    public typealias Activation = @differentiable (Tensor<Scalar>) -> Tensor<Scalar>
    @noDerivative public let activation: Activation
    @noDerivative public var delegate: LayerDelegate<Output> = LayerDelegate()

    public init(
        weight: Tensor<Scalar>,
        bias: Tensor<Scalar>,
        activation: @escaping Activation
    ) {
        self.weight = weight
        self.bias = bias
        self.activation = activation
    }

    @differentiable
    public func forward(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        return activation(matmul(input, weight) + bias)
    }

    // Pasted from the FALayer TODO above: call forward, then notify the delegate.
    @differentiable
    public func applied(to input: Tensor<Scalar>) -> Tensor<Scalar> {
        let activation = forward(input)
        delegate.didProduceActivation(activation)
        return activation
    }
}
public extension FADense {
    init(
        inputSize: Int,
        outputSize: Int,
        activation: @escaping Activation = identity,
        seed: (Int64, Int64) = (Int64.random(in: Int64.min..<Int64.max),
                                Int64.random(in: Int64.min..<Int64.max))
    ) {
        self.init(weight: Tensor(glorotUniform: [inputSize, outputSize], seed: seed),
                  bias: Tensor(zeros: [outputSize]),
                  activation: activation)
    }
}
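As a minimal usage sketch (shapes are made up; `ActivationStats` is the illustrative delegate from above), a Glorot-initialized 784→10 layer with a ReLU activation:
var dense = FADense<Float>(inputSize: 784, outputSize: 10, activation: relu)
let stats = ActivationStats()
dense.delegate = stats                          // attach the illustrative hook
let x = Tensor<Float>(randomNormal: [64, 784])  // dummy batch
let y = dense.applied(to: x)                    // fires didProduceActivation
print(y.shape)                                  // [64, 10]
print(stats.means.count)                        // 1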
//export
@_fixed_layout
public struct FANoBiasConv2D<Scalar: TensorFlowFloatingPoint>: FALayer {
    public var filter: Tensor<Scalar>
    public typealias Activation = @differentiable (Tensor<Scalar>) -> Tensor<Scalar>
    @noDerivative public let activation: Activation
    @noDerivative public let strides: (Int, Int)
    @noDerivative public let padding: Padding
    @noDerivative public var delegate: LayerDelegate<Output> = LayerDelegate()

    public init(
        filter: Tensor<Scalar>,
        activation: @escaping Activation,
        strides: (Int, Int),
        padding: Padding
    ) {
        self.filter = filter
        self.activation = activation
        self.strides = strides
        self.padding = padding
    }

    @differentiable
    public func forward(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        return activation(input.convolved2D(withFilter: filter,
                                            strides: (1, strides.0, strides.1, 1),
                                            padding: padding))
    }

    @differentiable
    public func applied(to input: Tensor<Scalar>) -> Tensor<Scalar> {
        let activation = forward(input)
        delegate.didProduceActivation(activation)
        return activation
    }
}
public extension FANoBiasConv2D {
    init<G: RandomNumberGenerator>(
        filterShape: (Int, Int, Int, Int),
        strides: (Int, Int) = (1, 1),
        padding: Padding = .valid,
        activation: @escaping Activation = identity,
        generator: inout G
    ) {
        let filterTensorShape = TensorShape([
            filterShape.0, filterShape.1, filterShape.2, filterShape.3])
        self.init(
            filter: Tensor(glorotUniform: filterTensorShape, generator: &generator),
            activation: activation,
            strides: strides,
            padding: padding)
    }
}
public extension FANoBiasConv2D {
    init(
        filterShape: (Int, Int, Int, Int),
        strides: (Int, Int) = (1, 1),
        padding: Padding = .valid,
        activation: @escaping Activation = identity,
        seed: (Int64, Int64) = (Int64.random(in: Int64.min..<Int64.max),
                                Int64.random(in: Int64.min..<Int64.max))
    ) {
        let filterTensorShape = TensorShape([
            filterShape.0, filterShape.1, filterShape.2, filterShape.3])
        self.init(
            filter: Tensor(glorotUniform: filterTensorShape, seed: seed),
            activation: activation,
            strides: strides,
            padding: padding)
    }
}
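A quick shape check (illustrative values): with the default `.valid` padding and unit strides, a 3×3 filter shrinks each spatial dimension by 2.
let conv1 = FANoBiasConv2D<Float>(filterShape: (3, 3, 1, 16))  // 1 → 16 channels
let images = Tensor<Float>(zeros: [32, 28, 28, 1])             // dummy NHWC batch
print(conv1.applied(to: images).shape)                         // [32, 26, 26, 16]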
//export
@_fixed_layout
public struct FAConv2D<Scalar: TensorFlowFloatingPoint>: FALayer {
    public var filter: Tensor<Scalar>
    public var bias: Tensor<Scalar>
    public typealias Activation = @differentiable (Tensor<Scalar>) -> Tensor<Scalar>
    @noDerivative public let activation: Activation
    @noDerivative public let strides: (Int, Int)
    @noDerivative public let padding: Padding
    @noDerivative public var delegate: LayerDelegate<Output> = LayerDelegate()

    public init(
        filter: Tensor<Scalar>,
        bias: Tensor<Scalar>,
        activation: @escaping Activation,
        strides: (Int, Int),
        padding: Padding
    ) {
        self.filter = filter
        self.bias = bias
        self.activation = activation
        self.strides = strides
        self.padding = padding
    }

    @differentiable
    public func forward(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        return activation(input.convolved2D(withFilter: filter,
                                            strides: (1, strides.0, strides.1, 1),
                                            padding: padding) + bias)
    }

    @differentiable
    public func applied(to input: Tensor<Scalar>) -> Tensor<Scalar> {
        let activation = forward(input)
        delegate.didProduceActivation(activation)
        return activation
    }
}
public extension FAConv2D {
    init<G: RandomNumberGenerator>(
        filterShape: (Int, Int, Int, Int),
        strides: (Int, Int) = (1, 1),
        padding: Padding = .valid,
        activation: @escaping Activation = identity,
        generator: inout G
    ) {
        let filterTensorShape = TensorShape([
            filterShape.0, filterShape.1, filterShape.2, filterShape.3])
        self.init(
            filter: Tensor(glorotUniform: filterTensorShape, generator: &generator),
            bias: Tensor(zeros: TensorShape([filterShape.3])),
            activation: activation,
            strides: strides,
            padding: padding)
    }
}
public extension FAConv2D {
    init(
        filterShape: (Int, Int, Int, Int),
        strides: (Int, Int) = (1, 1),
        padding: Padding = .valid,
        activation: @escaping Activation = identity,
        seed: (Int64, Int64) = (Int64.random(in: Int64.min..<Int64.max),
                                Int64.random(in: Int64.min..<Int64.max))
    ) {
        let filterTensorShape = TensorShape([
            filterShape.0, filterShape.1, filterShape.2, filterShape.3])
        self.init(
            filter: Tensor(glorotUniform: filterTensorShape, seed: seed),
            bias: Tensor(zeros: TensorShape([filterShape.3])),
            activation: activation,
            strides: strides,
            padding: padding)
    }
}
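`FAConv2D` behaves the same way but adds a zero-initialized per-channel bias after the convolution. For instance (again with made-up shapes), a stride-2 `.same` convolution halves the spatial dimensions:
let conv2 = FAConv2D<Float>(filterShape: (3, 3, 1, 16), strides: (2, 2),
                            padding: .same, activation: relu)
print(conv2.applied(to: Tensor<Float>(zeros: [32, 28, 28, 1])).shape)  // [32, 14, 14, 16]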
//export
@_fixed_layout
public struct FAAvgPool2D<Scalar: TensorFlowFloatingPoint>: FALayer {
    @noDerivative let poolSize: (Int, Int, Int, Int)
    @noDerivative let strides: (Int, Int, Int, Int)
    @noDerivative let padding: Padding
    @noDerivative public var delegate: LayerDelegate<Output> = LayerDelegate()

    public init(
        poolSize: (Int, Int, Int, Int),
        strides: (Int, Int, Int, Int),
        padding: Padding
    ) {
        self.poolSize = poolSize
        self.strides = strides
        self.padding = padding
    }

    public init(poolSize: (Int, Int), strides: (Int, Int), padding: Padding = .valid) {
        self.poolSize = (1, poolSize.0, poolSize.1, 1)
        self.strides = (1, strides.0, strides.1, 1)
        self.padding = padding
    }

    @differentiable
    public func forward(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        return input.averagePooled(kernelSize: poolSize, strides: strides, padding: padding)
    }

    @differentiable
    public func applied(to input: Tensor<Scalar>) -> Tensor<Scalar> {
        let activation = forward(input)
        delegate.didProduceActivation(activation)
        return activation
    }
}
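The two-tuple initializer is the common case; it expands the pool size and strides to the NHWC layout that `averagePooled` expects. A sketch with illustrative shapes:
let avgPool = FAAvgPool2D<Float>(poolSize: (2, 2), strides: (2, 2))
print(avgPool.applied(to: Tensor<Float>(zeros: [32, 28, 28, 16])).shape)  // [32, 14, 14, 16]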
//export
@_fixed_layout
public struct FAAdaptiveAvgPool2D<Scalar: TensorFlowFloatingPoint>: FALayer {
    @noDerivative public var delegate: LayerDelegate<Output> = LayerDelegate()

    public init() {}

    @differentiable
    public func forward(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        return input.mean(alongAxes: [1, 2])
    }

    @differentiable
    public func applied(to input: Tensor<Scalar>) -> Tensor<Scalar> {
        let activation = forward(input)
        delegate.didProduceActivation(activation)
        return activation
    }
}
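The 'adaptive' pool is just a mean over the spatial axes; since `mean(alongAxes:)` keeps the reduced axes as size-1 dimensions, any H×W input collapses to 1×1 (illustrative shapes):
let adaptivePool = FAAdaptiveAvgPool2D<Float>()
print(adaptivePool.applied(to: Tensor<Float>(zeros: [32, 7, 7, 512])).shape)  // [32, 1, 1, 512]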
import FastaiNotebook_00_load_data
import Path
notebookToScript(fname: (Path.cwd / "01a_fastai_layers.ipynb").string)