import TensorFlow
public struct MyModel: Layer {
    public var layer1: Dense<Float>
    public var layer2: Dense<Float>

    public init(nIn: Int, nHid: Int, nOut: Int) {
        layer1 = Dense(inputSize: nIn, outputSize: nHid)
        layer2 = Dense(inputSize: nHid, outputSize: nOut)
    }

    @differentiable
    public func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> {
        return input.sequenced(through: layer1, layer2)
    }
}
var model = MyModel(nIn: 20, nHid: 30, nOut: 10)
let x: Tensor<Float> = Tensor(randomNormal: TensorShape([10,20]))
let y: Tensor<Float> = Tensor(randomNormal: TensorShape([10,10]))
let (loss, grads) = model.valueWithGradient { model in
    meanAbsoluteError(predicted: model(x), expected: y)
}
loss
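// Hedged aside (not in the original notebook): `grads` is MyModel's TangentVector, which
// mirrors the model's stored properties, so per-layer gradients can be inspected directly.
print(grads.layer1.weight.shape)  // expected: [20, 30]
print(grads.layer2.bias.shape)    // expected: [10]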
// FALayer is a layer that supports callbacks on its activations through its `delegates` array.
public protocol FALayer: Layer {
    var delegates: [(Output) -> ()] { get set }

    // Conforming FALayers implement `forward` instead of `callAsFunction`.
    @differentiable
    func forward(_ input: Input) -> Output

    associatedtype Input
    associatedtype Output
}
//export
public extension FALayer {
    @differentiable(vjp: callGrad)
    func callAsFunction(_ input: Input) -> Output {
        let activation = forward(input)
        for d in delegates { d(activation) }
        return activation
    }

    // NOTE: AutoDiff synthesizes a leaking VJP for this, so we define a custom VJP.
    //   TF-475: https://bugs.swift.org/browse/TF-475
    // NOTE: Using `@differentiating` causes a linker error, so we use `@differentiable(vjp:)` instead.
    //   TF-476: https://bugs.swift.org/browse/TF-476
    // The pullback goes through `forward` (not `callAsFunction`) to avoid recursing into this VJP.
    func callGrad(_ input: Input) ->
        (Output, (Self.Output.TangentVector) -> (Self.TangentVector, Self.Input.TangentVector)) {
        return Swift.valueWithPullback(at: self, input) { (m, i) in m.forward(i) }
    }
    // We also provide a default implementation of `delegates`, so that conforming layers don't
    // have to define it each time, as well as a helper to easily add a delegate.
    var delegates: [(Output) -> ()] {
        get { return [] }
        set {}
    }

    mutating func addDelegate(_ d: @escaping (Output) -> ()) { delegates.append(d) }
}
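// Hedged sketch (not from the original notebook): the default `delegates` above has a no-op
// setter, so closures added via `addDelegate` to a layer relying on it are silently dropped.
// A conforming layer that wants its callbacks to actually fire can satisfy the requirement
// with a stored property instead; `FAScale` below is a hypothetical minimal example.
public struct FAScale<Scalar: TensorFlowFloatingPoint>: FALayer {
    public typealias Input = Tensor<Scalar>
    public typealias Output = Tensor<Scalar>
    // Stored delegates (excluded from differentiation) so `addDelegate` persists them.
    @noDerivative public var delegates: [(Output) -> ()] = []
    public var scale: Tensor<Scalar> = Tensor(1)

    @differentiable
    public func forward(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        return input * scale
    }
}

var scaleLayer = FAScale<Float>()
scaleLayer.addDelegate { print("activation mean:", $0.mean()) }
let _ = scaleLayer(Tensor<Float>(randomNormal: [2, 3]))  // the delegate fires here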
//export
@frozen
public struct FADense<Scalar: TensorFlowFloatingPoint>: FALayer {
    // Note: remove the explicit typealiases after TF-603 is resolved.
    public typealias Input = Tensor<Scalar>
    public typealias Output = Tensor<Scalar>

    public var weight: Tensor<Scalar>
    public var bias: Tensor<Scalar>

    public typealias Activation = @differentiable (Tensor<Scalar>) -> Tensor<Scalar>
    @noDerivative public let activation: Activation

    public init(
        weight: Tensor<Scalar>,
        bias: Tensor<Scalar>,
        activation: @escaping Activation
    ) {
        self.weight = weight
        self.bias = bias
        self.activation = activation
    }

    @differentiable
    public func forward(_ input: Tensor<Scalar>) -> Tensor<Scalar> {
        // `•` is matrix multiplication, i.e. matmul(input, weight).
        return activation(input • weight + bias)
    }
}
public extension FADense {
    init(_ nIn: Int, _ nOut: Int, activation: @escaping Activation = identity) {
        self.init(weight: Tensor(randomNormal: [nIn, nOut]),
                  bias: Tensor(zeros: [nOut]),
                  activation: activation)
    }
}
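// Quick usage check (hedged, not in the original notebook): build a single FADense with the
// convenience initializer and apply it; `relu` comes from the TensorFlow library.
let dense = FADense<Float>(20, 30, activation: relu)
let h = dense(Tensor<Float>(randomNormal: [10, 20]))
print(h.shape)  // expected: [10, 30]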
// In this later notebook cell, MyModel is redefined to use FADense in place of Dense.
public struct MyModel: Layer {
    public var layer1: FADense<Float>
    public var layer2: FADense<Float>

    public init(nIn: Int, nHid: Int, nOut: Int) {
        layer1 = FADense(nIn, nHid, activation: relu)
        layer2 = FADense(nHid, nOut)
    }

    @differentiable
    public func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> {
        return input.sequenced(through: layer1, layer2)
    }
}
var model = MyModel(nIn: 20, nHid: 30, nOut: 10)
let (loss, grads) = model.valueWithGradient { model in
    meanAbsoluteError(predicted: model(x), expected: y)
}
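// Hedged sanity check (not in the original notebook): since `callAsFunction` uses the custom
// VJP `callGrad`, its gradients should match those from differentiating `forward` directly.
let gradsViaForward = model.gradient { model -> Tensor<Float> in
    meanAbsoluteError(predicted: model.layer2.forward(model.layer1.forward(x)), expected: y)
}
print((grads.layer1.weight - gradsViaForward.layer1.weight).sum())  // expected: ~0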