// Numerically stable sigmoid cross-entropy on raw logits:
//   max(o, 0) - o * t + log(1 + exp(-|o|))
// where o = output (logits) and t = target.
function sigmoidCrossEntropyWithLogits(target, output) {
    // relu part: max(output, 0), built as an elementwise maximum with zeros.
    var reluTerm = tfc.maximum(output, tfc.zerosLike(output));
    // cross term: output * target.
    var crossTerm = tfc.mul(output, target);
    // softplus of -|output| keeps exp() from overflowing for large logits.
    var stableSoftplus = tfc.log(tfc.add(getScalar(1), tfc.exp(tfc.neg(tfc.abs(output)))));
    return tfc.add(tfc.sub(reluTerm, crossTerm), stableSoftplus);
}
// Decodes box regression offsets (x1) against anchor boxes (x0) into
// corner coordinates [minX, minY, maxX, maxY]. The 10 and 5 divisors
// look like SSD-style box variances — TODO confirm against the model.
function decodeBoxesLayer(x0, x1) {
    var anchors = getCenterCoordinatesAndSizesLayer(x0);
    var sizes = anchors.sizes;
    var centers = anchors.centers;
    // One tensor per regression component, rows become columns first.
    var offsets = tf.unstack(tf.transpose(x1, [1, 0]));
    // Half extents: exp(offset / 5) scales the anchor size; halve to get
    // the distance from the center to the edge.
    var halfW = tf.div(tf.mul(tf.exp(tf.div(offsets[2], tf.scalar(5))), sizes[0]), tf.scalar(2));
    var halfH = tf.div(tf.mul(tf.exp(tf.div(offsets[3], tf.scalar(5))), sizes[1]), tf.scalar(2));
    // Decoded centers: offset / 10 shifts the anchor center, scaled by size.
    var centerX = tf.add(tf.mul(tf.div(offsets[0], tf.scalar(10)), sizes[0]), centers[0]);
    var centerY = tf.add(tf.mul(tf.div(offsets[1], tf.scalar(10)), sizes[1]), centers[1]);
    var corners = tf.stack([
        tf.sub(centerX, halfW),
        tf.sub(centerY, halfH),
        tf.add(centerX, halfW),
        tf.add(centerY, halfH)
    ]);
    return tf.transpose(corners, [1, 0]);
}
// Converts corner-format boxes (rows of x, presumably [x0, y0, x1, y1] —
// confirm against decodeBoxesLayer's usage) into widths/heights and
// center coordinates.
function getCenterCoordinatesAndSizesLayer(x) {
    var cols = tf.unstack(tf.transpose(x, [1, 0]));
    var width = tf.sub(cols[2], cols[0]);
    var height = tf.sub(cols[3], cols[1]);
    // Center = min corner plus half the extent.
    var centerX = tf.add(cols[0], tf.div(width, tf.scalar(2)));
    var centerY = tf.add(cols[1], tf.div(height, tf.scalar(2)));
    return {
        sizes: [width, height],
        centers: [centerX, centerY]
    };
}
// Residual unit: conv -> conv (no relu) -> identity skip add -> relu.
function residual(x, params) {
    var branch = convLayer_1.conv(x, params.conv1);
    branch = convLayer_1.convNoRelu(branch, params.conv2);
    // Add the identity shortcut, then apply the final activation.
    return tf.relu(tf.add(branch, x));
}
// Convolution followed by bias add and a learned channel scale, with an
// optional trailing ReLU. `padding` defaults to 'same'.
function convLayer(x, params, strides, withRelu, padding) {
    if (padding === void 0) { padding = 'same'; }
    var filters = params.conv.filters;
    var bias = params.conv.bias;
    var out = tf.conv2d(x, filters, strides, padding);
    out = tf.add(out, bias);
    out = scaleLayer_1.scale(out, params.scale);
    if (withRelu) {
        return tf.relu(out);
    }
    return out;
}
// Adds a bias tensor to x. The bias must be rank 1 or match x's rank;
// any truthy dataFormat is rejected as not yet implemented (after
// validation by checkDataFormat).
function biasAdd(x, bias, dataFormat) {
    common_1.checkDataFormat(dataFormat);
    var biasRank = ndim(bias);
    var xRank = ndim(x);
    if (biasRank !== 1 && biasRank !== xRank) {
        throw new errors_1.ValueError('Unexpected bias dimensions: ' + biasRank + '; expected it to be 1 or ' + xRank);
    }
    if (dataFormat) {
        throw new errors_1.NotImplementedError('dataFormat logic is not yet implemented.');
    }
    return tfc.add(x, bias);
}
// Inverted dropout: zeroes entries of x with probability `level` and
// rescales the survivors by 1 / (1 - level). Non-default noiseShape and
// explicit seed both raise NotImplementedError.
function dropout(x, level, noiseShape, seed) {
    if (noiseShape != null && !_.isEqual(x.shape, noiseShape)) {
        throw new errors_1.NotImplementedError('Non-default noise shape is not implemented yet: ' + JSON.stringify(noiseShape));
    }
    if (seed != null) {
        throw new errors_1.NotImplementedError('seed is not implemented for dropout yet.');
    }
    // step(u - level): 1 where the uniform noise clears `level`, else 0.
    var keepMask = tfc.step(tfc.add(neg(level), randomUniform(x.shape, 0, 1, types_1.DType.float32)));
    // Scale the mask so the expected activation magnitude is unchanged.
    var scaledMask = tfc.mul(divide(getScalar(1), subtract(getScalar(1), level)), keepMask);
    return tfc.mul(x, scaledMask);
}
// Downsampling residual unit: a strided conv branch is added to an
// average-pooled shortcut. Where the two branches disagree in spatial
// size the conv branch is padded by one zero row/column; where they
// disagree in channels the shortcut's channels are doubled with zeros.
function residualDown(x, params) {
    var branch = convLayer_1.convDown(x, params.conv1);
    branch = convLayer_1.convNoRelu(branch, params.conv2);
    var shortcut = tf.avgPool(x, 2, 2, 'valid');
    // Channel padding block, sized from the un-padded shortcut.
    var channelZeros = tf.zeros(shortcut.shape);
    var needsChannelPad = shortcut.shape[3] !== branch.shape[3];
    var needsSpatialPad = shortcut.shape[1] !== branch.shape[1]
        || shortcut.shape[2] !== branch.shape[2];
    if (needsSpatialPad) {
        // Append one zero row (axis 1), then one zero column (axis 2).
        var rowPadShape = branch.shape.slice();
        rowPadShape[1] = 1;
        branch = tf.concat([branch, tf.zeros(rowPadShape)], 1);
        var colPadShape = branch.shape.slice();
        colPadShape[2] = 1;
        branch = tf.concat([branch, tf.zeros(colPadShape)], 2);
    }
    if (needsChannelPad) {
        shortcut = tf.concat([shortcut, channelZeros], 3);
    }
    return tf.relu(tf.add(shortcut, branch));
}
// Softsign activation: x / (1 + |x|).
function softsign(x) {
    var denominator = tfc.add(getScalar(1), tfc.abs(x));
    return tfc.div(x, denominator);
}
// Softplus activation: log(1 + exp(x)).
// NOTE(review): exp(x) can overflow for large x — presumably acceptable
// for the value ranges seen here; confirm before reusing elsewhere.
function softplus(x) {
    var exponential = tfc.exp(x);
    return tfc.log(tfc.add(getScalar(1), exponential));
}
// Adds `increment` to x's stored value via its read/write interface and
// returns whatever write() yields.
function updateAdd(x, increment) {
    var incremented = tfc.add(x.read(), increment);
    return x.write(incremented);
}
// Broadcast-adds the scalar c to every element of x.
function scalarPlusArray(c, x) {
    var shifted = tfc.add(c, x);
    return shifted;
}
// Elementwise (broadcasting) addition of x and y.
function add(x, y) {
    var sum = tfc.add(x, y);
    return sum;
}
// NOTE(review): fragment — the enclosing function's header is outside
// this chunk; confirm `x`, `params`, and `strides` against the full file.
// Inside a tidy scope: 'same'-padded conv2d, plus params.batch_norm_offset,
// then clamped to [0, 6] (relu6-like clipping).
return tf.tidy(function () { var out = tf.conv2d(x, params.filters, strides, 'same'); out = tf.add(out, params.batch_norm_offset); return tf.clipByValue(out, 0, 6); });