// Insert or update a session row via MERGE, refreshing its expiry window.
// sid: session id; sess: session object (serialized to JSON); fn: callback(err, ...).
TediousStore.prototype.set = function(sid, sess, fn) {
  var self = this;
  var operation = retry.operation(self.retryOptions);
  operation.attempt(function() {
    self.pool.acquire(function(err, db) {
      // BUG FIX: the original `if (operation.retry(err) || err)` invoked the
      // callback with the error even when a retry had been scheduled, so fn
      // could fire twice. Retry first; only surface the error once retries
      // are exhausted.
      if (operation.retry(err)) { return; }
      if (err) { return fn(err); }
      var duration = sess.cookie.maxAge || oneDay;
      var r = new tedious.Request(
        'MERGE INTO ' + self.tableName + ' WITH (HOLDLOCK) s' +
        ' USING (VALUES(@sid, @sess)) ns(' + self.sidColumnName + ', ' + self.sessColumnName + ') ON (s.' + self.sidColumnName + '=ns.' + self.sidColumnName + ')' +
        ' WHEN MATCHED THEN UPDATE SET s.' + self.sessColumnName + '=@sess, s.' + self.expiresColumnName + '=DATEADD(ms, @duration, SYSUTCDATETIME())' +
        ' WHEN NOT MATCHED THEN INSERT (' + self.sidColumnName + ', ' + self.sessColumnName + ', ' + self.expiresColumnName + ') VALUES (@sid, @sess, DATEADD(ms, @duration, SYSUTCDATETIME()));',
        function(err) {
          debug.sql('Executed MERGE');
          db.release();
          // Same fix as above: don't report an error that is being retried.
          if (operation.retry(err)) { return; }
          if (err) { return fn(err); }
          fn.apply(self, arguments);
        }
      );
      r.addParameter('sid', tedious.TYPES.VarChar, sid);
      r.addParameter('sess', tedious.TYPES.NVarChar, JSON.stringify(sess));
      r.addParameter('duration', tedious.TYPES.BigInt, duration);
      debugSql(r);
      db.execSql(r);
    });
  });
};
// Ensure the target directory exists, then fetch the tarball with retries.
// Retry policy is tuned to spread 3 attempts over about a minute; see the
// formula at <https://github.com/tim-kos/node-retry>.
mkdir(path.dirname(tmp), function (er) {
  if (er) return cb(er)

  var operation = retry.operation({
    retries: npm.config.get("fetch-retries"),
    factor: npm.config.get("fetch-retry-factor"),
    minTimeout: npm.config.get("fetch-retry-mintimeout"),
    maxTimeout: npm.config.get("fetch-retry-maxtimeout")
  })

  operation.attempt(function (currentAttempt) {
    log.info("retry", "fetch attempt " + currentAttempt +
             " at " + (new Date()).toLocaleTimeString())
    fetchAndShaCheck(u, tmp, shasum, function (er, response) {
      // Retry only when there was no response at all, or the status is
      // 408 / 5xx; anything else is treated as final.
      var statusCode = response && response.statusCode
      var retriable = !statusCode || (statusCode === 408 || statusCode >= 500)
      if (er && retriable && operation.retry(er)) {
        log.info("retry", "will retry, error on last attempt: " + er)
        return
      }
      done(er)
    })
  })
})
// Verify connectivity to Elasticsearch, retrying the ping with backoff.
// Resolves true once reachable (optionally ensuring the mapping template
// first); rejects with false after all retries have failed.
Elasticsearch.prototype.checkEsConnection = function() {
  var self = this;
  self.esConnection = false;

  var operation = retry.operation({
    retries: 3,
    factor: 3,
    minTimeout: 1 * 1000,
    maxTimeout: 60 * 1000,
    randomize: false
  });

  return new Promise(function(fulfill, reject) {
    operation.attempt(function(currentAttempt) {
      self.client.ping().then(
        function(res) {
          self.esConnection = true;
          // Ensure mapping template is existing if desired
          if (!self.options.ensureMappingTemplate) {
            return fulfill(true);
          }
          self.ensureMappingTemplate(fulfill, reject);
        },
        function(err) {
          // A scheduled retry means another ping is coming.
          if (operation.retry(err)) {
            return;
          }
          self.esConnection = false;
          self.emit('error', err);
          reject(false);
        });
    });
  });
};
// Fetch a remote tarball and verify its shasum, retrying transient failures.
// Tuned to spread 3 attempts over about a minute; see the formula at
// <https://github.com/tim-kos/node-retry>.
function addRemoteTarball_ (u, tmp, shasum, auth, cb) {
  var operation = retry.operation({
    retries: npm.config.get('fetch-retries'),
    factor: npm.config.get('fetch-retry-factor'),
    minTimeout: npm.config.get('fetch-retry-mintimeout'),
    maxTimeout: npm.config.get('fetch-retry-maxtimeout')
  })

  operation.attempt(function (currentAttempt) {
    log.info(
      'retry',
      'fetch attempt', currentAttempt,
      'at', (new Date()).toLocaleTimeString()
    )
    fetchAndShaCheck(u, tmp, shasum, auth, function (er, response, shasum) {
      // Only retry on 408, 5xx or no `response`.
      var status = response && response.statusCode
      var transient = !status || (status === 408 || status >= 500)
      if (er && transient && operation.retry(er)) {
        log.warn('retry', 'will retry, error on last attempt: ' + er)
        return
      }
      cb(er, response, shasum)
    })
  })
}
data: ['options', function authenticate(fn, r) { const backend = backends[options.backend]; const ops = r.options; const operation = retry.operation(ops.retry); operation.attempt(function() { backend.login(self, ops.options, function(err, auth) { if (err) { const errCode = _.get(err, 'message'); const defaultStatus = /timeout/.test(errCode) ? 503 : 499; const status = _.get(err, 'status', defaultStatus); if (status === 401 || status == 499) { if (operation._timeout) { clearTimeout(operation._timeout); } operation._timeouts = []; if (status === 401) { err = new Error(`Incorrect username/password combination`); err.type = 'ERROR:LOGIN'; } return fn(err); } if (operation.retry(err)) { return; } err = operation.mainError() || err; return fn(err); } fn(null, auth); }); }); }],
/**
 * Pull a connection from the pool, retrying failed allocations with the
 * manager's configured backoff.
 *
 * @param {Function} fn Receives (err, connection) once allocation settles
 * @returns {Manager} this, for chaining
 * @api public
 */
Manager.prototype.pull = function pull(fn) {
  var self = this;
  var operation = retry.operation({
    retries: this.retries,
    factor: this.factor,
    minTimeout: this.minTimeout,
    maxTimeout: this.maxTimeout,
    randomize: this.randomize
  });

  /**
   * Small wrapper around pulling a connection.
   *
   * @param {Error} err
   * @param {Socket} connection
   * @api private
   */
  function allocate(err, connection) {
    // A scheduled retry triggers another attempt; stay silent until settled.
    if (operation.retry(err)) return;
    fn(err, connection);
  }

  operation.attempt(function attempt() {
    self.allocate(allocate);
  });

  return this;
};
// Return a (cached) retry operation for the given options.
//
// BUG FIX: the cache key was built from Object.keys(options) only, so two
// configurations with the same keys but different values (e.g. retries: 3
// vs retries: 10) collided and shared a single operation. Key on the full
// options object instead.
Hub.op = function (options) {
  var opKey = JSON.stringify(options);
  if (!Hub._opCache[opKey]) {
    Hub._opCache[opKey] = retry.operation(options);
  }
  // NOTE(review): retry operations carry per-run state (attempt counters,
  // timers); sharing one instance across concurrent callers may be unsafe —
  // confirm the intended usage.
  return Hub._opCache[opKey];
};
// Refresh a session's expiry timestamp without rewriting its payload.
TediousStore.prototype.touch = function (sid, sess, fn) {
  var self = this;
  var operation = retry.operation(self.retryOptions);
  operation.attempt(function() {
    self.pool.acquire(function (err, db) {
      // BUG FIX: `if (operation.retry(err) || err)` called fn(err) even when
      // a retry was scheduled, firing the callback twice. Retry first; only
      // report the error once retries are exhausted.
      if (operation.retry(err)) { return; }
      if (err) { return fn(err); }
      var duration = sess.cookie.maxAge || oneDay;
      var r = new tedious.Request(
        'UPDATE ' + self.tableName + ' SET ' + self.expiresColumnName + '=DATEADD(ms, @duration, SYSUTCDATETIME()) WHERE ' + self.sidColumnName + '=@sid',
        function (err) {
          debug.sql('Executed UPDATE');
          db.release();
          // Same fix as above: don't report an error that is being retried.
          if (operation.retry(err)) { return; }
          if (err) { return fn(err); }
          fn(null, true);
        }
      );
      r.addParameter('duration', tedious.TYPES.BigInt, duration);
      r.addParameter('sid', tedious.TYPES.VarChar, sid);
      debugSql(r);
      db.execSql(r);
    });
  });
};
// Remove every stored session by truncating the table.
TediousStore.prototype.clear = function(fn) {
  var self = this;
  var operation = retry.operation(self.retryOptions);
  operation.attempt(function() {
    self.pool.acquire(function(err, db) {
      // BUG FIX: `if (operation.retry(err) || err)` called fn(err) even when
      // a retry was scheduled, firing the callback twice. Retry first; only
      // report the error once retries are exhausted.
      if (operation.retry(err)) { return; }
      if (err) { return fn(err); }
      var r = new tedious.Request(
        'TRUNCATE TABLE ' + self.tableName,
        function(err) {
          debug.sql('Executed TRUNCATE');
          db.release();
          // Same fix as above: don't report an error that is being retried.
          if (operation.retry(err)) { return; }
          if (err) { return fn(err); }
          fn(null, true);
        }
      );
      debugSql(r);
      db.execSql(r);
    });
  });
};
// Count stored sessions; the count arrives via the @count output parameter.
TediousStore.prototype.length = function(fn) {
  var self = this;
  var operation = retry.operation(self.retryOptions);
  operation.attempt(function() {
    self.pool.acquire(function(err, db) {
      // CONSISTENCY FIX: acquire errors were not retried here although every
      // sibling method retries them. Also, the inner
      // `operation.retry(err) || err` pattern double-invoked fn when a retry
      // was scheduled; retry first and only report final errors.
      if (operation.retry(err)) { return; }
      if (err) return fn(err);
      var r = new tedious.Request(
        'SELECT @count=COUNT(*) FROM ' + self.tableName,
        function(err, rowCount) {
          debug.sql('Executed SELECT');
          db.release();
          if (operation.retry(err)) { return; }
          if (err) { return fn(err); }
          // NOTE(review): when the output parameter already fired, this
          // miss-path may call fn a second time — preserved from the
          // original; confirm intended semantics.
          if (!rowCount || rowCount !== 1) return fn();
        }
      );
      // The count is delivered through the @count output parameter, not a row.
      r.on('returnValue', function(parameterName, value) {
        if (!value) return fn();
        return fn(null, value);
      });
      r.addOutputParameter('count', tedious.TYPES.Int);
      debugSql(r);
      db.execSql(r);
    });
  });
};
// Delete the session row matching `sid`.
TediousStore.prototype.destroy = function(sid, fn) {
  var self = this;
  var operation = retry.operation(self.retryOptions);
  operation.attempt(function() {
    self.pool.acquire(function(err, db) {
      // BUG FIX: `if (operation.retry(err) || err)` called fn(err) even when
      // a retry was scheduled, firing the callback twice. Retry first; only
      // report the error once retries are exhausted.
      if (operation.retry(err)) { return; }
      if (err) { return fn(err); }
      var r = new tedious.Request(
        'DELETE s FROM ' + self.tableName + ' s WHERE s.' + self.sidColumnName + '=@sid',
        function(err) {
          debug.sql('Executed DELETE');
          db.release();
          // Same fix as above: don't report an error that is being retried.
          if (operation.retry(err)) { return; }
          if (err) { return fn(err); }
          return fn(null, true);
        }
      );
      r.addParameter('sid', tedious.TYPES.VarChar, sid);
      debugSql(r);
      db.execSql(r);
    });
  });
};
// Proxy endpoint: forwards the client's query to the real API endpoint.
// Note that the "buildApiFrom(path)" helper in main.js sends the API endpoint
// as a query parameter to our proxy. We read that in here and build the real
// endpoint we want to hit.
app.post('/proxy', function (req, res) {
  var opts = { retries: 5, factor: 2, minTimeout: 1 * 1000, maxTimeout: 2 * 1000 };
  var operation = retry.operation(opts);

  var realPath = req.query.endpoint;
  var options = {
    url: realPath,
    body: "query=" + JSON.stringify(req.body),
    method: "POST",
    headers: {
      Accept: 'application/json',
      // Client-side code sends us exactly what we need for authentication.
      Authorization: req.header('authorization'),
      'User-Agent': 'brightedge-wdc/0.0.0'
    }
  };

  // Make the retry attempt: an HTTP request using the above options.
  operation.attempt(function (currentAttempt) {
    console.log('Attempting to proxy request to ' + options.url);
    request(options, function (error, response, body) {
      var header;
      if (!error && response.statusCode === 200) {
        // Proxy all response headers.
        for (header in response.headers) {
          if (response.headers.hasOwnProperty(header)) {
            res.set(header, response.headers[header]);
          }
        }
        // Send the response body.
        res.send(body);
        return;
      }
      error = error || response.statusMessage || response.statusCode;
      // BUG FIX: the retry operation was created but never consulted, so no
      // retry ever happened. Schedule one before answering the client.
      if (operation.retry(error instanceof Error ? error : new Error(String(error)))) {
        return;
      }
      console.log('Error fulfilling request: "' + error.toString() + '"');
      // BUG FIX: `response` is undefined on transport errors; fall back to
      // 502 instead of crashing on response.statusCode.
      res.sendStatus(response ? response.statusCode : 502);
    });
  });
});
// Re-balance this consumer's partition assignments after group membership
// changes. Guards against re-entry via self.rebalancing and skips entirely
// while the consumer is closing.
function rebalance () {
  debug('rebalance() %s is rebalancing: %s ready: %s', self.id, self.rebalancing, self.ready);
  if (!self.rebalancing && !self.closing) {
    deregister();
    self.emit('rebalancing');
    self.rebalancing = true;
    // Nasty hack to retry 3 times to re-balance - TBD fix this
    var oldTopicPayloads = self.topicPayloads;
    var operation = retry.operation({
      retries: 10,
      factor: 2,
      minTimeout: 1 * 100,
      maxTimeout: 1 * 1000,
      randomize: true
    });
    operation.attempt(function (currentAttempt) {
      self.rebalanceAttempt(oldTopicPayloads, function (err) {
        // A scheduled retry means another attempt is coming; do nothing yet.
        if (operation.retry(err)) {
          return;
        }
        if (err) {
          // Retries exhausted: surface the aggregated main error.
          self.rebalancing = false;
          return self.emit('error', new errors.FailedToRebalanceConsumerError(operation.mainError().toString()));
        } else {
          var topicNames = self.topicPayloads.map(function (p) {
            return p.topic;
          });
          // Refresh metadata for the newly-assigned topics before resuming.
          self.client.refreshMetadata(topicNames, function (err) {
            register();
            if (err) {
              self.rebalancing = false;
              self.emit('error', err);
              return;
            }
            if (self.topicPayloads.length) {
              // Re-sync offsets for the assigned partitions, then resume fetching.
              fetchAndUpdateOffsets(function (err) {
                self.rebalancing = false;
                if (err) {
                  self.emit('error', new errors.FailedToRebalanceConsumerError(err.message));
                  return;
                }
                self.fetch();
                self.emit('rebalanced');
              });
            } else {
              // was not assigned any partitions during rebalance
              self.rebalancing = false;
              self.emit('rebalanced');
            }
          });
        }
      });
    });
  }
}
// Execute the CQL query under the configured retry policy. Retryable errors
// re-run the query via node-retry; once retries are exhausted,
// consistency-level failover (all -> eachQuorum, eachQuorum/quorum ->
// localQuorum) may re-dispatch the whole function after retryDelay.
// Emits lifecycle events and records timing metrics when enabled.
function executeRetryableCql() {
  /* istanbul ignore next: ignore coalesce */
  var consistency = options.consistency || self.poolConfig.consistencyLevel || self.consistencyLevel.one;
  var operation = retry.operation(self.config.retryOptions);
  operation.attempt(function (currentAttempt) {
    var queryRequestId = uuid.v4();
    self.execCql(cqlQuery, dataParams, options, queryRequestId, function (err, result) {
      var canRetryErrorType = !!err && self.canRetryError(err);
      var enableConsistencyFailover = (self.config.enableConsistencyFailover !== false);
      if (canRetryErrorType && operation.retry(err)) {
        self.logger.warn('priam.Cql: Retryable error condition encountered. Executing retry #' + currentAttempt + '...', { name: err.name, code: err.code, error: err.message, stack: err.stack });
        self.emit('queryRetried', queryRequestId, cqlQuery, dataParams, options);
        return;
      }
      // retries are complete. lets fallback on quorum if needed
      if (canRetryErrorType && enableConsistencyFailover) {
        // Fallback from all to localQuorum via additional retries
        if (err && consistency === self.consistencyLevel.all) {
          options.consistency = self.consistencyLevel.eachQuorum;
          return void setTimeout(executeRetryableCql, retryDelay);
        }
        if (err && (
          consistency === self.consistencyLevel.eachQuorum ||
          consistency === self.consistencyLevel.quorum)) {
          options.consistency = self.consistencyLevel.localQuorum;
          return void setTimeout(executeRetryableCql, retryDelay);
        }
      }
      self.emit(err ? 'queryFailed' : 'queryCompleted', queryRequestId);
      // all retries are finally complete
      if (captureMetrics) {
        // Convert the hrtime tuple to fractional milliseconds.
        var duration = process.hrtime(start);
        duration = (duration[0] * 1e3) + (duration[1] / 1e6);
        metrics.measurement('query.' + options.queryName, duration, 'ms');
      }
      if (typeof callback === 'function') {
        if (result && result.length) {
          if (options.resultTransformers && options.resultTransformers.length) {
            // Apply each transformer across the whole result set, in order.
            for (var i = 0; i < options.resultTransformers.length; i++) {
              result = result.map(options.resultTransformers[i]);
            }
          }
        }
        callback(err, result);
      }
    });
  });
}
// Download `url` to `file`, retrying network-level failures with
// exponential backoff. Returns a promise that notifies with progress
// updates (and retry notifications) and resolves with the response.
function download(url, file, options) {
  var deferred = Q.defer();
  var progressDelay = 8000;

  options = mout.object.mixIn(
    {
      retries: 5,
      factor: 2,
      minTimeout: 1000,
      maxTimeout: 35000,
      randomize: true,
      progressDelay: progressDelay,
      gzip: true
    },
    options || {}
  );

  // Retry on network errors
  var operation = retry.operation(options);

  operation.attempt(function() {
    Q.fcall(fetch, url, file, options)
      .then(function(response) {
        deferred.resolve(response);
      })
      .progress(function(status) {
        deferred.notify(status);
      })
      .fail(function(error) {
        // Save timeout before retrying to report
        var timeout = operation._timeouts[0];

        // Reject if error is not a network error
        if (errorCodes.indexOf(error.code) === -1) {
          return deferred.reject(error);
        }

        // Next attempt will start reporting download progress immediately
        progressDelay = 0;

        // This will schedule next retry or return false
        if (operation.retry(error)) {
          deferred.notify({ retry: true, delay: timeout, error: error });
        } else {
          deferred.reject(error);
        }
      });
  });

  return deferred.promise;
}
exports.extractWithRetry = thunkify(function(url, options, callback) { var op = retry.operation({retries: 1}) op.attempt(function(attempt) { co(exports.extract(url))(function(err, data) { if (op.retry(err)) return callback(err ? op.mainError() : null, data) }) }) })
// Create topics (or verify they exist) and invoke cb once all requested
// topics are visible in the broker metadata, retrying with backoff.
Client.prototype.createTopics = function (topics, isAsync, cb) {
  topics = typeof topics === 'string' ? [topics] : topics;

  // Support createTopics(topics, cb) — isAsync defaults to true.
  if (typeof isAsync === 'function' && typeof cb === 'undefined') {
    cb = isAsync;
    isAsync = true;
  }

  try {
    validateKafkaTopics(topics);
  } catch (e) {
    if (isAsync) return cb(e);
    throw e;
  }

  // cb is reachable from several paths below; guard against double calls.
  cb = _.once(cb);

  const getTopicsFromKafka = (topics, callback) => {
    this.loadMetadataForTopics(topics, function (error, resp) {
      if (error) {
        return callback(error);
      }
      callback(null, Object.keys(resp[1].metadata));
    });
  };

  const operation = retry.operation({ minTimeout: 200, maxTimeout: 2000 });

  operation.attempt(currentAttempt => {
    logger.debug('create topics currentAttempt', currentAttempt);
    getTopicsFromKafka(topics, function (error, kafkaTopics) {
      if (error) {
        if (operation.retry(error)) {
          return;
        }
        // BUG FIX: on a final metadata error the original fell through and
        // operated on an undefined `kafkaTopics`; report the error instead.
        return cb(error);
      }
      logger.debug('kafka reported topics', kafkaTopics);
      const left = _.difference(topics, kafkaTopics);
      if (left.length === 0) {
        logger.debug(`Topics created ${kafkaTopics}`);
        return cb(null, kafkaTopics);
      }
      logger.debug(`Topics left ${left.join(', ')}`);
      // Some topics are still missing — retry until exhausted.
      if (!operation.retry(new Error(`Topics not created ${left}`))) {
        cb(operation.mainError());
      }
    });
  });

  // Fire-and-forget mode: acknowledge immediately (cb is once-guarded, so a
  // later async completion will not call it again).
  if (!isAsync) {
    cb(null);
  }
};
// Wrap `request` so that network-level errors transparently replay the
// request with exponential backoff. Emits 'replay' on each retry.
function requestReplay(request, options) {
  var originalEmit = request.emit;
  var attempts = 0;

  // Default options
  options = deepExtend({
    errorCodes: errorCodes,
    retries: 5,
    factor: 3,
    minTimeout: 2000,
    maxTimeout: 35000,
    randomize: true
  }, options || {});

  // Init retry
  var operation = retry.operation(options);
  operation.attempt(function () {
    // The first attempt is the request itself; replays restart it.
    if (attempts) {
      request.start();
    }
    attempts += 1;
  });

  // Increase maxListeners because start() adds a new listener each time
  request._maxListeners += options.retries + 1;

  // Monkey patch emit to catch errors and retry
  request.emit = function (name, error) {
    // Anything other than an error passes straight through.
    if (name !== 'error') {
      return originalEmit.apply(this, arguments);
    }

    // Errors outside the replayable set also pass through.
    if (options.errorCodes.indexOf(error.code) === -1) {
      return originalEmit.call(this, name, error);
    }

    // A retry was scheduled: announce the replay instead of erroring.
    if (operation.retry(error)) {
      this.emit('replay', attempts - 1, error);
      return 0;
    }

    // No more retries available, error out
    error.replays = attempts - 1;
    return originalEmit.call(this, name, error);
  };

  return request;
}
return new Promise(function (resolve, reject) { // Retry because readFile can fail if the file is being saved to const op = retry.operation(); op.attempt(function () { fs.readFile(file, function (err, contents) { if (op.retry(err)) return; resolve(contents.toString('utf8')); }); }, { minTimeout: 50 }); });
// Bridge: consume messages from a Redis queue and forward each one to a JMS
// queue or topic, retrying sends per config.jms.retry.
new RedisQueue(config.redis, function (data, err, callback) {
  if (_.isUndefined(data)) {
    logger.error("Received empty object", data);
    logger.error("Possible elements in processing queue", data);
    return callback(new Error("Empty Object received"));
  }

  var operation = retry.operation(config.jms.retry);
  operation.attempt(function (currentAttempt) {
    // Shared completion handler for both destination types.
    // BUG FIX: the original logged an undefined `error` variable (the real
    // one is `err`) and invoked `new Callback(...)` — a non-existent
    // constructor — instead of calling `callback(...)` on final failure.
    var onSent = function (destinationLabel) {
      return function (err) {
        if (operation.retry(err)) {
          logger.error('Retry failed with error:', err, 'Attempt:', currentAttempt);
          return;
        }
        if (err) {
          logger.error('Retry failed with error:', err, 'Attempt:', currentAttempt);
          logger.error('Attempting Rollback..');
          return callback(new Error('Retry failed with error:' + err + ' Attempt:' + currentAttempt));
        }
        logger.info('Sent Message to ' + destinationLabel + ':' + config.jms.destinationJndiName + ' Message:' + data);
        // Listen again
        logger.info('Listening on Channel:' + config.redis.channel);
        if (!_.isUndefined(config.audit)) {
          audit(data);
        }
        return callback();
      };
    };

    if (config.jms.type == 'QUEUE') {
      jmsClient.sendMessageToQueueAsync(data, "text", config.jms.staticHeaders, onSent('JMS Queue'));
    } else if (config.jms.type == 'TOPIC') {
      jmsClient.sendMessageToTopicAsync(data, "text", config.jms.staticHeaders, onSent('JMS Topic'));
    } else {
      throw new Error("jms.config.type has to be specified as either TOPIC or QUEUE");
    }
  });
});
return new Promise(function (resolve, reject) { var operation = retry.operation(options); operation.attempt(function () { opFn().then(function (result) { operation.retry(); resolve(result); }).catch(function (err) { if (!operation.retry(err)) { reject(err); } }); }); });
// Run a registry request attempt under the client's retry policy.
// Tuned to spread 3 attempts over about a minute; see the formula at
// <https://github.com/tim-kos/node-retry>.
function attempt(cb) {
  var client = this
  var operation = retry.operation(this.config.retry)

  operation.attempt(function (currentAttempt) {
    client.log.info(
      "attempt",
      "registry request try #" + currentAttempt +
        " at " + (new Date()).toLocaleTimeString()
    )
    // Hand the operation to the caller so it can decide when to retry.
    cb(operation)
  })
}
// Send an API request, transparently retrying errors that flag themselves
// retryable via err.retry.
Client.prototype._request = function(action, options, cb) {
  var self = this;
  var operation = retry.operation({ retries: 10, factor: 2, minTimeout: 50 });

  operation.attempt(function(currentAttempt) {
    self.request.send(action, options, function(err, resp) {
      // check to see if should retry request
      var rescheduled = err && err.retry && operation.retry(err);
      if (rescheduled) return;
      cb(err, resp);
    });
  });
};
// Render a page in an Electron BrowserWindow as PDF or image.
// Applies a hard timeout, an optional fixed delay, and an optional
// wait-for-text gate (polled via findInPage with retry backoff) before
// rendering. `done` receives the render result or an error.
exports.renderWorker = function renderWorker(window, task, done) {
  const { webContents } = window;
  let waitOperation = null;
  // Hard cap: emit 'timeout' on the webContents after TIMEOUT seconds.
  const timeoutTimer = setTimeout(() => webContents.emit('timeout'), TIMEOUT * 1000);

  if (task.waitForText !== false) {
    // Poll roughly once a second, for at most TIMEOUT attempts.
    waitOperation = retry.operation({ retries: TIMEOUT, factor: 1, minTimeout: 750, maxTimeout: 1000 });
  }

  webContents.once('finished', (type, ...args) => {
    clearTimeout(timeoutTimer);

    function renderIt() {
      validateResult(task.url, type, ...args)
        // Page loaded successfully
        .then(() => (task.type === 'pdf' ? renderPDF : renderImage).call(window, task, done))
        .catch(ex => done(ex));
    }

    // Delay rendering n seconds
    if (task.delay > 0) {
      console.log('delaying pdf generation by %sms', task.delay * 1000);
      setTimeout(renderIt, task.delay * 1000);
      // Look for specific string before rendering
    } else if (task.waitForText) {
      console.log('delaying pdf generation, waiting for text "%s" to appear', task.waitForText);
      waitOperation.attempt(() => webContents.findInPage(task.waitForText));
      webContents.on('found-in-page', function foundInPage(event, result) {
        if (result.matches === 0) {
          // Text not on the page yet: schedule another findInPage attempt.
          waitOperation.retry(new Error('not ready to render'));
          return;
        }
        if (result.finalUpdate) {
          // Text found: clean up the search and listener, then render.
          webContents.stopFindInPage('clearSelection');
          webContents.removeListener('found-in-page', foundInPage);
          renderIt();
        }
      });
    } else {
      renderIt();
    }
  });

  webContents.loadURL(task.url, { extraHeaders: DEFAULT_HEADERS });
};
// Hand a queued task to the transport layer under the client's retry policy.
// The response handler (callTransport.handleTransportResponse) decides
// whether to schedule a retry via the shared retryOp.
BaseAPIClient.prototype._callTransport = function _callTransport(task, queueCb) {
  var self = this;
  var retryOp = retry.operation(self.retryConfig);
  var retryArgs = {
    client: self,
    task: task,
    queueCb: queueCb,
    retryOp: retryOp
  };

  retryOp.attempt(function attemptTransportCall() {
    self.logger('verbose', 'BaseAPIClient _callTransport - Retrying ' + pick(task.args, 'url'));
    self.transport(task.args, partial(callTransport.handleTransportResponse, retryArgs));
  });

  this.logger('debug', 'BaseAPIClient _callTransport end');
};
module.exports = function retryMe(fn, options, callback) { if (!callback && typeof options === 'function') { callback = options options = {} } var operation = retry.operation(options) operation.attempt(function() { fn(function(err, result) { if (operation.retry(err)) return callback(err, result, operation.errors()) }) }) }
// Re-balance partition assignments (simpler variant: no closing guard and
// no offset re-sync). Guards against re-entry via self.rebalancing.
function rebalance() {
  if (!self.rebalancing) {
    deregister();
    self.ready = false;
    self.emit('rebalancing');
    self.rebalancing = true;
    // Nasty hack to retry 3 times to re-balance - TBD fix this
    var oldTopicPayloads = self.topicPayloads;
    var operation = retry.operation({
      retries: 10,
      factor: 2,
      minTimeout: 1 * 100,
      maxTimeout: 1 * 1000,
      randomize: true
    });
    operation.attempt(function (currentAttempt) {
      self.rebalanceAttempt(oldTopicPayloads, function (err) {
        // A scheduled retry means another attempt is coming; do nothing yet.
        if (operation.retry(err)) {
          return;
        }
        if (err) {
          // Retries exhausted: surface the aggregated main error.
          self.rebalancing = false;
          return self.emit('error', new errors.FailedToRebalanceConsumerError(operation.mainError().toString()));
        } else {
          var topicNames = self.topicPayloads.map(function (p) {
            return p.topic;
          });
          // Refresh metadata for the new assignments before resuming.
          self.client.refreshMetadata(topicNames, function (err) {
            register();
            self.rebalancing = false;
            if (err) {
              self.emit('error', err);
            } else {
              self.emit('rebalanced');
            }
          });
        }
      });
    });
  }
}
// Fetch a session by id; yields the parsed session object (with cookie
// expiry restored) or nothing when absent/expired.
TediousStore.prototype.get = function(sid, fn) {
  var self = this;
  var operation = retry.operation(self.retryOptions);
  operation.attempt(function() {
    self.pool.acquire(function(err, db) {
      // BUG FIX: `if (operation.retry(err) || err)` invoked fn(err) even when
      // a retry was scheduled, firing the callback twice. Retry first; only
      // surface the error once retries are exhausted.
      if (operation.retry(err)) { return; }
      if (err) { return fn(err); }
      var r = new tedious.Request(
        'SELECT s.' + self.expiresColumnName + ', s.' + self.sessColumnName + ' FROM ' + self.tableName + ' s WHERE s.' + self.sidColumnName + '=@sid AND s.' + self.expiresColumnName + '>=SYSUTCDATETIME()',
        function(err, rowCount) {
          debug.sql('Executed SELECT');
          db.release();
          // Same fix as above: don't report an error that is being retried.
          if (operation.retry(err)) { return; }
          if (err) { return fn(err); }
          // No single matching row: report a miss (no session).
          if (!rowCount || rowCount !== 1) { return fn(); }
        }
      );
      r.on('row', function(columns) {
        if (!columns || columns.length !== 2) return fn();
        var expires = columns[0].value;
        var sess = columns[1].value;
        if (!expires || !sess) return fn();
        // Restore the cookie expiry from the stored expiry column.
        var dExpires = new Date(expires).toISOString();
        var oSess = JSON.parse(sess);
        oSess.cookie.expires = dExpires;
        debug.sql('Returning ', oSess);
        return fn(null, oSess);
      });
      r.addParameter('sid', tedious.TYPES.VarChar, sid);
      debugSql(r);
      db.execSql(r);
    });
  });
};
// Upload a body to S3 with public-read ACL, retrying transient transport
// failures. HTTP-level failures (non-200) are reported without retry.
var upload = function(path, headers, body, callback) {
  callback = callback || function() {};
  pendingUploads++;

  var operation = retry.operation({ retries: 5, minTimeout: 50, maxTimeout: 1000 });

  // add S3-specific headers
  headers["x-amz-acl"] = "public-read";
  headers["x-amz-storage-class"] = "REDUCED_REDUNDANCY";

  return operation.attempt(function(currentAttempt) {
    return request.put({
      uri: util.format("http://%s.s3.amazonaws.com%s%s", S3_BUCKET, PATH_PREFIX, path),
      aws: { key: ACCESS_KEY_ID, secret: SECRET_ACCESS_KEY, bucket: S3_BUCKET },
      headers: headers,
      body: body,
      timeout: 5000
    }, function(err, response, body) {
      // Transport errors get retried; fall through only once settled.
      if (operation.retry(err)) {
        return;
      }
      pendingUploads--;
      if (err) {
        return callback(operation.mainError());
      }
      if (response.statusCode === 200) {
        return callback();
      }
      return callback(new Error(util.format("%d: %s", response.statusCode, body)));
    });
  });
};
// Refresh broker metadata for `topics`, retrying with short backoff.
// cb fires once: with the final error, or with no arguments on success.
function attemptRequestMetadata (topics, cb) {
  var operation = retry.operation({ minTimeout: 200, maxTimeout: 1000 });

  operation.attempt(function (currentAttempt) {
    debug('refresh metadata currentAttempt', currentAttempt);
    self.loadMetadataForTopics(topics, function (err, resp) {
      // A per-topic error inside the response counts as a failure too.
      err = err || resp[1].error;
      if (operation.retry(err)) return;
      if (err) {
        debug('refresh metadata error', err.message)
        return cb(err);
      }
      self.updateMetadatas(resp);
      cb();
    });
  });
}