var getUserFromRedis = function(userId, callback) {
    Redis.getClient().hgetall(userId, function(err, hash) {
        if (err) {
            return callback({'code': 500, 'msg': err});
        } else if (!hash) {
            return callback({'code': 404, 'msg': 'Principal not found in redis'});
        }

        // We don't need to do validation as that has already happened
        hash.isGlobalAdmin = (hash.isGlobalAdmin === 'true');
        hash.isTenantAdmin = (hash.isTenantAdmin === 'true');
        hash.principalId = userId;

        var user = hashToUser(hash);

        // Any potential extra fields
        if (hash.extra) {
            try {
                user.extra = JSON.parse(hash.extra);
            } catch (err) {
                log().error({'err': err}, 'Failed to parse the stringified extra object for %s', userId);
            }
        }

        return callback(null, user);
    });
};
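/*!
 * `hashToUser` is not shown above. A minimal sketch of what it might look like,
 * assuming a `User` model whose constructor takes the tenant, id, display name
 * and an options object. The constructor signature and field names here are
 * assumptions based on the fields cached in redis, not the real model:
 */
var hashToUser = function(hash) {
    // Hypothetical mapping from the flat redis hash back to a User object
    var user = new User(hash.tenantAlias, hash.principalId, hash.displayName, {
        'visibility': hash.visibility,
        'locale': hash.locale,
        'smallPictureUri': hash.smallPictureUri,
        'mediumPictureUri': hash.mediumPictureUri,
        'largePictureUri': hash.largePictureUri
    });
    user.isGlobalAdmin = hash.isGlobalAdmin;
    user.isTenantAdmin = hash.isTenantAdmin;
    return user;
};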
var _updateCachedUser = function(userId, fields, callback) {
    callback = callback || function(err) {
        if (err) {
            log().warn({'err': err}, 'Error updating cached user in Redis');
        }
    };

    _transformUserFieldTypes(fields);

    // Clean out null and undefined values
    _.each(fields, function(value, key) {
        if (OaeUtil.isUnspecified(value)) {
            delete fields[key];
        } else {
            // Ensure we have all strings
            fields[key] = '' + value;
        }
    });

    if (_.isEmpty(fields)) {
        return callback();
    }

    return Redis.getClient().hmset(userId, fields, callback);
};
var _pushCountsToRedis = function(callback) {
    if (_.isEmpty(stats.counts)) {
        return callback();
    }

    var countHashKey = _getTelemetryCountHashKey();
    var multi = Redis.getClient().multi();

    // Iterate through each local count and increment the global redis copy by the amount stored locally
    _.each(stats.counts, function(nameValue, module) {
        _.each(nameValue, function(value, name) {
            multi.hincrby(countHashKey, _getTelemetryCountKey(module, name), value);
        });
    });

    // Reset the counts in this process tick to avoid losing counts
    _resetLocalCounts();

    multi.exec(function(err) {
        if (err) {
            log().error({'err': err}, 'Error pushing local counts to redis');
        }

        return callback();
    });
};
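/*!
 * For example, with local counts of {'api': {'hits': 3}} the MULTI above would
 * queue roughly `HINCRBY <countHashKey> api.hits 3`, so several app servers can
 * aggregate their counts into one shared hash. The exact key strings produced by
 * `_getTelemetryCountHashKey` and `_getTelemetryCountKey` are assumptions here.
 */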
strategy.saveAssociation(function(handle, provider, algorithm, secret, expiresIn, done) {
    var key = util.format('openid:%s:%s', strategyId, handle);
    var value = JSON.stringify({
        'provider': provider,
        'algorithm': algorithm,
        'secret': secret
    });
    Redis.getClient().setex(key, expiresIn, value, done);
});
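/*!
 * For a handle of `abc123` on a strategy id of `google` (both illustrative), the
 * snippet above results in a call equivalent to:
 *
 *   SETEX openid:google:abc123 <expiresIn> '{"provider":"...","algorithm":"...","secret":"..."}'
 *
 * so the association drops out of redis as soon as the OpenID provider considers
 * it expired. `loadAssociation` further down reads it back with a plain GET.
 */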
const refreshConfiguration = function(config, callback) {
  callback =
    callback ||
    function(err) {
      if (err) {
        log().error({ err }, 'Error refreshing activities configuration');
      }
    };

  config = ActivitySystemConfig.refreshConfiguration(config);
  log().info({ config }, 'Refreshing activity configuration');

  // Configure redis. Either use the main connection pool, or a dedicated one if config.activity.redis was configured
  let redisClient = Redis.getClient();
  if (config.redis) {
    redisClient = Redis.createClient(config.redis);
  }

  ActivityDAO.init(redisClient);

  // Reset the collection polling interval
  clearInterval(collectionPollingTimer);
  if (config.processActivityJobs && config.collectionPollingFrequency > 0) {
    const collectionPollingFrequencyInMs = config.collectionPollingFrequency * 1000;

    // Delegate to the aggregator to collect/aggregate all buckets
    collectionPollingTimer = setInterval(ActivityAggregator.collectAllBuckets, collectionPollingFrequencyInMs);
  }

  // Reset the mail polling interval
  clearInterval(mailPollingTimer);
  if (config.processActivityJobs && config.mail.pollingFrequency > 0) {
    const mailPollingFrequencyInMs = config.mail.pollingFrequency * 1000;

    // Collect and send email
    mailPollingTimer = setInterval(ActivityEmail.collectAllBuckets, mailPollingFrequencyInMs);
  }

  // Enable / disable the worker binding, if necessary
  if (config.processActivityJobs && !boundWorker) {
    boundWorker = true;

    // Bind directly to the `routeActivity` router method
    return TaskQueue.bind(
      ActivityConstants.mq.TASK_ACTIVITY,
      ActivityRouter.routeActivity,
      { subscribe: { prefetchCount: config.maxConcurrentRouters } },
      callback
    );
  }

  if (!config.processActivityJobs && boundWorker) {
    boundWorker = false;
    return TaskQueue.unbind(ActivityConstants.mq.TASK_ACTIVITY, callback);
  }

  return callback();
};
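/*!
 * An illustrative configuration object for the refresh above. The field names are
 * the ones the function actually reads; the values and the variable name are
 * examples only:
 */
const exampleActivityConfig = {
  'processActivityJobs': true,
  'collectionPollingFrequency': 5, // Seconds between activity bucket collections
  'mail': { 'pollingFrequency': 60 }, // Seconds between email bucket collections
  'maxConcurrentRouters': 5,
  'redis': null // Provide a connection config here to use a dedicated redis client
};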
const hasUserEditedSpreadsheet = function(contentId, userId, callback) {
  const key = _getEditMappingKey(contentId);
  const client = Redis.getClient();
  client.exists(key, function(err, exists) {
    if (err) {
      log().error({ err, contentId, userId }, 'Failed to check whether user has edited Ethercalc spreadsheet');
      return callback({ code: 500, msg: 'Failed to check whether user has edited Ethercalc spreadsheet' });
    }

    if (!exists) {
      // There are no edits recorded for this document
      return callback(null, false);
    }

    client.lrange(key, 0, -1, function(err, replies) {
      if (err) {
        log().error({ err, contentId, userId }, 'Failed to fetch editors for Ethercalc spreadsheet');
        return callback({ code: 500, msg: 'Failed to fetch editors for Ethercalc spreadsheet' });
      }

      if (!_.contains(replies, userId)) {
        // There are edits, but not from this user
        return callback(null, false);
      }

      // Let's take out the references to this user's edits since we're sending out a notification
      client.lrem(key, 0, userId, function(err) {
        if (err) {
          log().error({ err, contentId, userId }, 'Failed to purge cache of user edits to Ethercalc spreadsheet');
          return callback({ code: 500, msg: 'Failed to purge cache of user edits to Ethercalc spreadsheet' });
        }

        // This user has edited the document
        return callback(null, true);
      });
    });
  });
};
var init = module.exports.init = function(emailSystemConfig, callback) {
    // Email configuration
    customEmailTemplatesDir = emailSystemConfig.customEmailTemplatesDir;
    debug = (emailSystemConfig.debug !== false);
    deduplicationInterval = emailSystemConfig.deduplicationInterval || (7 * 24 * 60 * 60);
    emailSystemConfig.throttling = emailSystemConfig.throttling || {};
    throttleConfig.count = emailSystemConfig.throttling.count || 10;
    throttleConfig.timespan = emailSystemConfig.throttling.timespan || (2 * 60);

    // Create the Redback rate limiter for emails
    var EmailRedback = redback.use(Redis.getClient(), {'namespace': 'oae-email:redback'});

    /*!
     * For robust unit tests, any provided timespan needs to cover at least 2 buckets so that when
     * we do a count on the rate, we don't risk rolling over to a new interval and missing the emails
     * we just sent, resetting the frequency to 0 and intermittently failing the test. Therefore
     * we set the bucket interval to be (timespan / 2).
     *
     * Additionally, when a bucket is incremented in redback, the following 2 buckets are cleared.
     * Therefore, in order to ensure we don't roll over to a new bucket while incrementing and risk
     * our previous bucket getting cleared, we must ensure we have at least 5 buckets so that the
     * clearing of the "next 2" buckets does not impact the counting of the "previous 2" (e.g., if
     * the current time bucket is 2, redback will clear buckets 3 and 4 while we count back from 0,
     * 1 and 2).
     */
    var bucketInterval = Math.ceil(throttleConfig.timespan / 2);
    EmailRateLimiter = EmailRedback.createRateLimit('email', {
        // The rate limiter seems to need at least 5 buckets to work, so let's give it exactly 5 (there are exactly bucket_span / bucket_interval buckets)
        'bucket_span': bucketInterval * 5,
        'bucket_interval': bucketInterval,
        'subject_expiry': throttleConfig.timespan
    });

    // If there was an existing email transport, we close it
    if (emailTransport) {
        emailTransport.close();
        emailTransport = null;
    }

    // Open an email transport if we're not in debug mode
    if (!debug) {
        if (emailSystemConfig.transport === 'SMTP') {
            log().info({'data': emailSystemConfig.smtpTransport}, 'Configuring SMTP email transport.');
            emailTransport = nodemailer.createTransport('SMTP', emailSystemConfig.smtpTransport);
        } else if (emailSystemConfig.transport === 'sendmail') {
            log().info({'data': emailSystemConfig.sendmailTransport}, 'Configuring Sendmail email transport.');
            emailTransport = nodemailer.createTransport('Sendmail', emailSystemConfig.sendmailTransport.path);
        } else {
            return callback({'code': 400, 'msg': 'Misconfigured mail transport'});
        }
    }

    return refreshTemplates(callback);
};
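/*!
 * A worked example of the bucket math above, using the default throttle config
 * (10 emails per 2 minutes):
 *
 *   throttleConfig.timespan = 120s
 *   bucketInterval          = ceil(120 / 2) = 60s
 *   bucket_span             = 60 * 5 = 300s  (exactly 5 buckets)
 *   subject_expiry          = 120s
 *
 * Counting the last 120s then always spans 2 whole buckets, and redback's
 * clearing of the "next 2" buckets can never touch the 2 buckets being counted.
 */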
Cassandra.runQuery('SELECT * FROM Principals USING CONSISTENCY QUORUM WHERE principalId = ?', [principalId], function(err, rows) {
    if (err) {
        return callback(err);
    }

    var principal = getPrincipalFromRow(rows[0]);
    if (!principal) {
        return callback({'code': 404, 'msg': 'Couldn\'t find principal: ' + principalId});
    } else if (isGroup(principal.id)) {
        return callback(null, principal);
    }

    // Deep-copy the User object *before* we apply the visibility settings.
    // This allows us to retrieve a full object from redis later.
    var hash = clone(principal);

    // Invoke the callback immediately; storing the hash in redis can happen asynchronously
    callback(null, principal);

    // Redis only takes strings as hash field values, so convert or remove
    // values that shouldn't be in redis
    delete hash.isAdmin;
    hash.isGlobalAdmin = '' + hash.isGlobalAdmin();
    hash.isTenantAdmin = '' + hash.isTenantAdmin(hash.tenant);

    // The picture object is always present. It's faster to store plain hash
    // fields than to JSON.parse/stringify an object on every access
    if (hash.picture.smallUri) {
        hash.smallPictureUri = hash.picture.smallUri;
    }
    if (hash.picture.mediumUri) {
        hash.mediumPictureUri = hash.picture.mediumUri;
    }
    if (hash.picture.largeUri) {
        hash.largePictureUri = hash.picture.largeUri;
    }
    delete hash.picture;

    // Convert the extra object
    if (hash.extra) {
        try {
            hash.extra = JSON.stringify(hash.extra);
        } catch (err) {
            log().error({'err': err}, 'Failed to stringify the extra object for %s, bypassing the cache.', principalId);

            // Return to avoid caching an incomplete object
            return;
        }
    }

    // Store it in redis
    Redis.getClient().hmset(principalId, hash);
});
var _getUserFromRedis = function(userId, callback) {
    Redis.getClient().hgetall(userId, function(err, hash) {
        if (err) {
            return callback({'code': 500, 'msg': err});

        // Since we also push updates into redis, check the principalId field to ensure the user
        // doesn't exist in the cache merely by virtue of an upsert
        } else if (!hash || !hash.principalId) {
            return callback({'code': 404, 'msg': 'Principal not found in redis'});
        }

        return callback(null, _hashToUser(hash));
    });
};
const setEditedBy = function(contentId, userId, callback) {
  const key = _getEditMappingKey(contentId);
  Redis.getClient().rpush(key, userId, function(err) {
    if (err) {
      log().error({ err, contentId, userId }, 'Failed to store Ethercalc user edits');
      return callback({ code: 500, msg: 'Failed to store Ethercalc user edits' });
    }

    return callback();
  });
};
var _getCounts = function(callback) {
    Redis.getClient().hgetall(_getTelemetryCountHashKey(), function(err, countsHash) {
        if (err) {
            log().error({'err': err}, 'Error querying telemetry counts from redis');
            return callback(err);
        }

        // Redis will return each value as a string, so we need to cast them to integers
        _.each(countsHash, function(value, key) {
            countsHash[key] = parseInt(value, 10);
        });

        return callback(null, countsHash);
    });
};
strategy.loadAssociation(function(handle, done) {
    var key = util.format('openid:%s:%s', strategyId, handle);
    Redis.getClient().get(key, function(err, val) {
        if (err) {
            return done(err);
        } else if (!val) {
            // Data probably expired, now user is sad
            return done();
        }

        // Parse the cached association data; it is the JSON string that was stored by `saveAssociation`
        try {
            val = JSON.parse(val);
            return done(null, val.provider, val.algorithm, val.secret);
        } catch (err) {
            return done(err);
        }
    });
});
_resetGlobalCounts(function() {
    _resetLocalHistograms();
    _resetLocalCounts();

    // Also reset the locks
    Redis.getClient().del(_getTelemetryCountResetLock(), function(resetErr) {
        if (resetErr) {
            log().error({'err': resetErr}, 'Error trying to reset the count reset lock');
        }

        Redis.getClient().del(_getTelemetryCountPublishLock(), function(publishErr) {
            if (publishErr) {
                log().error({'err': publishErr}, 'Error trying to reset the telemetry publish lock');
            }

            return callback(resetErr || publishErr);
        });
    });
});
var _updateCachedUser = function(userId, fields, callback) {
    callback = callback || function(err) {
        if (err) {
            log().warn({'err': err}, 'Error updating cached user in Redis');
        }
    };

    // Clean out null and undefined values
    _.each(fields, function(value, key) {
        if (value === null || value === undefined) {
            delete fields[key];
        } else {
            // Ensure we have all strings
            fields[key] = '' + value;
        }
    });

    if (_.isEmpty(fields)) {
        return callback();
    }

    Redis.getClient().hmset(userId, fields, callback);
};
'set': function(id, data, callback) {
    var key = util.format('shibboleth:%s:%s', strategyId, id);
    Redis.getClient().setex(key, 5 * 60, data, callback);
},

'del': function(id, callback) {
    var key = util.format('shibboleth:%s:%s', strategyId, id);
    Redis.getClient().del(key, callback);
}
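/*!
 * The matching `get` half of this cache interface is not shown in the source; a
 * minimal sketch, assuming it takes the same (id, callback) shape as `del` above:
 */
'get': function(id, callback) {
    var key = util.format('shibboleth:%s:%s', strategyId, id);
    Redis.getClient().get(key, callback);
}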
var invalidateCachedUsers = module.exports.invalidateCachedUsers = function(userIds, callback) {
    // Deleting the keys from redis forces a fresh read from Cassandra on the next access
    Redis.getClient().del(userIds, callback);
};
var _resetGlobalCounts = function(callback) {
    // Drop the shared hash that holds the aggregated telemetry counts
    Redis.getClient().del(_getTelemetryCountHashKey(), callback);
};