Cassandra.runQuery(q.query, q.parameters, err => {
  if (err) {
    return callback(err);
  }

  // Announce the new tenant locally before returning to the caller. The PubSub
  // publish below broadcasts the same event cluster-wide, but emitting here
  // first lets unit tests observe config updates BEFORE the REST request
  // completes. Without it there would be a short window between the REST
  // request returning and the config re-caching during which the config is
  // stale; the window's length depends on how quickly Redis can broadcast the
  // pubsub messages. Under IO starvation (such as during unit tests) that can
  // cause intermittent test failures. The trade-off of emitting here is that
  // authentication strategies and config elements get recached twice
  TenantsAPI.emit('created', tenant);

  // Signal that a caching operation is about to take place
  TenantsAPI.emit('preCache');

  // Tell every app server in the cluster that the new tenant should be started
  Pubsub.publish('oae-tenants', 'created ' + tenant.alias, err => {
    if (err) {
      return callback(err);
    }

    return callback(null, tenant);
  });
});
_updateCachedTenant(alias, err => {
  // A refresh failure is only logged; the event below is emitted regardless so
  // listeners are still notified with whatever tenant data is currently cached
  if (err) {
    log().error({ err, cmd, alias }, 'An error occurred while refreshing a tenant after update');
  }

  TenantsAPI.emit(cmd, getTenant(alias));
});
Cassandra.runAutoPagedQuery(queryAllTenants, false, (err, rows) => {
  if (err) {
    return callback(err);
  }

  // Throw away the previously cached tenants
  tenants = {};
  tenantsByHost = {};
  tenantsNotInteractable = {};

  // Rebuild the email domain index for all tenants from scratch
  tenantEmailDomainIndex = new TenantEmailDomainIndex();

  // Fabricate a tenant object that stands in for the global admin tenant
  globalTenant = new Tenant(serverConfig.globalAdminAlias, 'Global admin server', serverConfig.globalAdminHost, {
    isGlobalAdminServer: true
  });

  // Make the global admin tenant available through the caches
  tenants[globalTenant.alias] = globalTenant;
  tenantsByHost[globalTenant.host] = globalTenant;
  tenantsNotInteractable[globalTenant.alias] = globalTenant;

  // Materialize a tenant object for every row that came back from storage
  const allTenants = _.map(_.map(rows, Cassandra.rowToHash), hash => _storageHashToTenant(hash.alias, hash));

  _.each(allTenants, tenant => {
    // Cache the tenant both by alias and by host
    tenants[tenant.alias] = tenant;
    tenantsByHost[tenant.host] = tenant;

    // Register each of the tenant's email domains in the index
    _.each(tenant.emailDomains, emailDomain => {
      tenantEmailDomainIndex.update(tenant.alias, emailDomain);
    });

    // Track the tenants that are private, disabled or deleted so we know which
    // ones cannot be interacted with
    if (!tenant.active || tenant.deleted || TenantsUtil.isPrivate(tenant.alias)) {
      tenantsNotInteractable[tenant.alias] = tenant;
    }
  });

  // Cache the list of tenants sorted by alias
  tenantsSorted = _.sortBy(tenants, 'alias');

  // Rebuild the search index over all tenants
  tenantSearchIndex = new TenantIndex(tenants);

  // Let listeners know that all tenants have been cached
  TenantsAPI.emit('cached');

  return callback(null, tenants);
});
UserAPI.deleteOrRestoreUsersByTenancy(ctx, eachAlias, disabled, err => {
  // Use the same early-return guard style as the other callbacks in this file;
  // the original `if/else` worked but returned implicitly from the error branch
  if (err) {
    return transformed(err);
  }

  // Indicate that a caching operation is pending
  TenantsAPI.emit('preCache');

  // Broadcast an event around the cluster to start or stop a tenant
  Pubsub.publish('oae-tenants', cmd + ' ' + eachAlias, transformed);
});
Cassandra.runQuery(q.query, q.parameters, err => {
  if (err) {
    return callback(err);
  }

  // Let listeners know a caching operation is about to take place
  TenantsAPI.emit('preCache');

  // Ask every app server in the cluster to re-cache this tenant's metadata
  Pubsub.publish('oae-tenants', 'refresh ' + alias, callback);
});
Cassandra.runQuery('SELECT * FROM "Tenant" WHERE "alias" = ?', [tenantAlias], (err, rows) => {
  if (err) {
    return callback(err);
  }

  if (_.isEmpty(rows)) {
    // Nothing to re-cache for this alias; still signal that caching finished
    TenantsAPI.emit('cached');
    return callback();
  }

  // Materialize the tenant object from the storage row
  const tenant = _storageHashToTenant(tenantAlias, Cassandra.rowToHash(rows[0]));

  // If a previous version of the tenant was cached, evict its host mapping and
  // email domains. The host may have been updated, so it must be removed here
  const oldTenant = tenants[tenantAlias];
  if (oldTenant) {
    delete tenantsByHost[oldTenant.host];

    _.each(oldTenant.emailDomains, emailDomain => {
      tenantEmailDomainIndex.delete(emailDomain);
    });
  }

  // Cache the freshly loaded tenant
  tenants[tenant.alias] = tenant;
  tenantsByHost[tenant.host] = tenant;

  // Register the tenant's email domains in the index, warning on conflicts
  _.each(tenant.emailDomains, emailDomain => {
    const conflictingTenantAlias = tenantEmailDomainIndex.update(tenantAlias, emailDomain);
    if (conflictingTenantAlias) {
      log().warn(
        { tenant, oldTenant, conflictingTenantAlias },
        'Failed to update tenant in the email domain index due to a conflicting domain'
      );
    }
  });

  // Keep the "not interactable" cache in sync: the global admin tenant and any
  // inactive, deleted or private tenant cannot be interacted with
  const isNotInteractable =
    tenant.isGlobalAdminServer || !tenant.active || tenant.deleted || TenantsUtil.isPrivate(tenant.alias);
  if (isNotInteractable) {
    tenantsNotInteractable[tenant.alias] = tenant;
  } else {
    delete tenantsNotInteractable[tenant.alias];
  }

  // Replace the tenant in the sorted list, or insert it at its sorted position
  // if it was not cached before
  const existingIndex = _.findIndex(tenantsSorted, cachedTenant => cachedTenant.alias === tenantAlias);
  if (existingIndex === -1) {
    tenantsSorted.splice(_.sortedIndex(tenantsSorted, tenant, 'alias'), 0, tenant);
  } else {
    tenantsSorted[existingIndex] = tenant;
  }

  // Keep the search index up to date
  tenantSearchIndex.update(tenant);

  // Signal that the tenant cache is up to date again
  TenantsAPI.emit('cached');

  return callback(null, tenants);
});
// Relay every pattern-subscribed Redis message to the local emitter, keyed by
// the channel it arrived on; the matching pattern itself is not needed
redisSubscriber.on('pmessage', (_pattern, channel, message) => {
  emitter.emit(channel, message);
});
// Once all candidate files have been checked, announce which directory was cleaned
checkFiles(paths, time, () => {
  Cleaner.emit('cleaned', directory);
});