// Example #1
// Offloads oversized geometries from the given table to S3: rows whose
// GeoJSON form exceeds the configured threshold get a blobref assigned,
// are registered in the blobref lookup table, uploaded to S3, and finally
// cleared from the change table (only after the change table is
// initialized, so the main table still receives the geometry).
const migrateS3Offloaded = (client, txid, tableModel) => go(function*() {
  const config = configHolder.getConfig();
  const s3 = createS3();
  const bucket = config.get('s3_offload.bucket');
  const threshold = config.get('s3_offload.threshold');
  const path = config.get('s3_offload.path');
  const keyPrefix = config.get('s3_offload.key_prefix');
  const { table } = tableModel;
  const geomColumn = tableModel.columns.find(col => name(col) === 'geom');
  assert(geomColumn);

  // Assign a blobref (key prefix + uuid) to every row whose geometry is
  // large enough to offload; the blobref identifies the object in S3.
  yield client.query(
    `UPDATE ${table} SET geom_blobref = $2 || uuid_generate_v4()
     WHERE char_length(st_asgeojson(geom)::text) > $1
     `, [threshold, keyPrefix]);

  // Record every new blobref in the lookup table, so a blobref can be
  // resolved back to its transaction, entity and column.
  yield client.query(`INSERT INTO blobref(blobid, txid, entity, columnName) 
(SELECT geom_blobref, $1, $2, $3 
FROM ${table} where geom_blobref is not null)`,
    [txid, tableModel.entity, 'geom']);

  // Fetch the offloaded geometries from the db. Deliberately not streamed:
  // the result set is assumed to fit acceptably in memory.
  const offloaded = yield client.queryRows(`
   SELECT geom_blobref as blob_id, st_asgeojson(geom) as geom_json
   FROM ${table} where  geom_blobref is not null`);

  // Upload each geometry to S3, one at a time.
  for (const { blob_id, geom_json } of offloaded) {
    yield uploadToS3(s3, bucket, path, blob_id, geom_json);
  }

  // Local geometry copies are cleared only AFTER the change table has been
  // initialized, so the geometry is copied correctly to the main table first.
  yield initChangeTable(client, txid, tableModel);
  yield client.query(`update  ${table}_changes set geom = null where geom_blobref is not null`);
});
// Example #2
// Worker bootstrap: the master process sends exactly one message containing
// the serialized configuration. Everything — including config-dependent
// requires — happens inside this handler, after configuration is initialized.
process.once('message', msg => {
  configHolder.initialize(configSchema, [], JSON.parse(msg));
  logger.initialize(configHolder.getConfig().get('logging'));
  const config = configHolder.getConfig();
  // NOTE(review): these requires are deliberately placed after
  // configHolder.initialize — presumably the modules read configuration at
  // load time. Preserve this ordering.
  const errorMessages = require('../haproxy/errorMessages');
  const proddb = require('../psql/proddb');
  const { databaseQueryLimiter, databaseConnectionLimiter } = require('../psql/requestLimiter');
  const databasePools = require('@dawadk/common/src/postgres/database-pools');

  const dawaPgApi      = require('../dawaPgApi');
  const documentation = require('../documentation');
  require('../apiSpecification/allSpecs');
  const isalive = require('../isalive/isalive-worker')

  // Connection-pool options, shared by proddb and the 'prod' database pool.
  const dboptions = {
    max: config.get('pg.pool.max'),
    idleTimeoutMillis: config.get('pg.pool.idle_timeout_millis'),
    maxWaitingClients: config.get('pg.pool.max_waiting_clients'),
    acquireTimeoutMillis: config.get('pg.pool.acquire_timeout_millis'),
    statementTimeoutMillis: config.get('pg.pool.statement_timeout_millis'),
    connString: config.get('database_url'),
    pooled: true,
    databaseQueryLimiter,
    databaseConnectionLimiter
  };
  proddb.init(dboptions);
  databasePools.create('prod', dboptions);

  const app = express();

  // Returns middleware that applies an idle timeout and TCP keep-alive
  // (1 second initial delay) to each request's underlying socket.
  function socketTimeoutMiddleware(timeoutMillis) {
    return function(req, res, next) {
      res.socket.setTimeout(timeoutMillis);
      res.socket.setKeepAlive(true, 1000);
      next();
    };
  }


  app.use(socketTimeoutMiddleware(config.get('socket_timeout_millis')));

// Hackish: We reduce memlevel to prevent zLib from caching too much internally
// Otherwise, it will take too long for our application to start responding to JSON requests,
// potentially resulting in a TCP disconnection.
  app.use(compression( {
    memLevel: 3
  }));
  // Static assets are served with a short (10 s) client-side cache.
  app.use(express.static(path.join(__dirname, '../dist'), {maxAge: 10000}));


  const listenPort = config.get('port');

  // Mount the API routes and the documentation site at the root path.
  app.use('', dawaPgApi.setupRoutes());
  app.use('', documentation);

  // Serves a canned HAProxy error response by name, 404 if the name is
  // unknown. NOTE(review): presumably used so HAProxy can fetch error
  // bodies from the app — confirm against the haproxy config.
  app.get('/error/:error', function(req, res) {
    const error = req.params.error;
    if(Object.keys(errorMessages).includes(error)) {
      res.status(errorMessages[error].status);
      res.set('Content-Type', 'application/json');
      res.send(errorMessages[error].content);
    }
    else {
      res.sendStatus(404);
    }
  });

  const server = http.createServer(app);
  // Attach isalive health-check handling before the server starts listening.
  isalive.setup(server);
  server.listen(listenPort);
  logger.info("startup", "Express server listening for connections", {listenPort: listenPort, mode: app.settings.env});
});