value: function createConsumer(sub) {
			var _this3 = this;

			var client = new _kafkaNode.Client(this.getConnectionString(), this.getClientId(sub));
			var payloads = _underscore2.default.map(sub.topics, function (topic) {
				return {
					topic: topic
				};
			});

			this.logger.info('[KafkaForwarder] creating consumer');
			var consumer = new _kafkaNode.HighLevelConsumer(client, payloads, {
				autoCommit: true,
				autoCommitIntervalMs: 5000,
				encoding: 'utf8'
			});
			this.logger.info('[KafkaForwarder] created consumer');

			//Handle consumer connection error
			consumer.on("error", function (err) {
				//Waiting for kafka to timeout and clear previous connection
				if (KAFKA_ERROR.isNodeExists(err)) {
					_this3.logger.info('Waiting for kafka to clear previous connection');
					setTimeout(_this3.createConsumer.bind(_this3, sub), 5000);
				} else if (KAFKA_ERROR.isCouldNotFindBroker(err)) {
					//Waiting for KAFKA to spin up (possibly)
					_this3.logger.info('Waiting for kafka to spin up');
					setTimeout(_this3.createConsumer.bind(_this3, sub), 5000);
				}
			});

			consumer.on('message', function (msg) {
				if (!msg.value) {
					return;
				}

				_this3.send(msg.value, sub.host, parseInt(sub.port, 10));
			});

			consumer.on('connect', function () {
				_this3.connections[_this3.getClientId(sub)] = {
					host: sub.host,
					port: sub.unicastport,
					topics: sub.topics,
					consumer: consumer,
					liveStatus: 1, // 0 - unresponsive, 1 - live, 2 - pending check
					subInfo: sub
				};
				_this3.logger.info('Subscribed ' + _this3.getClientId(sub));
			});

			this.logger.info('[KafkaForwarder] Attached all required callbacks to consumer');
		}
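The KAFKA_ERROR.isNodeExists and isCouldNotFindBroker predicates are defined elsewhere in the module. A minimal sketch of what they might look like, assuming they simply match on the error strings kafka-node surfaces from Zookeeper registration ("NODE_EXISTS") and from broker discovery:

// Hypothetical helpers -- the real module defines its own predicates.
var KAFKA_ERROR = {
	isNodeExists: function (err) {
		// Zookeeper reports a stale consumer registration as NODE_EXISTS.
		return !!err && /NODE_EXISTS/.test(err.message || String(err));
	},
	isCouldNotFindBroker: function (err) {
		// Raised while the broker is still spinning up or unreachable.
		return !!err && /could not find a broker/i.test(err.message || String(err));
	}
};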
Example #2
value: function createConsumer(sub, monitoring) {
			var _this4 = this;

			var connStr = this.getConnectionString(monitoring);

			this.logger.info('[KafkaPuller] Creating consumer for ' + connStr + ', ' + sub.topics.join(' ') + ' => ' + sub.port);
			var defer = _q2.default.defer();
			var client = new _kafkaNode.Client(connStr, _nodeUuid2.default.v4());
			var FIFO = new _dequeue2.default();

			var payloads = _underscore2.default.map(sub.topics, function (topic) {
				return { topic: topic };
			});

			var consumer = new _kafkaNode.HighLevelConsumer(client, payloads, {
				groupId: this.getClientId(),
				id: this.getClientId(),
				autoCommit: true,
				autoCommitIntervalMs: 5000,
				encoding: 'utf8'
			});

			this.logger.info('[KafkaPuller] created consumer');

			//Handle consumer connection error
			consumer.on('error', function (err) {
				_this4.logger.warn('[KafkaPuller] Consumer error ' + JSON.stringify(err));
				defer.reject(err);
			});

			consumer.on('rebalancing', function () {
				_this4.logger.info('[KafkaPuller] Rebalancing consumer');
			});

			client.on('ready', function () {
				_this4.logger.info('[KafkaPuller] Client is ready');
			});

			client.on('brokersChanged', function () {
				_this4.logger.info('[KafkaPuller] brokers changed');
			});

			defer.resolve({
				consumer: consumer,
				FIFO: FIFO,
				port: parseInt(sub.port, 10)
			});

			return defer.promise;
		}
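Note that the deferred is resolved synchronously before the promise is returned, so the defer.reject(err) in the error handler has no effect once the promise has settled; consumer errors after creation are only logged. A minimal sketch of how the returned promise might be consumed (the caller names here are assumptions, not part of the original module):

// Hypothetical caller -- illustrates only the shape of the resolved value.
puller.createConsumer(sub, monitoring).then(function (pipe) {
	// pipe.consumer is the HighLevelConsumer, pipe.FIFO the dequeue buffer,
	// pipe.port the destination port parsed from the subscription.
	pipe.consumer.on('message', function (msg) {
		pipe.FIFO.push(msg);
	});
}, function (err) {
	console.error('Failed to create consumer', err);
});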
Example #3
reset_kafka = function () {
    var kafka_client = new kafka.Client(zk_nodes,
                                    'sentiment-analysis',
                                    { 'retries': 10 });
    var on_error = function (error) {
        console.log("Got kafka error:", error);
        if (kafka_consumer) {
            try {
                kafka_consumer.removeAllListeners();
                kafka_consumer.close();
            } catch (e) {
                console.log("Could not close kafka consumer:", e);
            }
        }
        try {
            kafka_client.removeAllListeners();
            kafka_client.close();
        } catch (e) {
            console.log("Could not close kafka client:", e);
        }
        reset_kafka();
    };
    kafka_client.on('error', on_error);

    kafka_consumer = new kafka.HighLevelConsumer(kafka_client, kafkaQueues);
    kafka_consumer.on('message', function (message) {
        console.log("Got message from kafka:", message)
        if (queue.length >= queue_size) {
            clearTimeout(dump_timeout);
            dump_queue();
        }
        try {
            queue.push(JSON.parse(message['value']));
        }
        catch (e) {
            console.log("ERROR parsing JSON:", e)
        }
    });

    kafka_consumer.on('error', on_error);
    kafka_consumer.on('offsetOutOfRange', function (err) {
        // The stored offset is outside the broker's retained range (e.g. the
        // log was truncated by retention), so just start over from offset 0.
        // kafka-node's Offset#fetch with time: -1 could be used to jump to
        // the latest offset instead.
        console.log('Resetting offset for', err['topic'], 'partition:', err['partition']);
        kafka_consumer.setOffset(err['topic'], err['partition'], 0);
    });
}
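Because on_error calls reset_kafka() again immediately, a broker that stays down will be reconnected to in a tight loop. A variant with a simple delay before retrying (the 5-second figure is an assumption, mirroring the retry interval used in the first example):

// Hypothetical variant of the tail of on_error: back off before
// reconnecting instead of recursing immediately.
var on_error = function (error) {
    console.log("Got kafka error:", error);
    // ... close kafka_consumer and kafka_client as above ...
    setTimeout(reset_kafka, 5000);
};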
Example #4
// Dependencies inferred from the usage below.
var kafka = require('kafka-node');
var async = require('async');
var debug = require('debug')('healthcheck');

module.exports = function(client, topic, partitions, message, callback) {
	var consumer_payloads = [{topic: topic, offset: -1}],
	    consumer_options = {groupId: 'healthcheck', fromOffset: false};

	var received = 0,
	    target_bitmap = Math.pow(2, partitions) - 1,
	    callback_guard = false;

	var consumer = new kafka.HighLevelConsumer(client, consumer_payloads, consumer_options);
	consumer.on("message", function (consumer_message) {
		debug(consumer_message);

		if (consumer_message.value == message) {
			debug('consumed test message');
			received |= Math.pow(2, consumer_message.partition);

			if (received == target_bitmap) {
				async.series([
					function (cb) {
						consumer.commit(true, cb);
					},
					function (cb) {
						consumer.close(true, cb);
					},
					function (cb) {
						client.close(cb);
					}
				], function (err, results) {
					if (!callback_guard) {
						callback_guard = true;
						callback(err);
					}
				});
			}
		}
	});
	consumer.on("error", function (err) {
		debug("error", err);
		if (!callback_guard) {
			callback_guard = true;
			callback(err);
		}
	});
};
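A hypothetical invocation, assuming the module above is saved as healthcheck.js and that a producer has already written the test message to every partition of the topic:

// Hypothetical usage -- file name, topic and partition count are assumptions.
var kafka = require('kafka-node');
var healthcheck = require('./healthcheck');

var client = new kafka.Client('localhost:2181');
healthcheck(client, 'healthcheck-topic', 4, 'ping-' + Date.now(), function (err) {
    if (err) {
        console.error('healthcheck failed:', err);
        process.exit(1);
    }
    console.log('test message consumed from all 4 partitions');
});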
Example #5
    process.on('SIGINT', function() {
        log('Caught interrupt signal');
        // Exit only from the close callbacks; an unconditional process.exit()
        // here would kill the process before the async close completes.
        kafkaConsumer.close(true, function() {
          kafkaClient.close(function() {
            process.exit();
          });
        });
    });
Example #6
  return function(serverPort, zookeeperConnect, topic) {
    // default topic is 'rcstream'
    topic = topic || 'rcstream';

    var MAX_SUBSCRIPTIONS = 10;
    var server = require('http').createServer();
    var namespace = require('socket.io')(server).of('/rc');
    var kafka = require('kafka-node');


    var log = debug('rcstream:main');
    var debugKafka = debug('rcstream:kafka');
    var subscriptionStore = {};

    server.listen(serverPort);
    log('Listening on port %d', serverPort);
    setTimeout(function() {
        log('Waiting 2 seconds for websocket clients to reconnect');
    }, 2000);
    log('Connecting to zookeeper at %s', zookeeperConnect);

    // Set up Kafka Consumer
    // TODO: Wait for websocket clients to reconnect, as consumption
    // will happen faster, and clients may miss messages.
    var kafkaClient = new kafka.Client(zookeeperConnect, 'rcstream');
    var kafkaConsumer = new kafka.HighLevelConsumer(
      kafkaClient,
      [
          { topic: topic }
      ],
      {
        autoCommit: true,
        autoCommitIntervalMs: 1000,
        groupId: 'rcstream',
      }
    );


    kafkaConsumer.on('error', function() {
      log('Kafka error %o', arguments);
      process.exit(1);
    });

    // Consume message from kafka and send to ws client
    kafkaConsumer.on('message', function(message) {
      var change, wiki, id, subscriptions;
      try {
        change = JSON.parse(message.value);
      } catch (e) {
        log('Failed to decode: %o', message.value);
        return;
      }
      debugKafka('message %o', arguments);

      wiki = change.server_name;

      // Send this message to socket.io clients that want it.
      for (id in namespace.connected) {
        subscriptions = subscriptionStore[id];
        if (subscriptions && matchAny(wiki, subscriptions)) {
          namespace.connected[id].emit('change', change);
        }
      }
    });

    // Provide a map with server metrics
    server.on('request', function(req, resp) {
      if (req.url === '/rcstream_status') {
        resp.writeHead(200, { 'Content-Type': 'application/json' });
        resp.write(JSON.stringify({
          connected_clients: Object.keys(namespace.connected).length,
        }, null, 2));
        resp.end();
        return;
      }
    });

    // Handle socket.io clients
    namespace.on('connection', function(socket) {
      var subscriptions = subscriptionStore[socket.id] = [];
      socket.on('subscribe', function(wikis) {
        var i, wiki;
        if (!Array.isArray(wikis)) {
          wikis = [wikis];
        }
        i = wikis.length;
        while (i--) {
          wiki = wikis[i];
          if (typeof wiki !== 'string') {
            continue;
          }
          if (subscriptions.indexOf(wiki) !== -1) {
            continue;
          }
          if (subscriptions.length >= MAX_SUBSCRIPTIONS) {
            socket.error('subscribe_error');
            return;
          }
          subscriptions.push(wiki);
          keySort(subscriptions, subSortKey);
        }
      })
      .on('unsubscribe', function(wikis) {
        var i, idx;
        if (!Array.isArray(wikis)) {
          wikis = [wikis];
        }
        i = wikis.length;
        while (i--) {
          idx = subscriptions.indexOf(wikis[i]);
          if (idx !== -1) {
            // Remove from list
            subscriptions.splice(idx, 1);
          }
        }
      })
      .on('disconnect', function() {
        // Clean up
        delete subscriptionStore[socket.id];
      });
    });


    // Handle SIGINT to close the Kafka consumer nicely, so that Zookeeper
    // knows this consumer has shut down properly.
    process.on('SIGINT', function() {
        log('Caught interrupt signal');
        // Exit only from the close callbacks; calling process.exit()
        // unconditionally here would kill the process before the async close
        // completes, leaving Zookeeper with a stale consumer registration.
        kafkaConsumer.close(true, function() {
          kafkaClient.close(function() {
            process.exit();
          });
        });
    });
  };
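matchAny, keySort, and subSortKey are helpers defined elsewhere in the file. A purely illustrative matchAny, assuming subscriptions may be exact server names or simple '*' glob patterns:

// Hypothetical helper -- the real implementation lives elsewhere in the file.
function matchAny(wiki, subscriptions) {
  return subscriptions.some(function (pattern) {
    if (pattern.indexOf('*') === -1) {
      return pattern === wiki;
    }
    // Escape regex metacharacters, then widen '*' into '.*'.
    var re = new RegExp('^' + pattern
      .replace(/[.+?^${}()|[\]\\]/g, '\\$&')
      .replace(/\*/g, '.*') + '$');
    return re.test(wiki);
  });
}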
Example #7
 kafka_consumer.on('offsetOutOfRange', function (err) {
     // The stored offset is outside the broker's retained range (e.g. the
     // log was truncated by retention), so just start over from offset 0.
     // kafka-node's Offset#fetch with time: -1 could be used to jump to
     // the latest offset instead.
     console.log('Resetting offset for', err['topic'], 'partition:', err['partition']);
     kafka_consumer.setOffset(err['topic'], err['partition'], 0);
 });
Example #8
					function (cb) {
						consumer.close(true, cb);
					},
Example #9
					function (cb) {
						consumer.commit(true, cb);
					},
Example #10
'use strict';

var assert = require('assert');
var kafka = require('kafka-node');
var client = new kafka.Client('localhost:2181');
var topic = 'loopback-connector-kafka' + Math.random();
var consumer = new kafka.HighLevelConsumer(client, [ { topic: topic } ], {});
var producer = new kafka.HighLevelProducer(client);
var connector = require('..');
var settings = {
    connectionString: 'localhost:2181'
};
var dataSource = { settings: settings };
connector.initialize(dataSource);

consumer.on('message', function (message) {
    assert.equal(message.value, 'Hallo');
    process.exit();
});

consumer.on('error', function (error) {
    console.log('Consumer error: ', error);
});

producer.createTopics([ topic ], function (err) {
    if (err) {
        console.log('Got error when create topic:', err);
        return;
    }

    setTimeout(function () {