所以我正在研究跨多個進程運行socket.io。Socket.io - 使用多個節點
該指南在這裏:https://socket.io/docs/using-multiple-nodes/給我留下了一些問題。
它提到使用配置nginx來在socket.io進程之間進行負載平衡,但它也提到了在Node.js中使用內置的集羣模塊。
我應該使用nginx和Node.js中的集羣模塊嗎?
另外如何判斷負載平衡是否正常工作?
我使用nginx選項測試了它,其中兩個socket.io進程使用redis適配器運行並使用集羣模塊。
這是我在nginx中的配置:
http {
    # Sticky load balancing across the two socket.io processes.
    # ip_hash pins each client IP to the same upstream server, which
    # Socket.IO needs so that long-polling requests from one client
    # always reach the same process.
    upstream io_nodes {
        ip_hash;
        server 127.0.0.1:6001;
        server 127.0.0.1:6002;
    }
    server {
        listen 3000;
        server_name example.com;
        # Fixed: the original "location/{" is a syntax error in nginx;
        # the location directive and its URI must be separate tokens.
        location / {
            # These three headers + HTTP/1.1 are required so the
            # WebSocket Upgrade handshake passes through the proxy.
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Host $host;
            proxy_http_version 1.1;
            proxy_pass http://io_nodes;
        }
    }
# Fixed: the http block was never closed in the original snippet.
}
這是我的socket.io代碼示例(其中大部分取自這裏:https://github.com/elad/node-cluster-socket.io):
var express = require('express'),
cluster = require('cluster'),
net = require('net'),
redis = require('redis'),
sio = require('socket.io'),
sio_redis = require('socket.io-redis');
var port = 6001,
num_processes = require('os').cpus().length;
// Master process: fork one worker per CPU core and keep handles to them
// so incoming TCP connections can be routed to a worker by source IP
// (sticky sessions).
if (cluster.isMaster) {
// NOTE(review): '6001' is hard-coded in this log message; the actual
// listen port comes from the `port` variable above — TODO log `port`.
console.log('is master 6001');
// This stores our workers. We need to keep them to be able to reference
// them based on source IP address. It's also useful for auto-restart,
// for example.
var workers = [];
// Helper function for spawning worker at index 'i'.
var spawn = function(i) {
workers[i] = cluster.fork();
// Optional: Restart worker on exit.
// NOTE(review): this respawns unconditionally, even on clean exit codes.
workers[i].on('exit', function(code, signal) {
console.log('respawning worker', i);
spawn(i);
});
};
// Spawn workers.
for (var i = 0; i < num_processes; i++) {
spawn(i);
}
// Helper function for getting a worker index based on IP address.
// This is a hot path so it should be really fast. The way it works
// is by converting the IP address to a number by removing non numeric
// characters, then compressing it to the number of slots we have.
//
// Compared against "real" hashing (from the sticky-session code) and
// "real" IP number conversion, this function is on par in terms of
// worker index distribution only much faster.
// Map a client IP address string to a worker slot in [0, len).
// Keeps only the digit characters of the address (works for both IPv4
// and IPv6 text forms) and takes the resulting number modulo `len`.
//
// ip  - remote address string; may be undefined/null when the peer
//       disconnects before the address can be read.
// len - number of workers; must be > 0.
// Returns an integer in [0, len).
var worker_index = function(ip, len) {
// Fix: connection.remoteAddress is undefined if the socket is already
// destroyed; the original crashed here on ip.length. Fall back to
// worker 0 instead of throwing in the master's hot path.
if (!ip) {
return 0;
}
var s = '';
for (var i = 0, _len = ip.length; i < _len; i++) {
// isNaN(char) is false only for digit characters in an IP string,
// so this strips '.', ':' and hex letters.
if (!isNaN(ip[i])) {
s += ip[i];
}
}
// An all-punctuation address (e.g. '::') leaves s === '' and
// Number('') === 0, which still maps to the valid slot 0.
return Number(s) % len;
};
// Create the outside facing server listening on our port.
// pauseOnConnect keeps the new socket paused so no data is read before
// the handle is handed off to the chosen worker.
var server = net.createServer({ pauseOnConnect: true }, function(connection) {
// We received a connection and need to pass it to the appropriate
// worker. Get the worker for this connection's source IP and pass
// it the connection.
var worker = workers[worker_index(connection.remoteAddress, num_processes)];
// Hand the raw socket to the worker over IPC; the worker re-emits it
// as a 'connection' event on its internal HTTP server below.
worker.send('sticky-session:connection', connection);
}).listen(port);
} else {
// Worker process: runs the actual Express + Socket.IO server.
// Note we don't use a port here because the master listens on it for us.
var app = new express();
// Here you might use middleware, attach routes, etc.
// Don't expose our internal server to the outside.
// Port 0 asks the OS for an ephemeral port; clients never connect to
// it directly — connections arrive via the master's handle passing.
var server = app.listen(0, 'localhost'),
io = sio(server);
// Tell Socket.IO to use the redis adapter. By default, the redis
// server is assumed to be on localhost:6379. You don't have to
// specify them explicitly unless you want to change them.
io.adapter(sio_redis({ host: 'localhost', port: 6379 }));
// Here you might use Socket.IO middleware for authorization etc.
io.on('connection', function(socket) {
// NOTE(review): '6001' is hard-coded; this worker's real port is the
// ephemeral one chosen by app.listen(0) above — TODO log server.address().
console.log('port 6001');
console.log(socket.id);
});
// Listen to messages sent from the master. Ignore everything else.
process.on('message', function(message, connection) {
if (message !== 'sticky-session:connection') {
return;
}
// Emulate a connection event on the server by emitting the
// event with the connection the master sent us.
server.emit('connection', connection);
// Resume reading now that this worker owns the socket (it was
// created with pauseOnConnect in the master).
connection.resume();
});
}
雖然我只是在本地測試所有這些,但連接工作得很好。
如何知道它是否正常工作?每次客戶端連接時,它似乎都會連接到端口6001上的socket.io進程。客戶端的連接代碼則連接到端口3000。
如果您的所有服務器進程都在一臺計算機上,則可以在沒有NGINX的情況下使用羣集模塊。如果你使用多臺計算機,那麼你需要像NGINX這樣的網絡結構來在不同的服務器之間進行負載平衡。而且,您可以一起使用(多臺服務器通過NGINX和每臺服務器上運行集羣的服務器進行負載平衡)。這裏的關鍵是node.js集羣只能在同一主機上的不同進程間傳播負載。 – jfriend00
這很有道理,謝謝。我只是被這一切弄糊塗了。