-/*
- The rpcc class handles client-side RPC. Each rpcc is bound to a single RPC
- server. The jobs of rpcc include maintaining a connection to server, sending
- RPC requests and waiting for responses, retransmissions, at-most-once delivery
- etc.
-
- The rpcs class handles the server side of RPC. Each rpcs handles multiple
- connections from different rpcc objects. The jobs of rpcs include accepting
- connections, dispatching requests to registered RPC handlers, at-most-once
- delivery etc.
-
- Both rpcc and rpcs use the connection class as an abstraction for the
- underlying communication channel. To send an RPC request/reply, one calls
- connection::send() which blocks until data is sent or the connection has
- failed (thus the caller can free the buffer when send() returns). When a
- request/reply is received, connection makes a callback into the corresponding
- rpcc or rpcs (see rpcc::got_pdu() and rpcs::got_pdu()).
-
- Thread organization:
- rpcc uses application threads to send RPC requests and blocks to receive the
- reply or error. All connections use a single PollMgr object to perform async
- socket IO. PollMgr creates a single thread to examine the readiness of socket
- file descriptors and informs the corresponding connection whenever a socket is
- ready to be read or written. (We use asynchronous socket IO to reduce the
- number of threads needed to manage these connections; without async IO, at
- least one thread is needed per connection to read data without blocking other
- activities.) Each rpcs object creates one thread for listening on the server
- port and a pool of threads for executing RPC requests. The thread pool allows
- us to control the number of threads spawned at the server (spawning one thread
- per request will hurt when the server faces thousands of requests).
-
- In order to delete a connection object, we must maintain a reference count.
- For rpcc, multiple client threads might be invoking the rpcc::call() functions
- and thus holding multiple references to the underlying connection object. For
- rpcs, multiple dispatch threads might be holding references to the same
- connection object. A connection object is deleted only when the underlying
- connection is dead and the reference count reaches zero.
-
- This version of the RPC library explicitly joins exited threads to make sure
- no outstanding references exist before deleting objects.
-
- To delete a rpcc object safely, the users of the library must ensure that
- there are no outstanding calls on the rpcc object.
-
- To delete a rpcs object safely, we do the following in sequence: 1. stop
- accepting new incoming connections. 2. close existing active connections. 3.
- delete the dispatch thread pool which involves waiting for current active RPC
- handlers to finish. It is interesting how a thread pool can be deleted
- without using thread cancellation. The trick is to inject x "poison pills" for
- a thread pool of x threads. Upon getting a poison pill instead of a normal
- task, a worker thread will exit (and thread pool destructor waits to join all
- x exited worker threads).
- */
+//
+// The rpcc class handles client-side RPC. Each rpcc is bound to a single RPC
+// server. The jobs of rpcc include maintaining a connection to the server,
+// sending RPC requests and waiting for responses, handling retransmissions,
+// providing at-most-once delivery, etc.
+//
+// The rpcs class handles the server side of RPC. Each rpcs handles multiple
+// connections from different rpcc objects. The jobs of rpcs include accepting
+// connections, dispatching requests to registered RPC handlers, providing
+// at-most-once delivery, etc.
+//
+// Both rpcc and rpcs use the connection class as an abstraction for the
+// underlying communication channel. To send an RPC request/reply, one calls
+// connection::send() which blocks until data is sent or the connection has
+// failed (thus the caller can free the buffer when send() returns). When a
+// request/reply is received, connection makes a callback into the corresponding
+// rpcc or rpcs (see rpcc::got_pdu() and rpcs::got_pdu()).
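+//
+// A sketch of that contract (the marshall value and conn handle here are
+// illustrative; the surrounding setup is omitted):
+//
+//   marshall req;                 // caller-owned buffer
+//   conn->send(req);              // blocks until sent or the connection fails,
+//                                 // so the buffer can be freed once send() returns
+//
+// and on the receiving side the connection calls back into its owner, e.g.
+//
+//   bool rpcs::got_pdu(const shared_ptr<connection> & c, const string & b);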
+//
+// Thread organization:
+// rpcc uses application threads to send RPC requests and block until the reply
+// or an error arrives. All connections use a single PollMgr object to perform async
+// socket IO. PollMgr creates a single thread to examine the readiness of socket
+// file descriptors and informs the corresponding connection whenever a socket is
+// ready to be read or written. (We use asynchronous socket IO to reduce the
+// number of threads needed to manage these connections; without async IO, at
+// least one thread is needed per connection to read data without blocking other
+// activities.) Each rpcs object creates one thread for listening on the server
+// port and a pool of threads for executing RPC requests. The thread pool allows
+// us to control the number of threads spawned at the server (spawning one thread
+// per request would hurt performance when the server faces thousands of requests).
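+//
+// Conceptually the PollMgr thread runs a loop like the following (an
+// illustrative sketch built on poll(2), not the actual PollMgr code):
+//
+//   while (running) {
+//       poll(pfds, npfds, -1);             // block until some socket is ready
+//       for (size_t i = 0; i < npfds; i++)
+//           if (pfds[i].revents & (POLLIN | POLLOUT))
+//               conn_for[pfds[i].fd]->handle_io(pfds[i].revents);  // hypothetical callback
+//   }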
+//
+// In order to delete a connection object, we must maintain a reference count.
+// For rpcc, multiple client threads might be invoking the rpcc::call() functions
+// and thus holding multiple references to the underlying connection object. For
+// rpcs, multiple dispatch threads might be holding references to the same
+// connection object. A connection object is deleted only when the underlying
+// connection is dead and the reference count reaches zero.
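+//
+// In this version that count is provided by shared_ptr<connection>: for example,
+// rpcc::call1() copies the current channel into a local handle,
+//
+//   shared_ptr<connection> ch;
+//   get_latest_connection(ch);   // ch keeps the connection alive for this call
+//
+// so the connection stays alive for the whole call even if chan_ is later
+// replaced or reset.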
+//
+// This version of the RPC library explicitly joins exited threads to make sure
+// no outstanding references exist before deleting objects.
+//
+// To delete a rpcc object safely, the users of the library must ensure that
+// there are no outstanding calls on the rpcc object.
+//
+// To delete a rpcs object safely, we do the following in sequence: 1. stop
+// accepting new incoming connections. 2. close existing active connections. 3.
+// delete the dispatch thread pool, which involves waiting for currently active
+// RPC handlers to finish. It is interesting how a thread pool can be deleted
+// without using thread cancellation. The trick is to inject x "poison pills" for
+// a thread pool of x threads. Upon getting a poison pill instead of a normal
+// task, a worker thread will exit (and the thread pool destructor waits to join
+// all x exited worker threads).
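+//
+// A sketch of the idea (a hypothetical simplification; the real ThrPool
+// destructor differs in detail):
+//
+//   ~ThrPool() {
+//       for (size_t i = 0; i < nthreads_; i++)
+//           addJob(job_t());     // an empty job acts as the poison pill
+//       for (auto & t : th_)
+//           t.join();            // each worker exits after taking one pill
+//   }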
+//
#include "rpc.h"
#include <netinet/tcp.h>
#include <netdb.h>
#include <unistd.h>
+#include <string.h>
inline void set_rand_seed() {
auto now = time_point_cast<nanoseconds>(steady_clock::now());
srandom((uint32_t)now.time_since_epoch().count()^(uint32_t)getpid());
}
-static sockaddr_in make_sockaddr(const string &hostandport);
+static sockaddr_in make_sockaddr(const string & hostandport);
-rpcc::rpcc(const string & d, bool retrans) :
- dst_(make_sockaddr(d)), srv_nonce_(0), bind_done_(false), xid_(1), lossytest_(0),
- retrans_(retrans), reachable_(true), chan_(), destroy_wait_ (false), xid_rep_done_(-1)
+rpcc::rpcc(const string & d) : dst_(make_sockaddr(d))
{
- if(retrans){
- set_rand_seed();
- clt_nonce_ = (unsigned int)random();
- } else {
- // special client nonce 0 means this client does not
- // require at-most-once logic from the server
- // because it uses tcp and never retries a failed connection
- clt_nonce_ = 0;
- }
+ set_rand_seed();
+ clt_nonce_ = (nonce_t)random();
char *loss_env = getenv("RPC_LOSSY");
- if(loss_env)
+ if (loss_env)
lossytest_ = atoi(loss_env);
- // xid starts with 1 and latest received reply starts with 0
- xid_rep_window_.push_back(0);
-
IF_LEVEL(2) LOG("cltn_nonce is " << clt_nonce_ << " lossy " << lossytest_);
}
// are blocked inside rpcc or will use rpcc in the future
rpcc::~rpcc() {
cancel();
- IF_LEVEL(2) LOG("delete nonce " << clt_nonce_ << " channo=" << (chan_?chan_->channo():-1));
- if(chan_)
- chan_->closeconn();
+ IF_LEVEL(2) LOG("delete nonce " << clt_nonce_ << " chan " << (chan_?(int)chan_->fd:-1));
+ chan_.reset();
VERIFY(calls_.size() == 0);
}
int rpcc::bind(milliseconds to) {
- unsigned int r;
- int ret = call_timeout(rpc_const::bind, to, r, 0);
- if(ret == 0){
+ nonce_t r;
+ rpc_protocol::status ret = call_timeout(rpc_protocol::bind, to, r);
+ if (ret == 0) {
lock ml(m_);
bind_done_ = true;
srv_nonce_ = r;
lock ml(m_);
if (calls_.size()) {
LOG("force callers to fail");
- for(auto &p : calls_){
+ for (auto & p : calls_) {
caller *ca = p.second;
IF_LEVEL(2) LOG("force caller to fail");
- {
- lock cl(ca->m);
- ca->done = true;
- ca->intret = rpc_const::cancel_failure;
- ca->c.notify_one();
- }
+
+ lock cl(ca->m);
+ ca->done = true;
+ ca->intret = rpc_protocol::cancel_failure;
+ ca->c.notify_one();
}
- while (calls_.size () > 0){
- destroy_wait_ = true;
+ destroy_wait_ = true;
+ while (calls_.size () > 0)
destroy_wait_c_.wait(ml);
- }
+
LOG("done");
}
}
-int rpcc::call1(proc_t proc, marshall &req, string &rep, milliseconds to) {
+int rpcc::call1(proc_id_t proc, milliseconds to, string & rep, marshall & req) {
caller ca(0, &rep);
- int xid_rep;
+ xid_t xid_rep;
{
lock ml(m_);
- if((proc != rpc_const::bind && !bind_done_) ||
- (proc == rpc_const::bind && bind_done_)){
+ if ((proc != rpc_protocol::bind.id && !bind_done_) || (proc == rpc_protocol::bind.id && bind_done_)) {
IF_LEVEL(1) LOG("rpcc has not been bound to dst or binding twice");
- return rpc_const::bind_failure;
+ return rpc_protocol::bind_failure;
}
- if(destroy_wait_){
- return rpc_const::cancel_failure;
- }
+ if (destroy_wait_)
+ return rpc_protocol::cancel_failure;
ca.xid = xid_++;
calls_[ca.xid] = &ca;
- req.pack_header(request_header{ca.xid, proc, clt_nonce_, srv_nonce_, xid_rep_window_.front()});
+ req.pack_header(rpc_protocol::request_header{
+ ca.xid, proc, clt_nonce_, srv_nonce_, xid_rep_window_.front()
+ });
xid_rep = xid_rep_window_.front();
}
milliseconds curr_to = rpc::to_min;
- auto finaldeadline = steady_clock::now() + to, nextdeadline = finaldeadline;
+ auto finaldeadline = steady_clock::now() + to;
bool transmit = true;
shared_ptr<connection> ch;
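+    // Wait for the reply in rounds of exponentially growing length, starting at
+    // rpc::to_min and doubling each round, until the final deadline expires; the
+    // request is retransmitted only if the connection has died in the meantime.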
while (1) {
- if(transmit) {
- get_refconn(ch);
+ if (transmit) {
+ get_latest_connection(ch);
if (ch) {
if (reachable_) {
request forgot;
transmit = false; // only send once on a given channel
}
- if(finaldeadline == time_point<steady_clock>::min())
- break;
-
- nextdeadline = steady_clock::now() + curr_to;
- if(nextdeadline > finaldeadline) {
- nextdeadline = finaldeadline;
- finaldeadline = time_point<steady_clock>::min();
- }
+ auto nextdeadline = min(steady_clock::now() + curr_to, finaldeadline);
+ curr_to *= 2;
{
lock cal(ca.m);
- while (!ca.done){
+ while (!ca.done) {
IF_LEVEL(2) LOG("wait");
- if(ca.c.wait_until(cal, nextdeadline) == cv_status::timeout){
+ if (ca.c.wait_until(cal, nextdeadline) == cv_status::timeout) {
IF_LEVEL(2) LOG("timeout");
break;
}
}
- if(ca.done){
+ if (ca.done) {
IF_LEVEL(2) LOG("reply received");
break;
}
}
- if(retrans_ && (!ch || ch->isdead())) {
- // since connection is dead, retransmit
- // on the new connection
+ if (nextdeadline >= finaldeadline)
+ break;
+
+ // retransmit on new connection if connection is dead
+ if (!ch || ch->isdead())
transmit = true;
- }
- curr_to *= 2;
}
{
// may need to update the xid again here, in case the
// packet times out before it's even sent by the channel.
// I don't think there's any harm in maybe doing it twice
- update_xid_rep(ca.xid);
+ update_xid_rep(ca.xid, ml);
- if(destroy_wait_){
- destroy_wait_c_.notify_one();
- }
+ if (destroy_wait_)
+ destroy_wait_c_.notify_one();
}
if (ca.done && lossytest_)
ntoh(dst_.sin_port) << " done? " << ca.done << " ret " << ca.intret);
// destruction of req automatically frees its buffer
- return (ca.done? ca.intret : rpc_const::timeout_failure);
+ return (ca.done? ca.intret : rpc_protocol::timeout_failure);
}
-void
-rpcc::get_refconn(shared_ptr<connection> & ch)
-{
+void rpcc::get_latest_connection(shared_ptr<connection> & ch) {
lock ml(chan_m_);
if (!chan_ || chan_->isdead())
- chan_ = connect_to_dst(dst_, this, lossytest_);
+ chan_ = connection::to_dst(dst_, this, lossytest_);
if (chan_)
ch = chan_;
rpcc::got_pdu(const shared_ptr<connection> &, const string & b)
{
unmarshall rep(b, true);
- reply_header h;
+ rpc_protocol::reply_header h;
rep.unpack_header(h);
- if(!rep.ok()){
+ if (!rep.ok()) {
IF_LEVEL(1) LOG("unmarshall header failed!!!");
return true;
}
lock ml(m_);
- update_xid_rep(h.xid);
+ update_xid_rep(h.xid, ml);
- if(calls_.find(h.xid) == calls_.end()){
+ if (calls_.find(h.xid) == calls_.end()) {
IF_LEVEL(2) LOG("xid " << h.xid << " no pending request");
return true;
}
caller *ca = calls_[h.xid];
lock cl(ca->m);
- if(!ca->done){
+ if (!ca->done) {
*ca->rep = b;
ca->intret = h.ret;
- if(ca->intret < 0){
+ if (ca->intret < 0) {
IF_LEVEL(2) LOG("RPC reply error for xid " << h.xid << " intret " << ca->intret);
}
ca->done = 1;
return true;
}
-// assumes thread holds mutex m
-void
-rpcc::update_xid_rep(int xid)
-{
- if(xid <= xid_rep_window_.front()){
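+// xid_rep_window_ is the sorted list of xids for which replies have been
+// received; after compression its front() is the largest xid X such that all
+// requests with xid <= X have been answered. That front() is sent to the server
+// as xid_rep on each request so the server can prune saved replies it no longer
+// needs.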
+void rpcc::update_xid_rep(xid_t xid, lock & m_lock) {
+ VERIFY(m_lock);
+ if (xid <= xid_rep_window_.front())
return;
- }
- for (auto it = xid_rep_window_.begin(); it != xid_rep_window_.end(); it++){
- if(*it > xid){
+ for (auto it = xid_rep_window_.begin(); it != xid_rep_window_.end(); it++) {
+ if (*it > xid) {
xid_rep_window_.insert(it, xid);
goto compress;
}
compress:
auto it = xid_rep_window_.begin();
- for (it++; it != xid_rep_window_.end(); it++){
+ for (it++; it != xid_rep_window_.end(); it++) {
while (xid_rep_window_.front() + 1 == *it)
xid_rep_window_.pop_front();
}
}
-rpcs::rpcs(in_port_t p1, size_t count)
- : port_(p1), counting_(count), curr_counts_(count), reachable_ (true)
+rpcs::rpcs(in_port_t p1) : port_(p1)
{
set_rand_seed();
- nonce_ = (unsigned int)random();
+ nonce_ = (nonce_t)random();
IF_LEVEL(2) LOG("created with nonce " << nonce_);
- reg(rpc_const::bind, &rpcs::rpcbind, this);
- dispatchpool_ = unique_ptr<ThrPool>(new ThrPool(6, false));
+ reg(rpc_protocol::bind, &rpcs::rpcbind, this);
}
void rpcs::start() {
char *loss_env = getenv("RPC_LOSSY");
- listener_ = unique_ptr<tcpsconn>(new tcpsconn(this, port_, loss_env ? atoi(loss_env) : 0));
+ listener_.reset(new connection_listener(this, port_, loss_env ? atoi(loss_env) : 0));
}
-rpcs::~rpcs()
-{
+rpcs::~rpcs() {
// must delete listener before dispatchpool
listener_ = nullptr;
dispatchpool_ = nullptr;
- free_reply_window();
}
-bool
-rpcs::got_pdu(const shared_ptr<connection> & c, const string & b)
-{
- if(!reachable_){
+bool rpcs::got_pdu(const shared_ptr<connection> & c, const string & b) {
+ if (!reachable_) {
IF_LEVEL(1) LOG("not reachable");
return true;
}
- return dispatchpool_->addJob(bind(&rpcs::dispatch, this, c, b));
-}
-
-void
-rpcs::reg1(proc_t proc, handler *h)
-{
- lock pl(procs_m_);
- VERIFY(procs_.count(proc) == 0);
- procs_[proc] = h;
- VERIFY(procs_.count(proc) >= 1);
-}
-
-void
-rpcs::updatestat(proc_t proc)
-{
- lock cl(count_m_);
- counts_[proc]++;
- curr_counts_--;
- if(curr_counts_ == 0){
- LOG("RPC STATS: ");
- for (auto i = counts_.begin(); i != counts_.end(); i++)
- LOG(hex << i->first << ":" << dec << i->second);
-
- lock rwl(reply_window_m_);
-
- size_t totalrep = 0, maxrep = 0;
- for (auto clt : reply_window_) {
- totalrep += clt.second.size();
- if(clt.second.size() > maxrep)
- maxrep = clt.second.size();
- }
- IF_LEVEL(1) LOG("REPLY WINDOW: clients " << (reply_window_.size()-1) << " total reply " <<
- totalrep << " max per client " << maxrep);
- curr_counts_ = counting_;
- }
+ return dispatchpool_->addJob(std::bind(&rpcs::dispatch, this, c, b));
}
void rpcs::dispatch(shared_ptr<connection> c, const string & buf) {
unmarshall req(buf, true);
- request_header h;
+ rpc_protocol::request_header h;
req.unpack_header(h);
- proc_t proc = h.proc;
+ proc_id_t proc = h.proc;
if (!req.ok()) {
IF_LEVEL(1) LOG("unmarshall header failed");
dec << h.xid_rep << ") from clt " << h.clt_nonce << " for srv instance " << h.srv_nonce);
marshall rep;
- reply_header rh{h.xid,0};
+ rpc_protocol::reply_header rh{h.xid,0};
// is client sending to an old instance of server?
- if(h.srv_nonce != 0 && h.srv_nonce != nonce_){
+ if (h.srv_nonce != 0 && h.srv_nonce != nonce_) {
IF_LEVEL(2) LOG("rpc for an old server instance " << h.srv_nonce <<
" (current " << nonce_ << ") proc " << hex << h.proc);
- rh.ret = rpc_const::oldsrv_failure;
+ rh.ret = rpc_protocol::oldsrv_failure;
rep.pack_header(rh);
c->send(rep);
return;
// is RPC proc a registered procedure?
{
lock pl(procs_m_);
- if(procs_.count(proc) < 1){
+ if (procs_.count(proc) < 1) {
LOG("unknown proc 0x" << hex << proc << " with h.srv_nonce=" << h.srv_nonce << ", my srv_nonce=" << nonce_);
VERIFY(0);
return;
f = procs_[proc];
}
- rpcs::rpcstate_t stat;
- string b1;
-
- if(h.clt_nonce){
- // have i seen this client before?
- {
- lock rwl(reply_window_m_);
- // if we don't know about this clt_nonce, create a cleanup object
- if(reply_window_.find(h.clt_nonce) == reply_window_.end()){
- VERIFY (reply_window_[h.clt_nonce].size() == 0); // create
- reply_window_[h.clt_nonce].push_back(reply_t(-1)); // store starting reply xid
- IF_LEVEL(2) LOG("new client " << h.clt_nonce << " xid " << h.xid <<
- " chan " << c->channo() << ", total clients " << (reply_window_.size()-1));
- }
- }
-
- // save the latest good connection to the client
- {
- lock rwl(conns_m_);
- if (conns_.find(h.clt_nonce) == conns_.end())
- conns_[h.clt_nonce] = c;
- else if(conns_[h.clt_nonce]->create_time() < c->create_time())
- conns_[h.clt_nonce] = c;
+ // have i seen this client before?
+ {
+ lock rwl(reply_window_m_);
+ // if we don't know about this clt_nonce, create a cleanup object
+ if (reply_window_.find(h.clt_nonce) == reply_window_.end()) {
+ VERIFY (reply_window_[h.clt_nonce].size() == 0); // create
+ reply_window_[h.clt_nonce].push_back(reply_t(-1)); // store starting reply xid
+ IF_LEVEL(2) LOG("new client " << h.clt_nonce << " xid " << h.xid <<
+ " chan " << c->fd << ", total clients " << (reply_window_.size()-1));
}
+ }
- stat = checkduplicate_and_update(h.clt_nonce, h.xid, h.xid_rep, b1);
- } else {
- // this client does not require at most once logic
- stat = NEW;
+ // save the latest good connection to the client
+ {
+ lock rwl(conns_m_);
+ if (conns_.find(h.clt_nonce) == conns_.end())
+ conns_[h.clt_nonce] = c;
+ else if (conns_[h.clt_nonce]->create_time < c->create_time)
+ conns_[h.clt_nonce] = c;
}
- switch (stat) {
+ string b1;
+
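+    // At-most-once: consult this client's reply window before running the
+    // handler; a duplicate request is answered from the saved reply (or with an
+    // error if that reply has already been discarded) instead of re-executing.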
+ switch (check_duplicate_and_update(h.clt_nonce, h.xid, h.xid_rep, b1)) {
case NEW: // new request
- if (counting_)
- updatestat(proc);
-
- rh.ret = (*f)(req, rep);
- if (rh.ret == rpc_const::unmarshal_args_failure) {
- cerr << "failed to unmarshall the arguments. You are " <<
- "probably calling RPC 0x" << hex << proc << " with the wrong " <<
- "types of arguments." << endl;
+ rh.ret = (*f)(forward<unmarshall>(req), rep);
+ if (rh.ret == rpc_protocol::unmarshall_args_failure) {
+ LOG("failed to unmarshall the arguments. You are " <<
+ "probably calling RPC 0x" << hex << proc << " with the wrong " <<
+ "types of arguments.");
VERIFY(0);
}
VERIFY(rh.ret >= 0);
IF_LEVEL(2) LOG("sending and saving reply of size " << b1.size() << " for rpc " <<
h.xid << ", proc " << hex << proc << " ret " << dec << rh.ret << ", clt " << h.clt_nonce);
- if (h.clt_nonce > 0) {
- // only record replies for clients that require at-most-once logic
- add_reply(h.clt_nonce, h.xid, b1);
- }
+ add_reply(h.clt_nonce, h.xid, b1);
// get the latest connection to the client
{
break;
case FORGOTTEN: // very old request and we don't have the response anymore
IF_LEVEL(2) LOG("very old request " << h.xid << " from " << h.clt_nonce);
- rh.ret = rpc_const::atmostonce_failure;
+ rh.ret = rpc_protocol::atmostonce_failure;
rep.pack_header(rh);
c->send(rep);
break;
// DONE: seen this xid, previous reply returned in b.
// FORGOTTEN: might have seen this xid, but deleted previous reply.
rpcs::rpcstate_t
-rpcs::checkduplicate_and_update(unsigned int clt_nonce, int xid,
- int xid_rep, string & b)
+rpcs::check_duplicate_and_update(nonce_t clt_nonce, xid_t xid,
+ xid_t xid_rep, string & b)
{
lock rwl(reply_window_m_);
- list<reply_t> &l = reply_window_[clt_nonce];
+ list<reply_t> & l = reply_window_[clt_nonce];
VERIFY(l.size() > 0);
VERIFY(xid >= xid_rep);
- int past_xid_rep = l.begin()->xid;
+ xid_t past_xid_rep = l.begin()->xid;
list<reply_t>::iterator start = l.begin(), it = ++start;
// rpcs::dispatch calls add_reply when it is sending a reply to an RPC,
// and passes the return value in b.
// add_reply() should remember b.
-// free_reply_window() and checkduplicate_and_update are responsible for
-// cleaning up the remembered values.
-void rpcs::add_reply(unsigned int clt_nonce, int xid, const string & b) {
+void rpcs::add_reply(nonce_t clt_nonce, xid_t xid, const string & b) {
lock rwl(reply_window_m_);
// remember the RPC reply value
- list<reply_t> &l = reply_window_[clt_nonce];
+ list<reply_t> & l = reply_window_[clt_nonce];
list<reply_t>::iterator it = l.begin();
// skip to our place in the list
for (it++; it != l.end() && it->xid < xid; it++);
// there should already be an entry, so whine if there isn't
if (it == l.end() || it->xid != xid) {
- cerr << "Could not find reply struct in add_reply" << endl;
+ LOG("Could not find reply struct in add_reply");
l.insert(it, reply_t(xid, b));
} else {
*it = reply_t(xid, b);
}
}
-void rpcs::free_reply_window(void) {
- lock rwl(reply_window_m_);
- reply_window_.clear();
-}
-
-int rpcs::rpcbind(unsigned int &r, int) {
+rpc_protocol::status rpcs::rpcbind(nonce_t & r) {
IF_LEVEL(2) LOG("called return nonce " << nonce_);
r = nonce_;
return 0;
}
-static sockaddr_in make_sockaddr(const string &host, const string &port);
-
-static sockaddr_in make_sockaddr(const string &hostandport) {
+static sockaddr_in make_sockaddr(const string & hostandport) {
+ string host = "127.0.0.1";
+ string port = hostandport;
auto colon = hostandport.find(':');
- if (colon == string::npos)
- return make_sockaddr("127.0.0.1", hostandport);
- else
- return make_sockaddr(hostandport.substr(0, colon), hostandport.substr(colon+1));
-}
+ if (colon != string::npos) {
+ host = hostandport.substr(0, colon);
+ port = hostandport.substr(colon+1);
+ }
-static sockaddr_in make_sockaddr(const string &host, const string &port) {
- sockaddr_in dst;
- bzero(&dst, sizeof(dst));
+ sockaddr_in dst{}; // zero initialize
dst.sin_family = AF_INET;
struct in_addr a{inet_addr(host.c_str())};
- if(a.s_addr != INADDR_NONE)
+ if (a.s_addr != INADDR_NONE)
dst.sin_addr.s_addr = a.s_addr;
else {
struct hostent *hp = gethostbyname(host.c_str());
if (!hp || hp->h_length != 4 || hp->h_addrtype != AF_INET) {
- cerr << "cannot find host name " << host << endl;
+ LOG_NONMEMBER("cannot find host name " << host);
exit(1);
}
memcpy(&a, hp->h_addr_list[0], sizeof(in_addr_t));