X-Git-Url: http://xvm.mit.edu/gitweb/invirt/third/libt4.git/blobdiff_plain/46fb2b4bbe3a0a8516ab04cfafa895a882c70f86..f0dcb6b97d6d40f67698d1f71ac26970f1776f82:/rpc/rpc.cc?ds=sidebyside

diff --git a/rpc/rpc.cc b/rpc/rpc.cc
index 90d9608..7937785 100644
--- a/rpc/rpc.cc
+++ b/rpc/rpc.cc
@@ -1,121 +1,102 @@
-/*
- The rpcc class handles client-side RPC. Each rpcc is bound to a
- single RPC server. The jobs of rpcc include maintaining a connection to
- server, sending RPC requests and waiting for responses, retransmissions,
- at-most-once delivery etc.
-
- The rpcs class handles the server side of RPC. Each rpcs handles multiple
- connections from different rpcc objects. The jobs of rpcs include accepting
- connections, dispatching requests to registered RPC handlers, at-most-once
- delivery etc.
-
- Both rpcc and rpcs use the connection class as an abstraction for the
- underlying communication channel. To send an RPC request/reply, one calls
- connection::send() which blocks until data is sent or the connection has failed
- (thus the caller can free the buffer when send() returns). When a
- request/reply is received, connection makes a callback into the corresponding
- rpcc or rpcs (see rpcc::got_pdu() and rpcs::got_pdu()).
-
- Thread organization:
- rpcc uses application threads to send RPC requests and blocks to receive the
- reply or error. All connections use a single PollMgr object to perform async
- socket IO. PollMgr creates a single thread to examine the readiness of socket
- file descriptors and informs the corresponding connection whenever a socket is
- ready to be read or written. (We use asynchronous socket IO to reduce the
- number of threads needed to manage these connections; without async IO, at
- least one thread is needed per connection to read data without blocking other
- activities.) Each rpcs object creates one thread for listening on the server
- port and a pool of threads for executing RPC requests. The
- thread pool allows us to control the number of threads spawned at the server
- (spawning one thread per request will hurt when the server faces thousands of
- requests).
-
- In order to delete a connection object, we must maintain a reference count.
- For rpcc,
- multiple client threads might be invoking the rpcc::call() functions and thus
- holding multiple references to the underlying connection object. For rpcs,
- multiple dispatch threads might be holding references to the same connection
- object. A connection object is deleted only when the underlying connection is
- dead and the reference count reaches zero.
-
- This version of the RPC library explicitly joins exited threads to make sure
- no outstanding references exist before deleting objects.
-
- To delete a rpcc object safely, the users of the library must ensure that
- there are no outstanding calls on the rpcc object.
-
- To delete a rpcs object safely, we do the following in sequence: 1. stop
- accepting new incoming connections. 2. close existing active connections.
- 3. delete the dispatch thread pool which involves waiting for current active
- RPC handlers to finish. It is interesting how a thread pool can be deleted
- without using thread cancellation. The trick is to inject x "poison pills" for
- a thread pool of x threads. Upon getting a poison pill instead of a normal
- task, a worker thread will exit (and thread pool destructor waits to join all
- x exited worker threads).
- */
-
-#include "types.h"
+//
+// The rpcc class handles client-side RPC. Each rpcc is bound to a single RPC
+// server. The jobs of rpcc include maintaining a connection to server, sending
+// RPC requests and waiting for responses, retransmissions, at-most-once delivery
+// etc.
+//
+// The rpcs class handles the server side of RPC. Each rpcs handles multiple
+// connections from different rpcc objects. The jobs of rpcs include accepting
+// connections, dispatching requests to registered RPC handlers, at-most-once
+// delivery etc.
+//
+// Both rpcc and rpcs use the connection class as an abstraction for the
+// underlying communication channel. To send an RPC request/reply, one calls
+// connection::send() which blocks until data is sent or the connection has
+// failed (thus the caller can free the buffer when send() returns). When a
+// request/reply is received, connection makes a callback into the corresponding
+// rpcc or rpcs (see rpcc::got_pdu() and rpcs::got_pdu()).
+//
+// Thread organization:
+// rpcc uses application threads to send RPC requests and blocks to receive the
+// reply or error. All connections use a single PollMgr object to perform async
+// socket IO. PollMgr creates a single thread to examine the readiness of socket
+// file descriptors and informs the corresponding connection whenever a socket is
+// ready to be read or written. (We use asynchronous socket IO to reduce the
+// number of threads needed to manage these connections; without async IO, at
+// least one thread is needed per connection to read data without blocking other
+// activities.) Each rpcs object creates one thread for listening on the server
+// port and a pool of threads for executing RPC requests. The thread pool allows
+// us to control the number of threads spawned at the server (spawning one thread
+// per request will hurt when the server faces thousands of requests).
+//
+// In order to delete a connection object, we must maintain a reference count.
+// For rpcc, multiple client threads might be invoking the rpcc::call() functions
+// and thus holding multiple references to the underlying connection object. For
+// rpcs, multiple dispatch threads might be holding references to the same
+// connection object. A connection object is deleted only when the underlying
+// connection is dead and the reference count reaches zero.
+//
+// This version of the RPC library explicitly joins exited threads to make sure
+// no outstanding references exist before deleting objects.
+//
+// To delete a rpcc object safely, the users of the library must ensure that
+// there are no outstanding calls on the rpcc object.
+//
+// To delete a rpcs object safely, we do the following in sequence: 1. stop
+// accepting new incoming connections. 2. close existing active connections. 3.
+// delete the dispatch thread pool which involves waiting for current active RPC
+// handlers to finish. It is interesting how a thread pool can be deleted
+// without using thread cancellation. The trick is to inject x "poison pills" for
+// a thread pool of x threads. Upon getting a poison pill instead of a normal
+// task, a worker thread will exit (and thread pool destructor waits to join all
+// x exited worker threads).
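The poison-pill shutdown described in the last paragraph is easy to see in isolation. Below is a minimal, self-contained sketch of the idea; it is not this library's ThrPool (whose interface differs), just an illustration of how a pool of x workers can be drained, without cancellation, by queueing x empty tasks and joining:

// Minimal poison-pill pool: an empty std::function is the pill.
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

class sketch_pool {
    std::mutex m_;
    std::condition_variable cv_;
    std::queue<std::function<void()>> jobs_;
    std::vector<std::thread> workers_;

public:
    explicit sketch_pool(size_t n) {
        for (size_t i = 0; i < n; i++)
            workers_.emplace_back([this] {
                while (true) {
                    std::function<void()> job;
                    {
                        std::unique_lock<std::mutex> l(m_);
                        cv_.wait(l, [this] { return !jobs_.empty(); });
                        job = std::move(jobs_.front());
                        jobs_.pop();
                    }
                    if (!job)    // empty task = poison pill
                        return;  // worker exits
                    job();
                }
            });
    }

    void add_job(std::function<void()> f) {
        std::lock_guard<std::mutex> l(m_);
        jobs_.push(std::move(f));
        cv_.notify_one();
    }

    ~sketch_pool() {
        // inject one pill per worker, then join them all
        for (size_t i = 0; i < workers_.size(); i++)
            add_job(std::function<void()>());
        for (auto & w : workers_)
            w.join();
    }
};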
+//

 #include "rpc.h"

-#include <sys/types.h>
 #include <arpa/inet.h>
 #include <netinet/tcp.h>
 #include <netdb.h>
 #include <unistd.h>
-
-const rpcc::TO rpcc::to_max = { 120000 };
-const rpcc::TO rpcc::to_min = { 1000 };
+#include <string.h>

 inline void set_rand_seed() {
     auto now = time_point_cast<nanoseconds>(steady_clock::now());
     srandom((uint32_t)now.time_since_epoch().count()^(uint32_t)getpid());
 }

-rpcc::rpcc(const string & d, bool retrans) :
-    dst_(make_sockaddr(d)), srv_nonce_(0), bind_done_(false), xid_(1), lossytest_(0),
-    retrans_(retrans), reachable_(true), chan_(NULL), destroy_wait_ (false), xid_rep_done_(-1)
+static sockaddr_in make_sockaddr(const string & hostandport);
+
+rpcc::rpcc(const string & d) : dst_(make_sockaddr(d))
 {
-    if(retrans){
-        set_rand_seed();
-        clt_nonce_ = (unsigned int)random();
-    } else {
-        // special client nonce 0 means this client does not
-        // require at-most-once logic from the server
-        // because it uses tcp and never retries a failed connection
-        clt_nonce_ = 0;
-    }
+    set_rand_seed();
+    clt_nonce_ = (nonce_t)random();

     char *loss_env = getenv("RPC_LOSSY");
-    if(loss_env != NULL){
+    if (loss_env)
         lossytest_ = atoi(loss_env);
-    }
-
-    // xid starts with 1 and latest received reply starts with 0
-    xid_rep_window_.push_back(0);

-    IF_LEVEL(2) LOG("rpcc::rpcc cltn_nonce is " << clt_nonce_ << " lossy " << lossytest_);
+    IF_LEVEL(2) LOG("clt_nonce is " << clt_nonce_ << " lossy " << lossytest_);
 }

 // IMPORTANT: destruction should happen only when no external threads
 // are blocked inside rpcc or will use rpcc in the future
 rpcc::~rpcc() {
-    IF_LEVEL(2) LOG("rpcc::~rpcc delete nonce " << clt_nonce_ << " channo=" << (chan_?chan_->channo():-1));
-    if(chan_){
-        chan_->closeconn();
-        chan_->decref();
-    }
+    cancel();
+    IF_LEVEL(2) LOG("delete nonce " << clt_nonce_ << " chan " << (chan_?(int)chan_->fd:-1));
+    chan_.reset();
     VERIFY(calls_.size() == 0);
 }

-int rpcc::bind(TO to) {
-    unsigned int r;
-    int ret = call_timeout(rpc_const::bind, to, r, 0);
-    if(ret == 0){
+int rpcc::bind(milliseconds to) {
+    nonce_t r;
+    rpc_protocol::status ret = call_timeout(rpc_protocol::bind, to, r);
+    if (ret == 0) {
         lock ml(m_);
         bind_done_ = true;
         srv_nonce_ = r;
     } else {
-        IF_LEVEL(2) LOG("rpcc::bind " << inet_ntoa(dst_.sin_addr) << " failed " << ret);
+        IF_LEVEL(2) LOG("bind " << inet_ntoa(dst_.sin_addr) << " failed " << ret);
     }
     return ret;
 };
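For orientation, client-side use of rpcc looks roughly like the sketch below. The procedure id ex_protocol::get and the exact call() argument order are hypothetical stand-ins for whatever protocol the application defines; only the "host:port" constructor, bind(), and the 0-on-success convention are taken from the code above:

// Hypothetical usage sketch -- ex_protocol::get is an invented
// procedure id, not part of this library.
rpcc client("127.0.0.1:9000");
if (client.bind() == 0) {              // must bind once before calling
    string value;
    if (client.call(ex_protocol::get, value, string("key")) == 0) {
        // value holds the reply; nonzero returns are rpc_protocol errors
    }
}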
@@ -123,64 +104,62 @@ int rpcc::bind(TO to) {
 // Cancel all outstanding calls
 void rpcc::cancel(void) {
     lock ml(m_);
-    LOG("rpcc::cancel: force callers to fail");
-    for(auto &p : calls_){
-        caller *ca = p.second;
+    if (calls_.size()) {
+        LOG("force callers to fail");
+        for (auto & p : calls_) {
+            caller *ca = p.second;
+
+            IF_LEVEL(2) LOG("force caller to fail");

-        IF_LEVEL(2) LOG("rpcc::cancel: force caller to fail");
-        {
             lock cl(ca->m);
             ca->done = true;
-            ca->intret = rpc_const::cancel_failure;
+            ca->intret = rpc_protocol::cancel_failure;
             ca->c.notify_one();
         }
-    }

-    while (calls_.size () > 0){
         destroy_wait_ = true;
-        destroy_wait_c_.wait(ml);
+        while (calls_.size () > 0)
+            destroy_wait_c_.wait(ml);
+
+        LOG("done");
     }
-    LOG("rpcc::cancel: done");
 }

-int rpcc::call1(proc_t proc, marshall &req, unmarshall &rep, TO to) {
+int rpcc::call1(proc_id_t proc, milliseconds to, string & rep, marshall & req) {

     caller ca(0, &rep);
-    int xid_rep;
+    xid_t xid_rep;
     {
         lock ml(m_);

-        if((proc != rpc_const::bind && !bind_done_) ||
-                (proc == rpc_const::bind && bind_done_)){
-            IF_LEVEL(1) LOG("rpcc::call1 rpcc has not been bound to dst or binding twice");
-            return rpc_const::bind_failure;
+        if ((proc != rpc_protocol::bind.id && !bind_done_) || (proc == rpc_protocol::bind.id && bind_done_)) {
+            IF_LEVEL(1) LOG("rpcc has not been bound to dst or binding twice");
+            return rpc_protocol::bind_failure;
         }

-        if(destroy_wait_){
-            return rpc_const::cancel_failure;
-        }
+        if (destroy_wait_)
+            return rpc_protocol::cancel_failure;

         ca.xid = xid_++;
         calls_[ca.xid] = &ca;

-        req.pack_req_header({ca.xid, proc, clt_nonce_, srv_nonce_, xid_rep_window_.front()});
+        req.pack_header(rpc_protocol::request_header{
+                ca.xid, proc, clt_nonce_, srv_nonce_, xid_rep_window_.front()
+        });
         xid_rep = xid_rep_window_.front();
     }

-    TO curr_to;
-    auto finaldeadline = steady_clock::now() + milliseconds(to.to),
-         nextdeadline = finaldeadline;
-
-    curr_to.to = to_min.to;
+    milliseconds curr_to = rpc::to_min;
+    auto finaldeadline = steady_clock::now() + to;

     bool transmit = true;
-    connection *ch = NULL;
+    shared_ptr<connection> ch;

-    while (1){
-        if(transmit){
-            get_refconn(&ch);
-            if(ch){
-                if(reachable_) {
+    while (1) {
+        if (transmit) {
+            get_latest_connection(ch);
+            if (ch) {
+                if (reachable_) {
                     request forgot;
                     {
                         lock ml(m_);
@@ -190,46 +169,40 @@ int rpcc::call1(proc_t proc, marshall &req, unmarshall &rep, TO to) {
                     }
                 }
                 if (forgot.isvalid())
-                    ch->send((char *)forgot.buf.c_str(), forgot.buf.size());
-                ch->send(req.cstr(), req.size());
+                    ch->send(forgot.buf);
+                ch->send(req);
             }
             else IF_LEVEL(1) LOG("not reachable");
-            IF_LEVEL(2) LOG("rpcc::call1 " << clt_nonce_ << " just sent req proc " << hex << proc <<
+            IF_LEVEL(2) LOG(clt_nonce_ << " just sent req proc " << hex << proc <<
                             " xid " << dec << ca.xid << " clt_nonce " << clt_nonce_);
         }
         transmit = false; // only send once on a given channel

-        if(finaldeadline == time_point<steady_clock>::min())
-            break;
-
-        nextdeadline = steady_clock::now() + milliseconds(curr_to.to);
-        if(nextdeadline > finaldeadline) {
-            nextdeadline = finaldeadline;
-            finaldeadline = time_point<steady_clock>::min();
-        }
+        auto nextdeadline = min(steady_clock::now() + curr_to, finaldeadline);
+        curr_to *= 2;

         {
             lock cal(ca.m);
-            while (!ca.done){
-                IF_LEVEL(2) LOG("rpcc:call1: wait");
-                if(ca.c.wait_until(cal, nextdeadline) == cv_status::timeout){
-                    IF_LEVEL(2) LOG("rpcc::call1: timeout");
+            while (!ca.done) {
+                IF_LEVEL(2) LOG("wait");
+                if (ca.c.wait_until(cal, nextdeadline) == cv_status::timeout) {
+                    IF_LEVEL(2) LOG("timeout");
                     break;
                 }
             }
-            if(ca.done){
-                IF_LEVEL(2) LOG("rpcc::call1: reply received");
+            if (ca.done) {
+                IF_LEVEL(2) LOG("reply received");
                 break;
            }
         }

-        if(retrans_ && (!ch || ch->isdead())){
-            // since connection is dead, retransmit
-            // on the new connection
+        if (nextdeadline >= finaldeadline)
+            break;
+
+        // retransmit on new connection if connection is dead
+        if (!ch || ch->isdead())
            transmit = true;
-        }
-        curr_to.to <<= 1;
    }
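The loop above implements exponential backoff: the first wait is rpc::to_min, each later wait doubles (curr_to *= 2), and every wait is clipped to the caller's overall deadline, after which call1 gives up with timeout_failure. For example, with to_min = 1000 ms and a 10 s total timeout, the waits are roughly 1 s, 2 s, 4 s, and a final 3 s. A sketch of just the schedule (illustrative; the real loop blocks on a condition variable and retransmits on each pass):

#include <algorithm>
#include <chrono>
using namespace std::chrono;

// Counts how many (re)transmissions the schedule allows before the
// final deadline is reached.
int retry_count(milliseconds to_min, milliseconds total) {
    auto finaldeadline = steady_clock::now() + total;
    milliseconds curr_to = to_min;
    int sends = 0;
    while (true) {
        sends++;                       // one (re)transmission per pass
        auto nextdeadline = std::min(steady_clock::now() + curr_to, finaldeadline);
        curr_to *= 2;                  // exponential backoff
        if (nextdeadline >= finaldeadline)
            break;                     // out of time: give up
    }
    return sends;
}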

     {
@@ -239,18 +212,17 @@ int rpcc::call1(proc_t proc, marshall &req, unmarshall &rep, TO to) {

         // may need to update the xid again here, in case the
         // packet times out before it's even sent by the channel.
         // I don't think there's any harm in maybe doing it twice
-        update_xid_rep(ca.xid);
+        update_xid_rep(ca.xid, ml);

-        if(destroy_wait_){
-            destroy_wait_c_.notify_one();
-        }
+        if (destroy_wait_)
+            destroy_wait_c_.notify_one();
     }

     if (ca.done && lossytest_)
     {
         lock ml(m_);
         if (!dup_req_.isvalid()) {
-            dup_req_.buf.assign(req.cstr(), req.size());
+            dup_req_.buf = req;
             dup_req_.xid = ca.xid;
         }
         if (xid_rep > xid_rep_done_)
@@ -259,33 +231,21 @@

     lock cal(ca.m);

-    IF_LEVEL(2) LOG("rpcc::call1 " << clt_nonce_ << " call done for req proc " << hex << proc <<
+    IF_LEVEL(2) LOG(clt_nonce_ << " call done for req proc " << hex << proc <<
                     " xid " << dec << ca.xid << " " << inet_ntoa(dst_.sin_addr) << ":" <<
-                    ntohs(dst_.sin_port) << " done? " << ca.done << " ret " << ca.intret);
-
-    if(ch)
-        ch->decref();
+                    ntoh(dst_.sin_port) << " done? " << ca.done << " ret " << ca.intret);

     // destruction of req automatically frees its buffer
-    return (ca.done? ca.intret : rpc_const::timeout_failure);
+    return (ca.done? ca.intret : rpc_protocol::timeout_failure);
 }

-void
-rpcc::get_refconn(connection **ch)
-{
+void rpcc::get_latest_connection(shared_ptr<connection> & ch) {
     lock ml(chan_m_);
-    if(!chan_ || chan_->isdead()){
-        if(chan_)
-            chan_->decref();
-        chan_ = connect_to_dst(dst_, this, lossytest_);
-    }
-    if(ch && chan_){
-        if(*ch){
-            (*ch)->decref();
-        }
-        *ch = chan_;
-        (*ch)->incref();
-    }
+    if (!chan_ || chan_->isdead())
+        chan_ = connection::to_dst(dst_, this, lossytest_);
+
+    if (chan_)
+        ch = chan_;
 }

 // PollMgr's thread is being used to
@@ -294,33 +254,33 @@ rpcc::get_refconn(connection **ch)
 //
 // this function keeps no reference for connection *c
 bool
-rpcc::got_pdu(connection *, char *b, size_t sz)
+rpcc::got_pdu(const shared_ptr<connection> &, const string & b)
 {
-    unmarshall rep(b, sz);
-    reply_header h;
-    rep.unpack_reply_header(&h);
+    unmarshall rep(b, true);
+    rpc_protocol::reply_header h;
+    rep.unpack_header(h);

-    if(!rep.ok()){
-        IF_LEVEL(1) LOG("rpcc:got_pdu unmarshall header failed!!!");
+    if (!rep.ok()) {
+        IF_LEVEL(1) LOG("unmarshall header failed!!!");
         return true;
     }

     lock ml(m_);

-    update_xid_rep(h.xid);
+    update_xid_rep(h.xid, ml);

-    if(calls_.find(h.xid) == calls_.end()){
-        IF_LEVEL(2) LOG("rpcc::got_pdu xid " << h.xid << " no pending request");
+    if (calls_.find(h.xid) == calls_.end()) {
+        IF_LEVEL(2) LOG("xid " << h.xid << " no pending request");
         return true;
     }

     caller *ca = calls_[h.xid];

     lock cl(ca->m);
-    if(!ca->done){
-        ca->un->take_in(rep);
+    if (!ca->done) {
+        *ca->rep = b;
         ca->intret = h.ret;
-        if(ca->intret < 0){
-            IF_LEVEL(2) LOG("rpcc::got_pdu: RPC reply error for xid " << h.xid << " intret " << ca->intret);
+        if (ca->intret < 0) {
+            IF_LEVEL(2) LOG("RPC reply error for xid " << h.xid << " intret " << ca->intret);
         }
         ca->done = 1;
     }
@@ -328,16 +288,13 @@ rpcc::got_pdu(connection *, char *b, size_t sz)
     return true;
 }

-// assumes thread holds mutex m
-void
-rpcc::update_xid_rep(int xid)
-{
-    if(xid <= xid_rep_window_.front()){
+void rpcc::update_xid_rep(xid_t xid, lock & m_lock) {
+    VERIFY(m_lock);
+    if (xid <= xid_rep_window_.front())
         return;
-    }

-    for (auto it = xid_rep_window_.begin(); it != xid_rep_window_.end(); it++){
-        if(*it > xid){
+    for (auto it = xid_rep_window_.begin(); it != xid_rep_window_.end(); it++) {
+        if (*it > xid) {
             xid_rep_window_.insert(it, xid);
             goto compress;
         }
     }
@@ -346,120 +303,66 @@

 compress:
     auto it = xid_rep_window_.begin();
-    for (it++; it != xid_rep_window_.end(); it++){
+    for (it++; it != xid_rep_window_.end(); it++) {
         while (xid_rep_window_.front() + 1 == *it)
             xid_rep_window_.pop_front();
     }
 }
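xid_rep_window_ is the client state behind at-most-once: its front is always the largest xid X such that every reply with xid <= X has arrived, and that X is what call1 piggybacks to the server as xid_rep. A standalone sketch of the insert-then-compress step above, with replies arriving out of order:

#include <iostream>
#include <list>

int main() {
    std::list<int> w{3};        // all xids <= 3 acknowledged so far
    for (int xid : {5, 4}) {    // replies 5 and 4 arrive out of order
        // sorted insert, as in update_xid_rep
        for (auto it = w.begin(); ; it++)
            if (it == w.end() || *it > xid) { w.insert(it, xid); break; }
        // compress: pop the front while the next element is contiguous
        auto it = w.begin();
        for (it++; it != w.end(); it++)
            while (w.front() + 1 == *it)
                w.pop_front();
    }
    std::cout << w.front() << "\n";  // prints 5: everything <= 5 is in
}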

-rpcs::rpcs(unsigned int p1, size_t count)
-  : port_(p1), counting_(count), curr_counts_(count), lossytest_(0), reachable_ (true)
+rpcs::rpcs(in_port_t p1) : port_(p1)
 {
     set_rand_seed();
-    nonce_ = (unsigned int)random();
-    IF_LEVEL(2) LOG("rpcs::rpcs created with nonce " << nonce_);
+    nonce_ = (nonce_t)random();
+    IF_LEVEL(2) LOG("created with nonce " << nonce_);

-    char *loss_env = getenv("RPC_LOSSY");
-    if(loss_env != NULL){
-        lossytest_ = atoi(loss_env);
-    }
-
-    reg(rpc_const::bind, &rpcs::rpcbind, this);
-    dispatchpool_ = new ThrPool(6,false);
+    reg(rpc_protocol::bind, &rpcs::rpcbind, this);
+}

-    listener_ = new tcpsconn(this, port_, lossytest_);
+void rpcs::start() {
+    char *loss_env = getenv("RPC_LOSSY");
+    listener_.reset(new connection_listener(this, port_, loss_env ? atoi(loss_env) : 0));
 }

-rpcs::~rpcs()
-{
+rpcs::~rpcs() {
     // must delete listener before dispatchpool
-    delete listener_;
-    delete dispatchpool_;
-    free_reply_window();
+    listener_ = nullptr;
+    dispatchpool_ = nullptr;
 }

-bool
-rpcs::got_pdu(connection *c, char *b, size_t sz)
-{
-    if(!reachable_){
-        IF_LEVEL(1) LOG("rpcss::got_pdu: not reachable");
-        return true;
-    }
-
-    djob_t *j = new djob_t(c, b, sz);
-    c->incref();
-    bool succ = dispatchpool_->addJob(bind(&rpcs::dispatch, this, j));
-    if(!succ || !reachable_){
-        c->decref();
-        delete j;
+bool rpcs::got_pdu(const shared_ptr<connection> & c, const string & b) {
+    if (!reachable_) {
+        IF_LEVEL(1) LOG("not reachable");
+        return true;
     }
-    return succ;
-}

-void
-rpcs::reg1(proc_t proc, handler *h)
-{
-    lock pl(procs_m_);
-    VERIFY(procs_.count(proc) == 0);
-    procs_[proc] = h;
-    VERIFY(procs_.count(proc) >= 1);
+    return dispatchpool_->addJob(std::bind(&rpcs::dispatch, this, c, b));
 }
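Application servers register handlers the same way the constructor above registers rpcs::rpcbind: reg(procedure, &Class::method, object), where the handler takes the reply slot first and the unmarshalled arguments after it, returning 0 on success. A hypothetical sketch (ex_protocol::get and ex_server are inventions for illustration; only reg()'s shape and the handler convention are taken from this file):

// Hypothetical registration sketch.
class ex_server {
public:
    int get(string & out, const string & key) {  // reply first, args after
        out = "value-for-" + key;
        return 0;                                // 0 indicates success
    }
};

// at startup:
//     rpcs server(port);
//     ex_server impl;
//     server.reg(ex_protocol::get, &ex_server::get, &impl);
//     server.start();   // begin accepting connections (see start() above)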

-void
-rpcs::updatestat(proc_t proc)
-{
-    lock cl(count_m_);
-    counts_[proc]++;
-    curr_counts_--;
-    if(curr_counts_ == 0){
-        LOG("RPC STATS: ");
-        for (auto i = counts_.begin(); i != counts_.end(); i++)
-            LOG(hex << i->first << ":" << dec << i->second);
+void rpcs::dispatch(shared_ptr<connection> c, const string & buf) {
+    unmarshall req(buf, true);

-        lock rwl(reply_window_m_);
+    rpc_protocol::request_header h;
+    req.unpack_header(h);
+    proc_id_t proc = h.proc;

-        size_t totalrep = 0, maxrep = 0;
-        for (auto clt : reply_window_) {
-            totalrep += clt.second.size();
-            if(clt.second.size() > maxrep)
-                maxrep = clt.second.size();
-        }
-        IF_LEVEL(1) LOG("REPLY WINDOW: clients " << (reply_window_.size()-1) << " total reply " <<
-                        totalrep << " max per client " << maxrep);
-        curr_counts_ = counting_;
-    }
-}
-
-void
-rpcs::dispatch(djob_t *j)
-{
-    connection *c = j->conn;
-    unmarshall req(j->buf, j->sz);
-    delete j;
-
-    request_header h;
-    req.unpack_req_header(&h);
-    proc_t proc = h.proc;
-
-    if(!req.ok()){
-        IF_LEVEL(1) LOG("rpcs:dispatch unmarshall header failed!!!");
-        c->decref();
+    if (!req.ok()) {
+        IF_LEVEL(1) LOG("unmarshall header failed");
         return;
     }

-    IF_LEVEL(2) LOG("rpcs::dispatch: rpc " << h.xid << " (proc " << hex << proc << ", last_rep " <<
+    IF_LEVEL(2) LOG("rpc " << h.xid << " (proc " << hex << proc << ", last_rep " <<
                     dec << h.xid_rep << ") from clt " << h.clt_nonce << " for srv instance " << h.srv_nonce);

     marshall rep;
-    reply_header rh(h.xid,0);
+    rpc_protocol::reply_header rh{h.xid,0};

     // is client sending to an old instance of server?
-    if(h.srv_nonce != 0 && h.srv_nonce != nonce_){
-        IF_LEVEL(2) LOG("rpcs::dispatch: rpc for an old server instance " << h.srv_nonce <<
+    if (h.srv_nonce != 0 && h.srv_nonce != nonce_) {
+        IF_LEVEL(2) LOG("rpc for an old server instance " << h.srv_nonce <<
                         " (current " << nonce_ << ") proc " << hex << h.proc);
-        rh.ret = rpc_const::oldsrv_failure;
-        rep.pack_reply_header(rh);
-        c->send(rep.cstr(),rep.size());
+        rh.ret = rpc_protocol::oldsrv_failure;
+        rep.pack_header(rh);
+        c->send(rep);
         return;
     }
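The srv_nonce test above is how a restarted server sheds traffic addressed to its previous incarnation: each rpcs instance draws a random nonce at construction, clients learn it via the bind RPC and echo it in every request, and a mismatch yields oldsrv_failure so the client can re-bind. Reduced to a predicate (nonce_t is the library's nonce type):

// Sketch of the incarnation check performed above.
bool current_incarnation(nonce_t echoed, nonce_t mine) {
    return echoed == 0        // not yet bound: the bind RPC itself
        || echoed == mine;    // bound to this server instance
}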

@@ -467,9 +370,8 @@
     // is RPC proc a registered procedure?
     {
         lock pl(procs_m_);
-        if(procs_.count(proc) < 1){
-            cerr << "rpcs::dispatch: unknown proc " << hex << proc << "." << endl;
-            c->decref();
+        if (procs_.count(proc) < 1) {
+            LOG("unknown proc 0x" << hex << proc << " with h.srv_nonce=" << h.srv_nonce << ", my srv_nonce=" << nonce_);
             VERIFY(0);
             return;
         }
@@ -477,98 +379,69 @@
         f = procs_[proc];
     }

-    rpcs::rpcstate_t stat;
-    char *b1 = nullptr;
-    size_t sz1 = 0;
-
-    if(h.clt_nonce){
-        // have i seen this client before?
-        {
-            lock rwl(reply_window_m_);
-            // if we don't know about this clt_nonce, create a cleanup object
-            if(reply_window_.find(h.clt_nonce) == reply_window_.end()){
-                VERIFY (reply_window_[h.clt_nonce].size() == 0); // create
-                reply_window_[h.clt_nonce].push_back(reply_t(-1)); // store starting reply xid
-                IF_LEVEL(2) LOG("rpcs::dispatch: new client " << h.clt_nonce << " xid " << h.xid <<
-                                " chan " << c->channo() << ", total clients " << (reply_window_.size()-1));
-            }
-        }
-
-        // save the latest good connection to the client
-        {
-            lock rwl(conss_m_);
-            if(conns_.find(h.clt_nonce) == conns_.end()){
-                c->incref();
-                conns_[h.clt_nonce] = c;
-            } else if(conns_[h.clt_nonce]->compare(c) < 0){
-                conns_[h.clt_nonce]->decref();
-                c->incref();
-                conns_[h.clt_nonce] = c;
-            }
+    // have i seen this client before?
+    {
+        lock rwl(reply_window_m_);
+        // if we don't know about this clt_nonce, create a cleanup object
+        if (reply_window_.find(h.clt_nonce) == reply_window_.end()) {
+            VERIFY (reply_window_[h.clt_nonce].size() == 0); // create
+            reply_window_[h.clt_nonce].push_back(reply_t(-1)); // store starting reply xid
+            IF_LEVEL(2) LOG("new client " << h.clt_nonce << " xid " << h.xid <<
+                            " chan " << c->fd << ", total clients " << (reply_window_.size()-1));
         }
+    }

-        stat = checkduplicate_and_update(h.clt_nonce, h.xid,
-                h.xid_rep, &b1, &sz1);
-    } else {
-        // this client does not require at most once logic
-        stat = NEW;
+    // save the latest good connection to the client
+    {
+        lock rwl(conns_m_);
+        if (conns_.find(h.clt_nonce) == conns_.end())
+            conns_[h.clt_nonce] = c;
+        else if (conns_[h.clt_nonce]->create_time < c->create_time)
+            conns_[h.clt_nonce] = c;
     }

-    switch (stat){
-        case NEW: // new request
-            if(counting_){
-                updatestat(proc);
-            }
+    string b1;

-            rh.ret = (*f)(req, rep);
-            if (rh.ret == rpc_const::unmarshal_args_failure) {
-                cerr << "rpcs::dispatch: failed to unmarshall the arguments. You are " <<
-                        "probably calling RPC 0x" << hex << proc << " with the wrong " <<
-                        "types of arguments." << endl;
+    switch (check_duplicate_and_update(h.clt_nonce, h.xid, h.xid_rep, b1)) {
+        case NEW: // new request
+            rh.ret = (*f)(forward<unmarshall>(req), rep);
+            if (rh.ret == rpc_protocol::unmarshall_args_failure) {
+                LOG("failed to unmarshall the arguments. You are " <<
+                    "probably calling RPC 0x" << hex << proc << " with the wrong " <<
+                    "types of arguments.");
                 VERIFY(0);
             }
             VERIFY(rh.ret >= 0);

-            rep.pack_reply_header(rh);
-            rep.take_buf(&b1,&sz1);
+            rep.pack_header(rh);
+            b1 = rep;

-            IF_LEVEL(2) LOG("rpcs::dispatch: sending and saving reply of size " << sz1 << " for rpc " <<
+            IF_LEVEL(2) LOG("sending and saving reply of size " << b1.size() << " for rpc " <<
                             h.xid << ", proc " << hex << proc << " ret " << dec << rh.ret << ", clt " << h.clt_nonce);

-            if(h.clt_nonce > 0){
-                // only record replies for clients that require at-most-once logic
-                add_reply(h.clt_nonce, h.xid, b1, sz1);
-            }
+            add_reply(h.clt_nonce, h.xid, b1);

             // get the latest connection to the client
             {
-                lock rwl(conss_m_);
-                if(c->isdead() && c != conns_[h.clt_nonce]){
-                    c->decref();
+                lock rwl(conns_m_);
+                if (c->isdead())
                     c = conns_[h.clt_nonce];
-                    c->incref();
-                }
             }

-            c->send(b1, sz1);
-            if(h.clt_nonce == 0){
-                // reply is not added to at-most-once window, free it
-                free(b1);
-            }
+            c->send(rep);
             break;
         case INPROGRESS: // server is working on this request
             break;
         case DONE: // duplicate and we still have the response
-            c->send(b1, sz1);
+            c->send(b1);
             break;
         case FORGOTTEN: // very old request and we don't have the response anymore
-            IF_LEVEL(2) LOG("rpcs::dispatch: very old request " << h.xid << " from " << h.clt_nonce);
-            rh.ret = rpc_const::atmostonce_failure;
-            rep.pack_reply_header(rh);
-            c->send(rep.cstr(),rep.size());
+            IF_LEVEL(2) LOG("very old request " << h.xid << " from " << h.clt_nonce);
+            rh.ret = rpc_protocol::atmostonce_failure;
+            rep.pack_header(rh);
+            c->send(rep);
             break;
     }
-    c->decref();
 }

 // rpcs::dispatch calls this when an RPC request arrives.
@@ -583,29 +456,27 @@ rpcs::dispatch(djob_t *j)
 // returns one of:
 //   NEW: never seen this xid before.
 //   INPROGRESS: seen this xid, and still processing it.
-//   DONE: seen this xid, previous reply returned in *b and *sz.
+//   DONE: seen this xid, previous reply returned in b.
 //   FORGOTTEN: might have seen this xid, but deleted previous reply.
 rpcs::rpcstate_t
-rpcs::checkduplicate_and_update(unsigned int clt_nonce, int xid,
-        int xid_rep, char **b, size_t *sz)
+rpcs::check_duplicate_and_update(nonce_t clt_nonce, xid_t xid,
+        xid_t xid_rep, string & b)
 {
     lock rwl(reply_window_m_);

-    list<reply_t> &l = reply_window_[clt_nonce];
+    list<reply_t> & l = reply_window_[clt_nonce];

     VERIFY(l.size() > 0);
     VERIFY(xid >= xid_rep);

-    int past_xid_rep = l.begin()->xid;
+    xid_t past_xid_rep = l.begin()->xid;

     list<reply_t>::iterator start = l.begin(), it = ++start;

     if (past_xid_rep < xid_rep || past_xid_rep == -1) {
         // scan for deletion candidates
-        for (; it != l.end() && it->xid < xid_rep; it++) {
-            if (it->cb_present)
-                free(it->buf);
-        }
+        while (it != l.end() && it->xid < xid_rep)
+            it++;
         l.erase(start, it);
         l.begin()->xid = xid_rep;
     }
@@ -621,12 +492,10 @@ rpcs::checkduplicate_and_update(unsigned int clt_nonce, int xid,
     if (it != l.end() && it->xid == xid) {
         if (it->cb_present) {
             // return information about the remembered reply
-            *b = it->buf;
-            *sz = it->sz;
+            b = it->buf;
             return DONE;
-        } else {
-            return INPROGRESS;
         }
+        return INPROGRESS;
     } else {
         // remember that a new request has arrived
         l.insert(it, reply_t(xid));
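check_duplicate_and_update is the heart of server-side at-most-once. Stripped of the list bookkeeping, the classification it performs is roughly the following sketch, where window_start is the xid stored in the first list element (the real function also slides the window forward using the client's xid_rep before classifying):

// Rough sketch of the four outcomes decided above.
enum rpcstate_t { NEW, INPROGRESS, DONE, FORGOTTEN };

rpcstate_t classify(int xid, int window_start, bool seen, bool have_reply) {
    if (xid <= window_start)            // reply was discarded long ago
        return FORGOTTEN;
    if (!seen)                          // first sighting: run the handler
        return NEW;
    return have_reply ? DONE            // resend the remembered reply
                      : INPROGRESS;     // handler still running; do nothing
}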
@@ -635,81 +504,56 @@
 }

 // rpcs::dispatch calls add_reply when it is sending a reply to an RPC,
-// and passes the return value in b and sz.
-// add_reply() should remember b and sz.
-// free_reply_window() and checkduplicate_and_update is responsible for
-// calling free(b).
-void
-rpcs::add_reply(unsigned int clt_nonce, int xid,
-        char *b, size_t sz)
-{
+// and passes the return value in b.
+// add_reply() should remember b.
+void rpcs::add_reply(nonce_t clt_nonce, xid_t xid, const string & b) {
     lock rwl(reply_window_m_);
     // remember the RPC reply value
-    list<reply_t> &l = reply_window_[clt_nonce];
+    list<reply_t> & l = reply_window_[clt_nonce];
     list<reply_t>::iterator it = l.begin();
     // skip to our place in the list
     for (it++; it != l.end() && it->xid < xid; it++);
     // there should already be an entry, so whine if there isn't
     if (it == l.end() || it->xid != xid) {
-        cerr << "Could not find reply struct in add_reply" << endl;
-        l.insert(it, reply_t(xid, b, sz));
+        LOG("Could not find reply struct in add_reply");
+        l.insert(it, reply_t(xid, b));
     } else {
-        *it = reply_t(xid, b, sz);
+        *it = reply_t(xid, b);
     }
 }

-void rpcs::free_reply_window(void) {
-    lock rwl(reply_window_m_);
-    for (auto clt : reply_window_) {
-        for (auto it : clt.second){
-            if (it.cb_present)
-                free(it.buf);
-        }
-        clt.second.clear();
-    }
-    reply_window_.clear();
-}
-
-int rpcs::rpcbind(unsigned int &r, int) {
-    IF_LEVEL(2) LOG("rpcs::rpcbind called return nonce " << nonce_);
+rpc_protocol::status rpcs::rpcbind(nonce_t & r) {
+    IF_LEVEL(2) LOG("called return nonce " << nonce_);
     r = nonce_;
     return 0;
 }

-bool operator<(const sockaddr_in &a, const sockaddr_in &b){
-    return ((a.sin_addr.s_addr < b.sin_addr.s_addr) ||
-            ((a.sin_addr.s_addr == b.sin_addr.s_addr) &&
-             ((a.sin_port < b.sin_port))));
-}
-
-/*---------------auxilary function--------------*/
-sockaddr_in make_sockaddr(const string &hostandport) {
+static sockaddr_in make_sockaddr(const string & hostandport) {
+    string host = "127.0.0.1";
+    string port = hostandport;
     auto colon = hostandport.find(':');
-    if (colon == string::npos)
-        return make_sockaddr("127.0.0.1", hostandport);
-    else
-        return make_sockaddr(hostandport.substr(0, colon), hostandport.substr(colon+1));
-}
+    if (colon != string::npos) {
+        host = hostandport.substr(0, colon);
+        port = hostandport.substr(colon+1);
+    }

-sockaddr_in make_sockaddr(const string &host, const string &port) {
-    sockaddr_in dst;
-    bzero(&dst, sizeof(dst));
+    sockaddr_in dst{}; // zero initialize
     dst.sin_family = AF_INET;

     struct in_addr a{inet_addr(host.c_str())};

-    if(a.s_addr != INADDR_NONE)
+    if (a.s_addr != INADDR_NONE)
         dst.sin_addr.s_addr = a.s_addr;
     else {
         struct hostent *hp = gethostbyname(host.c_str());

         if (!hp || hp->h_length != 4 || hp->h_addrtype != AF_INET) {
-            cerr << "cannot find host name " << host << endl;
+            LOG_NONMEMBER("cannot find host name " << host);
             exit(1);
         }
         memcpy(&a, hp->h_addr_list[0], sizeof(in_addr_t));
         dst.sin_addr.s_addr = a.s_addr;
     }

-    dst.sin_port = hton((uint16_t)stoi(port));
+    dst.sin_port = hton((in_port_t)stoi(port));
     return dst;
 }
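make_sockaddr accepts either "host:port" or a bare port, defaulting the host to 127.0.0.1. A small hypothetical check of that behavior (10.0.0.1 is an arbitrary address; since the helper is now file-static, this would have to live in the same file):

// Usage sketch for the helper above.
void make_sockaddr_demo() {
    sockaddr_in a = make_sockaddr("10.0.0.1:30000");
    sockaddr_in b = make_sockaddr("30000");   // host defaults to 127.0.0.1
    VERIFY(a.sin_port == b.sin_port);         // both carry port 30000
}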