2 The rpcc class handles client-side RPC. Each rpcc is bound to a single RPC
3 server. The jobs of rpcc include maintaining a connection to server, sending
4 RPC requests and waiting for responses, retransmissions, at-most-once delivery
7 The rpcs class handles the server side of RPC. Each rpcs handles multiple
8 connections from different rpcc objects. The jobs of rpcs include accepting
9 connections, dispatching requests to registered RPC handlers, at-most-once
12 Both rpcc and rpcs use the connection class as an abstraction for the
13 underlying communication channel. To send an RPC request/reply, one calls
14 connection::send() which blocks until data is sent or the connection has
15 failed (thus the caller can free the buffer when send() returns). When a
16 request/reply is received, connection makes a callback into the corresponding
17 rpcc or rpcs (see rpcc::got_pdu() and rpcs::got_pdu()).
20 rpcc uses application threads to send RPC requests and blocks to receive the
21 reply or error. All connections use a single PollMgr object to perform async
22 socket IO. PollMgr creates a single thread to examine the readiness of socket
23 file descriptors and informs the corresponding connection whenever a socket is
24 ready to be read or written. (We use asynchronous socket IO to reduce the
25 number of threads needed to manage these connections; without async IO, at
26 least one thread is needed per connection to read data without blocking other
27 activities.) Each rpcs object creates one thread for listening on the server
28 port and a pool of threads for executing RPC requests. The thread pool allows
29 us to control the number of threads spawned at the server (spawning one thread
30 per request will hurt when the server faces thousands of requests).
32 In order to delete a connection object, we must maintain a reference count.
33 For rpcc, multiple client threads might be invoking the rpcc::call() functions
34 and thus holding multiple references to the underlying connection object. For
35 rpcs, multiple dispatch threads might be holding references to the same
36 connection object. A connection object is deleted only when the underlying
37 connection is dead and the reference count reaches zero.
39 This version of the RPC library explicitly joins exited threads to make sure
40 no outstanding references exist before deleting objects.
42 To delete a rpcc object safely, the users of the library must ensure that
43 there are no outstanding calls on the rpcc object.
45 To delete a rpcs object safely, we do the following in sequence: 1. stop
46 accepting new incoming connections. 2. close existing active connections. 3.
47 delete the dispatch thread pool which involves waiting for current active RPC
48 handlers to finish. It is interesting how a thread pool can be deleted
49 without using thread cancellation. The trick is to inject x "poison pills" for
50 a thread pool of x threads. Upon getting a poison pill instead of a normal
51 task, a worker thread will exit (and thread pool destructor waits to join all
52 x exited worker threads).
58 #include <sys/types.h>
59 #include <arpa/inet.h>
60 #include <netinet/tcp.h>
// Seed the process-wide PRNG (random()) from the steady clock's nanosecond
// count XORed with the pid, so concurrently launched processes draw
// different client/server nonces.
64 inline void set_rand_seed() {
65 auto now = time_point_cast<nanoseconds>(steady_clock::now());
66 srandom((uint32_t)now.time_since_epoch().count()^(uint32_t)getpid());
69 static sockaddr_in make_sockaddr(const string &hostandport);
// rpcc constructor: bind this client object to the single server address
// "d" ("host:port"), draw a random client nonce, and honor the RPC_LOSSY
// environment variable for lossy-channel testing.
71 rpcc::rpcc(const string & d, bool retrans) :
72 dst_(make_sockaddr(d)), srv_nonce_(0), bind_done_(false), xid_(1), lossytest_(0),
73 retrans_(retrans), reachable_(true), chan_(), destroy_wait_ (false), xid_rep_done_(-1)
77 clt_nonce_ = (unsigned int)random();
79 // special client nonce 0 means this client does not
80 // require at-most-once logic from the server
81 // because it uses tcp and never retries a failed connection
85 char *loss_env = getenv("RPC_LOSSY");
// NOTE(review): the guard between these two lines is elided in this listing;
// presumably lossytest_ is only assigned when RPC_LOSSY is set — confirm.
87 lossytest_ = atoi(loss_env);
89 // xid starts with 1 and latest received reply starts with 0
90 xid_rep_window_.push_back(0);
92 IF_LEVEL(2) LOG("cltn_nonce is " << clt_nonce_ << " lossy " << lossytest_);
// rpcc destructor.
95 // IMPORTANT: destruction should happen only when no external threads
96 // are blocked inside rpcc or will use rpcc in the future
99 IF_LEVEL(2) LOG("delete nonce " << clt_nonce_ << " channo=" << (chan_?chan_->channo():-1));
// All outstanding calls must already have drained (see rpcc::cancel).
102 VERIFY(calls_.size() == 0);
// Bind this client to the server: issue the special bind RPC (with timeout
// "to") to fetch the server instance's nonce; logs on failure.
105 int rpcc::bind(milliseconds to) {
107 int ret = call_timeout(rpc_const::bind, to, r, 0);
113 IF_LEVEL(2) LOG("bind " << inet_ntoa(dst_.sin_addr) << " failed " << ret);
118 // Cancel all outstanding calls
119 void rpcc::cancel(void) {
122 LOG("force callers to fail");
123 for(auto &p : calls_){
124 caller *ca = p.second;
126 IF_LEVEL(2) LOG("force caller to fail");
// Complete each waiting caller with cancel_failure so it unblocks.
130 ca->intret = rpc_const::cancel_failure;
// Block until every caller has noticed the failure and removed itself
// from calls_; destroy_wait_c_ is signalled by call1 as callers finish.
135 while (calls_.size () > 0){
136 destroy_wait_ = true;
137 destroy_wait_c_.wait(ml);
// call1: core client-side RPC send loop. Packs the request header,
// (re)transmits on the current channel, and waits — with a growing per-try
// timeout bounded by the final deadline "to" — for the matching reply.
// Returns ca.intret when done, timeout_failure if the deadline passes.
// NOTE(review): this listing omits a number of original lines (see the
// embedded line-number gaps), so some control flow is not visible here.
143 int rpcc::call1(proc_t proc, marshall &req, string &rep, milliseconds to) {
// Reject calls before bind() succeeds, and reject a second bind().
150 if((proc != rpc_const::bind && !bind_done_) ||
151 (proc == rpc_const::bind && bind_done_)){
152 IF_LEVEL(1) LOG("rpcc has not been bound to dst or binding twice");
153 return rpc_const::bind_failure;
157 return rpc_const::cancel_failure;
// Register the caller so got_pdu can find it by xid.
161 calls_[ca.xid] = &ca;
// Stamp the header with xid, proc, both nonces, and the oldest xid for
// which all prior replies have been received (for server window pruning).
163 req.pack_header(request_header{ca.xid, proc, clt_nonce_, srv_nonce_, xid_rep_window_.front()});
164 xid_rep = xid_rep_window_.front();
167 milliseconds curr_to = rpc::to_min;
168 auto finaldeadline = steady_clock::now() + to, nextdeadline = finaldeadline;
170 bool transmit = true;
171 shared_ptr<connection> ch;
// Piggyback a retransmission of a remembered duplicate request whose
// reply window has since advanced ("forgot" path, lossy testing).
181 if (dup_req_.isvalid() && xid_rep_done_ > dup_req_.xid) {
186 if (forgot.isvalid())
187 ch->send(forgot.buf);
190 else IF_LEVEL(1) LOG("not reachable");
191 IF_LEVEL(2) LOG(clt_nonce_ << " just sent req proc " << hex << proc <<
192 " xid " << dec << ca.xid << " clt_nonce " << clt_nonce_);
194 transmit = false; // only send once on a given channel
// time_point<steady_clock>::min() is used as a sentinel meaning the final
// deadline has already been folded into nextdeadline.
197 if(finaldeadline == time_point<steady_clock>::min())
200 nextdeadline = steady_clock::now() + curr_to;
201 if(nextdeadline > finaldeadline) {
202 nextdeadline = finaldeadline;
203 finaldeadline = time_point<steady_clock>::min();
209 IF_LEVEL(2) LOG("wait");
// Wait for got_pdu to signal the reply, or for this try's deadline.
210 if(ca.c.wait_until(cal, nextdeadline) == cv_status::timeout){
211 IF_LEVEL(2) LOG("timeout");
216 IF_LEVEL(2) LOG("reply received");
// If the channel died, loop around and retransmit on a fresh one.
221 if(retrans_ && (!ch || ch->isdead())) {
222 // since connection is dead, retransmit
223 // on the new connection
230 // no locking of ca.m since only this thread changes ca.xid
231 calls_.erase(ca.xid);
232 calls_.erase(ca.xid);
233 // may need to update the xid again here, in case the
234 // packet times out before it's even sent by the channel.
235 // I don't think there's any harm in maybe doing it twice
236 update_xid_rep(ca.xid);
// Wake a cancel()/destructor waiting for calls_ to drain.
239 destroy_wait_c_.notify_one();
// Under lossy testing, remember this request so it can be re-sent later.
243 if (ca.done && lossytest_)
246 if (!dup_req_.isvalid()) {
248 dup_req_.xid = ca.xid;
250 if (xid_rep > xid_rep_done_)
251 xid_rep_done_ = xid_rep;
256 IF_LEVEL(2) LOG(clt_nonce_ << " call done for req proc " << hex << proc <<
257 " xid " << dec << ca.xid << " " << inet_ntoa(dst_.sin_addr) << ":" <<
258 ntoh(dst_.sin_port) << " done? " << ca.done << " ret " << ca.intret);
260 // destruction of req automatically frees its buffer
261 return (ca.done? ca.intret : rpc_const::timeout_failure);
// Hand out (via "ch") a reference-counted connection to the server,
// creating a fresh one if none exists yet or the current one is dead.
265 rpcc::get_refconn(shared_ptr<connection> & ch)
268 if (!chan_ || chan_->isdead())
269 chan_ = connect_to_dst(dst_, this, lossytest_);
275 // PollMgr's thread is being used to
276 // make this upcall from connection object to rpcc.
277 // this function must not block.
279 // this function keeps no reference for connection *c
// Unmarshall the reply header, advance the reply window, locate the
// waiting caller by xid, and hand it the result (or log if no caller
// is pending for that xid).
281 rpcc::got_pdu(const shared_ptr<connection> &, const string & b)
283 unmarshall rep(b, true);
285 rep.unpack_header(h);
288 IF_LEVEL(1) LOG("unmarshall header failed!!!");
294 update_xid_rep(h.xid);
296 if(calls_.find(h.xid) == calls_.end()){
297 IF_LEVEL(2) LOG("xid " << h.xid << " no pending request");
300 caller *ca = calls_[h.xid];
307 IF_LEVEL(2) LOG("RPC reply error for xid " << h.xid << " intret " << ca->intret);
315 // assumes thread holds mutex m
// Record that the reply for "xid" has been received. xid_rep_window_ is a
// sorted list whose front is the xid below which every reply has arrived;
// after inserting xid in order, the front is advanced over any entries
// that are now contiguous with it.
317 rpcc::update_xid_rep(int xid)
// Already covered by the window's front — nothing to record.
319 if(xid <= xid_rep_window_.front()){
// Find the sorted insertion point (the comparison inside this loop is
// elided in this listing).
323 for (auto it = xid_rep_window_.begin(); it != xid_rep_window_.end(); it++){
325 xid_rep_window_.insert(it, xid);
329 xid_rep_window_.push_back(xid);
// Compact: pop the front while the next entry is contiguous with it.
332 auto it = xid_rep_window_.begin();
333 for (it++; it != xid_rep_window_.end(); it++){
334 while (xid_rep_window_.front() + 1 == *it)
335 xid_rep_window_.pop_front();
// rpcs constructor: serve on port p1; non-zero "count" enables periodic
// RPC statistics (see updatestat). Registers the built-in bind handler,
// creates a 6-thread dispatch pool, and starts the TCP listener (lossy
// when RPC_LOSSY is set).
339 rpcs::rpcs(in_port_t p1, size_t count)
340 : port_(p1), counting_(count), curr_counts_(count), reachable_ (true)
343 nonce_ = (unsigned int)random();
344 IF_LEVEL(2) LOG("created with nonce " << nonce_);
346 reg(rpc_const::bind, &rpcs::rpcbind, this);
347 dispatchpool_ = unique_ptr<ThrPool>(new ThrPool(6, false));
351 char *loss_env = getenv("RPC_LOSSY");
352 listener_ = unique_ptr<tcpsconn>(new tcpsconn(this, port_, loss_env ? atoi(loss_env) : 0));
357 // must delete listener before dispatchpool
// Resetting the pool's unique_ptr runs the ThrPool destructor, which
// waits for in-flight handlers (the poison-pill shutdown described in
// the file header).
359 dispatchpool_ = nullptr;
// Upcall from a connection when a full request PDU has arrived: queue the
// request for a dispatch-pool worker (drops it when not reachable_).
// Runs on PollMgr's thread, so it must not block.
364 rpcs::got_pdu(const shared_ptr<connection> & c, const string & b)
367 IF_LEVEL(1) LOG("not reachable");
371 return dispatchpool_->addJob(bind(&rpcs::dispatch, this, c, b));
// Register handler "h" for procedure number "proc"; registering the same
// proc twice is a fatal error (first VERIFY).
375 rpcs::reg1(proc_t proc, handler *h)
378 VERIFY(procs_.count(proc) == 0);
// (The insertion into procs_ between these checks is elided in this listing.)
380 VERIFY(procs_.count(proc) >= 1);
// Count one invocation of "proc"; every counting_ calls, dump per-proc
// totals and a summary of the at-most-once reply-window sizes, then
// reset the countdown.
384 rpcs::updatestat(proc_t proc)
389 if(curr_counts_ == 0){
391 for (auto i = counts_.begin(); i != counts_.end(); i++)
392 LOG(hex << i->first << ":" << dec << i->second);
394 lock rwl(reply_window_m_);
396 size_t totalrep = 0, maxrep = 0;
// NOTE(review): "auto clt" copies each map entry (key + reply list) per
// iteration; "const auto &clt" would avoid the copy.
397 for (auto clt : reply_window_) {
398 totalrep += clt.second.size();
399 if(clt.second.size() > maxrep)
400 maxrep = clt.second.size();
// "-1" excludes the sentinel entry each per-client window starts with.
402 IF_LEVEL(1) LOG("REPLY WINDOW: clients " << (reply_window_.size()-1) << " total reply " <<
403 totalrep << " max per client " << maxrep);
404 curr_counts_ = counting_;
// dispatch: executed by a thread-pool worker for one request "buf" that
// arrived on connection "c". Unmarshalls the header, enforces server-
// instance and at-most-once checks, runs the registered handler, and
// sends (and for nonced clients, saves) the reply.
// NOTE(review): several original lines (returns, switch scaffolding,
// send calls) are elided in this listing — see the line-number gaps.
408 void rpcs::dispatch(shared_ptr<connection> c, const string & buf) {
409 unmarshall req(buf, true);
412 req.unpack_header(h);
413 proc_t proc = h.proc;
416 IF_LEVEL(1) LOG("unmarshall header failed");
420 IF_LEVEL(2) LOG("rpc " << h.xid << " (proc " << hex << proc << ", last_rep " <<
421 dec << h.xid_rep << ") from clt " << h.clt_nonce << " for srv instance " << h.srv_nonce);
// Reply header echoes the request xid; ret filled in below.
424 reply_header rh{h.xid,0};
426 // is client sending to an old instance of server?
427 if(h.srv_nonce != 0 && h.srv_nonce != nonce_){
428 IF_LEVEL(2) LOG("rpc for an old server instance " << h.srv_nonce <<
429 " (current " << nonce_ << ") proc " << hex << h.proc);
430 rh.ret = rpc_const::oldsrv_failure;
437 // is RPC proc a registered procedure?
440 if(procs_.count(proc) < 1){
441 LOG("unknown proc 0x" << hex << proc << " with h.srv_nonce=" << h.srv_nonce << ", my srv_nonce=" << nonce_);
449 rpcs::rpcstate_t stat;
453 // have i seen this client before?
455 lock rwl(reply_window_m_);
456 // if we don't know about this clt_nonce, create a cleanup object
457 if(reply_window_.find(h.clt_nonce) == reply_window_.end()){
458 VERIFY (reply_window_[h.clt_nonce].size() == 0); // create
459 reply_window_[h.clt_nonce].push_back(reply_t(-1)); // store starting reply xid
460 IF_LEVEL(2) LOG("new client " << h.clt_nonce << " xid " << h.xid <<
461 " chan " << c->channo() << ", total clients " << (reply_window_.size()-1));
465 // save the latest good connection to the client
468 if (conns_.find(h.clt_nonce) == conns_.end())
469 conns_[h.clt_nonce] = c;
470 else if(conns_[h.clt_nonce]->create_time() < c->create_time())
471 conns_[h.clt_nonce] = c;
// Classify the request: NEW / INPROGRESS / DONE (reply in b1) / FORGOTTEN.
474 stat = checkduplicate_and_update(h.clt_nonce, h.xid, h.xid_rep, b1);
476 // this client does not require at most once logic
481 case NEW: // new request
// Invoke the registered handler; its return value becomes rh.ret.
485 rh.ret = (*f)(req, rep);
486 if (rh.ret == rpc_const::unmarshal_args_failure) {
487 cerr << "failed to unmarshall the arguments. You are " <<
488 "probably calling RPC 0x" << hex << proc << " with the wrong " <<
489 "types of arguments." << endl;
497 IF_LEVEL(2) LOG("sending and saving reply of size " << b1.size() << " for rpc " <<
498 h.xid << ", proc " << hex << proc << " ret " << dec << rh.ret << ", clt " << h.clt_nonce);
500 if (h.clt_nonce > 0) {
501 // only record replies for clients that require at-most-once logic
502 add_reply(h.clt_nonce, h.xid, b1);
505 // get the latest connection to the client
509 c = conns_[h.clt_nonce];
514 case INPROGRESS: // server is working on this request
516 case DONE: // duplicate and we still have the response
519 case FORGOTTEN: // very old request and we don't have the response anymore
520 IF_LEVEL(2) LOG("very old request " << h.xid << " from " << h.clt_nonce);
521 rh.ret = rpc_const::atmostonce_failure;
528 // rpcs::dispatch calls this when an RPC request arrives.
530 // checks to see if an RPC with xid from clt_nonce has already been received.
531 // if not, remembers the request in reply_window_.
533 // deletes remembered requests with XIDs <= xid_rep; the client
534 // says it has received a reply for every RPC up through xid_rep.
535 // frees the reply_t::buf of each such request.
538 // NEW: never seen this xid before.
539 // INPROGRESS: seen this xid, and still processing it.
540 // DONE: seen this xid, previous reply returned in b.
541 // FORGOTTEN: might have seen this xid, but deleted previous reply.
543 rpcs::checkduplicate_and_update(unsigned int clt_nonce, int xid,
544 int xid_rep, string & b)
546 lock rwl(reply_window_m_);
548 list<reply_t> &l = reply_window_[clt_nonce];
// The window always holds at least the sentinel entry (xid -1).
550 VERIFY(l.size() > 0);
551 VERIFY(xid >= xid_rep);
// Sentinel at the head records the highest xid_rep pruned so far.
553 int past_xid_rep = l.begin()->xid;
555 list<reply_t>::iterator start = l.begin(), it = ++start;
557 if (past_xid_rep < xid_rep || past_xid_rep == -1) {
558 // scan for deletion candidates
559 while (it != l.end() && it->xid < xid_rep)
562 l.begin()->xid = xid_rep;
// A request older than everything we still remember: FORGOTTEN.
565 if (xid < past_xid_rep && past_xid_rep != -1)
568 // skip non-deletion candidates
569 while (it != l.end() && it->xid < xid)
572 // if it's in the list it must be right here
573 if (it != l.end() && it->xid == xid) {
// cb_present distinguishes DONE (reply buffered) from INPROGRESS.
574 if (it->cb_present) {
575 // return information about the remembered reply
581 // remember that a new request has arrived
582 l.insert(it, reply_t(xid));
587 // rpcs::dispatch calls add_reply when it is sending a reply to an RPC,
588 // and passes the return value in b.
589 // add_reply() should remember b.
590 // free_reply_window() and checkduplicate_and_update are responsible for
591 // cleaning up the remembered values.
592 void rpcs::add_reply(unsigned int clt_nonce, int xid, const string & b) {
593 lock rwl(reply_window_m_);
594 // remember the RPC reply value
595 list<reply_t> &l = reply_window_[clt_nonce];
596 list<reply_t>::iterator it = l.begin();
597 // skip to our place in the list
// it++ first skips the sentinel head entry.
598 for (it++; it != l.end() && it->xid < xid; it++);
599 // there should already be an entry, so whine if there isn't
600 if (it == l.end() || it->xid != xid) {
601 cerr << "Could not find reply struct in add_reply" << endl;
602 l.insert(it, reply_t(xid, b));
// Normal path: overwrite the placeholder entry with the buffered reply.
604 *it = reply_t(xid, b);
// Drop all remembered replies for all clients (reply_t buffers are freed
// along with their list entries).
608 void rpcs::free_reply_window(void) {
609 lock rwl(reply_window_m_);
610 reply_window_.clear();
// Handler for the built-in bind RPC: returns this server instance's nonce
// through the out-parameter r.
613 int rpcs::rpcbind(unsigned int &r, int) {
614 IF_LEVEL(2) LOG("called return nonce " << nonce_);
619 static sockaddr_in make_sockaddr(const string &host, const string &port);
// Parse "host:port" — or a bare "port", which implies 127.0.0.1 — and
// delegate to the two-argument overload.
621 static sockaddr_in make_sockaddr(const string &hostandport) {
622 auto colon = hostandport.find(':');
623 if (colon == string::npos)
624 return make_sockaddr("127.0.0.1", hostandport);
626 return make_sockaddr(hostandport.substr(0, colon), hostandport.substr(colon+1));
// Resolve "host" (dotted quad via inet_addr, else DNS via gethostbyname)
// and "port" into a sockaddr_in.
629 static sockaddr_in make_sockaddr(const string &host, const string &port) {
631 bzero(&dst, sizeof(dst));
632 dst.sin_family = AF_INET;
634 struct in_addr a{inet_addr(host.c_str())};
// inet_addr returns INADDR_NONE when host is not a numeric address.
636 if(a.s_addr != INADDR_NONE)
637 dst.sin_addr.s_addr = a.s_addr;
// NOTE(review): gethostbyname is obsolete and not required to be
// thread-safe; getaddrinfo would be the modern replacement.
639 struct hostent *hp = gethostbyname(host.c_str());
641 if (!hp || hp->h_length != 4 || hp->h_addrtype != AF_INET) {
642 cerr << "cannot find host name " << host << endl;
645 memcpy(&a, hp->h_addr_list[0], sizeof(in_addr_t));
646 dst.sin_addr.s_addr = a.s_addr;
// stoi will throw on a non-numeric port string — behavior to be aware of.
648 dst.sin_port = hton((in_port_t)stoi(port));