2 The rpcc class handles client-side RPC. Each rpcc is bound to a single RPC
3 server. The jobs of rpcc include maintaining a connection to the server, sending
4 RPC requests and waiting for responses, retransmissions, at-most-once delivery, etc.
7 The rpcs class handles the server side of RPC. Each rpcs handles multiple
8 connections from different rpcc objects. The jobs of rpcs include accepting
9 connections, dispatching requests to registered RPC handlers, at-most-once delivery, etc.
12 Both rpcc and rpcs use the connection class as an abstraction for the
13 underlying communication channel. To send an RPC request/reply, one calls
14 connection::send() which blocks until data is sent or the connection has
15 failed (thus the caller can free the buffer when send() returns). When a
16 request/reply is received, connection makes a callback into the corresponding
17 rpcc or rpcs (see rpcc::got_pdu() and rpcs::got_pdu()).
20 rpcc uses application threads to send RPC requests and blocks to receive the
21 reply or error. All connections use a single PollMgr object to perform async
22 socket IO. PollMgr creates a single thread to examine the readiness of socket
23 file descriptors and informs the corresponding connection whenever a socket is
24 ready to be read or written. (We use asynchronous socket IO to reduce the
25 number of threads needed to manage these connections; without async IO, at
26 least one thread is needed per connection to read data without blocking other
27 activities.) Each rpcs object creates one thread for listening on the server
28 port and a pool of threads for executing RPC requests. The thread pool allows
29 us to control the number of threads spawned at the server (spawning one thread
30 per request will hurt when the server faces thousands of requests).
32 In order to delete a connection object, we must maintain a reference count.
33 For rpcc, multiple client threads might be invoking the rpcc::call() functions
34 and thus holding multiple references to the underlying connection object. For
35 rpcs, multiple dispatch threads might be holding references to the same
36 connection object. A connection object is deleted only when the underlying
37 connection is dead and the reference count reaches zero.
39 This version of the RPC library explicitly joins exited threads to make sure
40 no outstanding references exist before deleting objects.
42 To delete a rpcc object safely, the users of the library must ensure that
43 there are no outstanding calls on the rpcc object.
45 To delete a rpcs object safely, we do the following in sequence: 1. stop
46 accepting new incoming connections. 2. close existing active connections. 3.
47 delete the dispatch thread pool which involves waiting for current active RPC
48 handlers to finish. It is interesting how a thread pool can be deleted
49 without using thread cancellation. The trick is to inject x "poison pills" for
50 a thread pool of x threads. Upon getting a poison pill instead of a normal
51 task, a worker thread will exit (and thread pool destructor waits to join all
52 x exited worker threads).
58 #include <sys/types.h>
59 #include <arpa/inet.h>
60 #include <netinet/tcp.h>
64 inline void set_rand_seed() {
65 auto now = time_point_cast<nanoseconds>(steady_clock::now());
66 srandom((uint32_t)now.time_since_epoch().count()^(uint32_t)getpid());
69 static sockaddr_in make_sockaddr(const string &hostandport);
71 rpcc::rpcc(const string & d, bool retrans) :
72 dst_(make_sockaddr(d)), srv_nonce_(0), bind_done_(false), xid_(1), lossytest_(0),
73 retrans_(retrans), reachable_(true), chan_(), destroy_wait_ (false), xid_rep_done_(-1)
77 clt_nonce_ = (unsigned int)random();
79 // special client nonce 0 means this client does not
80 // require at-most-once logic from the server
81 // because it uses tcp and never retries a failed connection
85 char *loss_env = getenv("RPC_LOSSY");
87 lossytest_ = atoi(loss_env);
89 // xid starts with 1 and latest received reply starts with 0
90 xid_rep_window_.push_back(0);
92 IF_LEVEL(2) LOG("cltn_nonce is " << clt_nonce_ << " lossy " << lossytest_);
95 // IMPORTANT: destruction should happen only when no external threads
96 // are blocked inside rpcc or will use rpcc in the future
99 IF_LEVEL(2) LOG("delete nonce " << clt_nonce_ << " channo=" << (chan_?chan_->channo():-1));
102 VERIFY(calls_.size() == 0);
105 int rpcc::bind(milliseconds to) {
107 int ret = call_timeout(rpc_const::bind, to, r, 0);
113 IF_LEVEL(2) LOG("bind " << inet_ntoa(dst_.sin_addr) << " failed " << ret);
118 // Cancel all outstanding calls
119 void rpcc::cancel(void) {
122 LOG("force callers to fail");
123 for(auto &p : calls_){
124 caller *ca = p.second;
126 IF_LEVEL(2) LOG("force caller to fail");
130 ca->intret = rpc_const::cancel_failure;
135 while (calls_.size () > 0){
136 destroy_wait_ = true;
137 destroy_wait_c_.wait(ml);
143 int rpcc::call1(proc_t proc, marshall &req, string &rep, milliseconds to) {
150 if((proc != rpc_const::bind && !bind_done_) ||
151 (proc == rpc_const::bind && bind_done_)){
152 IF_LEVEL(1) LOG("rpcc has not been bound to dst or binding twice");
153 return rpc_const::bind_failure;
157 return rpc_const::cancel_failure;
161 calls_[ca.xid] = &ca;
163 req.pack_header(request_header{ca.xid, proc, clt_nonce_, srv_nonce_, xid_rep_window_.front()});
164 xid_rep = xid_rep_window_.front();
167 milliseconds curr_to = rpc::to_min;
168 auto finaldeadline = steady_clock::now() + to, nextdeadline = finaldeadline;
170 bool transmit = true;
171 shared_ptr<connection> ch;
181 if (dup_req_.isvalid() && xid_rep_done_ > dup_req_.xid) {
186 if (forgot.isvalid())
187 ch->send(forgot.buf);
190 else IF_LEVEL(1) LOG("not reachable");
191 IF_LEVEL(2) LOG(clt_nonce_ << " just sent req proc " << hex << proc <<
192 " xid " << dec << ca.xid << " clt_nonce " << clt_nonce_);
194 transmit = false; // only send once on a given channel
197 if(finaldeadline == time_point<steady_clock>::min())
200 nextdeadline = steady_clock::now() + curr_to;
201 if(nextdeadline > finaldeadline) {
202 nextdeadline = finaldeadline;
203 finaldeadline = time_point<steady_clock>::min();
209 IF_LEVEL(2) LOG("wait");
210 if(ca.c.wait_until(cal, nextdeadline) == cv_status::timeout){
211 IF_LEVEL(2) LOG("timeout");
216 IF_LEVEL(2) LOG("reply received");
221 if(retrans_ && (!ch || ch->isdead())) {
222 // since connection is dead, retransmit
223 // on the new connection
230 // no locking of ca.m since only this thread changes ca.xid
232 calls_.erase(ca.xid);
233 // may need to update the xid again here, in case the
234 // packet times out before it's even sent by the channel.
235 // I don't think there's any harm in maybe doing it twice
236 update_xid_rep(ca.xid);
239 destroy_wait_c_.notify_one();
243 if (ca.done && lossytest_)
246 if (!dup_req_.isvalid()) {
248 dup_req_.xid = ca.xid;
250 if (xid_rep > xid_rep_done_)
251 xid_rep_done_ = xid_rep;
256 IF_LEVEL(2) LOG(clt_nonce_ << " call done for req proc " << hex << proc <<
257 " xid " << dec << ca.xid << " " << inet_ntoa(dst_.sin_addr) << ":" <<
258 ntoh(dst_.sin_port) << " done? " << ca.done << " ret " << ca.intret);
260 // destruction of req automatically frees its buffer
261 return (ca.done? ca.intret : rpc_const::timeout_failure);
265 rpcc::get_refconn(shared_ptr<connection> & ch)
268 if (!chan_ || chan_->isdead())
269 chan_ = connect_to_dst(dst_, this, lossytest_);
275 // PollMgr's thread is being used to
276 // make this upcall from connection object to rpcc.
277 // this function must not block.
279 // this function keeps no reference for connection *c
281 rpcc::got_pdu(const shared_ptr<connection> &, const string & b)
283 unmarshall rep(b, true);
285 rep.unpack_header(h);
288 IF_LEVEL(1) LOG("unmarshall header failed!!!");
294 update_xid_rep(h.xid);
296 if(calls_.find(h.xid) == calls_.end()){
297 IF_LEVEL(2) LOG("xid " << h.xid << " no pending request");
300 caller *ca = calls_[h.xid];
307 IF_LEVEL(2) LOG("RPC reply error for xid " << h.xid << " intret " << ca->intret);
315 // assumes thread holds mutex m
317 rpcc::update_xid_rep(int xid)
319 if(xid <= xid_rep_window_.front()){
323 for (auto it = xid_rep_window_.begin(); it != xid_rep_window_.end(); it++){
325 xid_rep_window_.insert(it, xid);
329 xid_rep_window_.push_back(xid);
332 auto it = xid_rep_window_.begin();
333 for (it++; it != xid_rep_window_.end(); it++){
334 while (xid_rep_window_.front() + 1 == *it)
335 xid_rep_window_.pop_front();
339 rpcs::rpcs(in_port_t p1, size_t count)
340 : port_(p1), counting_(count), curr_counts_(count), reachable_ (true)
343 nonce_ = (unsigned int)random();
344 IF_LEVEL(2) LOG("created with nonce " << nonce_);
346 reg(rpc_const::bind, &rpcs::rpcbind, this);
347 dispatchpool_ = new ThrPool(6, false);
349 char *loss_env = getenv("RPC_LOSSY");
350 listener_ = new tcpsconn(this, port_, loss_env ? atoi(loss_env) : 0);
355 // must delete listener before dispatchpool
357 delete dispatchpool_;
362 rpcs::got_pdu(const shared_ptr<connection> & c, const string & b)
365 IF_LEVEL(1) LOG("not reachable");
369 return dispatchpool_->addJob(bind(&rpcs::dispatch, this, c, b));
373 rpcs::reg1(proc_t proc, handler *h)
376 VERIFY(procs_.count(proc) == 0);
378 VERIFY(procs_.count(proc) >= 1);
382 rpcs::updatestat(proc_t proc)
387 if(curr_counts_ == 0){
389 for (auto i = counts_.begin(); i != counts_.end(); i++)
390 LOG(hex << i->first << ":" << dec << i->second);
392 lock rwl(reply_window_m_);
394 size_t totalrep = 0, maxrep = 0;
395 for (auto clt : reply_window_) {
396 totalrep += clt.second.size();
397 if(clt.second.size() > maxrep)
398 maxrep = clt.second.size();
400 IF_LEVEL(1) LOG("REPLY WINDOW: clients " << (reply_window_.size()-1) << " total reply " <<
401 totalrep << " max per client " << maxrep);
402 curr_counts_ = counting_;
406 void rpcs::dispatch(shared_ptr<connection> c, const string & buf) {
407 unmarshall req(buf, true);
410 req.unpack_header(h);
411 proc_t proc = h.proc;
414 IF_LEVEL(1) LOG("unmarshall header failed");
418 IF_LEVEL(2) LOG("rpc " << h.xid << " (proc " << hex << proc << ", last_rep " <<
419 dec << h.xid_rep << ") from clt " << h.clt_nonce << " for srv instance " << h.srv_nonce);
422 reply_header rh{h.xid,0};
424 // is client sending to an old instance of server?
425 if(h.srv_nonce != 0 && h.srv_nonce != nonce_){
426 IF_LEVEL(2) LOG("rpc for an old server instance " << h.srv_nonce <<
427 " (current " << nonce_ << ") proc " << hex << h.proc);
428 rh.ret = rpc_const::oldsrv_failure;
435 // is RPC proc a registered procedure?
438 if(procs_.count(proc) < 1){
439 cerr << "unknown proc " << hex << proc << "." << endl;
447 rpcs::rpcstate_t stat;
451 // have i seen this client before?
453 lock rwl(reply_window_m_);
454 // if we don't know about this clt_nonce, create a cleanup object
455 if(reply_window_.find(h.clt_nonce) == reply_window_.end()){
456 VERIFY (reply_window_[h.clt_nonce].size() == 0); // create
457 reply_window_[h.clt_nonce].push_back(reply_t(-1)); // store starting reply xid
458 IF_LEVEL(2) LOG("new client " << h.clt_nonce << " xid " << h.xid <<
459 " chan " << c->channo() << ", total clients " << (reply_window_.size()-1));
463 // save the latest good connection to the client
466 if (conns_.find(h.clt_nonce) == conns_.end())
467 conns_[h.clt_nonce] = c;
468 else if(conns_[h.clt_nonce]->create_time() < c->create_time())
469 conns_[h.clt_nonce] = c;
472 stat = checkduplicate_and_update(h.clt_nonce, h.xid, h.xid_rep, b1);
474 // this client does not require at most once logic
479 case NEW: // new request
484 rh.ret = (*f)(req, rep);
485 if (rh.ret == rpc_const::unmarshal_args_failure) {
486 cerr << "failed to unmarshall the arguments. You are " <<
487 "probably calling RPC 0x" << hex << proc << " with the wrong " <<
488 "types of arguments." << endl;
496 IF_LEVEL(2) LOG("sending and saving reply of size " << b1.size() << " for rpc " <<
497 h.xid << ", proc " << hex << proc << " ret " << dec << rh.ret << ", clt " << h.clt_nonce);
499 if (h.clt_nonce > 0) {
500 // only record replies for clients that require at-most-once logic
501 add_reply(h.clt_nonce, h.xid, b1);
504 // get the latest connection to the client
508 c = conns_[h.clt_nonce];
513 case INPROGRESS: // server is working on this request
515 case DONE: // duplicate and we still have the response
518 case FORGOTTEN: // very old request and we don't have the response anymore
519 IF_LEVEL(2) LOG("very old request " << h.xid << " from " << h.clt_nonce);
520 rh.ret = rpc_const::atmostonce_failure;
527 // rpcs::dispatch calls this when an RPC request arrives.
529 // checks to see if an RPC with xid from clt_nonce has already been received.
530 // if not, remembers the request in reply_window_.
532 // deletes remembered requests with XIDs <= xid_rep; the client
533 // says it has received a reply for every RPC up through xid_rep.
534 // frees the reply_t::buf of each such request.
537 // NEW: never seen this xid before.
538 // INPROGRESS: seen this xid, and still processing it.
539 // DONE: seen this xid, previous reply returned in b.
540 // FORGOTTEN: might have seen this xid, but deleted previous reply.
542 rpcs::checkduplicate_and_update(unsigned int clt_nonce, int xid,
543 int xid_rep, string & b)
545 lock rwl(reply_window_m_);
547 list<reply_t> &l = reply_window_[clt_nonce];
549 VERIFY(l.size() > 0);
550 VERIFY(xid >= xid_rep);
552 int past_xid_rep = l.begin()->xid;
554 list<reply_t>::iterator start = l.begin(), it = ++start;
556 if (past_xid_rep < xid_rep || past_xid_rep == -1) {
557 // scan for deletion candidates
558 while (it != l.end() && it->xid < xid_rep)
561 l.begin()->xid = xid_rep;
564 if (xid < past_xid_rep && past_xid_rep != -1)
567 // skip non-deletion candidates
568 while (it != l.end() && it->xid < xid)
571 // if it's in the list it must be right here
572 if (it != l.end() && it->xid == xid) {
573 if (it->cb_present) {
574 // return information about the remembered reply
580 // remember that a new request has arrived
581 l.insert(it, reply_t(xid));
586 // rpcs::dispatch calls add_reply when it is sending a reply to an RPC,
587 // and passes the return value in b.
588 // add_reply() should remember b.
589 // free_reply_window() and checkduplicate_and_update are responsible for
590 // cleaning up the remembered values.
591 void rpcs::add_reply(unsigned int clt_nonce, int xid, const string & b) {
592 lock rwl(reply_window_m_);
593 // remember the RPC reply value
594 list<reply_t> &l = reply_window_[clt_nonce];
595 list<reply_t>::iterator it = l.begin();
596 // skip to our place in the list
597 for (it++; it != l.end() && it->xid < xid; it++);
598 // there should already be an entry, so whine if there isn't
599 if (it == l.end() || it->xid != xid) {
600 cerr << "Could not find reply struct in add_reply" << endl;
601 l.insert(it, reply_t(xid, b));
603 *it = reply_t(xid, b);
607 void rpcs::free_reply_window(void) {
608 lock rwl(reply_window_m_);
609 reply_window_.clear();
612 int rpcs::rpcbind(unsigned int &r, int) {
613 IF_LEVEL(2) LOG("called return nonce " << nonce_);
618 static sockaddr_in make_sockaddr(const string &host, const string &port);
620 static sockaddr_in make_sockaddr(const string &hostandport) {
621 auto colon = hostandport.find(':');
622 if (colon == string::npos)
623 return make_sockaddr("127.0.0.1", hostandport);
625 return make_sockaddr(hostandport.substr(0, colon), hostandport.substr(colon+1));
628 static sockaddr_in make_sockaddr(const string &host, const string &port) {
630 bzero(&dst, sizeof(dst));
631 dst.sin_family = AF_INET;
633 struct in_addr a{inet_addr(host.c_str())};
635 if(a.s_addr != INADDR_NONE)
636 dst.sin_addr.s_addr = a.s_addr;
638 struct hostent *hp = gethostbyname(host.c_str());
640 if (!hp || hp->h_length != 4 || hp->h_addrtype != AF_INET) {
641 cerr << "cannot find host name " << host << endl;
644 memcpy(&a, hp->h_addr_list[0], sizeof(in_addr_t));
645 dst.sin_addr.s_addr = a.s_addr;
647 dst.sin_port = hton((in_port_t)stoi(port));