// The rpcc class handles client-side RPC. Each rpcc is bound to a single RPC
// server. The jobs of rpcc include maintaining a connection to the server,
// sending RPC requests and waiting for responses, retransmissions, and
// at-most-once delivery.
//
// The rpcs class handles the server side of RPC. Each rpcs handles multiple
// connections from different rpcc objects. The jobs of rpcs include accepting
// connections, dispatching requests to registered RPC handlers, and
// at-most-once delivery.
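//
// A rough usage sketch (illustrative only: the "ex_protocol" entry, the adder
// handler, rpcs::start(), and rpcc::call() as a default-timeout variant of
// call_timeout() are assumptions made for this example, not facts about the
// code below):
//
//     rpcs server(port);
//     adder a;
//     server.reg(ex_protocol::add, &adder::add, &a);   // register a handler
//     server.start();                                  // begin accepting connections
//
//     rpcc client("127.0.0.1:" + std::to_string(port));
//     int ret = client.bind(milliseconds(1000));       // handshake with the server
//     if (ret == 0) {                                  // assuming 0 means success
//         int sum;
//         client.call(ex_protocol::add, 1, 2, sum);    // blocking RPC
//     }
//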
// Both rpcc and rpcs use the connection class as an abstraction for the
// underlying communication channel. To send an RPC request/reply, one calls
// connection::send() which blocks until data is sent or the connection has
// failed (thus the caller can free the buffer when send() returns). When a
// request/reply is received, connection makes a callback into the corresponding
// rpcc or rpcs (see rpcc::got_pdu() and rpcs::got_pdu()).
//
// Thread organization:
// rpcc uses application threads to send RPC requests; the calling thread blocks
// until it receives the reply or an error. All connections use a single PollMgr
// object to perform async socket IO. PollMgr creates a single thread to examine
// the readiness of socket file descriptors and informs the corresponding
// connection whenever a socket is ready to be read or written. (We use
// asynchronous socket IO to reduce the number of threads needed to manage these
// connections; without async IO, at least one thread is needed per connection
// to read data without blocking other activities.) Each rpcs object creates one
// thread for listening on the server port and a pool of threads for executing
// RPC requests. The thread pool allows us to control the number of threads
// spawned at the server (spawning one thread per request would hurt when the
// server faces thousands of concurrent requests).
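//
// A minimal sketch of that single readiness-polling thread, assuming a plain
// poll(2) loop (this is not the library's PollMgr interface, which is defined
// elsewhere):
//
//     #include <poll.h>
//     #include <vector>
//
//     void readiness_loop(std::vector<pollfd> & fds) {   // one pollfd per connection
//         while (true) {
//             // block until at least one registered socket becomes ready
//             if (poll(fds.data(), (nfds_t)fds.size(), -1) <= 0)
//                 continue;
//             for (pollfd & p : fds) {
//                 if (p.revents & POLLIN)
//                     ;   // upcall: this connection can read without blocking
//                 if (p.revents & POLLOUT)
//                     ;   // upcall: this connection can write without blocking
//             }
//         }
//     }
//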
// In order to delete a connection object, we must maintain a reference count.
// For rpcc, multiple client threads might be invoking the rpcc::call() functions
// and thus holding multiple references to the underlying connection object. For
// rpcs, multiple dispatch threads might be holding references to the same
// connection object. A connection object is deleted only when the underlying
// connection is dead and the reference count reaches zero.
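//
// A minimal sketch of that reference-counting scheme, assuming the count is
// carried by std::shared_ptr (as the shared_ptr<connection> parameters below
// suggest); "chan_m_" and "request" are hypothetical names used only for this
// example:
//
//     std::shared_ptr<connection> ch;
//     {
//         lock ml(chan_m_);   // briefly hold the lock guarding chan_
//         ch = chan_;         // this caller now holds its own reference
//     }
//     ch->send(request);      // safe even if chan_ is replaced concurrently:
//                             // the connection object is destroyed only when
//                             // the last shared_ptr to it is dropped
//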
// This version of the RPC library explicitly joins exited threads to make sure
// no outstanding references exist before deleting objects.
//
// To delete an rpcc object safely, the users of the library must ensure that
// there are no outstanding calls on the rpcc object.
//
// To delete an rpcs object safely, we do the following in sequence: 1. stop
// accepting new incoming connections. 2. close existing active connections. 3.
// delete the dispatch thread pool, which involves waiting for currently active
// RPC handlers to finish. It is interesting how a thread pool can be deleted
// without using thread cancellation. The trick is to inject x "poison pills"
// into a thread pool of x threads. Upon getting a poison pill instead of a
// normal task, a worker thread exits (and the thread pool destructor waits to
// join all x exited worker threads).
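//
// A minimal sketch of that poison-pill shutdown, assuming a queue of
// std::function jobs in which an empty function acts as the pill (the
// library's real thread pool class is not shown in this file):
//
//     #include <condition_variable>
//     #include <functional>
//     #include <mutex>
//     #include <queue>
//     #include <thread>
//     #include <vector>
//
//     class pill_pool {
//         std::queue<std::function<void()>> q;   // empty function == poison pill
//         std::mutex m;
//         std::condition_variable cv;
//         std::vector<std::thread> workers;
//
//         void worker() {
//             while (true) {
//                 std::unique_lock<std::mutex> l(m);
//                 cv.wait(l, [this] { return !q.empty(); });
//                 std::function<void()> job = std::move(q.front());
//                 q.pop();
//                 l.unlock();
//                 if (!job) return;   // got a pill: this worker exits
//                 job();              // otherwise run the task to completion
//             }
//         }
//
//     public:
//         explicit pill_pool(size_t n) {
//             for (size_t i = 0; i < n; i++)
//                 workers.emplace_back(&pill_pool::worker, this);
//         }
//         void add_job(std::function<void()> j) {
//             { std::lock_guard<std::mutex> g(m); q.push(std::move(j)); }
//             cv.notify_one();
//         }
//         ~pill_pool() {
//             // inject one pill per worker, then join every exited thread
//             { std::lock_guard<std::mutex> g(m);
//               for (size_t i = 0; i < workers.size(); i++)
//                   q.push(std::function<void()>()); }
//             cv.notify_all();
//             for (std::thread & t : workers) t.join();
//         }
//     };
//
// Because the pills are queued behind any pending jobs, all previously
// submitted work finishes before the workers exit.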

#include <arpa/inet.h>
#include <netinet/tcp.h>

using namespace std::chrono;

static sockaddr_in make_sockaddr(const string & hostandport);
rpcc::rpcc(const string & d) : dst_(make_sockaddr(d))
{
    clt_nonce_ = (nonce_t)global->random_generator();

    char *loss_env = getenv("RPC_LOSSY");
    if (loss_env)
        lossytest_ = atoi(loss_env);

    IF_LEVEL(2) LOG << "clt_nonce is " << clt_nonce_ << " lossy " << lossytest_;
}

// IMPORTANT: destruction should happen only when no external threads
// are blocked inside rpcc or will use rpcc in the future

    IF_LEVEL(2) LOG << "delete nonce " << clt_nonce_ << " chan " << (chan_?(int)chan_->fd:-1);

    VERIFY(calls_.size() == 0);

int rpcc::bind(milliseconds to) {
    nonce_t r;
    rpc_protocol::status ret = call_timeout(rpc_protocol::bind, to, r);

        IF_LEVEL(2) LOG << "bind " << inet_ntoa(dst_.sin_addr) << " failed " << ret;

shared_ptr<rpcc> rpcc::bind_cached(const string & destination) {
    auto client = global->get_handle(destination);
    lock cl = lock(client->bind_m_);
    if (!client->bind_done_) {
        LOG_NONMEMBER << "bind(\"" << destination << "\")";
        int ret = client->bind(milliseconds(1000));

            LOG_NONMEMBER << "bind failure! " << destination << " " << ret;

            LOG_NONMEMBER << "bind succeeded " << destination;

void rpcc::unbind_cached(const string & destination) {
    global->erase_handle(destination);

// Cancel all outstanding calls
void rpcc::cancel(lock & m_lock) {

    LOG << "force callers to fail";
    for (auto & p : calls_) {
        caller *ca = p.second;

        IF_LEVEL(2) LOG << "force caller to fail";

            ca->intret = rpc_protocol::cancel_failure;

    destroy_wait_ = true;
    while (calls_.size() > 0)
        destroy_wait_c_.wait(m_lock);

int rpcc::call1(proc_id_t proc, milliseconds to, string & rep, marshall & req) {

    if ((proc != rpc_protocol::bind.id && !bind_done_) || (proc == rpc_protocol::bind.id && bind_done_)) {
        IF_LEVEL(1) LOG << "rpcc has not been bound to dst or binding twice";
        return rpc_protocol::bind_failure;

        return rpc_protocol::cancel_failure;

    calls_[ca.xid] = &ca;

    req.write_header(rpc_protocol::request_header{
        ca.xid, proc, clt_nonce_, srv_nonce_, xid_rep_window_.front()
    });
    xid_rep = xid_rep_window_.front();

    milliseconds curr_to = rpc::to_min;
    auto finaldeadline = steady_clock::now() + to;

    bool transmit = true;
    shared_ptr<connection> ch;

        get_latest_connection(ch);

            if (dup_req_.isvalid() && xid_rep_done_ > dup_req_.xid) {

            if (forgot.isvalid())
                ch->send(forgot.buf);

            else IF_LEVEL(1) LOG << "not reachable";
            IF_LEVEL(2) LOG << clt_nonce_ << " just sent req proc " << std::hex << proc
                            << " xid " << std::dec << ca.xid << " clt_nonce " << clt_nonce_;

        transmit = false; // only send once on a given channel

        auto nextdeadline = std::min(steady_clock::now() + curr_to, finaldeadline);

            IF_LEVEL(2) LOG << "wait";
            if (ca.c.wait_until(cal, nextdeadline) == std::cv_status::timeout) {
                IF_LEVEL(2) LOG << "timeout";

            IF_LEVEL(2) LOG << "reply received";

        if (nextdeadline >= finaldeadline)

        // retransmit on new connection if connection is dead
        if (!ch || ch->isdead())

    // no locking of ca.m since only this thread changes ca.xid
    calls_.erase(ca.xid);
    // may need to update the xid again here, in case the
    // packet times out before it's even sent by the channel.
    // I don't think there's any harm in maybe doing it twice
    update_xid_rep(ca.xid, ml);

        destroy_wait_c_.notify_one();

    if (ca.done && lossytest_)

        if (!dup_req_.isvalid()) {

            dup_req_.xid = ca.xid;

        if (xid_rep > xid_rep_done_)
            xid_rep_done_ = xid_rep;

    IF_LEVEL(2) LOG << clt_nonce_ << " call done for req proc " << std::hex << proc
                    << " xid " << std::dec << ca.xid << " " << inet_ntoa(dst_.sin_addr) << ":"
                    << ntoh(dst_.sin_port) << " done? " << ca.done << " ret " << ca.intret;

    // destruction of req automatically frees its buffer
    return ca.done ? ca.intret : rpc_protocol::timeout_failure;

void rpcc::get_latest_connection(shared_ptr<connection> & ch) {

    if (!chan_ || chan_->isdead())
        chan_ = connection::to_dst(dst_, this, lossytest_);

// Runs in poll_mgr's thread as an upcall from the connection object to the
// rpcc. Does not call blocking RPC handlers.
bool rpcc::got_pdu(const shared_ptr<connection> &, const string & b) {
    unmarshall rep(b, true);
    rpc_protocol::reply_header h;

        IF_LEVEL(1) LOG << "unmarshall header failed!!!";

    update_xid_rep(h.xid, ml);

    if (calls_.find(h.xid) == calls_.end()) {
        IF_LEVEL(2) LOG << "xid " << h.xid << " no pending request";

    caller *ca = calls_[h.xid];

    if (ca->intret < 0) {
        IF_LEVEL(2) LOG << "RPC reply error for xid " << h.xid << " intret " << ca->intret;

void rpcc::update_xid_rep(xid_t xid, lock & m_lock) {

    if (xid <= xid_rep_window_.front())
        return;

    for (auto it = xid_rep_window_.begin(); it != xid_rep_window_.end(); it++) {

            xid_rep_window_.insert(it, xid);

    xid_rep_window_.push_back(xid);

    auto it = xid_rep_window_.begin();
    for (it++; it != xid_rep_window_.end(); it++) {
        while (xid_rep_window_.front() + 1 == *it)
            xid_rep_window_.pop_front();

rpcs::rpcs(in_port_t p1) : port_(p1)

    nonce_ = (nonce_t)global->random_generator();
    IF_LEVEL(2) LOG << "created with nonce " << nonce_;

    reg(rpc_protocol::bind, &rpcs::rpcbind, this);

    char *loss_env = getenv("RPC_LOSSY");
    listener_.reset(new connection_listener(this, port_, loss_env ? atoi(loss_env) : 0));

    // must delete listener before dispatchpool

    dispatchpool_ = nullptr;

bool rpcs::got_pdu(const shared_ptr<connection> & c, const string & b) {

        IF_LEVEL(1) LOG << "not reachable";

    return dispatchpool_->addJob(std::bind(&rpcs::dispatch, this, c, b));

void rpcs::dispatch(shared_ptr<connection> c, const string & buf) {
    unmarshall req(buf, true);

    rpc_protocol::request_header h;

    proc_id_t proc = h.proc;

        IF_LEVEL(1) LOG << "unmarshall header failed";

    IF_LEVEL(2) LOG << "rpc " << h.xid << " (proc " << std::hex << proc << ", last_rep "
                    << std::dec << h.xid_rep << ") from clt " << h.clt_nonce << " for srv instance " << h.srv_nonce;

    rpc_protocol::reply_header rh{h.xid, 0};

    // is client sending to an old instance of server?
    if (h.srv_nonce != 0 && h.srv_nonce != nonce_) {
        IF_LEVEL(2) LOG << "rpc for an old server instance " << h.srv_nonce
                        << " (current " << nonce_ << ") proc " << std::hex << h.proc;
        rh.ret = rpc_protocol::oldsrv_failure;
        rep.write_header(rh);

    // is RPC proc a registered procedure?

        if (procs_.count(proc) < 1) {
            LOG << "unknown proc 0x" << std::hex << proc << " with h.srv_nonce=" << h.srv_nonce << ", my srv_nonce=" << nonce_;

    // have i seen this client before?

        lock rwl(reply_window_m_);
        // if we don't know about this clt_nonce, create a cleanup object
        if (reply_window_.find(h.clt_nonce) == reply_window_.end()) {
            VERIFY(reply_window_[h.clt_nonce].size() == 0); // create
            reply_window_[h.clt_nonce].push_back(reply_t(-1)); // store starting reply xid
            IF_LEVEL(2) LOG << "new client " << h.clt_nonce << " xid " << h.xid
                            << " chan " << c->fd << ", total clients " << (reply_window_.size()-1);

    // save the latest good connection to the client

        if (conns_.find(h.clt_nonce) == conns_.end())
            conns_[h.clt_nonce] = c;
        else if (conns_[h.clt_nonce]->create_time < c->create_time)
            conns_[h.clt_nonce] = c;

    switch (check_duplicate_and_update(h.clt_nonce, h.xid, h.xid_rep, b1)) {
        case NEW: // new request
            rh.ret = (*f)(std::forward<unmarshall>(req), rep);
            if (rh.ret == rpc_protocol::unmarshall_args_failure) {
                LOG << "failed to unmarshall the arguments. You are "
                    << "probably calling RPC 0x" << std::hex << proc << " with the wrong "
                    << "types of arguments.";

            rep.write_header(rh);

            IF_LEVEL(2) LOG << "sending and saving reply of size " << b1.size() << " for rpc "
                            << h.xid << ", proc " << std::hex << proc << " ret " << std::dec
                            << rh.ret << ", clt " << h.clt_nonce;

            add_reply(h.clt_nonce, h.xid, b1);

            // get the latest connection to the client

                c = conns_[h.clt_nonce];

        case INPROGRESS: // server is working on this request

        case DONE: // duplicate and we still have the response

        case FORGOTTEN: // very old request and we don't have the response anymore
            IF_LEVEL(2) LOG << "very old request " << h.xid << " from " << h.clt_nonce;
            rh.ret = rpc_protocol::atmostonce_failure;
            rep.write_header(rh);

// rpcs::dispatch calls this when an RPC request arrives.
//
// checks to see if an RPC with xid from clt_nonce has already been received.
// if not, remembers the request in reply_window_.
//
// deletes remembered requests with XIDs <= xid_rep; the client
// says it has received a reply for every RPC up through xid_rep.
// frees the reply_t::buf of each such request.
//
// returns one of:
//   NEW: never seen this xid before.
//   INPROGRESS: seen this xid, and still processing it.
//   DONE: seen this xid, previous reply returned in b.
//   FORGOTTEN: might have seen this xid, but deleted previous reply.
rpcs::check_duplicate_and_update(nonce_t clt_nonce, xid_t xid,
                                 xid_t xid_rep, string & b)
{
    lock rwl(reply_window_m_);

    list<reply_t> & l = reply_window_[clt_nonce];

    VERIFY(l.size() > 0);
    VERIFY(xid >= xid_rep);

    xid_t past_xid_rep = l.begin()->xid;

    list<reply_t>::iterator start = l.begin(), it = ++start;

    if (past_xid_rep < xid_rep || past_xid_rep == -1) {
        // scan for deletion candidates
        while (it != l.end() && it->xid < xid_rep)

        l.begin()->xid = xid_rep;

    if (xid < past_xid_rep && past_xid_rep != -1)

    // skip non-deletion candidates
    while (it != l.end() && it->xid < xid)

    // if it's in the list it must be right here
    if (it != l.end() && it->xid == xid) {
        if (it->cb_present) {
            // return information about the remembered reply

    // remember that a new request has arrived
    l.insert(it, reply_t(xid));

// rpcs::dispatch calls add_reply when it is sending a reply to an RPC,
// and passes the return value in b.
// add_reply() should remember b.
void rpcs::add_reply(nonce_t clt_nonce, xid_t xid, const string & b) {
    lock rwl(reply_window_m_);
    // remember the RPC reply value
    list<reply_t> & l = reply_window_[clt_nonce];
    list<reply_t>::iterator it = l.begin();
    // skip to our place in the list
    for (it++; it != l.end() && it->xid < xid; it++);
    // there should already be an entry, so whine if there isn't
    if (it == l.end() || it->xid != xid) {
        LOG << "Could not find reply struct in add_reply";
        l.insert(it, reply_t(xid, b));
    } else {
        *it = reply_t(xid, b);
    }
}

rpc_protocol::status rpcs::rpcbind(nonce_t & r) {
    IF_LEVEL(2) LOG << "called return nonce " << nonce_;

static sockaddr_in make_sockaddr(const string & hostandport) {
    string host = "127.0.0.1";
    string port = hostandport;
    auto colon = hostandport.find(':');
    if (colon != string::npos) {
        host = hostandport.substr(0, colon);
        port = hostandport.substr(colon+1);
    }

    sockaddr_in dst = sockaddr_in(); // zero initialize
    dst.sin_family = AF_INET;

    struct in_addr a{inet_addr(host.c_str())};

    if (a.s_addr != INADDR_NONE)
        dst.sin_addr.s_addr = a.s_addr;

        struct hostent *hp = gethostbyname(host.c_str());

        if (!hp || hp->h_length != 4 || hp->h_addrtype != AF_INET) {
            LOG_NONMEMBER << "cannot find host name " << host;

        memcpy(&a, hp->h_addr_list[0], sizeof(in_addr_t));
        dst.sin_addr.s_addr = a.s_addr;

    dst.sin_port = hton((in_port_t)std::stoi(port));