// Replicated state machine implementation with a primary and several
// backups. The primary receives requests, assigns each a view stamp (a
// vid and a sequence number) in the order of reception, and forwards
// them to all backups. A backup executes requests in the order that
// the primary stamps them and replies with an OK to the primary. The
// primary executes the request after it receives OKs from all backups,
// and sends the reply back to the client.
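
// A minimal sketch (illustration only; the real viewstamp type is defined
// elsewhere in this codebase) of the (vid, seqno) pair the primary assigns
// and of the order in which requests are executed:
struct viewstamp_sketch {
    unsigned vid;    // view in which the request was stamped
    unsigned seqno;  // position of the request within that view
};

// Requests are ordered lexicographically by viewstamp.
inline bool operator<(viewstamp_sketch a, viewstamp_sketch b) {
    return a.vid < b.vid || (a.vid == b.vid && a.seqno < b.seqno);
}
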
// The config module will tell the RSM about a new view. If the
// primary in the previous view is a member of the new view, then it
// will stay the primary. Otherwise, the smallest numbered node of
// the previous view will be the new primary. In either case, the new
// primary will be a node from the previous view. The configuration
// module constructs the sequence of views for the RSM, and the RSM
// ensures there will always be one primary, who was a member of the
// previous view.
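
// A sketch of the selection rule above (illustration only; the real
// implementation is rsm::set_primary further down). Views are assumed to be
// vectors of node names ordered by node number.
#include <string>
#include <vector>

static inline std::string new_primary_sketch(const std::vector<std::string> &prev_view,
                                             const std::vector<std::string> &new_view,
                                             const std::string &old_primary) {
    // The old primary keeps the role if it survived into the new view.
    for (const auto &n : new_view)
        if (n == old_primary)
            return old_primary;
    // Otherwise the smallest numbered member of the previous view that also
    // appears in the new view becomes the new primary.
    for (const auto &p : prev_view)
        for (const auto &n : new_view)
            if (n == p)
                return p;
    // Unreachable if, as stated above, the new primary is always a node from
    // the previous view.
    return old_primary;
}
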
// When a new node starts, the recovery thread is in charge of joining
// the RSM. It will collect the internal RSM state from the primary;
// the primary asks the config module to add the new node and returns
// to the joining node the internal RSM state (e.g., the Paxos log). Since
// there is only one primary, all joins happen in a well-defined total
// order.
// The recovery thread also runs during a view change (e.g., when a node
// has failed). After a failure some of the backups could have
// processed a request that the primary has not, but those results are
// not visible to clients (since only the primary responds to clients).
// If the primary of the previous view is in the current view, then it
// will be the primary and its state is authoritative: the backups
// download the current state from the primary. A primary waits until
// all backups have downloaded the state. Once the RSM is in sync, the
// primary accepts requests again from clients. If one of the backups
// is the new primary, then its state is authoritative. In either
// scenario, the next view uses as primary a node that has the state
// resulting from processing all acknowledged client requests.
// Therefore, if the nodes sync up before processing the next request,
// the next view will have the correct state.
// While the RSM is in a view change (i.e., a node has failed, a new view
// has been formed, but the sync hasn't completed), another failure
// could happen, which complicates the view change. During syncing the
// primary or backups can time out and initiate another Paxos round.
// There are two variables that the RSM uses to keep track of what state
// it is in:
// - inviewchange: a node has failed and the RSM is performing a view change
// - insync: this node is syncing its state
//
// If inviewchange is false and a node is the primary, then it can
// process client requests. If it is true, clients are told to retry
// again later. While inviewchange is true, the RSM may go through several
// member list changes, one by one. After a member list
// change completes, the node tries to sync. If the sync completes,
// the view change completes (and inviewchange is set to false). If
// the sync fails, the node may start another member list change
// (inviewchange = true and insync = false).
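
// A sketch of how a request is gated on these flags (illustration only; the
// real checks are in rsm::client_invoke below). Names here are made up for
// the example.
enum class admit_sketch { ok, busy, notprimary };

static inline admit_sketch admit_request_sketch(bool in_viewchange, bool i_am_primary) {
    if (in_viewchange) return admit_sketch::busy;        // view change in progress: client retries later
    if (!i_am_primary) return admit_sketch::notprimary;  // only the primary stamps and forwards requests
    return admit_sketch::ok;                             // assign the next viewstamp and invoke the backups
}
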
// The implementation should be used only with servers that run all
// requests to completion; in particular, a request shouldn't
// block. If a request blocks, the backup won't respond to the
// primary, and the primary won't execute the request. A request may
// send an RPC to another host, but the RPC should be a one-way
// message to that host; the backup shouldn't do anything based on the
// response, or execute after the response, because it is not
// guaranteed that all backups will receive the same response and
// execute in the same order.
// The implementation can be viewed as a layered system:
//   RSM module    ---- in charge of replication
//   config module ---- in charge of view management
//   Paxos module  ---- in charge of running Paxos to agree on a value
//
// Each module has threads and internal locks. Furthermore, a thread
// may call down through the layers (e.g., to run Paxos's proposer).
// When Paxos's acceptor accepts a new value for an instance, a thread
// will invoke an upcall to inform higher layers of the new value.
// The rule is that a module releases its internal locks before it
// upcalls, but can keep its locks when calling down.
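
// A sketch of the lock discipline described above (illustration only; the
// names and the std::function plumbing are made up for the example). A layer
// updates its own state under its lock, drops the lock before an upcall to
// the layer above, but may keep holding it when calling down.
#include <functional>
#include <mutex>

struct layer_sketch {
    std::mutex mtx;                   // this layer's internal lock
    std::function<void()> upcall;     // installed by the layer above
    std::function<void()> downcall;   // provided by the layer below

    void on_new_value() {             // e.g., Paxos accepted a new value
        std::unique_lock<std::mutex> l(mtx);
        // ... update this layer's state while holding the lock ...
        l.unlock();                   // rule: release before calling up
        if (upcall) upcall();
    }

    void propose() {
        std::unique_lock<std::mutex> l(mtx);
        if (downcall) downcall();     // calling down: the lock may stay held
    }
};
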
#include <sys/types.h>

#include "threaded_log.h"
#include "lang/verify.h"
#include "rsm_client.h"

rsm::rsm(std::string _first, std::string _me) :
    stf(0), primary(_first), insync (false), inviewchange (true), vid_commit(0),
    partitioned (false), dopartition(false), break1(false), break2(false)
{
    cfg = new config(_first, _me, this);

    // Commit the first view here. We cannot have acceptor::acceptor
    // do the commit, since at that time this->cfg is not initialized.

    rsmrpc = cfg->get_rpcs();
    rsmrpc->reg(rsm_client_protocol::invoke, &rsm::client_invoke, this);
    rsmrpc->reg(rsm_client_protocol::members, &rsm::client_members, this);
    rsmrpc->reg(rsm_protocol::invoke, &rsm::invoke, this);
    rsmrpc->reg(rsm_protocol::transferreq, &rsm::transferreq, this);
    rsmrpc->reg(rsm_protocol::transferdonereq, &rsm::transferdonereq, this);
    rsmrpc->reg(rsm_protocol::joinreq, &rsm::joinreq, this);

    // tester must be on a different port, otherwise it may partition itself
    testsvr = new rpcs((uint32_t)std::stoi(_me) + 1);
    testsvr->reg(rsm_test_protocol::net_repair, &rsm::test_net_repairreq, this);
    testsvr->reg(rsm_test_protocol::breakpoint, &rsm::breakpointreq, this);

    std::thread(&rsm::recovery, this).detach();

void rsm::reg1(int proc, handler *h) {

// The recovery thread runs this function
[[noreturn]] void rsm::recovery() {
    while (!cfg->ismember(cfg->myaddr(), vid_commit)) {
        // XXX iannucci 2013/09/15 -- I don't understand whether accessing
        // cfg->view_id in this manner involves a race. I suspect not.
        if (join(primary, ml)) {
            LOG("recovery: joined");
            commit_change(cfg->view_id(), ml);

        std::this_thread::sleep_for(std::chrono::seconds(30)); // XXX make another node in cfg primary?

    vid_insync = vid_commit;
    LOG("recovery: sync vid_insync " << vid_insync);
    if (primary == cfg->myaddr()) {
        r = sync_with_backups(ml);

        r = sync_with_primary(ml);

    LOG("recovery: sync done");

    // If there was a committed view change during the synchronization, restart
    if (vid_insync != vid_commit)

    myvs.vid = vid_commit;
    inviewchange = false;
    LOG("recovery: go to sleep " << insync << " " << inviewchange);
    recovery_cond.wait(ml);

bool rsm::sync_with_backups(lock & rsm_mutex_lock) {
    rsm_mutex_lock.unlock();

    // Make sure that the state of lock_server is stable during
    // synchronization; otherwise, the primary's state may be more recent
    // than the replicas' after the synchronization.
    lock invoke_mutex_lock(invoke_mutex);
    // By acquiring and releasing the invoke_mutex once, we make sure that
    // the state of lock_server will not be changed until all
    // replicas are synchronized. The reason is that a client_invoke that
    // arrives after this point in time will see inviewchange == true and
    // return BUSY.

    rsm_mutex_lock.lock();
    // Start accepting synchronization requests (statetransferreq) now!

    cfg->get_view(vid_insync, backups);
    backups.erase(find(backups.begin(), backups.end(), cfg->myaddr()));
    LOG("rsm::sync_with_backups " << make_iterator_pair(backups.begin(), backups.end()));
    sync_cond.wait(rsm_mutex_lock);

bool rsm::sync_with_primary(lock & rsm_mutex_lock) {
    // Remember the primary of vid_insync
    std::string m = primary;
    while (vid_insync == vid_commit) {
        if (statetransfer(m, rsm_mutex_lock))

    return statetransferdone(m, rsm_mutex_lock);

/*
 * Call to transfer state from m to the local node.
 * Assumes that rsm_mutex is already held.
 */
bool rsm::statetransfer(std::string m, lock & rsm_mutex_lock)
{
    rsm_protocol::transferres r;
    LOG("rsm::statetransfer: contact " << m << " w. my last_myvs(" << last_myvs.vid << "," << last_myvs.seqno << ")");
    rsm_mutex_lock.unlock();
    ret = cl->call_timeout(rsm_protocol::transferreq, rpcc::to(1000),
                           r, cfg->myaddr(), last_myvs, vid_insync);
    rsm_mutex_lock.lock();
    if (cl == 0 || ret != rsm_protocol::OK) {
        LOG("rsm::statetransfer: couldn't reach " << m << " " << std::hex << cl << " " << std::dec << ret);
    if (stf && last_myvs != r.last) {
        stf->unmarshal_state(r.state);
    LOG("rsm::statetransfer transfer from " << m << " success, vs(" << last_myvs.vid << "," << last_myvs.seqno << ")");

bool rsm::statetransferdone(std::string m, lock & rsm_mutex_lock) {
    rsm_mutex_lock.unlock();
    rpcc *cl = h.safebind();
    auto ret = (rsm_protocol::status)cl->call(rsm_protocol::transferdonereq, r, cfg->myaddr(), vid_insync);
    done = (ret == rsm_protocol::OK);
    rsm_mutex_lock.lock();

bool rsm::join(std::string m, lock & rsm_mutex_lock) {
    rsm_protocol::joinres r;
    LOG("rsm::join: " << m << " mylast (" << last_myvs.vid << "," << last_myvs.seqno << ")");
    rsm_mutex_lock.unlock();
    ret = cl->call_timeout(rsm_protocol::joinreq, rpcc::to(120000), r,
                           cfg->myaddr(), last_myvs);
    rsm_mutex_lock.lock();
    if (cl == 0 || ret != rsm_protocol::OK) {
        LOG("rsm::join: couldn't reach " << m << " " << std::hex << cl << " " << std::dec << ret);
    LOG("rsm::join: succeeded " << r.log);

/*
 * Config informs rsm whenever it has successfully
 * completed a view change
 */
void rsm::commit_change(unsigned vid) {
    commit_change(vid, ml);
    if (cfg->ismember(cfg->myaddr(), vid_commit))

void rsm::commit_change(unsigned vid, lock &) {
    if (vid <= vid_commit)

    LOG("commit_change: new view (" << vid << ") last vs (" << last_myvs.vid << "," <<
        last_myvs.seqno << ") " << primary << " insync " << insync);

    recovery_cond.notify_one();
    sync_cond.notify_one();

    if (cfg->ismember(cfg->myaddr(), vid_commit))

void rsm::execute(int procno, std::string req, std::string &r) {
    handler *h = procs[procno];
    unmarshall args(req);
    auto ret = (rsm_protocol::status)(*h)(args, rep);

// Clients call client_invoke to invoke a procedure on the replicated state
// machine: the primary receives the request, assigns it a sequence
// number, and invokes it on all members of the replicated state
// machine.
rsm_client_protocol::status rsm::client_invoke(std::string &r, int procno, std::string req) {
    LOG("rsm::client_invoke: procno 0x" << std::hex << procno);
    lock ml(invoke_mutex);
    std::vector<std::string> m;

    LOG("Checking for inviewchange");
        return rsm_client_protocol::BUSY;
    LOG("Checking for primacy");
    myaddr = cfg->myaddr();
    if (primary != myaddr)
        return rsm_client_protocol::NOTPRIMARY;
    LOG("Assigning a viewstamp");
    cfg->get_view(vid_commit, m);
    // assign the RPC the next viewstamp number

    // send an invoke RPC to all slaves in the current view with a timeout of 1 second
    LOG("Invoking slaves");
    for (unsigned i = 0; i < m.size(); i++) {
        if (m[i] != myaddr) {
            // if invoke on a slave fails, return rsm_client_protocol::BUSY
            LOG("Sending invoke to " << m[i]);
            rpcc *cl = h.safebind();
                return rsm_client_protocol::BUSY;
            auto ret = (rsm_protocol::status)cl->call_timeout(rsm_protocol::invoke, rpcc::to(1000), ignored_rval, procno, vs, req);
            LOG("Invoke returned " << ret);
            if (ret != rsm_protocol::OK)
                return rsm_client_protocol::BUSY;

    lock rsm_mutex_lock(rsm_mutex);
    partition1(rsm_mutex_lock);

    execute(procno, req, r);

    return rsm_client_protocol::OK;

// The primary calls the internal invoke at each member of the
// replicated state machine.
//
// The replica must execute requests in order (with no gaps)
// according to the requests' seqno.
rsm_protocol::status rsm::invoke(int &, int proc, viewstamp vs, std::string req) {
    LOG("rsm::invoke: procno 0x" << std::hex << proc);
    lock ml(invoke_mutex);
    std::vector<std::string> m;

    // check if !inviewchange
    LOG("Checking for view change");
        return rsm_protocol::ERR;

    LOG("Checking for slave status");
    myaddr = cfg->myaddr();
    if (primary == myaddr)
        return rsm_protocol::ERR;
    cfg->get_view(vid_commit, m);
    if (find(m.begin(), m.end(), myaddr) == m.end())
        return rsm_protocol::ERR;
    // check sequence number
    LOG("Checking sequence number");
        return rsm_protocol::ERR;

    execute(proc, req, r);

    return rsm_protocol::OK;

/*
 * RPC handler: Send back the local node's state to the caller
 */
rsm_protocol::status rsm::transferreq(rsm_protocol::transferres &r, std::string src,
        viewstamp last, unsigned vid) {
    LOG("transferreq from " << src << " (" << last.vid << "," << last.seqno << ") vs (" <<
        last_myvs.vid << "," << last_myvs.seqno << ")");
    if (!insync || vid != vid_insync)
        return rsm_protocol::BUSY;
    if (stf && last != last_myvs)
        r.state = stf->marshal_state();
    return rsm_protocol::OK;

/*
 * RPC handler: Inform the local node (the primary) that node m has synchronized
 */
rsm_protocol::status rsm::transferdonereq(int &, std::string m, unsigned vid) {
    if (!insync || vid != vid_insync)
        return rsm_protocol::BUSY;
    backups.erase(find(backups.begin(), backups.end(), m));

    sync_cond.notify_one();
    return rsm_protocol::OK;

// a node that wants to join an RSM as a server sends a
// joinreq to the RSM's current primary; this is the
// handler for that RPC.
rsm_protocol::status rsm::joinreq(rsm_protocol::joinres &r, std::string m, viewstamp last) {
    auto ret = rsm_protocol::OK;

    LOG("joinreq: src " << m << " last (" << last.vid << "," << last.seqno << ") mylast (" <<
        last_myvs.vid << "," << last_myvs.seqno << ")");
    if (cfg->ismember(m, vid_commit)) {
        LOG("joinreq: is still a member");
    } else if (cfg->myaddr() != primary) {
        LOG("joinreq: busy");
        ret = rsm_protocol::BUSY;

        // We cache vid_commit to avoid adding m to a view which already
        // contains m, due to a race condition
        unsigned vid_cache = vid_commit;

        succ = cfg->add(m, vid_cache);

        if (cfg->ismember(m, cfg->view_id())) {
            LOG("joinreq: ret " << ret << " log " << r.log);

            LOG("joinreq: failed; proposer couldn't add " << succ);
            ret = rsm_protocol::BUSY;

/*
 * RPC handler: Send back all the nodes this local node knows about to the
 * client, so the client can switch to a different primary
 * when its existing primary fails
 */
rsm_client_protocol::status rsm::client_members(std::vector<std::string> &r, int) {
    std::vector<std::string> m;
    cfg->get_view(vid_commit, m);
    m.push_back(primary);

    LOG("rsm::client_members return " << print_members(m) << " m " << primary);
    return rsm_client_protocol::OK;

// If the primary is a member of the new view, that node stays primary;
// otherwise, the lowest numbered node of the previous view becomes the
// new primary. Caller should hold rsm_mutex.
void rsm::set_primary(unsigned vid) {
    std::vector<std::string> c, p;
    cfg->get_view(vid, c);
    cfg->get_view(vid - 1, p);
    VERIFY (c.size() > 0);

    if (isamember(primary, c)) {
        LOG("set_primary: primary stays " << primary);

    VERIFY(p.size() > 0);
    for (unsigned i = 0; i < p.size(); i++) {
        if (isamember(p[i], c)) {
            LOG("set_primary: primary is " << primary);

bool rsm::amiprimary() {
    return primary == cfg->myaddr() && !inviewchange;

// Simulate partitions

// assumes caller holds rsm_mutex
void rsm::net_repair(bool heal, lock &) {
    std::vector<std::string> m;
    cfg->get_view(vid_commit, m);
    for (unsigned i = 0; i < m.size(); i++) {
        if (m[i] != cfg->myaddr()) {
            LOG("rsm::net_repair: " << m[i] << " " << heal);
            if (h.safebind()) h.safebind()->set_reachable(heal);

    rsmrpc->set_reachable(heal);

rsm_test_protocol::status rsm::test_net_repairreq(rsm_test_protocol::status &r, int heal) {
    LOG("rsm::test_net_repairreq: " << heal << " (dopartition " <<
        dopartition << ", partitioned " << partitioned << ")");

    net_repair(heal, ml);

    r = rsm_test_protocol::OK;

// Simulate failures at breakpoints 1 and 2

void rsm::breakpoint1() {
    LOG("Dying at breakpoint 1 in rsm!");

void rsm::breakpoint2() {
    LOG("Dying at breakpoint 2 in rsm!");

void rsm::partition1(lock & rsm_mutex_lock) {
    net_repair(false, rsm_mutex_lock);

rsm_test_protocol::status rsm::breakpointreq(rsm_test_protocol::status &r, int b) {
    r = rsm_test_protocol::OK;
    LOG("rsm::breakpointreq: " << b);
    if (b == 1) break1 = true;
    else if (b == 2) break2 = true;
    else if (b == 3 || b == 4) cfg->breakpoint(b);
    else r = rsm_test_protocol::ERR;