// Replicated state machine implementation with a primary and several
// backups. The primary receives requests, assigns each a view stamp (a
// vid, and a sequence number) in the order of reception, and forwards
// them to all backups. A backup executes requests in the order that
// the primary stamps them and replies with an OK to the primary. The
// primary executes the request after it receives OKs from all backups,
// and sends the reply back to the client.
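//
// A minimal sketch of the stamping step described above. The names
// example_viewstamp and example_next_stamp are hypothetical (the real
// viewstamp type comes from the protocol headers); the sketch only
// illustrates how the primary hands out (vid, seqno) pairs in the order
// requests arrive, which is the order backups must execute them in.

struct example_viewstamp {
  unsigned int vid;    // view in which the request was stamped
  unsigned int seqno;  // position of the request within that view
};

// The primary stamps each incoming request with the current view id and
// the next sequence number; backups execute strictly in seqno order.
static example_viewstamp example_next_stamp(unsigned int vid, unsigned int &next_seqno) {
  example_viewstamp vs;
  vs.vid = vid;
  vs.seqno = next_seqno++;
  return vs;
}
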
// The config module will tell the RSM about a new view. If the
// primary in the previous view is a member of the new view, then it
// will stay the primary. Otherwise, the smallest numbered node of
// the previous view will be the new primary. In either case, the new
// primary will be a node from the previous view. The configuration
// module constructs the sequence of views for the RSM and the RSM
// ensures there will always be one primary, who was a member of the
// previous view.
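//
// A minimal sketch of the primary-selection rule above, assuming views
// are vectors of node addresses ordered by node number. The free-standing
// name example_choose_primary is hypothetical; the member function
// rsm::set_primary() below implements the same rule on the real state.

#include <string>
#include <vector>
#include <algorithm>

static std::string example_choose_primary(const std::string &old_primary,
                                          const std::vector<std::string> &prev_view,
                                          const std::vector<std::string> &new_view) {
  // If the old primary survived into the new view, it stays primary.
  if (std::find(new_view.begin(), new_view.end(), old_primary) != new_view.end())
    return old_primary;
  // Otherwise pick the smallest numbered node of the previous view
  // that is also a member of the new view.
  for (unsigned i = 0; i < prev_view.size(); i++)
    if (std::find(new_view.begin(), new_view.end(), prev_view[i]) != new_view.end())
      return prev_view[i];
  return std::string();  // unreachable if consecutive views overlap
}
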
// When a new node starts, the recovery thread is in charge of joining
// the RSM. It will collect the internal RSM state from the primary;
// the primary asks the config module to add the new node and returns
// to the joining node the internal RSM state (e.g., the paxos log). Since
// there is only one primary, all joins happen in a well-defined total
// order.
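//
// A minimal sketch of the join handshake from the joining node's side.
// The RPC is abstracted as a function-pointer parameter and every name
// here is hypothetical; the real path is rsm::recovery() calling
// rsm::join(), which the primary serves in rsm::joinreq() below.

#include <string>

static bool example_join(const std::string &primary_addr,
                         bool (*call_joinreq)(const std::string &addr, std::string &log_out),
                         std::string &paxos_log) {
  // One RPC to the primary: it adds us to the view through the config
  // module and hands back its internal state (the paxos log), which the
  // joiner installs before it starts participating.
  return call_joinreq(primary_addr, paxos_log);
}
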
// The recovery thread also runs during a view change (e.g., when a node
// has failed). After a failure some of the backups could have
// processed a request that the primary has not, but those results are
// not visible to clients (since the primary responds). If the
// primary of the previous view is in the current view, then it will
// be the primary and its state is authoritative: the backups download
// the current state from the primary. A primary waits until all
// backups have downloaded the state. Once the RSM is in sync, the
// primary accepts requests again from clients. If one of the backups
// is the new primary, then its state is authoritative. In either
// scenario, the next view uses as primary a node that has the state
// resulting from processing all acknowledged client requests.
// Therefore, if the nodes sync up before processing the next request,
// the next view will have the correct state.
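//
// A minimal sketch of the bookkeeping the primary does while it waits
// for every backup to download its state. The helper below is
// hypothetical; the real code is sync_with_backups() and
// transferdonereq() further down, which keep the pending list in the
// backups member and signal sync_cond when it empties.

#include <string>
#include <vector>
#include <algorithm>

// Cross a backup off the pending list when its transfer-done message
// arrives; the sync phase is over once the list is empty.
static bool example_mark_backup_synced(std::vector<std::string> &pending,
                                       const std::string &backup) {
  std::vector<std::string>::iterator it =
      std::find(pending.begin(), pending.end(), backup);
  if (it != pending.end())
    pending.erase(it);
  return pending.empty();
}
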
// While the RSM is in a view change (i.e., a node has failed, a new view
// has been formed, but the sync hasn't completed), another failure
// could happen, which complicates a view change. During syncing the
// primary or backups can time out, and initiate another Paxos round.
// There are two variables that the RSM uses to keep track of what state it
// is in:
//    - inviewchange: a node has failed and the RSM is performing a view change
//    - insync: this node is syncing its state
//
// If inviewchange is false and a node is the primary, then it can
// process client requests. If it is true, clients are told to retry
// again later. While inviewchange is true, the RSM may go through several
// member list changes, one by one. After a member list
// change completes, the node tries to sync. If the sync completes,
// the view change completes (and inviewchange is set to false). If
// the sync fails, the node may start another member list change
// (inviewchange = true and insync = false).
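//
// A minimal sketch of the flag transitions described above
// (hypothetical helper; the real loop is rsm::recovery() below): each
// round of a view change raises insync for the state transfer, and only
// a successful sync clears inviewchange so clients are served again.

static void example_view_change_round(bool &inviewchange, bool &insync,
                                      bool sync_succeeded) {
  insync = true;           // start syncing state for the new member list
  // ... state transfer with the other members happens here ...
  insync = false;
  if (sync_succeeded)
    inviewchange = false;  // view change complete; accept client requests
  // otherwise inviewchange stays true and another member list change may follow
}
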
// The implementation should be used only with servers that run all
// requests to completion; in particular, a request shouldn't
// block. If a request blocks, the backup won't respond to the
// primary, and the primary won't execute the request. A request may
// send an RPC to another host, but the RPC should be a one-way
// message to that host; the backup shouldn't do anything based on the
// response or execute after the response, because it is not
// guaranteed that all backups will receive the same response and
// execute in the same order.
//
// The implementation can be viewed as a layered system:
//       RSM module     ---- in charge of replication
//       config module  ---- in charge of view management
//       Paxos module   ---- in charge of running Paxos to agree on a value
//
// Each module has threads and internal locks. Furthermore, a thread
// may call down through the layers (e.g., to run Paxos's proposer).
// When Paxos's acceptor accepts a new value for an instance, a thread
// will invoke an upcall to inform higher layers of the new value.
// The rule is that a module releases its internal locks before it
// upcalls, but can keep its locks when calling down.
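//
// A minimal sketch of the locking rule above, using a raw pthread mutex
// and hypothetical names (the real code uses ScopedLock on each
// module's mutex): the module updates its own state under its lock,
// drops the lock, and only then delivers the upcall, so the higher
// layer is free to call back down into this module without deadlock.

#include <pthread.h>

static void example_deliver_upcall(pthread_mutex_t *module_lock,
                                   void (*upcall)(void *), void *arg) {
  pthread_mutex_lock(module_lock);
  // ... update this module's internal state while holding the lock ...
  pthread_mutex_unlock(module_lock);  // release before calling up
  upcall(arg);                        // the upcall may call back down
}
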
#include <sys/types.h>
#include "lang/verify.h"
#include "rsm_client.h"

static void *recoverythread(void *x) {

rsm::rsm(std::string _first, std::string _me) :
  stf(0), primary(_first), insync (false), inviewchange (true), vid_commit(0),
  partitioned (false), dopartition(false), break1(false), break2(false)

  cfg = new config(_first, _me, this);

  // Commit the first view here. We cannot have acceptor::acceptor
  // do the commit, since at that time this->cfg is not initialized.

  rsmrpc = cfg->get_rpcs();
  rsmrpc->reg(rsm_client_protocol::invoke, this, &rsm::client_invoke);
  rsmrpc->reg(rsm_client_protocol::members, this, &rsm::client_members);
  rsmrpc->reg(rsm_protocol::invoke, this, &rsm::invoke);
  rsmrpc->reg(rsm_protocol::transferreq, this, &rsm::transferreq);
  rsmrpc->reg(rsm_protocol::transferdonereq, this, &rsm::transferdonereq);
  rsmrpc->reg(rsm_protocol::joinreq, this, &rsm::joinreq);

  // the tester must be on a different port, otherwise it may partition itself
  testsvr = new rpcs(atoi(_me.c_str()) + 1);
  testsvr->reg(rsm_test_protocol::net_repair, this, &rsm::test_net_repairreq);
  testsvr->reg(rsm_test_protocol::breakpoint, this, &rsm::breakpointreq);

  ScopedLock ml(rsm_mutex);
  VERIFY(pthread_create(&th, NULL, &recoverythread, (void *) this) == 0);

void rsm::reg1(int proc, handler *h) {
  ScopedLock ml(rsm_mutex);

// The recovery thread runs this function
void rsm::recovery() {
  ScopedLock ml(rsm_mutex);
  while (!cfg->ismember(cfg->myaddr(), vid_commit)) {
      tprintf("recovery: joined\n");
      commit_change_wo(cfg->vid());
      ScopedUnlock su(rsm_mutex);
      sleep (30); // XXX make another node in cfg primary?
    vid_insync = vid_commit;
    tprintf("recovery: sync vid_insync %d\n", vid_insync);
    if (primary == cfg->myaddr()) {
      r = sync_with_backups();
      r = sync_with_primary();
    tprintf("recovery: sync done\n");
    // If there was a committed view change during the synchronization, restart
    if (vid_insync != vid_commit)
    myvs.vid = vid_commit;
    inviewchange = false;
    tprintf("recovery: go to sleep %d %d\n", insync, inviewchange);
    recovery_cond.wait(rsm_mutex);

template <class A>
std::ostream & operator<<(std::ostream &o, const std::vector<A> &d) {
  for (typename std::vector<A>::const_iterator i=d.begin(); i!=d.end(); i++) {

bool rsm::sync_with_backups() {
    ScopedUnlock su(rsm_mutex);
    // Make sure that the state of lock_server_cache_rsm is stable during
    // synchronization; otherwise, the primary's state may be more recent
    // than the replicas' after the synchronization.
    ScopedLock ml(invoke_mutex);
    // By acquiring and releasing the invoke_mutex once, we make sure that
    // the state of lock_server_cache_rsm will not be changed until all
    // replicas are synchronized. The reason is that a client_invoke that
    // arrives after this point in time will see inviewchange == true and
    // return BUSY.
  // Start accepting synchronization requests (statetransferreq) now!
  backups = std::vector<std::string>(cfg->get_view(vid_insync));
  backups.erase(find(backups.begin(), backups.end(), cfg->myaddr()));
  LOG("rsm::sync_with_backups " << backups);
  sync_cond.wait(rsm_mutex);

bool rsm::sync_with_primary() {
  // Remember the primary of vid_insync
  std::string m = primary;
  while (vid_insync == vid_commit) {
    if (statetransfer(m))
  return statetransferdone(m);

/*
 * Call to transfer state from m to the local node.
 * Assumes that rsm_mutex is already held.
 */
bool rsm::statetransfer(std::string m)
  rsm_protocol::transferres r;
  tprintf("rsm::statetransfer: contact %s w. my last_myvs(%d,%d)\n",
      m.c_str(), last_myvs.vid, last_myvs.seqno);
    ScopedUnlock su(rsm_mutex);
      ret = cl->call(rsm_protocol::transferreq, cfg->myaddr(),
          last_myvs, vid_insync, r, rpcc::to(1000));
  if (cl == 0 || ret != rsm_protocol::OK) {
    tprintf("rsm::statetransfer: couldn't reach %s %lx %d\n", m.c_str(),
        (long unsigned) cl, ret);
  if (stf && last_myvs != r.last) {
    stf->unmarshal_state(r.state);
  tprintf("rsm::statetransfer transfer from %s success, vs(%d,%d)\n",
      m.c_str(), last_myvs.vid, last_myvs.seqno);

bool rsm::statetransferdone(std::string m) {
  ScopedUnlock su(rsm_mutex);
  rpcc *cl = h.safebind();
  rsm_protocol::status ret = cl->call(rsm_protocol::transferdonereq, cfg->myaddr(), vid_insync, r);
  if (ret != rsm_protocol::OK)

bool rsm::join(std::string m) {
  rsm_protocol::joinres r;
  tprintf("rsm::join: %s mylast (%d,%d)\n", m.c_str(), last_myvs.vid,
    ScopedUnlock su(rsm_mutex);
      ret = cl->call(rsm_protocol::joinreq, cfg->myaddr(), last_myvs,
          r, rpcc::to(120000));
  if (cl == 0 || ret != rsm_protocol::OK) {
    tprintf("rsm::join: couldn't reach %s %p %d\n", m.c_str(),
  tprintf("rsm::join: succeeded %s\n", r.log.c_str());

/*
 * Config informs rsm whenever it has successfully
 * completed a view change
 */
void rsm::commit_change(unsigned vid) {
  ScopedLock ml(rsm_mutex);
  commit_change_wo(vid);
  if (cfg->ismember(cfg->myaddr(), vid_commit))

void rsm::commit_change_wo(unsigned vid) {
  if (vid <= vid_commit)
  tprintf("commit_change: new view (%d) last vs (%d,%d) %s insync %d\n",
      vid, last_myvs.vid, last_myvs.seqno, primary.c_str(), insync);
  recovery_cond.signal();
  if (cfg->ismember(cfg->myaddr(), vid_commit))

void rsm::execute(int procno, std::string req, std::string &r) {
  tprintf("execute\n");
  handler *h = procs[procno];
  unmarshall args(req);
  rsm_protocol::status ret = h->fn(args, rep);

// Clients call client_invoke to invoke a procedure on the replicated state
// machine: the primary receives the request, assigns it a sequence
// number, and invokes it on all members of the replicated state
// machine.
rsm_client_protocol::status rsm::client_invoke(int procno, std::string req, std::string &r) {
  LOG("rsm::client_invoke: procno 0x" << std::hex << procno);
  ScopedLock ml(invoke_mutex);
  std::vector<std::string> m;
    ScopedLock ml(rsm_mutex);
    LOG("Checking for inviewchange");
      return rsm_client_protocol::BUSY;
    LOG("Checking for primacy");
    myaddr = cfg->myaddr();
    if (primary != myaddr)
      return rsm_client_protocol::NOTPRIMARY;
    LOG("Assigning a viewstamp");
    m = cfg->get_view(vid_commit);
    // assign the RPC the next viewstamp number
  // send an invoke RPC to all slaves in the current view with a timeout of 1 second
  LOG("Invoking slaves");
  for (unsigned i = 0; i < m.size(); i++) {
    if (m[i] != myaddr) {
      // if invoke on slave fails, return rsm_client_protocol::BUSY
      LOG("Sending invoke to " << m[i]);
      rpcc *cl = h.safebind();
        return rsm_client_protocol::BUSY;
      rsm_protocol::status ret;
        ret = cl->call(rsm_protocol::invoke, procno, vs, req, r, rpcc::to(1000));
      LOG("Invoke returned " << ret);
      if (ret != rsm_protocol::OK)
        return rsm_client_protocol::BUSY;
  execute(procno, req, r);
  return rsm_client_protocol::OK;

// The primary calls the internal invoke at each member of the
// replicated state machine
//
// the replica must execute requests in order (with no gaps)
// according to requests' seqno
rsm_protocol::status rsm::invoke(int proc, viewstamp vs, std::string req, int &dummy) {
  LOG("rsm::invoke: procno 0x" << std::hex << proc);
  ScopedLock ml(invoke_mutex);
  std::vector<std::string> m;
    ScopedLock ml(rsm_mutex);
    // check if !inviewchange
    LOG("Checking for view change");
      return rsm_protocol::ERR;
    LOG("Checking for slave status");
    myaddr = cfg->myaddr();
    if (primary == myaddr)
      return rsm_protocol::ERR;
    m = cfg->get_view(vid_commit);
    if (find(m.begin(), m.end(), myaddr) == m.end())
      return rsm_protocol::ERR;
    // check sequence number
    LOG("Checking sequence number");
      return rsm_protocol::ERR;
  execute(proc, req, r);
  return rsm_protocol::OK;

/*
 * RPC handler: Send back the local node's state to the caller
 */
rsm_protocol::status rsm::transferreq(std::string src, viewstamp last, unsigned vid,
    rsm_protocol::transferres &r) {
  ScopedLock ml(rsm_mutex);
  int ret = rsm_protocol::OK;
  tprintf("transferreq from %s (%d,%d) vs (%d,%d)\n", src.c_str(),
      last.vid, last.seqno, last_myvs.vid, last_myvs.seqno);
  if (!insync || vid != vid_insync) {
    return rsm_protocol::BUSY;
  if (stf && last != last_myvs)
    r.state = stf->marshal_state();

/*
 * RPC handler: Inform the local node (the primary) that node m has synchronized
 */
rsm_protocol::status rsm::transferdonereq(std::string m, unsigned vid, int &) {
  ScopedLock ml(rsm_mutex);
  if (!insync || vid != vid_insync)
    return rsm_protocol::BUSY;
  backups.erase(find(backups.begin(), backups.end(), m));
  return rsm_protocol::OK;

// a node that wants to join an RSM as a server sends a
// joinreq to the RSM's current primary; this is the
// handler for that RPC.
rsm_protocol::status rsm::joinreq(std::string m, viewstamp last, rsm_protocol::joinres &r) {
  int ret = rsm_protocol::OK;
  ScopedLock ml(rsm_mutex);
  tprintf("joinreq: src %s last (%d,%d) mylast (%d,%d)\n", m.c_str(),
      last.vid, last.seqno, last_myvs.vid, last_myvs.seqno);
  if (cfg->ismember(m, vid_commit)) {
    tprintf("joinreq: is still a member\n");
  } else if (cfg->myaddr() != primary) {
    tprintf("joinreq: busy\n");
    ret = rsm_protocol::BUSY;
    // We cache vid_commit to avoid adding m to a view which already
    // contains m due to a race condition
    unsigned vid_cache = vid_commit;
      ScopedUnlock su(rsm_mutex);
      succ = cfg->add(m, vid_cache);
    if (cfg->ismember(m, cfg->vid())) {
      tprintf("joinreq: ret %d log %s\n", ret, r.log.c_str());
      tprintf("joinreq: failed; proposer couldn't add %d\n", succ);
      ret = rsm_protocol::BUSY;

/*
 * RPC handler: Send back all the nodes this local node knows about to the client,
 * so the client can switch to a different primary
 * when its existing primary fails
 */
rsm_client_protocol::status rsm::client_members(int i, std::vector<std::string> &r) {
  std::vector<std::string> m;
  ScopedLock ml(rsm_mutex);
  m = cfg->get_view(vid_commit);
  m.push_back(primary);
  tprintf("rsm::client_members return %s m %s\n", print_members(m).c_str(),
  return rsm_client_protocol::OK;

// if the primary is a member of the new view, that node stays primary;
// otherwise, the lowest numbered node of the previous view becomes primary.
// caller should hold rsm_mutex
void rsm::set_primary(unsigned vid) {
  std::vector<std::string> c = cfg->get_view(vid);
  std::vector<std::string> p = cfg->get_view(vid - 1);
  VERIFY (c.size() > 0);
  if (isamember(primary,c)) {
    tprintf("set_primary: primary stays %s\n", primary.c_str());
  VERIFY(p.size() > 0);
  for (unsigned i = 0; i < p.size(); i++) {
    if (isamember(p[i], c)) {
      tprintf("set_primary: primary is %s\n", primary.c_str());

bool rsm::amiprimary() {
  ScopedLock ml(rsm_mutex);
  return primary == cfg->myaddr() && !inviewchange;

// Simulate partitions

// assumes caller holds rsm_mutex
void rsm::net_repair_wo(bool heal) {
  std::vector<std::string> m;
  m = cfg->get_view(vid_commit);
  for (unsigned i = 0; i < m.size(); i++) {
    if (m[i] != cfg->myaddr()) {
      tprintf("rsm::net_repair_wo: %s %d\n", m[i].c_str(), heal);
      if (h.safebind()) h.safebind()->set_reachable(heal);
  rsmrpc->set_reachable(heal);

rsm_test_protocol::status rsm::test_net_repairreq(int heal, int &r) {
  ScopedLock ml(rsm_mutex);
  tprintf("rsm::test_net_repairreq: %d (dopartition %d, partitioned %d)\n",
      heal, dopartition, partitioned);
  r = rsm_test_protocol::OK;

// simulate failures at breakpoints 1 and 2
void rsm::breakpoint1() {
    tprintf("Dying at breakpoint 1 in rsm!\n");

void rsm::breakpoint2() {
    tprintf("Dying at breakpoint 2 in rsm!\n");

void rsm::partition1() {
    net_repair_wo(false);

rsm_test_protocol::status rsm::breakpointreq(int b, int &r) {
  r = rsm_test_protocol::OK;
  ScopedLock ml(rsm_mutex);
  tprintf("rsm::breakpointreq: %d\n", b);
  if (b == 1) break1 = true;
  else if (b == 2) break2 = true;
  else if (b == 3 || b == 4) cfg->breakpoint(b);
  else r = rsm_test_protocol::ERR;