// The rule is that a module releases its internal locks before it
// upcalls, but can keep its locks when calling down.
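+//
+// A minimal sketch of that rule (hypothetical names, not part of this module):
+//
+//   void layer::handle() {
+//       lock ml(layer_mutex);
+//       lower->request();      // calling down: keeping our lock is fine
+//       ml.unlock();
+//       upper->notify();       // upcall: drop our lock first, so the upper
+//                              // layer can safely call back down into us
+//       ml.lock();
+//   }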
-#include <fstream>
-#include <iostream>
-#include <algorithm>
#include <sys/types.h>
#include <unistd.h>
+#include "types.h"
#include "handle.h"
#include "rsm.h"
-#include "tprintf.h"
-#include "lang/verify.h"
#include "rsm_client.h"
-#include "lock.h"
-rsm::rsm(std::string _first, std::string _me) :
+rsm::rsm(const string & _first, const string & _me) :
stf(0), primary(_first), insync (false), inviewchange (true), vid_commit(0),
partitioned (false), dopartition(false), break1(false), break2(false)
{
- last_myvs.vid = 0;
- last_myvs.seqno = 0;
- myvs = last_myvs;
- myvs.seqno = 1;
-
cfg = new config(_first, _me, this);
if (_first == _me) {
rsmrpc->reg(rsm_protocol::joinreq, &rsm::joinreq, this);
// tester must be on different port, otherwise it may partition itself
- testsvr = new rpcs(atoi(_me.c_str()) + 1);
+ testsvr = new rpcs((in_port_t)stoi(_me) + 1);
testsvr->reg(rsm_test_protocol::net_repair, &rsm::test_net_repairreq, this);
testsvr->reg(rsm_test_protocol::breakpoint, &rsm::breakpointreq, this);
{
lock ml(rsm_mutex);
- std::thread(&rsm::recovery, this).detach();
+ thread(&rsm::recovery, this).detach();
}
}
}
// The recovery thread runs this function
-void rsm::recovery() {
+[[noreturn]] void rsm::recovery() {
bool r = true;
lock ml(rsm_mutex);
while (!cfg->ismember(cfg->myaddr(), vid_commit)) {
// XXX iannucci 2013/09/15 -- I don't understand whether accessing
// cfg->view_id in this manner involves a race. I suspect not.
- if (join(primary)) {
- tprintf("recovery: joined\n");
- commit_change_wo(cfg->view_id());
+ if (join(primary, ml)) {
+ LOG("recovery: joined");
+ commit_change(cfg->view_id(), ml);
} else {
ml.unlock();
- std::this_thread::sleep_for(std::chrono::seconds(30)); // XXX make another node in cfg primary?
+ this_thread::sleep_for(seconds(30)); // XXX make another node in cfg primary?
ml.lock();
}
}
vid_insync = vid_commit;
- tprintf("recovery: sync vid_insync %d\n", vid_insync);
+ LOG("recovery: sync vid_insync " << vid_insync);
if (primary == cfg->myaddr()) {
- r = sync_with_backups();
+ r = sync_with_backups(ml);
} else {
- r = sync_with_primary();
+ r = sync_with_primary(ml);
}
- tprintf("recovery: sync done\n");
+ LOG("recovery: sync done");
// If there was a committed viewchange during the synchronization, restart
// the recovery
myvs.seqno = 1;
inviewchange = false;
}
- tprintf("recovery: go to sleep %d %d\n", insync, inviewchange);
+ LOG("recovery: go to sleep " << insync << " " << inviewchange);
recovery_cond.wait(ml);
}
}
-template <class A>
-std::ostream & operator<<(std::ostream &o, const std::vector<A> &d) {
- o << "[";
- for (typename std::vector<A>::const_iterator i=d.begin(); i!=d.end(); i++) {
- o << *i;
- if (i+1 != d.end())
- o << ", ";
- }
- o << "]";
- return o;
-}
-
-bool rsm::sync_with_backups() {
- adopt_lock ml(rsm_mutex);
- ml.unlock();
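+// Convention assumed throughout: a "lock &" parameter means the caller
+// already holds rsm_mutex; the callee may temporarily release and
+// reacquire it around blocking calls.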
+bool rsm::sync_with_backups(lock & rsm_mutex_lock) {
+ rsm_mutex_lock.unlock();
{
// Make sure that the state of lock_server is stable during
// synchronization; otherwise, the primary's state may be more recent
// than replicas after the synchronization.
- lock ml(invoke_mutex);
+ lock invoke_mutex_lock(invoke_mutex);
// By acquiring and releasing the invoke_mutex once, we make sure that
// the state of lock_server will not be changed until all
// replicas are synchronized. The reason is that any client_invoke
// arriving after this point will see inviewchange == true and
// return BUSY.
}
- ml.lock();
+ rsm_mutex_lock.lock();
// Start accepting synchronization request (statetransferreq) now!
insync = true;
cfg->get_view(vid_insync, backups);
backups.erase(find(backups.begin(), backups.end(), cfg->myaddr()));
- LOG("rsm::sync_with_backups " << backups);
- sync_cond.wait(ml);
+ LOG("backups " << backups);
+ sync_cond.wait(rsm_mutex_lock);
insync = false;
return true;
}
-bool rsm::sync_with_primary() {
+bool rsm::sync_with_primary(lock & rsm_mutex_lock) {
// Remember the primary of vid_insync
- std::string m = primary;
+ string m = primary;
while (vid_insync == vid_commit) {
- if (statetransfer(m))
+ if (statetransfer(m, rsm_mutex_lock))
break;
}
- return statetransferdone(m);
+ return statetransferdone(m, rsm_mutex_lock);
}
-/**
- * Call to transfer state from m to the local node.
- * Assumes that rsm_mutex is already held.
- */
-bool rsm::statetransfer(std::string m)
+//
+// Call to transfer state from m to the local node.
+// Assumes that rsm_mutex is already held.
+//
+bool rsm::statetransfer(const string & m, lock & rsm_mutex_lock)
{
rsm_protocol::transferres r;
handle h(m);
int ret = 0;
- tprintf("rsm::statetransfer: contact %s w. my last_myvs(%d,%d)\n",
- m.c_str(), last_myvs.vid, last_myvs.seqno);
+ LOG("contact " << m << " w. my last_myvs(" << last_myvs.vid << "," << last_myvs.seqno << ")");
rpcc *cl;
{
- adopt_lock ml(rsm_mutex);
- ml.unlock();
+ rsm_mutex_lock.unlock();
cl = h.safebind();
if (cl) {
ret = cl->call_timeout(rsm_protocol::transferreq, rpcc::to(1000),
r, cfg->myaddr(), last_myvs, vid_insync);
}
- ml.lock();
+ rsm_mutex_lock.lock();
}
if (cl == 0 || ret != rsm_protocol::OK) {
- tprintf("rsm::statetransfer: couldn't reach %s %lx %d\n", m.c_str(),
- (long unsigned) cl, ret);
+ LOG("couldn't reach " << m << " " << hex << cl << " " << dec << ret);
return false;
}
if (stf && last_myvs != r.last) {
stf->unmarshal_state(r.state);
}
last_myvs = r.last;
- tprintf("rsm::statetransfer transfer from %s success, vs(%d,%d)\n",
- m.c_str(), last_myvs.vid, last_myvs.seqno);
+ LOG("transfer from " << m << " success, vs(" << last_myvs.vid << "," << last_myvs.seqno << ")");
return true;
}
-bool rsm::statetransferdone(std::string m) {
- adopt_lock ml(rsm_mutex);
- ml.unlock();
+bool rsm::statetransferdone(const string & m, lock & rsm_mutex_lock) {
+ rsm_mutex_lock.unlock();
handle h(m);
rpcc *cl = h.safebind();
bool done = false;
if (cl) {
int r;
- rsm_protocol::status ret = cl->call(rsm_protocol::transferdonereq, r, cfg->myaddr(), vid_insync);
+ auto ret = (rsm_protocol::status)cl->call(rsm_protocol::transferdonereq, r, cfg->myaddr(), vid_insync);
done = (ret == rsm_protocol::OK);
}
- ml.lock();
+ rsm_mutex_lock.lock();
return done;
}
-bool rsm::join(std::string m) {
+bool rsm::join(const string & m, lock & rsm_mutex_lock) {
handle h(m);
int ret = 0;
- rsm_protocol::joinres r;
+ string log;
- tprintf("rsm::join: %s mylast (%d,%d)\n", m.c_str(), last_myvs.vid,
- last_myvs.seqno);
+ LOG("contacting " << m << " mylast (" << last_myvs.vid << "," << last_myvs.seqno << ")");
rpcc *cl;
{
- adopt_lock ml(rsm_mutex);
- ml.unlock();
+ rsm_mutex_lock.unlock();
cl = h.safebind();
if (cl != 0) {
- ret = cl->call_timeout(rsm_protocol::joinreq, rpcc::to(120000), r,
+ ret = cl->call_timeout(rsm_protocol::joinreq, rpcc::to(120000), log,
cfg->myaddr(), last_myvs);
}
- ml.lock();
+ rsm_mutex_lock.lock();
}
if (cl == 0 || ret != rsm_protocol::OK) {
- tprintf("rsm::join: couldn't reach %s %p %d\n", m.c_str(),
- cl, ret);
+ LOG("couldn't reach " << m << " " << hex << cl << " " << dec << ret);
return false;
}
- tprintf("rsm::join: succeeded %s\n", r.log.c_str());
- cfg->restore(r.log);
+ LOG("succeeded " << log);
+ cfg->restore(log);
return true;
}
-/*
- * Config informs rsm whenever it has successfully
- * completed a view change
- */
+//
+// Config informs rsm whenever it has successfully
+// completed a view change
+//
void rsm::commit_change(unsigned vid) {
lock ml(rsm_mutex);
- commit_change_wo(vid);
+ commit_change(vid, ml);
if (cfg->ismember(cfg->myaddr(), vid_commit))
breakpoint2();
}
-void rsm::commit_change_wo(unsigned vid) {
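+// The unused lock & parameter documents that the caller must hold rsm_mutex.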
+void rsm::commit_change(unsigned vid, lock &) {
if (vid <= vid_commit)
return;
- tprintf("commit_change: new view (%d) last vs (%d,%d) %s insync %d\n",
- vid, last_myvs.vid, last_myvs.seqno, primary.c_str(), insync);
+ LOG("commit_change: new view (" << vid << ") last vs (" << last_myvs.vid << "," <<
+ last_myvs.seqno << ") " << primary << " insync " << insync);
vid_commit = vid;
inviewchange = true;
set_primary(vid);
}
-void rsm::execute(int procno, std::string req, std::string &r) {
- tprintf("execute\n");
+void rsm::execute(int procno, const string & req, string & r) {
+ LOG("execute");
handler *h = procs[procno];
VERIFY(h);
- unmarshall args(req);
+ unmarshall args(req, false);
marshall rep;
- std::string reps;
- rsm_protocol::status ret = (*h)(args, rep);
- marshall rep1;
- rep1 << ret;
- rep1 << rep.str();
- r = rep1.str();
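+ // the reply is the handler's status followed by its marshalled response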
+ auto ret = (rsm_protocol::status)(*h)(args, rep);
+ r = marshall{ret, rep.content()}.content();
}
//
// number, and invokes it on all members of the replicated state
// machine.
//
-rsm_client_protocol::status rsm::client_invoke(std::string &r, int procno, std::string req) {
- LOG("rsm::client_invoke: procno 0x" << std::hex << procno);
+rsm_client_protocol::status rsm::client_invoke(string & r, int procno, const string & req) {
+ LOG("invoke procno 0x" << hex << procno);
lock ml(invoke_mutex);
- std::vector<std::string> m;
- std::string myaddr;
+ vector<string> m;
+ string myaddr;
viewstamp vs;
{
- lock ml(rsm_mutex);
+ lock ml2(rsm_mutex);
LOG("Checking for inviewchange");
if (inviewchange)
return rsm_client_protocol::BUSY;
rpcc *cl = h.safebind();
if (!cl)
return rsm_client_protocol::BUSY;
- rsm_protocol::status ret;
- int r;
- ret = cl->call_timeout(rsm_protocol::invoke, rpcc::to(1000), r, procno, vs, req);
+ int ignored_rval;
+ auto ret = (rsm_protocol::status)cl->call_timeout(rsm_protocol::invoke, rpcc::to(1000), ignored_rval, procno, vs, req);
LOG("Invoke returned " << ret);
if (ret != rsm_protocol::OK)
return rsm_client_protocol::BUSY;
breakpoint1();
- partition1();
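+ // partition1 (and net_repair beneath it) expects rsm_mutex to be held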
+ lock rsm_mutex_lock(rsm_mutex);
+ partition1(rsm_mutex_lock);
}
}
execute(procno, req, r);
// the replica must execute requests in order (with no gaps)
// according to requests' seqno
-rsm_protocol::status rsm::invoke(int &, int proc, viewstamp vs, std::string req) {
- LOG("rsm::invoke: procno 0x" << std::hex << proc);
+rsm_protocol::status rsm::invoke(int &, int proc, viewstamp vs, const string & req) {
+ LOG("invoke procno 0x" << hex << proc);
lock ml(invoke_mutex);
- std::vector<std::string> m;
- std::string myaddr;
+ vector<string> m;
+ string myaddr;
{
- lock ml(rsm_mutex);
+ lock ml2(rsm_mutex);
// check if !inviewchange
LOG("Checking for view change");
if (inviewchange)
return rsm_protocol::ERR;
myvs++;
}
- std::string r;
+ string r;
execute(proc, req, r);
last_myvs = vs;
breakpoint1();
return rsm_protocol::OK;
}
-/**
- * RPC handler: Send back the local node's state to the caller
- */
-rsm_protocol::status rsm::transferreq(rsm_protocol::transferres &r, std::string src,
+//
+// RPC handler: Send back the local node's state to the caller
+//
+rsm_protocol::status rsm::transferreq(rsm_protocol::transferres &r, const string & src,
viewstamp last, unsigned vid) {
lock ml(rsm_mutex);
- int ret = rsm_protocol::OK;
- tprintf("transferreq from %s (%d,%d) vs (%d,%d)\n", src.c_str(),
- last.vid, last.seqno, last_myvs.vid, last_myvs.seqno);
- if (!insync || vid != vid_insync) {
+ LOG("transferreq from " << src << " (" << last.vid << "," << last.seqno << ") vs (" <<
+ last_myvs.vid << "," << last_myvs.seqno << ")");
+ if (!insync || vid != vid_insync)
return rsm_protocol::BUSY;
- }
if (stf && last != last_myvs)
r.state = stf->marshal_state();
r.last = last_myvs;
- return ret;
+ return rsm_protocol::OK;
}
-/**
- * RPC handler: Inform the local node (the primary) that node m has synchronized
- * for view vid
- */
-rsm_protocol::status rsm::transferdonereq(int &, std::string m, unsigned vid) {
+//
+// RPC handler: Inform the local node (the primary) that node m has synchronized
+// for view vid
+//
+rsm_protocol::status rsm::transferdonereq(int &, const string & m, unsigned vid) {
lock ml(rsm_mutex);
if (!insync || vid != vid_insync)
return rsm_protocol::BUSY;
// a node that wants to join an RSM as a server sends a
// joinreq to the RSM's current primary; this is the
// handler for that RPC.
-rsm_protocol::status rsm::joinreq(rsm_protocol::joinres &r, std::string m, viewstamp last) {
- int ret = rsm_protocol::OK;
+rsm_protocol::status rsm::joinreq(string & log, const string & m, viewstamp last) {
+ auto ret = rsm_protocol::OK;
lock ml(rsm_mutex);
- tprintf("joinreq: src %s last (%d,%d) mylast (%d,%d)\n", m.c_str(),
- last.vid, last.seqno, last_myvs.vid, last_myvs.seqno);
+ LOG("join request from " << m << "; last=(" << last.vid << "," << last.seqno << "), mylast=(" <<
+ last_myvs.vid << "," << last_myvs.seqno << ")");
if (cfg->ismember(m, vid_commit)) {
- tprintf("joinreq: is still a member\n");
- r.log = cfg->dump();
+ LOG(m << " is still a member -- nothing to do");
+ log = cfg->dump();
} else if (cfg->myaddr() != primary) {
- tprintf("joinreq: busy\n");
+ LOG("but I, " << cfg->myaddr() << ", am not the primary, " << primary << "!");
ret = rsm_protocol::BUSY;
} else {
// We cache vid_commit to avoid adding m to a view which already contains
// m due to race condition
+ LOG("calling down to config layer");
unsigned vid_cache = vid_commit;
bool succ;
{
ml.lock();
}
if (cfg->ismember(m, cfg->view_id())) {
- r.log = cfg->dump();
- tprintf("joinreq: ret %d log %s\n:", ret, r.log.c_str());
+ log = cfg->dump();
+ LOG("ret " << ret << " log " << log);
} else {
- tprintf("joinreq: failed; proposer couldn't add %d\n", succ);
+ LOG("failed; proposer couldn't add " << succ);
ret = rsm_protocol::BUSY;
}
}
return ret;
}
-/*
- * RPC handler: Send back all the nodes this local knows about to client
- * so the client can switch to a different primary
- * when it existing primary fails
- */
-rsm_client_protocol::status rsm::client_members(std::vector<std::string> &r, int i) {
- std::vector<std::string> m;
+//
+// RPC handler: Send back all the nodes this local knows about to client
+// so the client can switch to a different primary
+// when its existing primary fails
+//
+rsm_client_protocol::status rsm::client_members(vector<string> &r, int) {
+ vector<string> m;
lock ml(rsm_mutex);
cfg->get_view(vid_commit, m);
m.push_back(primary);
r = m;
- tprintf("rsm::client_members return %s m %s\n", print_members(m).c_str(),
- primary.c_str());
+ LOG("return " << m << " m " << primary);
return rsm_client_protocol::OK;
}
// otherwise, the lowest number node of the previous view.
// caller should hold rsm_mutex
void rsm::set_primary(unsigned vid) {
- std::vector<std::string> c, p;
+ vector<string> c, p;
cfg->get_view(vid, c);
cfg->get_view(vid - 1, p);
VERIFY (c.size() > 0);
if (isamember(primary,c)) {
- tprintf("set_primary: primary stays %s\n", primary.c_str());
+ LOG("set_primary: primary stays " << primary);
return;
}
for (unsigned i = 0; i < p.size(); i++) {
if (isamember(p[i], c)) {
primary = p[i];
- tprintf("set_primary: primary is %s\n", primary.c_str());
+ LOG("set_primary: primary is " << primary);
return;
}
}
// Simulate partitions
// assumes caller holds rsm_mutex
-void rsm::net_repair_wo(bool heal) {
- std::vector<std::string> m;
+void rsm::net_repair(bool heal, lock &) {
+ vector<string> m;
cfg->get_view(vid_commit, m);
for (unsigned i = 0; i < m.size(); i++) {
if (m[i] != cfg->myaddr()) {
handle h(m[i]);
- tprintf("rsm::net_repair_wo: %s %d\n", m[i].c_str(), heal);
+ LOG("member " << m[i] << " " << heal);
if (h.safebind()) h.safebind()->set_reachable(heal);
}
}
rsmrpc->set_reachable(heal);
}
-rsm_test_protocol::status rsm::test_net_repairreq(int &r, int heal) {
+rsm_test_protocol::status rsm::test_net_repairreq(rsm_test_protocol::status &r, int heal) {
lock ml(rsm_mutex);
- tprintf("rsm::test_net_repairreq: %d (dopartition %d, partitioned %d)\n",
- heal, dopartition, partitioned);
+ LOG("heal " << heal << " (dopartition " <<
+ dopartition << ", partitioned " << partitioned << ")");
if (heal) {
- net_repair_wo(heal);
+ net_repair(heal, ml);
partitioned = false;
} else {
dopartition = true;
void rsm::breakpoint1() {
if (break1) {
- tprintf("Dying at breakpoint 1 in rsm!\n");
+ LOG("Dying at breakpoint 1 in rsm!");
exit(1);
}
}
void rsm::breakpoint2() {
if (break2) {
- tprintf("Dying at breakpoint 2 in rsm!\n");
+ LOG("Dying at breakpoint 2 in rsm!");
exit(1);
}
}
-void rsm::partition1() {
+void rsm::partition1(lock & rsm_mutex_lock) {
if (dopartition) {
- net_repair_wo(false);
+ net_repair(false, rsm_mutex_lock);
dopartition = false;
partitioned = true;
}
}
-rsm_test_protocol::status rsm::breakpointreq(int &r, int b) {
+rsm_test_protocol::status rsm::breakpointreq(rsm_test_protocol::status &r, int b) {
r = rsm_test_protocol::OK;
lock ml(rsm_mutex);
- tprintf("rsm::breakpointreq: %d\n", b);
+ LOG("breakpoint " << b);
if (b == 1) break1 = true;
else if (b == 2) break2 = true;
else if (b == 3 || b == 4) cfg->breakpoint(b);