thread(&rsm::recovery, this).detach();
}
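+// Register the handler for a procedure number so execute() can dispatch to it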
-void rsm::reg1(int proc, handler *h) {
+void rsm::reg1(rpc_protocol::proc_id_t proc, handler *h) {
lock ml(rsm_mutex);
procs[proc] = h;
}
// The recovery thread runs this function
-void rsm::recovery() [[noreturn]] {
+void rsm::recovery() {
bool r = true;
lock ml(rsm_mutex);
// XXX iannucci 2013/09/15 -- I don't understand whether accessing
// cfg->view_id in this manner involves a race. I suspect not.
if (join(primary, ml)) {
- LOG("recovery: joined");
+ LOG("joined");
commit_change(cfg->view_id(), ml);
} else {
ml.unlock();
}
}
vid_insync = vid_commit;
- LOG("recovery: sync vid_insync " << vid_insync);
+ LOG("sync vid_insync " << vid_insync);
if (primary == cfg->myaddr()) {
r = sync_with_backups(ml);
} else {
r = sync_with_primary(ml);
}
- LOG("recovery: sync done");
+ LOG("sync done");
// If there was a committed viewchange during the synchronization, restart
// the recovery
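+ // restart the request sequence numbering for the new view and leave the in-viewchange state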
myvs.seqno = 1;
inviewchange = false;
}
- LOG("recovery: go to sleep " << insync << " " << inviewchange);
+ LOG("go to sleep " << insync << " " << inviewchange);
recovery_cond.wait(ml);
}
}
void rsm::commit_change(unsigned vid, lock &) {
if (vid <= vid_commit)
return;
- LOG("commit_change: new view (" << vid << ") last vs (" << last_myvs.vid << "," <<
+ LOG("new view (" << vid << ") last vs (" << last_myvs.vid << "," <<
last_myvs.seqno << ") " << primary << " insync " << insync);
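+ // record the newly committed view and send this node back into recovery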
vid_commit = vid;
inviewchange = true;
}
-void rsm::execute(int procno, const string & req, string & r) {
+void rsm::execute(rpc_protocol::proc_id_t procno, const string & req, string & r) {
LOG("execute");
handler *h = procs[procno];
VERIFY(h);
// number, and invokes it on all members of the replicated state
// machine.
//
-rsm_client_protocol::status rsm::client_invoke(string & r, int procno, const string & req) {
+rsm_client_protocol::status rsm::client_invoke(string & r, rpc_protocol::proc_id_t procno, const string & req) {
LOG("invoke procno 0x" << hex << procno);
lock ml(invoke_mutex);
vector<string> m;
}
}
execute(procno, req, r);
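+ // log the marshalled reply one byte per line in hex (debugging aid)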
+ for (size_t i=0; i<r.size(); i++) {
+ LOG(hex << setfill('0') << setw(2) << (unsigned int)(unsigned char)r[i]);
+ }
last_myvs = vs;
return rsm_client_protocol::OK;
}
// the replica must execute requests in order (with no gaps)
// according to requests' seqno
-rsm_protocol::status rsm::invoke(int &, int proc, viewstamp vs, const string & req) {
+rsm_protocol::status rsm::invoke(int &, rpc_protocol::proc_id_t proc, viewstamp vs, const string & req) {
LOG("invoke procno 0x" << hex << proc);
lock ml(invoke_mutex);
vector<string> m;
}
//
-// RPC handler: Send back all the nodes this local knows about to client
-// so the client can switch to a different primary
-// when it existing primary fails
+// RPC handler: Responds with the list of known nodes so the client can fail
+// over to a different primary when its current one fails
//
rsm_client_protocol::status rsm::client_members(vector<string> &r, int) {
vector<string> m;
VERIFY (c.size() > 0);
if (isamember(primary,c)) {
- LOG("set_primary: primary stays " << primary);
+ LOG("primary stays " << primary);
return;
}
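+ // otherwise fall back to the first node of the prior view (p) that is still in the current view (c)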
for (unsigned i = 0; i < p.size(); i++) {
if (isamember(p[i], c)) {
primary = p[i];
- LOG("set_primary: primary is " << primary);
+ LOG("primary is " << primary);
return;
}
}
}
-// Testing server
-
-// Simulate partitions
+// Test RPCs -- simulate partitions and failures
-// assumes caller holds rsm_mutex
-void rsm::net_repair(bool heal, lock &) {
+void rsm::net_repair(bool heal, lock &/*rsm_mutex_lock*/) {
vector<string> m;
cfg->get_view(vid_commit, m);
for (unsigned i = 0; i < m.size(); i++) {
lock ml(rsm_mutex);
LOG("heal " << heal << " (dopartition " <<
dopartition << ", partitioned " << partitioned << ")");
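+ // either heal the network for the committed view or flag that a partition should be simulated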
- if (heal) {
+ if (heal)
net_repair(heal, ml);
- partitioned = false;
- } else {
+ else
dopartition = true;
- partitioned = false;
- }
- r = rsm_test_protocol::OK;
- return r;
+ partitioned = false;
+ return r = rsm_test_protocol::OK;
}
// simulate failure at breakpoints 1 and 2