}
// The recovery thread runs this function
-void rsm::recovery() [[noreturn]] {
+void rsm::recovery() {
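+// (note: an attribute placed after the parameter list appertains to the
+// function type, where [[noreturn]] is ill-formed; it must precede the
+// declaration, so the old signature above did not compile as intended)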
bool r = true;
lock ml(rsm_mutex);
}
}
execute(procno, req, r);
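+ // Debug aid: log each byte of the reply r as two zero-padded hex digits.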
+ for (size_t i = 0; i < r.size(); i++) {
+ LOG(hex << setfill('0') << setw(2) << (unsigned int)(unsigned char)r[i]);
+ }
last_myvs = vs;
return rsm_client_protocol::OK;
}
}
//
-// RPC handler: Send back all the nodes this local knows about to client
-// so the client can switch to a different primary
-// when it existing primary fails
+// RPC handler: Responds with the list of known nodes so the client can
+// fail over to a different primary when the current one fails
//
rsm_client_protocol::status rsm::client_members(vector<string> &r, int) {
vector<string> m;
}
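// A hypothetical client-side sketch of this RPC (the names rcl, known_mems,
// and primary are assumptions, not part of this file): on losing the
// primary, a client could refresh its member list and retry elsewhere:
//   vector<string> known_mems;
//   if (rcl->call(rsm_client_protocol::members, 0, known_mems) ==
//       rsm_client_protocol::OK && !known_mems.empty()) {
//     primary = known_mems.back();  // pick another member as the new primary
//     known_mems.pop_back();
//   }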
-// Testing server
-
-// Simulate partitions
+// Test RPCs -- simulate partitions and failures
-// assumes caller holds rsm_mutex
-void rsm::net_repair(bool heal, lock &) {
+void rsm::net_repair(bool heal, lock &/*rsm_mutex_lock*/) {
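+// The otherwise-unused lock reference documents that callers must already
+// hold rsm_mutex.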
vector<string> m;
cfg->get_view(vid_commit, m);
for (unsigned i = 0; i < m.size(); i++) {
lock ml(rsm_mutex);
LOG("heal " << heal << " (dopartition " <<
dopartition << ", partitioned " << partitioned << ")");
- if (heal) {
+ if (heal)
net_repair(heal, ml);
- partitioned = false;
- } else {
+ else
dopartition = true;
- partitioned = false;
- }
- r = rsm_test_protocol::OK;
- return r;
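+ // partitioned is cleared regardless of heal, so do it once after the branch.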
+ partitioned = false;
+ return r = rsm_test_protocol::OK;
}
// simulate failure at breakpoints 1 and 2