rsm::rsm(std::string _first, std::string _me) :
stf(0), primary(_first), insync (false), inviewchange (true), vid_commit(0),
partitioned (false), dopartition(false), break1(false), break2(false)
{
- rsmrpc->reg(rsm_client_protocol::invoke, this, &rsm::client_invoke);
- rsmrpc->reg(rsm_client_protocol::members, this, &rsm::client_members);
- rsmrpc->reg(rsm_protocol::invoke, this, &rsm::invoke);
- rsmrpc->reg(rsm_protocol::transferreq, this, &rsm::transferreq);
- rsmrpc->reg(rsm_protocol::transferdonereq, this, &rsm::transferdonereq);
- rsmrpc->reg(rsm_protocol::joinreq, this, &rsm::joinreq);
+ rsmrpc->reg(rsm_client_protocol::invoke, &rsm::client_invoke, this);
+ rsmrpc->reg(rsm_client_protocol::members, &rsm::client_members, this);
+ rsmrpc->reg(rsm_protocol::invoke, &rsm::invoke, this);
+ rsmrpc->reg(rsm_protocol::transferreq, &rsm::transferreq, this);
+ rsmrpc->reg(rsm_protocol::transferdonereq, &rsm::transferdonereq, this);
+ rsmrpc->reg(rsm_protocol::joinreq, &rsm::joinreq, this);
- testsvr = new rpcs(atoi(_me.c_str()) + 1);
- testsvr->reg(rsm_test_protocol::net_repair, this, &rsm::test_net_repairreq);
- testsvr->reg(rsm_test_protocol::breakpoint, this, &rsm::breakpointreq);
+ testsvr = new rpcs((uint32_t)std::stoi(_me) + 1);
+ testsvr->reg(rsm_test_protocol::net_repair, &rsm::test_net_repairreq, this);
+ testsvr->reg(rsm_test_protocol::breakpoint, &rsm::breakpointreq, this);
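Every registration hunk here follows the same two-part convention change: in reg() the member-function pointer now comes before the object pointer, and in the handler itself the reply parameter moves from last position to first. A rough sketch with a made-up handler (demo and get are illustrative names only; the actual rpcs::reg template is not part of this diff):

    class demo {
      public:
        // old convention: arguments first, reply last
        //   int get(unsigned key, std::string &reply);
        //   svr->reg(demo_protocol::get, this, &demo::get);
        // new convention: reply first, arguments after
        int get(std::string &reply, unsigned key);
        //   svr->reg(demo_protocol::get, &demo::get, this);
    };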
- ScopedLock ml(rsm_mutex);
- VERIFY(pthread_create(&th, NULL, &recoverythread, (void *) this) == 0);
+ lock ml(rsm_mutex);
+ std::thread(&rsm::recovery, this).detach();
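pthread_create only accepts a free function, which is why the old constructor had to go through the recoverythread trampoline; std::thread binds the member function directly, and detach() drops the pthread_t bookkeeping. The old shape, reconstructed for comparison rather than quoted from this diff:

    // old: static trampoline so pthread_create has a plain function pointer to call
    static void *recoverythread(void *x) {
        ((rsm *) x)->recovery();
        return 0;
    }
    // pthread_t th;
    // VERIFY(pthread_create(&th, NULL, &recoverythread, (void *) this) == 0);

    // new: bind the member function and let the thread run unjoined
    // std::thread(&rsm::recovery, this).detach();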
- if (join(primary)) {
- tprintf("recovery: joined\n");
- commit_change_wo(cfg->vid());
+ // XXX iannucci 2013/09/15 -- I don't understand whether accessing
+ // cfg->view_id in this manner involves a race. I suspect not.
+ if (join(primary, ml)) {
+ LOG("recovery: joined");
+ commit_change(cfg->view_id(), ml);
- tprintf("recovery: go to sleep %d %d\n", insync, inviewchange);
- recovery_cond.wait(rsm_mutex);
+ LOG("recovery: go to sleep " << insync << " " << inviewchange);
+ recovery_cond.wait(ml);
-template <class A>
-std::ostream & operator<<(std::ostream &o, const std::vector<A> &d) {
- o << "[";
- for (typename std::vector<A>::const_iterator i=d.begin(); i!=d.end(); i++) {
- o << *i;
- if (i+1 != d.end())
- o << ", ";
- }
- o << "]";
- return o;
-}
-
-bool rsm::sync_with_backups() {
+bool rsm::sync_with_backups(lock & rsm_mutex_lock) {
+ rsm_mutex_lock.unlock();
// synchronization; otherwise, the primary's state may be more recent
// than replicas after the synchronization.
// replicas are synchronized. The reason is that a client_invoke arriving
// after this point will see inviewchange == true and return BUSY.
}
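The comment above describes a lock-as-barrier trick: the primary acquires and immediately releases the mutex that client_invoke holds while mutating state, so any invocation still in flight drains out before synchronization starts, and any later one already sees inviewchange == true and bails with BUSY. The pattern, sketched with the lock alias used elsewhere in this diff and an assumed invoke_mutex (its declaration is not shown here):

    {
        // Nothing is done while holding it; the acquire/release itself is the barrier.
        lock barrier(invoke_mutex);
    }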
- LOG("rsm::sync_with_backups " << backups);
- sync_cond.wait(rsm_mutex);
+ LOG("rsm::sync_with_backups " << make_iterator_pair(backups.begin(), backups.end()));
+ sync_cond.wait(rsm_mutex_lock);
- int ret;
- tprintf("rsm::statetransfer: contact %s w. my last_myvs(%d,%d)\n",
- m.c_str(), last_myvs.vid, last_myvs.seqno);
+ int ret = 0;
+ LOG("rsm::statetransfer: contact " << m << " w. my last_myvs(" << last_myvs.vid << "," << last_myvs.seqno << ")");
- ret = cl->call(rsm_protocol::transferreq, cfg->myaddr(),
- last_myvs, vid_insync, r, rpcc::to(1000));
+ ret = cl->call_timeout(rsm_protocol::transferreq, rpcc::to(1000),
+ r, cfg->myaddr(), last_myvs, vid_insync);
- tprintf("rsm::statetransfer: couldn't reach %s %lx %d\n", m.c_str(),
- (long unsigned) cl, ret);
+ LOG("rsm::statetransfer: couldn't reach " << m << " " << std::hex << cl << " " << std::dec << ret);
- tprintf("rsm::statetransfer transfer from %s success, vs(%d,%d)\n",
- m.c_str(), last_myvs.vid, last_myvs.seqno);
+ LOG("rsm::statetransfer transfer from " << m << " success, vs(" << last_myvs.vid << "," << last_myvs.seqno << ")");
-bool rsm::statetransferdone(std::string m) {
- ScopedUnlock su(rsm_mutex);
+bool rsm::statetransferdone(std::string m, lock & rsm_mutex_lock) {
+ rsm_mutex_lock.unlock();
- if (!cl)
- return false;
- int r;
- rsm_protocol::status ret = cl->call(rsm_protocol::transferdonereq, cfg->myaddr(), vid_insync, r);
- if (ret != rsm_protocol::OK)
- return false;
- return true;
+ bool done = false;
+ if (cl) {
+ int r;
+ auto ret = (rsm_protocol::status)cl->call(rsm_protocol::transferdonereq, r, cfg->myaddr(), vid_insync);
+ done = (ret == rsm_protocol::OK);
+ }
+ rsm_mutex_lock.lock();
+ return done;
- tprintf("rsm::join: %s mylast (%d,%d)\n", m.c_str(), last_myvs.vid,
- last_myvs.seqno);
+ LOG("rsm::join: " << m << " mylast (" << last_myvs.vid << "," << last_myvs.seqno << ")");
- ret = cl->call(rsm_protocol::joinreq, cfg->myaddr(), last_myvs,
- r, rpcc::to(120000));
+ ret = cl->call_timeout(rsm_protocol::joinreq, rpcc::to(120000), r,
+ cfg->myaddr(), last_myvs);
- tprintf("commit_change: new view (%d) last vs (%d,%d) %s insync %d\n",
- vid, last_myvs.vid, last_myvs.seqno, primary.c_str(), insync);
+ LOG("commit_change: new view (" << vid << ") last vs (" << last_myvs.vid << "," <<
+ last_myvs.seqno << ") " << primary << " insync " << insync);
if (cfg->ismember(cfg->myaddr(), vid_commit))
breakpoint2();
}
void rsm::execute(int procno, std::string req, std::string &r) {
- rsm_protocol::status ret;
- int r;
- ret = cl->call(rsm_protocol::invoke, procno, vs, req, r, rpcc::to(1000));
+ int ignored_rval;
+ auto ret = (rsm_protocol::status)cl->call_timeout(rsm_protocol::invoke, rpcc::to(1000), ignored_rval, procno, vs, req);
LOG("Invoke returned " << ret);
if (ret != rsm_protocol::OK)
return rsm_client_protocol::BUSY;
breakpoint1();
LOG("Invoke returned " << ret);
if (ret != rsm_protocol::OK)
return rsm_client_protocol::BUSY;
breakpoint1();
-rsm_protocol::status rsm::invoke(int proc, viewstamp vs, std::string req, int &dummy) {
+rsm_protocol::status rsm::invoke(int &, int proc, viewstamp vs, std::string req) {
if (find(m.begin(), m.end(), myaddr) == m.end())
return rsm_protocol::ERR;
// check sequence number
-rsm_protocol::status rsm::transferreq(std::string src, viewstamp last, unsigned vid,
- rsm_protocol::transferres &r) {
- ScopedLock ml(rsm_mutex);
- int ret = rsm_protocol::OK;
- tprintf("transferreq from %s (%d,%d) vs (%d,%d)\n", src.c_str(),
- last.vid, last.seqno, last_myvs.vid, last_myvs.seqno);
- if (!insync || vid != vid_insync) {
+rsm_protocol::status rsm::transferreq(rsm_protocol::transferres &r, std::string src,
+ viewstamp last, unsigned vid) {
+ lock ml(rsm_mutex);
+ LOG("transferreq from " << src << " (" << last.vid << "," << last.seqno << ") vs (" <<
+ last_myvs.vid << "," << last_myvs.seqno << ")");
+ if (!insync || vid != vid_insync)
-rsm_protocol::status rsm::transferdonereq(std::string m, unsigned vid, int &) {
- ScopedLock ml(rsm_mutex);
+rsm_protocol::status rsm::transferdonereq(int &, std::string m, unsigned vid) {
+ lock ml(rsm_mutex);
if (!insync || vid != vid_insync)
return rsm_protocol::BUSY;
backups.erase(find(backups.begin(), backups.end(), m));
if (backups.empty())
return rsm_protocol::OK;
}
// a node that wants to join an RSM as a server sends a
// joinreq to the RSM's current primary; this is the
// handler for that RPC.
-rsm_protocol::status rsm::joinreq(std::string m, viewstamp last, rsm_protocol::joinres &r) {
- int ret = rsm_protocol::OK;
+rsm_protocol::status rsm::joinreq(rsm_protocol::joinres &r, std::string m, viewstamp last) {
+ auto ret = rsm_protocol::OK;
- ScopedLock ml(rsm_mutex);
- tprintf("joinreq: src %s last (%d,%d) mylast (%d,%d)\n", m.c_str(),
- last.vid, last.seqno, last_myvs.vid, last_myvs.seqno);
+ lock ml(rsm_mutex);
+ LOG("joinreq: src " << m << " last (" << last.vid << "," << last.seqno << ") mylast (" <<
+ last_myvs.vid << "," << last_myvs.seqno << ")");
// otherwise, the lowest number node of the previous view.
// caller should hold rsm_mutex
void rsm::set_primary(unsigned vid) {
- std::vector<std::string> c = cfg->get_view(vid);
- std::vector<std::string> p = cfg->get_view(vid - 1);
+ std::vector<std::string> c, p;
+ cfg->get_view(vid, c);
+ cfg->get_view(vid - 1, p);
for (unsigned i = 0; i < p.size(); i++) {
if (isamember(p[i], c)) {
primary = p[i];
-rsm_test_protocol::status rsm::test_net_repairreq(int heal, int &r) {
- ScopedLock ml(rsm_mutex);
- tprintf("rsm::test_net_repairreq: %d (dopartition %d, partitioned %d)\n",
- heal, dopartition, partitioned);
+rsm_test_protocol::status rsm::test_net_repairreq(rsm_test_protocol::status &r, int heal) {
+ lock ml(rsm_mutex);
+ LOG("rsm::test_net_repairreq: " << heal << " (dopartition " <<
+ dopartition << ", partitioned " << partitioned << ")");
if (b == 1) break1 = true;
else if (b == 2) break2 = true;
else if (b == 3 || b == 4) cfg->breakpoint(b);