- if lvchange -a n "$DISK" >/dev/null 2>&1 && lvchange -a ey "$DISK" >/dev/null 2>&1; then
-     # If we can disable and then re-enable the VM's disk, then the
-     # VM can't be running. If the lvchange -a ey succeeds, then we
-     # have an exclusive lock across the cluster on enabling the
-     # disk, which avoids the potential race condition of two hosts
-     # starting a VM at the same time
-     [ "$VERBOSE" != no ] && log_daemon_msg "Starting sysvm $VM"
-     xm create "sysvms/$VM" >/dev/null
-     [ "$VERBOSE" != no ] && log_end_msg $?
-     RET=0
- else
-     RET=1
- fi
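+ # Assume failure until the VM has actually been started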
+ RET=1
+ # To keep multiple hosts from trying to start a VM at the same
+ # time, lock VM creation at startup time with a lock LV, since LV
+ # creation is atomic
+ if lvcreate -L 1K -n "lock_${LV}" xenvg >/dev/null 2>&1; then
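+     # lvcreate fails if lock_${LV} already exists, so only one
+     # host can enter this section for a given VM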
+     # If we can disable the LV, then the VM isn't already running
+     # somewhere else
+     if lvchange -a n "$DISK" >/dev/null 2>&1; then
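+         # Re-enable the disk before starting the VM, since the
+         # new domain needs it available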
+         lvchange -a y "$DISK" >/dev/null 2>&1
+
+         [ "$VERBOSE" != no ] && log_daemon_msg "Starting sysvm $VM"
+         xm create "sysvms/$VM" >/dev/null
+         [ "$VERBOSE" != no ] && log_end_msg $?
+         RET=0
+     fi
+
+     # Regardless of whether we could disable the disk above, the
+     # lvchange -a n probably disabled the LV somewhere; be sure
+     # we clean up and leave it enabled
+     lvchange -a y "$DISK" >/dev/null 2>&1