# somewhere on the cluster
VM="$1"
- DISK="/dev/xenvg/${VM}_hda"
+ LV="${VM}_hda"
+ DISK="/dev/xenvg/$LV"
# Don't bother trying to start the VM if it's already running
- if xm list "$1" >/dev/null 2>&1; then
+ if xm list "$VM" >/dev/null 2>&1; then
return 1
fi
- if lvchange -a n "$DISK" >/dev/null 2>&1 && lvchange -a ey "$DISK" >/dev/null 2>&1; then
- # If we can disable and then re-enable the VMs disk, then the
- # VM can't be running. If the lvchange -a ey succeeds, then we
- # have an exclusive lock across the cluster on enabling the
- # disk, which avoids the potential race condition of two hosts
- # starting a VM at the same time
- [ "$VERBOSE" != no ] && log_daemon_msg "Starting sysvm $VM"
- xm create "sysvms/$VM" >/dev/null
- [ "$VERBOSE" != no ] && log_end_msg $?
- RET=0
- else
- RET=1
- fi
+ RET=1
+ # To keep multiple hosts from trying to start the same VM at the
+ # same time, take a cluster-wide lock at startup by creating a
+ # lock LV, since LV creation is atomic
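+ # (the lock LV holds no data, so the 1K size is just a minimal
+ # placeholder)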
+ if lvcreate -L 1K -n "lock_${LV}" xenvg >/dev/null 2>&1; then
+ # If we can disable the LV, then the VM isn't already running
+ # somewhere else
+ if lvchange -a n "$DISK" >/dev/null 2>&1; then
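+ # Re-activate the disk (the check above just deactivated it)
+ # so the new domU can use it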
+ lvchange -a y "$DISK" >/dev/null 2>&1
+
+ [ "$VERBOSE" != no ] && log_daemon_msg "Starting sysvm $VM"
+ xm create "sysvms/$VM" >/dev/null
+ [ "$VERBOSE" != no ] && log_end_msg $?
+ RET=0
+ fi
+
+ # Regardless of whether the lvchange -a n check succeeded, it
+ # probably disabled the disk LV somewhere; be sure we clean up
+ lvchange -a y "$DISK" >/dev/null 2>&1
- # Regardless of whether we could get the lock or not, the
- # lvchange -a n probably disabled the LV somewhere; be sure we
- # clean up
- lvchange -a y "$DISK" >/dev/null 2>&1
+ # Clean up the lock LV, regardless of whether we started the VM
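+ # (deactivate it everywhere, re-activate it exclusively here,
+ # then remove it)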
+ lvchange -a n "/dev/xenvg/lock_${LV}" >/dev/null 2>&1
+ lvchange -a ey "/dev/xenvg/lock_${LV}" >/dev/null 2>&1
+ lvremove -f "/dev/xenvg/lock_${LV}" >/dev/null 2>&1
+ fi
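+ # If the lvcreate failed, another host already holds the lock
+ # for this VM, so RET stays 1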
return $RET
}