X-Git-Url: http://xvm.mit.edu/gitweb/invirt/packages/invirt-base.git/blobdiff_plain/c4e45141f8612ae5977869c4ea6babb777cf6bd6..bed209caf968dd992a7f849f8b982b4b46a0f876:/files/usr/share/python-support/sipb-xen-base/invirt/config.py

diff --git a/files/usr/share/python-support/sipb-xen-base/invirt/config.py b/files/usr/share/python-support/sipb-xen-base/invirt/config.py
index 29f7ce5..8648c70 100644
--- a/files/usr/share/python-support/sipb-xen-base/invirt/config.py
+++ b/files/usr/share/python-support/sipb-xen-base/invirt/config.py
@@ -1,5 +1,6 @@
 import json
 from invirt.common import *
+from os import rename
 from os.path import getmtime
 
 default_src_path = '/etc/invirt/master.yaml'
@@ -15,40 +16,69 @@ def load(src_path = default_src_path,
     instead from the original YAML file at src_path and regenerate the cache.
     I assume I have the permissions to write to the cache directory.
     """
-    # Namespace container for various state variables, so that they can be
-    # updated by closures.
+
+    # Namespace container for state variables, so that they can be updated by
+    # closures.
     ns = struct()
 
     if force_refresh:
-        ns.do_refresh = True
+        do_refresh = True
     else:
         src_mtime = getmtime(src_path)
-        try:            cache_mtime = getmtime(cache_path)
-        except OSError: ns.do_refresh = True
-        else:           ns.do_refresh = src_mtime > cache_mtime
+        try:            cache_mtime = getmtime(cache_path)
+        except OSError: do_refresh = True
+        else:           do_refresh = src_mtime + 1 >= cache_mtime
+
+        # We chose not to simply say
+        #
+        #   do_refresh = src_mtime >= cache_time
+        #
+        # because between the getmtime(src_path) and the time the cache is
+        # rewritten, the master configuration may have been updated, so future
+        # checks here would find a cache with a newer mtime than the master
+        # (and thus treat the cache as containing the latest version of the
+        # master).  The +1 means that for at least a full second following the
+        # update to the master, this function will refresh the cache, giving us
+        # 1 second to write the cache.  Note that if it takes longer than 1
+        # second to write the cache, then this situation could still arise.
+        #
+        # The getmtime calls should logically be part of the same transaction
+        # as the rest of this function (cache read + conditional cache
+        # refresh), but to wrap everything in an flock would cause the
+        # following cache read to be less streamlined.
 
-    if not ns.do_refresh:
+    if not do_refresh:
         # Try reading from the cache first.  This must be transactionally
         # isolated from concurrent writes to prevent reading an incomplete
         # (changing) version of the data (but the transaction can share the
-        # lock with other concurrent reads).
-        @with_lock_file(lock_file, False)
-        def read_cache():
-            try: ns.cfg = with_closing(file(cache_path))(lambda f: json.read(f.read()))
-            except: ns.do_refresh = True
+        # lock with other concurrent reads).  This isolation is accomplished
+        # using an atomic filesystem rename in the refreshing stage.
+        try: ns.cfg = with_closing(file(cache_path)) (
+                lambda f: json.read(f.read()))
+        except: do_refresh = True
 
-    if ns.do_refresh:
+    if do_refresh:
         # Atomically reload the source and regenerate the cache.  The read and
         # write must be a single transaction, or a stale version may be
-        # written.
-        @with_lock_file(lock_file)
-        def refresh_cache():
-            import yaml
-            try:    default_loader = yaml.CSafeLoader
-            except: default_loader = yaml.SafeLoader
-            ns.cfg = with_closing(file(src_path))(lambda f: yaml.load(f, default_loader))
-            try:    with_closing(file(cache_path, 'w'))(lambda f: f.write(json.write(ns.cfg)))
-            except: pass # silent failure
+        # written (if another read/write of a more recent configuration
+        # is interleaved).  The final atomic rename is to keep this
+        # transactionally isolated from the above cache read.  If we fail to
+        # acquire the lock, just try to load the master configuration.
+        import yaml
+        try:    loader = yaml.CSafeLoader
+        except: loader = yaml.SafeLoader
+        try:
+            @with_lock_file(lock_file)
+            def refresh_cache():
+                ns.cfg = with_closing(file(src_path)) (
+                        lambda f: yaml.load(f, loader))
+                try: with_closing(file(cache_path + '.tmp', 'w')) (
+                        lambda f: f.write(json.write(ns.cfg)))
+                except: pass # silent failure
+                else: rename(cache_path + '.tmp', cache_path)
+        except IOError:
+            ns.cfg = with_closing(file(src_path)) (
+                lambda f: yaml.load(f, loader))
 
     return ns.cfg
 
 dicts = load()
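
The pattern this patch adopts can be illustrated outside the invirt helpers. The sketch below is a minimal, modernized approximation (Python 3, stdlib json plus PyYAML), not the project's code: the function name load_cached_config, the example paths, and the exception choices are assumptions, and the invirt.common lock-file and struct helpers are intentionally omitted. It shows the two ideas the patch's comments describe: treating the cache as stale for a full second after the master changes, and publishing a new cache by writing a temp file and rename()ing it into place so readers never see a partially written cache.

import json
import os
import yaml

def load_cached_config(src_path='/etc/invirt/master.yaml',
                       cache_path='/var/lib/invirt/cache.json',
                       force_refresh=False):
    """Hypothetical sketch of the mtime-plus-one, atomic-rename cache refresh."""
    # Decide whether the cache is stale.  The "+ 1" mirrors the patch: the
    # cache stays "stale" for a full second after the master's mtime, so an
    # update to the master that lands while we are writing the cache still
    # triggers a refresh on the next call.
    if force_refresh:
        do_refresh = True
    else:
        src_mtime = os.path.getmtime(src_path)
        try:
            cache_mtime = os.path.getmtime(cache_path)
        except OSError:
            do_refresh = True
        else:
            do_refresh = src_mtime + 1 >= cache_mtime

    if not do_refresh:
        # The cache is only ever replaced via rename(), so this read sees
        # either the old complete file or the new complete file, never a mix.
        try:
            with open(cache_path) as f:
                return json.load(f)
        except (OSError, ValueError):
            do_refresh = True

    # Reload the master configuration and try to regenerate the cache.
    with open(src_path) as f:
        cfg = yaml.safe_load(f)
    try:
        tmp_path = cache_path + '.tmp'
        with open(tmp_path, 'w') as f:
            json.dump(cfg, f)
        os.rename(tmp_path, cache_path)  # atomic on a POSIX filesystem
    except OSError:
        pass  # silent failure: returning the config matters more than caching it
    return cfg

As in the patch, a caching failure is deliberately non-fatal: the freshly parsed master configuration is returned regardless, and only the speedup of the JSON cache is lost.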