# Recovered from gitweb blobdiff of invirt-base.git
# (files/usr/share/python-support/sipb-xen-base/invirt/config.py).

import json
import os
from os.path import getmtime

from invirt.common import *

default_src_path   = '/etc/invirt/master.yaml'
default_cache_path = '/var/lib/invirt/cache.json'
lock_file          = '/var/lib/invirt/cache.lock'

def load(src_path = default_src_path,
         cache_path = default_cache_path,
         force_refresh = False):
    """
    Try loading the configuration from the faster-to-load JSON cache at
    cache_path.  If it doesn't exist or is outdated, load the configuration
    instead from the original YAML file at src_path and regenerate the cache.
    I assume I have the permissions to write to the cache directory.
    """

    # Namespace container for state variables, so that they can be updated by
    # closures (the nested refresh_cache function assigns ns.cfg).
    ns = struct()

    if force_refresh:
        do_refresh = True
    else:
        src_mtime = getmtime(src_path)
        try:
            cache_mtime = getmtime(cache_path)
        except OSError:
            # No cache file yet (or it is not statable): rebuild it.
            do_refresh = True
        else:
            do_refresh = src_mtime + 1 >= cache_mtime

        # We chose not to simply say
        #
        #   do_refresh = src_mtime >= cache_mtime
        #
        # because between the getmtime(src_path) and the time the cache is
        # rewritten, the master configuration may have been updated, so future
        # checks here would find a cache with a newer mtime than the master
        # (and thus treat the cache as containing the latest version of the
        # master).  The +1 means that for at least a full second following the
        # update to the master, this function will refresh the cache, giving
        # us 1 second to write the cache.  Note that if it takes longer than
        # 1 second to write the cache, then this situation could still arise.
        #
        # The getmtime calls should logically be part of the same transaction
        # as the rest of this function (cache read + conditional cache
        # refresh), but to wrap everything in an flock would cause the
        # following cache read to be less streamlined.

    if not do_refresh:
        # Try reading from the cache first.  This must be transactionally
        # isolated from concurrent writes to prevent reading an incomplete
        # (changing) version of the data (but the transaction can share the
        # lock with other concurrent reads).  This isolation is accomplished
        # using an atomic filesystem rename in the refreshing stage.
        try:
            ns.cfg = with_closing(open(cache_path)) (
                lambda f: json.loads(f.read()))
        except (IOError, OSError, ValueError):
            # Missing, unreadable, or corrupt cache: fall through to refresh.
            do_refresh = True

    if do_refresh:
        # Atomically reload the source and regenerate the cache.  The read and
        # write must be a single transaction, or a stale version may be
        # written (if another read/write of a more recent configuration
        # is interleaved).  The final atomic rename is to keep this
        # transactionally isolated from the above cache read.  If we fail to
        # acquire the lock, just try to load the master configuration.
        import yaml
        try:
            loader = yaml.CSafeLoader   # fast C loader, when libyaml is built
        except AttributeError:
            loader = yaml.SafeLoader    # pure-Python fallback
        try:
            @with_lock_file(lock_file)
            def refresh_cache():
                ns.cfg = with_closing(open(src_path)) (
                    lambda f: yaml.load(f, loader))
                try:
                    with_closing(open(cache_path + '.tmp', 'w')) (
                        lambda f: f.write(json.dumps(ns.cfg)))
                except (IOError, OSError):
                    pass  # silent failure: serve the fresh config regardless
                else:
                    # Atomic rename publishes the complete cache file.
                    os.rename(cache_path + '.tmp', cache_path)
        except IOError:
            # Could not take the lock: load the master configuration directly.
            ns.cfg = with_closing(open(src_path)) (
                lambda f: yaml.load(f, loader))
    return ns.cfg

dicts = load()
structs = dicts2struct(dicts)

# vim:et:sw=4:ts=4