from __future__ import with_statement

import json

from invirt.common import *
from os import rename
from os.path import getmtime
from contextlib import closing

import yaml

# Use the fast libyaml-based loader when PyYAML was built against it;
# otherwise fall back to the pure-Python SafeLoader.
try:                   loader = yaml.CSafeLoader
except AttributeError: loader = yaml.SafeLoader

src_path   = '/etc/invirt/master.yaml'      # authoritative YAML configuration
cache_path = '/var/lib/invirt/cache.json'   # faster-to-load JSON cache
lock_path  = '/var/lib/invirt/cache.lock'   # guards cache regeneration

def load_master():
    """Load the authoritative configuration from the master YAML file."""
    with closing(open(src_path)) as f:
        return yaml.load(f, loader)

def get_src_mtime():
    """Return the modification time of the master configuration file."""
    return getmtime(src_path)

def load(force_refresh = False):
    """
    Try loading the configuration from the faster-to-load JSON cache at
    cache_path.  If it doesn't exist or is outdated, load the configuration
    instead from the original YAML file at src_path and regenerate the cache.
    I assume I have the permissions to write to the cache directory.
    """
    # Namespace container for state variables, so that they can be updated by
    # the nested blocks below (struct comes from invirt.common).
    ns = struct()

    if force_refresh:
        do_refresh = True
    else:
        src_mtime = get_src_mtime()
        try:            cache_mtime = getmtime(cache_path)
        except OSError: do_refresh  = True
        else:           do_refresh  = src_mtime + 1 >= cache_mtime

        # We chose not to simply say
        #
        #   do_refresh = src_mtime >= cache_mtime
        #
        # because between the getmtime(src_path) and the time the cache is
        # rewritten, the master configuration may have been updated, so future
        # checks here would find a cache with a newer mtime than the master
        # (and thus treat the cache as containing the latest version of the
        # master).  The +1 means that for at least a full second following the
        # update to the master, this function will refresh the cache, giving us
        # 1 second to write the cache.  Note that if it takes longer than 1
        # second to write the cache, then this situation could still arise.
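        #
        # Worked example of that race (hypothetical timestamps):
        #
        #   t=10.0  master.yaml updated          (mtime 10.0)
        #   t=10.1  load() reads src_mtime 10.0 and starts a refresh
        #   t=10.2  master.yaml updated again    (mtime 10.2)
        #   t=10.3  cache.json finished writing  (mtime 10.3), still holding
        #           the t=10.0 contents
        #
        # A later call then sees src_mtime = 10.2 and cache_mtime = 10.3, so a
        # plain ">=" test would treat the stale cache as fresh; with the +1,
        # 10.2 + 1 >= 10.3 still forces a refresh.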
        #
        # The getmtime calls should logically be part of the same transaction
        # as the rest of this function (cache read + conditional cache
        # refresh), but wrapping everything in a file lock would make the
        # following cache read less streamlined.

    if not do_refresh:
        # Try reading from the cache first.  This must be transactionally
        # isolated from concurrent writes to prevent reading an incomplete
        # (changing) version of the data (but the transaction can share the
        # lock with other concurrent reads).  This isolation is accomplished
        # using an atomic filesystem rename in the refreshing stage.
        try:
            with closing(open(cache_path)) as f:
                ns.cfg = json.load(f)
        except (IOError, OSError, ValueError):
            # Missing or unreadable/corrupt cache: fall back to a refresh.
            do_refresh = True

    if do_refresh:
        # Atomically reload the source and regenerate the cache.  The read and
        # write must be a single transaction, or a stale version may be
        # written (if another read/write of a more recent configuration is
        # interleaved).  The final atomic rename keeps this transactionally
        # isolated from the cache read above.  If we fail to acquire the lock,
        # just try to load the master configuration directly.
        try:
            with lock_file(lock_path):
                ns.cfg = load_master()
                try:
                    with closing(open(cache_path + '.tmp', 'w')) as f:
                        json.dump(ns.cfg, f)
                except (IOError, OSError):
                    pass  # silent failure: the cache is only an optimization
                else:
                    rename(cache_path + '.tmp', cache_path)
        except IOError:
            # Could not take the lock; skip the cache and read the master.
            ns.cfg = load_master()

    return ns.cfg

# Load the configuration once at import time and expose both representations:
# plain nested dicts and the struct form produced by invirt.common.
dicts   = load()
structs = dicts2struct(dicts)
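
# Minimal manual check, not part of the library API: running this module
# directly (assuming it is importable as invirt.config and the paths above
# exist) just pretty-prints the loaded configuration.
if __name__ == '__main__':
    import pprint
    pprint.pprint(dicts)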