svn path=/trunk/packages/sipb-xen-base/; revision=793
        finally: rsrc.close()
    return wrapper
-def with_lock_file(path):
+def with_lock_file(path, exclusive = True):
"""
Context manager for lock files. Example:
"""
Context manager for lock files. Example:
    def wrapper(func):
        @with_closing(file(path, 'w'))
        def g(f):
+            if exclusive: locktype = LOCK_EX
+            else: locktype = LOCK_SH
+            flock(f, locktype)
            try: return func()
            finally: flock(f, LOCK_UN)
        return g
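
For reference, a minimal usage sketch of the updated helper (the lock path and function bodies here are illustrative, not part of this patch). Because with_closing calls the decorated function immediately, applying @with_lock_file runs the body right away while the lock is held and rebinds the name to the body's return value, which is how read_cache and refresh_cache are used further down:

    # Exclusive (write) lock, the default: the body runs under LOCK_EX.
    @with_lock_file('/tmp/example.lock')
    def answer():
        return 42

    # Shared (read) lock: exclusive = False takes LOCK_SH, so concurrent
    # readers of the same lock file do not serialize one another.
    @with_lock_file('/tmp/example.lock', False)
    def reread():
        return answer   # 'answer' is already the value 42, not a function
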
default_src_path = '/etc/invirt/master.yaml'
default_cache_path = '/var/lib/invirt/cache.json'
+lock_file = '/var/lib/invirt/cache.lock'
def load(src_path = default_src_path,
         cache_path = default_cache_path,
    instead from the original YAML file at src_path and regenerate the cache.
    I assume I have the permissions to write to the cache directory.
    """
+    # Namespace container for various state variables, so that they can be
+    # updated by closures.
+    ns = struct()
+
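
The struct() namespace (defined elsewhere in this package) is introduced because Python 2 closures cannot rebind variables in an enclosing scope; they can, however, mutate an object that the enclosing scope holds. A stand-alone sketch of the pattern, with a stand-in struct class for illustration:

    class struct(object):
        # Stand-in for the package's own struct helper: an empty attribute bag.
        pass

    def outer():
        ns = struct()
        ns.count = 0
        def bump():
            ns.count += 1   # mutates ns; no rebinding of an outer name needed
        bump()
        bump()
        return ns.count     # 2
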
    else:
        src_mtime = getmtime(src_path)
-        try: cache_mtime = getmtime(cache_path)
-        except OSError: do_refresh = True
-        else: do_refresh = src_mtime > cache_mtime
+        try: cache_mtime = getmtime(cache_path)
+        except OSError: ns.do_refresh = True
+        else: ns.do_refresh = src_mtime > cache_mtime
-    if not do_refresh:
-        # try reading from the cache first
-        try: cfg = with_closing(file(cache_path))(lambda f: json.read(f.read()))
-        except: do_refresh = True
+    if not ns.do_refresh:
+        # Try reading from the cache first. This must be transactionally
+        # isolated from concurrent writes to prevent reading an incomplete
+        # (changing) version of the data (but the transaction can share the
+        # lock with other concurrent reads).
+        @with_lock_file(lock_file, False)
+        def read_cache():
+            try: ns.cfg = with_closing(file(cache_path))(lambda f: json.read(f.read()))
+            except: ns.do_refresh = True
        # Atomically reload the source and regenerate the cache. The read and
        # write must be a single transaction, or a stale version may be
        # written.
-        @with_lock_file('/var/lib/invirt/cache.lock')
-        def cfg():
+        @with_lock_file(lock_file)
+        def refresh_cache():
            import yaml
            try: default_loader = yaml.CSafeLoader
            except: default_loader = yaml.SafeLoader
-            cfg = with_closing(file(src_path))(lambda f: yaml.load(f, default_loader))
-            try: with_closing(file(cache_path, 'w'))(lambda f: f.write(json.write(cfg)))
+            ns.cfg = with_closing(file(src_path))(lambda f: yaml.load(f, default_loader))
+            try: with_closing(file(cache_path, 'w'))(lambda f: f.write(json.write(ns.cfg)))
            except: pass # silent failure
-            return cfg
-    return cfg
+    return ns.cfg
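
To make the locking discipline above concrete: shared locks (LOCK_SH) may be held by several readers at once, while the exclusive lock (LOCK_EX) taken for the refresh transaction excludes readers and writers alike. A stand-alone flock sketch (the path below is illustrative, not part of the patch):

    from fcntl import flock, LOCK_SH, LOCK_EX, LOCK_UN

    f1 = file('/tmp/demo.lock', 'w')
    f2 = file('/tmp/demo.lock', 'w')

    flock(f1, LOCK_SH)    # first reader
    flock(f2, LOCK_SH)    # second reader acquires immediately
    flock(f2, LOCK_UN)
    flock(f1, LOCK_UN)

    flock(f1, LOCK_EX)    # writer: would block while any other lock is held
    flock(f1, LOCK_UN)
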
dicts = load()
structs = dicts2struct(dicts)
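
For context, a sketch of how consumers might use the two module-level views (assuming this module is importable as invirt.config and that dicts2struct converts nested dicts into attribute-access structs; the 'authn' key is hypothetical, real keys come from /etc/invirt/master.yaml):

    from invirt import config

    print config.dicts['authn']   # plain dict/list view of the YAML data
    print config.structs.authn    # the same data with attribute access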