added shared/exclusive locking to with_lock_file; take only a shared lock for the initial JSON cache read
author     Yang Zhang <y_z@mit.edu>
           Thu, 31 Jul 2008 01:50:42 +0000 (21:50 -0400)
committer  Yang Zhang <y_z@mit.edu>
           Thu, 31 Jul 2008 01:50:42 +0000 (21:50 -0400)
svn path=/trunk/packages/sipb-xen-base/; revision=793

files/usr/share/python-support/sipb-xen-base/invirt/common.py
files/usr/share/python-support/sipb-xen-base/invirt/config.py
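
The change builds on fcntl advisory file locks: a shared lock (LOCK_SH) may be
held by any number of readers at once, while an exclusive lock (LOCK_EX)
excludes every other holder, shared or exclusive.  A minimal standalone sketch
of those semantics (illustration only, not part of the commit):

    import fcntl

    f = open('/var/lib/invirt/cache.lock', 'w')

    fcntl.flock(f, fcntl.LOCK_SH)   # shared: other readers may also hold LOCK_SH
    # ... read the cached data ...
    fcntl.flock(f, fcntl.LOCK_UN)   # release

    fcntl.flock(f, fcntl.LOCK_EX)   # exclusive: blocks until no other lock is held
    # ... regenerate and rewrite the cached data ...
    fcntl.flock(f, fcntl.LOCK_UN)

    f.close()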

files/usr/share/python-support/sipb-xen-base/invirt/common.py
index 5f669ba..3dc67a4 100644 (file)
@@ -39,7 +39,7 @@ def with_closing(rsrc):
         finally: rsrc.close()
     return wrapper
 
-def with_lock_file(path):
+def with_lock_file(path, exclusive = True):
     """
     Context manager for lock files.  Example:
 
@@ -53,7 +53,9 @@ def with_lock_file(path):
     def wrapper(func):
         @with_closing(file(path, 'w'))
         def g(f):
-            flock(f, LOCK_EX)
+            if exclusive: locktype = LOCK_EX
+            else:         locktype = LOCK_SH
+            flock(f, locktype)
             try: return func()
             finally: flock(f, LOCK_UN)
         return g
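
With the new exclusive parameter, callers choose either lock type through the
same helper.  A hedged usage sketch, assuming with_lock_file keeps its existing
call-and-bind behaviour (the decorated function runs immediately while the lock
is held and its return value is bound to the name):

    from invirt.common import with_lock_file

    lock_path = '/var/lib/invirt/cache.lock'

    # Exclusive lock (the default), for a writer:
    @with_lock_file(lock_path)
    def result():
        # ... regenerate and write shared state ...
        return 'written'

    # Shared lock, for a reader that only needs isolation from writers:
    @with_lock_file(lock_path, False)
    def data():
        # ... read shared state ...
        return 'read'
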
files/usr/share/python-support/sipb-xen-base/invirt/config.py
index c27ba6b..29f7ce5 100644 (file)
@@ -4,6 +4,7 @@ from os.path import getmtime
 
 default_src_path   = '/etc/invirt/master.yaml'
 default_cache_path = '/var/lib/invirt/cache.json'
+lock_file          = '/var/lib/invirt/cache.lock'
 
 def load(src_path = default_src_path,
          cache_path = default_cache_path,
@@ -14,33 +15,41 @@ def load(src_path = default_src_path,
     instead from the original YAML file at src_path and regenerate the cache.
     I assume I have the permissions to write to the cache directory.
     """
+    # Namespace container for various state variables, so that they can be
+    # updated by closures.
+    ns = struct()
+
     if force_refresh:
-        do_refresh = True
+        ns.do_refresh = True
     else:
         src_mtime = getmtime(src_path)
-        try:            cache_mtime = getmtime(cache_path)
-        except OSError: do_refresh  = True
-        else:           do_refresh  = src_mtime > cache_mtime
+        try:            cache_mtime   = getmtime(cache_path)
+        except OSError: ns.do_refresh = True
+        else:           ns.do_refresh = src_mtime > cache_mtime
 
-    if not do_refresh:
-        # try reading from the cache first
-        try: cfg = with_closing(file(cache_path))(lambda f: json.read(f.read()))
-        except: do_refresh = True
+    if not ns.do_refresh:
+        # Try reading from the cache first.  This must be transactionally
+        # isolated from concurrent writes to prevent reading an incomplete
+        # (changing) version of the data (but the transaction can share the
+        # lock with other concurrent reads).
+        @with_lock_file(lock_file, False)
+        def read_cache():
+            try: ns.cfg = with_closing(file(cache_path))(lambda f: json.read(f.read()))
+            except: ns.do_refresh = True
 
-    if do_refresh:
+    if ns.do_refresh:
         # Atomically reload the source and regenerate the cache.  The read and
         # write must be a single transaction, or a stale version may be
         # written.
-        @with_lock_file('/var/lib/invirt/cache.lock')
-        def cfg():
+        @with_lock_file(lock_file)
+        def refresh_cache():
             import yaml
             try:    default_loader = yaml.CSafeLoader
             except: default_loader = yaml.SafeLoader
-            cfg = with_closing(file(src_path))(lambda f: yaml.load(f, default_loader))
-            try: with_closing(file(cache_path, 'w'))(lambda f: f.write(json.write(cfg)))
+            ns.cfg = with_closing(file(src_path))(lambda f: yaml.load(f, default_loader))
+            try: with_closing(file(cache_path, 'w'))(lambda f: f.write(json.write(ns.cfg)))
             except: pass # silent failure
-            return cfg
-    return cfg
+    return ns.cfg
 
 dicts = load()
 structs = dicts2struct(dicts)
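
A short consumer-side sketch, using only the module-level names defined above
(load, dicts, structs); the actual attributes depend on whatever
/etc/invirt/master.yaml contains:

    import invirt.config

    # Struct-style view of the configuration, built once at import time
    # (reads the JSON cache under a shared lock when the cache is fresh).
    cfg = invirt.config.structs

    # Re-read the YAML source and regenerate the cache, bypassing the cached
    # copy; this path takes the exclusive lock.
    fresh_dicts = invirt.config.load(force_refresh = True)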