Let's step through a simple example: a cache of number to name mappings. The cache keeps a count of how often each of the objects is used, and when it gets full, throws out the least used one.
For our first example, we assume that all operations are in user context (i.e., from system calls), so we can sleep. This means we can use a semaphore to protect the cache and all the objects within it. Here's the code:
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/semaphore.h>
#include <asm/errno.h>

/* A cached id -> name mapping, linked on the global "cache" list. */
struct object
{
	struct list_head list;
	int id;
	char name[32];
	int popularity;		/* bumped on every lookup; lowest gets evicted */
};

/* Protects the cache, cache_num, and the objects within it */
static DECLARE_MUTEX(cache_lock);
static LIST_HEAD(cache);
static unsigned int cache_num = 0;
#define MAX_CACHE_SIZE 10

/* Look up an object by id, bumping its popularity count.
 * Returns the object, or NULL if @id is not cached.
 * Must be holding cache_lock. */
static struct object *__cache_find(int id)
{
	struct object *i;

	list_for_each_entry(i, &cache, list)
		if (i->id == id) {
			i->popularity++;
			return i;
		}
	return NULL;
}

/* Unlink and free @obj (must be non-NULL).  Must be holding cache_lock. */
static void __cache_delete(struct object *obj)
{
	BUG_ON(!obj);
	list_del(&obj->list);
	kfree(obj);
	cache_num--;
}

/* Insert @obj; if the cache is now over-full, evict the least
 * popular entry.  Must be holding cache_lock. */
static void __cache_add(struct object *obj)
{
	list_add(&obj->list, &cache);
	if (++cache_num > MAX_CACHE_SIZE) {
		struct object *i, *outcast = NULL;
		list_for_each_entry(i, &cache, list) {
			if (!outcast || i->popularity < outcast->popularity)
				outcast = i;
		}
		__cache_delete(outcast);
	}
}

/* Add a new id -> name mapping to the cache.
 * Returns 0 on success, -ENOMEM if allocation fails.
 * May sleep (GFP_KERNEL allocation, semaphore): user context only. */
int cache_add(int id, const char *name)
{
	struct object *obj;

	if ((obj = kmalloc(sizeof(*obj), GFP_KERNEL)) == NULL)
		return -ENOMEM;

	/* Safe to set up the fields before taking the lock: no-one
	 * else can see the object until it is on the list. */
	strlcpy(obj->name, name, sizeof(obj->name));
	obj->id = id;
	obj->popularity = 0;

	down(&cache_lock);
	__cache_add(obj);
	up(&cache_lock);
	return 0;
}

/* Remove the mapping for @id; does nothing if @id is not cached.
 * (Previously an unknown id reached __cache_delete() as NULL and
 * triggered BUG_ON.) */
void cache_delete(int id)
{
	struct object *obj;

	down(&cache_lock);
	obj = __cache_find(id);
	if (obj)
		__cache_delete(obj);
	up(&cache_lock);
}

/* Copy the name for @id into @name.
 * NOTE(review): assumes @name has room for at least sizeof(obj->name)
 * (32) bytes -- confirm against callers before relying on this.
 * Returns 0 on success, -ENOENT if @id is not cached. */
int cache_find(int id, char *name)
{
	struct object *obj;
	int ret = -ENOENT;

	down(&cache_lock);
	obj = __cache_find(id);
	if (obj) {
		ret = 0;
		strcpy(name, obj->name);
	}
	up(&cache_lock);
	return ret;
}
Note that we always make sure we have the cache_lock when we add, delete, or look up the cache: both the cache infrastructure itself and the contents of the objects are protected by the lock. In this case it's easy, since we copy the data for the user, and never let them access the objects directly.
There is a slight (and common) optimization here: in cache_add we set up the fields of the object before grabbing the lock. This is safe, as no-one else can access the object until we put it in the cache.