Avoid infinite loops in caching layer
Add a global variable, cgit_max_lock_attempts, to avoid the possibility of
infinite loops when failing to acquire a lockfile. This could happen on
broken setups or under extreme server load.
Incidentally, this also fixes a lurking bug in cache_lock() where an
uninitialized return value was used.
Signed-off-by: Lars Hjemli <hjemli@gmail.com>
diff --git a/cgit.c b/cgit.c
index 7f14016..dc91125 100644
--- a/cgit.c
+++ b/cgit.c
@@ -31,6 +31,7 @@
char *cgit_cache_root = "/var/cache/cgit";
+int cgit_max_lock_attempts = 5;
int cgit_cache_root_ttl = 5;
int cgit_cache_repo_ttl = 5;
int cgit_cache_dynamic_ttl = 5;
@@ -465,11 +466,17 @@
static void cgit_refresh_cache(struct cacheitem *item)
{
+ int i = 0;
+
cache_prepare(item);
top:
+ if (++i > cgit_max_lock_attempts) {
+ die("cgit_refresh_cache: unable to lock %s: %s",
+ item->name, strerror(errno));
+ }
if (!cache_exist(item)) {
if (!cache_lock(item)) {
- sched_yield();
+ sleep(1);
goto top;
}
if (!cache_exist(item))
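
For reference, a minimal standalone sketch of the bounded lockfile retry
pattern this patch introduces. It is not part of the patch itself: the
lockfile path, the try_lock() helper and the local max_attempts value are
illustrative assumptions standing in for cgit's cache_lock() and the new
cgit_max_lock_attempts global.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int try_lock(const char *path)
{
	/* O_CREAT|O_EXCL fails with EEXIST if another process holds the lock */
	int fd = open(path, O_WRONLY | O_CREAT | O_EXCL, 0644);
	if (fd < 0)
		return 0;
	close(fd);
	return 1;
}

int main(void)
{
	const char *lockfile = "/tmp/example.lock";  /* hypothetical path */
	int max_attempts = 5;      /* mirrors cgit_max_lock_attempts */
	int i = 0;

	while (!try_lock(lockfile)) {
		/* give up after a fixed number of attempts instead of looping forever */
		if (++i >= max_attempts) {
			fprintf(stderr, "unable to lock %s: %s\n",
				lockfile, strerror(errno));
			exit(1);
		}
		sleep(1);  /* back off instead of sched_yield(), as in the patch */
	}
	printf("lock acquired after %d retries\n", i);
	unlink(lockfile);  /* release the lock */
	return 0;
}

The key point the sketch shares with cgit_refresh_cache() is that each failed
lock attempt increments a counter and the process dies once the limit is
exceeded, so a stale or unremovable lockfile can no longer hang the request.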