[Tux3] Patch: Convert log mutex to spinlock
Daniel Phillips
phillips at phunq.net
Tue Jan 6 19:19:23 PST 2009
Hi,
This completely untested patch converts the log mutex to a spinlock.
This is just for fun, we don't really need to be optimizing at this
level right now. The mutex does the same job in 17 fewer lines of
code, and is much less likely to be buggy.
As for performance, there is not a lot of difference when uncontended.
When contended, the mutex goes through sleep/wake, which is far more
expensive than just spinning while some other CPU finishes its log
entry. When contended on the buffer advance, which is quite likely,
the spinlock version will tend to have multiple tasks falling into the
blockget at the same time and landing on the page lock, where some of
them may sleep, so spinlock does not have an enormous advantage in
that case. The efficiency equation therefore comes down to whether
contention will be high on logging operations. If it is high, then
spinlock is a clear win, otherwise the CPU time saved over the
lifetime of the filesystem may never exceed the time I spent writing
the code and writing a post about it, and the time that hopefully
someone will spend trying it out.
Anyway, the point of this is that solving yet another spinlock design
puzzle was fun, and if it actually creates a measurable performance
boost, that's a bonus.
diff -r 68f754221c50 user/kernel/commit.c
--- a/user/kernel/commit.c Tue Jan 06 14:09:37 2009 -0800
+++ b/user/kernel/commit.c Tue Jan 06 17:56:49 2009 -0800
@@ -59,11 +59,16 @@ enum { LOG_ALLOC, LOG_FREE, LOG_UPDATE }
struct commit_entry { be_u64 previous; };
+void log_start(struct sb *sb)
+{
+ sb->logpos = bufdata(sb->logbuf) + sizeof(struct logblock);
+ sb->logtop = bufdata(sb->logbuf) + sb->blocksize;
+}
+
void log_next(struct sb *sb)
{
sb->logbuf = blockget(mapping(sb->logmap), sb->lognext++);
- sb->logpos = bufdata(sb->logbuf) + sizeof(struct logblock);
- sb->logtop = bufdata(sb->logbuf) + sb->blocksize;
+ log_start(sb);
}
void log_finish(struct sb *sb)
@@ -78,11 +83,23 @@ void log_finish(struct sb *sb)
void *log_begin(struct sb *sb, unsigned bytes)
{
- mutex_lock(&sb->loglock);
+ spin_lock(&sb->loglock);
if (sb->logpos + bytes > sb->logtop) {
+ struct buffer_head *logbuf;
+ while (1) {
+ unsigned lognext = sb->lognext;
+ spin_unlock(&sb->loglock);
+ logbuf = blockget(mapping(sb->logmap), sb->lognext);
+ spin_lock(&sb->loglock);
+ if (lognext == sb->lognext)
+ break;
+ brelse(logbuf);
+ }
if (sb->logbuf)
log_finish(sb);
- log_next(sb);
+ sb->logbuf = logbuf;
+ sb->lognext++;
+ log_start(sb);
*(struct logblock *)bufdata(sb->logbuf) = (struct logblock){
.magic = to_be_u16(0xc0de) };
}
@@ -92,7 +109,7 @@ void log_end(struct sb *sb, void *pos)
void log_end(struct sb *sb, void *pos)
{
sb->logpos = pos;
- mutex_unlock(&sb->loglock);
+ spin_unlock(&sb->loglock);
}
void log_alloc(struct sb *sb, block_t block, unsigned count, unsigned alloc)
diff -r 68f754221c50 user/kernel/super.c
--- a/user/kernel/super.c Tue Jan 06 14:09:37 2009 -0800
+++ b/user/kernel/super.c Tue Jan 06 17:56:49 2009 -0800
@@ -156,7 +156,7 @@ static int tux3_fill_super(struct super_
sb->s_op = &tux3_super_ops;
sb->s_time_gran = 1;
- mutex_init(&sbi->loglock);
+ spin_lock_init(&sbi->loglock);
err = -EIO;
blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
diff -r 68f754221c50 user/kernel/tux3.h
--- a/user/kernel/tux3.h Tue Jan 06 14:09:37 2009 -0800
+++ b/user/kernel/tux3.h Tue Jan 06 17:56:49 2009 -0800
@@ -230,7 +230,7 @@ struct sb {
unsigned lognext; /* Index of next log block in log map */
struct buffer_head *logbuf; /* Cached log block */
unsigned char *logpos, *logtop; /* Where to emit next log entry */
- struct mutex loglock; /* serialize log entries (spinlock me) */
+ spinlock_t loglock; /* atomic log entries */
#ifdef __KERNEL__
struct super_block *vfs_sb; /* Generic kernel superblock */
#else
diff -r 68f754221c50 user/tux3.h
--- a/user/tux3.h Tue Jan 06 14:09:37 2009 -0800
+++ b/user/tux3.h Tue Jan 06 17:56:49 2009 -0800
@@ -69,6 +69,11 @@ struct mutex { };
static inline void mutex_lock(struct mutex *mutex) { };
static inline void mutex_unlock(struct mutex *mutex) { };
+
+typedef struct { } spinlock_t;
+
+static inline void spin_lock(spinlock_t *lock) { };
+static inline void spin_unlock(spinlock_t *lock) { };
/* Bitmaps */
_______________________________________________
Tux3 mailing list
Tux3 at tux3.org
http://mailman.tux3.org/cgi-bin/mailman/listinfo/tux3
More information about the Tux3
mailing list