From e8be52a1ff49173033e2307dfef05964bed31954 Mon Sep 17 00:00:00 2001
From: NAKAMURA Gou
Date: Thu, 10 Mar 2016 20:45:57 +0900
Subject: [PATCH] shm: trace the amount of locked segment per user

---
 kernel/include/shm.h | 27 ++++++++++++++++++++++++
 kernel/shmobj.c      | 50 ++++++++++++++++++++++++++++++++++++++++++++
 kernel/syscall.c     | 32 ++++++++++++++++++++++++++--
 3 files changed, 107 insertions(+), 2 deletions(-)

diff --git a/kernel/include/shm.h b/kernel/include/shm.h
index 7532967a..95e81264 100644
--- a/kernel/include/shm.h
+++ b/kernel/include/shm.h
@@ -47,11 +47,14 @@ enum {
 	SHM_INFO = 14,
 };
 
+struct shmlock_user;
+
 struct shmobj {
 	struct memobj memobj;	/* must be first */
 	int index;
 	int pgshift;
 	size_t real_segsz;
+	struct shmlock_user * user;
 	struct shmid_ds ds;
 	struct list_head page_list;
 	struct list_head chain;	/* shmobj_list */
@@ -76,9 +79,33 @@ struct shm_info {
 	uint64_t swap_successes;
 };
 
+struct shmlock_user {
+	uid_t ruid;
+	int padding;
+	size_t locked;
+
+	struct list_head chain;
+};
+
+extern ihk_spinlock_t shmlock_users_lock_body;
+
+static inline void shmlock_users_lock(void)
+{
+	ihk_mc_spinlock_lock_noirq(&shmlock_users_lock_body);
+	return;
+}
+
+static inline void shmlock_users_unlock(void)
+{
+	ihk_mc_spinlock_unlock_noirq(&shmlock_users_lock_body);
+	return;
+}
+
 void shmobj_list_lock(void);
 void shmobj_list_unlock(void);
 int shmobj_create_indexed(struct shmid_ds *ds, struct shmobj **objp);
 void shmobj_destroy(struct shmobj *obj);
+void shmlock_user_free(struct shmlock_user *user);
+int shmlock_user_get(uid_t ruid, struct shmlock_user **userp);
 
 #endif /* HEADER_SHM_H */
diff --git a/kernel/shmobj.c b/kernel/shmobj.c
index 251cd76c..b49dd8f6 100644
--- a/kernel/shmobj.c
+++ b/kernel/shmobj.c
@@ -112,6 +112,43 @@ void shmobj_list_unlock(void)
 	return;
 }
 
+/***********************************************************************
+ * shmlock_users
+ */
+ihk_spinlock_t shmlock_users_lock_body = SPIN_LOCK_UNLOCKED;
+static LIST_HEAD(shmlock_users);
+
+void shmlock_user_free(struct shmlock_user *user)
+{
+	if (user->locked) {
+		panic("shmlock_user_free()");
+	}
+	list_del(&user->chain);
+	kfree(user);
+}
+
+int shmlock_user_get(uid_t ruid, struct shmlock_user **userp)
+{
+	struct shmlock_user *user;
+
+	list_for_each_entry(user, &shmlock_users, chain) {
+		if (user->ruid == ruid) {
+			break;
+		}
+	}
+	if (&user->chain == &shmlock_users) {
+		user = kmalloc(sizeof(*user), IHK_MC_AP_NOWAIT);
+		if (!user) {
+			return -ENOMEM;
+		}
+		user->ruid = ruid;
+		user->locked = 0;
+		list_add(&user->chain, &shmlock_users);
+	}
+	*userp = user;
+	return 0;
+}
+
 /***********************************************************************
  * operations
  */
@@ -172,8 +209,21 @@ void shmobj_destroy(struct shmobj *obj)
 	extern struct shm_info the_shm_info;
 	extern struct list_head kds_free_list;
 	extern int the_maxi;
+	struct shmlock_user *user;
+	size_t size;
 
 	dkprintf("shmobj_destroy(%p [%d %o])\n", obj, obj->index, obj->ds.shm_perm.mode);
+	if (obj->user) {
+		user = obj->user;
+		obj->user = NULL;
+		shmlock_users_lock();
+		size = (obj->ds.shm_segsz + PAGE_SIZE - 1) & PAGE_MASK;
+		user->locked -= size;
+		if (!user->locked) {
+			shmlock_user_free(user);
+		}
+		shmlock_users_unlock();
+	}
 	/* zap page_list */
 	for (;;) {
 		struct page *page;
diff --git a/kernel/syscall.c b/kernel/syscall.c
index d95a5e90..b70ec756 100644
--- a/kernel/syscall.c
+++ b/kernel/syscall.c
@@ -3783,6 +3783,9 @@ SYSCALL_DECLARE(shmctl)
 	int req;
 	int maxi;
 	struct shmobj *obj;
+	size_t size;
+	struct shmlock_user *user;
+	uid_t ruid = proc->ruid;
 
 	dkprintf("shmctl(%#x,%d,%p)\n", shmid, cmd, buf);
 	if (0) ;
@@ -3909,7 +3912,21 @@ SYSCALL_DECLARE(shmctl)
 			dkprintf("shmctl(%#x,%d,%p): lookup: %d\n", shmid, cmd, buf, error);
 			return error;
 		}
-		obj->ds.shm_perm.mode |= SHM_LOCKED;
+		if (!(obj->ds.shm_perm.mode & SHM_LOCKED)) {
+			shmlock_users_lock();
+			error = shmlock_user_get(ruid, &user);
+			if (error) {
+				shmlock_users_unlock();
+				shmobj_list_unlock();
+				ekprintf("shmctl(%#x,%d,%p): user lookup: %d\n", shmid, cmd, buf, error);
+				return -ENOMEM;
+			}
+			size = (obj->ds.shm_segsz + PAGE_SIZE - 1) & PAGE_MASK;
+			obj->ds.shm_perm.mode |= SHM_LOCKED;
+			obj->user = user;
+			user->locked += size;
+			shmlock_users_unlock();
+		}
 		shmobj_list_unlock();
 
 		dkprintf("shmctl(%#x,%d,%p): 0\n", shmid, cmd, buf);
@@ -3923,7 +3940,18 @@ SYSCALL_DECLARE(shmctl)
 			dkprintf("shmctl(%#x,%d,%p): lookup: %d\n", shmid, cmd, buf, error);
 			return error;
 		}
-		obj->ds.shm_perm.mode &= ~SHM_LOCKED;
+		if (obj->ds.shm_perm.mode & SHM_LOCKED) {
+			size = (obj->ds.shm_segsz + PAGE_SIZE - 1) & PAGE_MASK;
+			shmlock_users_lock();
+			user = obj->user;
+			obj->user = NULL;
+			user->locked -= size;
+			if (!user->locked) {
+				shmlock_user_free(user);
+			}
+			shmlock_users_unlock();
+			obj->ds.shm_perm.mode &= ~SHM_LOCKED;
+		}
 		shmobj_list_unlock();
 		dkprintf("shmctl(%#x,%d,%p): 0\n", shmid, cmd, buf);
 		return 0;
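
Note (not part of the patch): a minimal user-space sketch of the path this change accounts for. It assumes an ordinary SysV shm program on a 4 KiB page size; the key, segment size and permission bits are arbitrary illustration values. After SHM_LOCK, the caller's real uid should carry the page-rounded segment size in its shmlock_user entry; after SHM_UNLOCK, or destruction of a still-locked segment, the charge should drop back to zero and the entry be freed.

/*
 * Illustrative exercise of the SHM_LOCK/SHM_UNLOCK accounting added by
 * this patch.  Values below are arbitrary examples.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	size_t segsz = 5000;	/* deliberately not page aligned */
	int shmid;

	shmid = shmget(IPC_PRIVATE, segsz, IPC_CREAT | 0600);
	if (shmid < 0) {
		perror("shmget");
		return EXIT_FAILURE;
	}

	/* Kernel side: shmctl(SHM_LOCK) sets SHM_LOCKED and adds the
	 * page-rounded size (8192 here with 4 KiB pages) to the caller's
	 * shmlock_user entry via shmlock_user_get(). */
	if (shmctl(shmid, SHM_LOCK, NULL)) {
		perror("shmctl(SHM_LOCK)");
	}

	/* Locking twice must not double-charge: the patch only accounts the
	 * size when SHM_LOCKED was not already set. */
	(void)shmctl(shmid, SHM_LOCK, NULL);

	/* SHM_UNLOCK subtracts the same rounded size and frees the
	 * shmlock_user entry once its total reaches zero. */
	if (shmctl(shmid, SHM_UNLOCK, NULL)) {
		perror("shmctl(SHM_UNLOCK)");
	}

	/* Removing a segment that is still locked is also covered:
	 * shmobj_destroy() releases the charge when obj->user is set. */
	if (shmctl(shmid, IPC_RMID, NULL)) {
		perror("shmctl(IPC_RMID)");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}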