ihk_os_getrusage(): Add per-page-size memory usage accounting

Masamichi Takagi
2017-08-04 12:00:31 +09:00
parent 184c2d311c
commit 74f15783d2
22 changed files with 572 additions and 130 deletions

View File

@@ -37,6 +37,9 @@ enum {
MF_REG_FILE = 0x1000,
MF_DEV_FILE = 0x2000,
MF_PREMAP = 0x8000,
MF_XPMEM = 0x10000, /* To identify XPMEM attachment pages for rusage accounting */
MF_ZEROOBJ = 0x20000, /* To identify pages of anonymous, on-demand paging ranges for rusage accounting */
MF_SHM = 0x40000,
MF_HOST_RELEASED = 0x80000000,
MF_END
};
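
The two identification flags added here exist so the rusage code can classify a mapping by looking only at memobj->flags. A hedged sketch of the kind of test they enable (the helper name is hypothetical, not part of the commit; the real decision tree is rusage_memory_stat_add() in rusage.h, further down in this commit):

/* Hypothetical helper: should pages of this memory object be charged as
 * anonymous RSS rather than as a mapped file?  XPMEM attachments map
 * another process's pages and are not charged here at all. */
static inline int memobj_counts_as_anon_rss(unsigned long flags)
{
	if (flags & MF_XPMEM)
		return 0;
	return (flags & (MF_ZEROOBJ | MF_SHM)) != 0;
}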

View File

@@ -21,6 +21,7 @@ struct page {
uint8_t mode;
uint64_t phys;
ihk_atomic_t count;
ihk_atomic64_t mapped;
off_t offset;
};
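
The new mapped counter acts as a once-only gate for sharable pages: whichever mapper atomically flips it from 0 to 1 gets to account the page, so a page mapped into several address spaces is charged only once. A minimal restatement of that gate, using the same builtin the commit uses later in rusage.h (the function name is illustrative):

/* Returns non-zero for exactly one caller per page: the one whose
 * compare-and-swap moves mapped from 0 to 1. */
static inline int rusage_page_first_map(struct page *page)
{
	return __sync_bool_compare_and_swap(&page->mapped.counter64, 0, 1);
}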

View File

@@ -2,7 +2,10 @@
#define __RUSAGE_H
#include <config.h>
#include <page.h>
#include <ihk/rusage.h>
#include <ihk/atomic.h>
#include <memobj.h>
#ifdef ENABLE_RUSAGE
#define RUSAGE_MEM_LIMIT (2 * 1024 * 1024) // 2MB
@@ -23,9 +26,9 @@ rusage_rss_add(unsigned long size)
unsigned long retval;
newval = __sync_add_and_fetch(&monitor->rusage_rss_current, size);
- oldval = monitor->rusage_rss_max;
+ oldval = monitor->rusage_memory_max_usage;
while (newval > oldval) {
- retval = __sync_val_compare_and_swap(&monitor->rusage_rss_max,
+ retval = __sync_val_compare_and_swap(&monitor->rusage_memory_max_usage,
oldval, newval);
if (retval == oldval) {
break;
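
The loop above is the usual lock-free running-maximum idiom. Restated as a self-contained sketch (not the committed code, but the same builtin):

/* Atomically raise *max to newval if newval is larger; retry if another
 * CPU updated the maximum concurrently. */
static void update_max(unsigned long *max, unsigned long newval)
{
	unsigned long oldval = *max;

	while (newval > oldval) {
		unsigned long retval =
			__sync_val_compare_and_swap(max, oldval, newval);
		if (retval == oldval)
			break;		/* we installed newval */
		oldval = retval;	/* lost the race; re-check */
	}
}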
@@ -40,6 +43,88 @@ rusage_rss_sub(unsigned long size)
__sync_sub_and_fetch(&monitor->rusage_rss_current, size);
}
static inline void memory_stat_rss_add(unsigned long size, int pgsize)
{
ihk_atomic_add_long(size, &monitor->rusage_memory_stat_rss[rusage_pgsize_to_pgtype(pgsize)]);
}
static inline void memory_stat_rss_sub(unsigned long size, int pgsize)
{
ihk_atomic_add_long(-size, &monitor->rusage_memory_stat_rss[rusage_pgsize_to_pgtype(pgsize)]);
}
static inline void rusage_memory_stat_mapped_file_add(unsigned long size, int pgsize)
{
ihk_atomic_add_long(size, &monitor->rusage_memory_stat_mapped_file[rusage_pgsize_to_pgtype(pgsize)]);
}
static inline void rusage_memory_stat_mapped_file_sub(unsigned long size, int pgsize)
{
ihk_atomic_add_long(-size, &monitor->rusage_memory_stat_mapped_file[rusage_pgsize_to_pgtype(pgsize)]);
}
static inline int rusage_memory_stat_add(struct vm_range *range, uintptr_t phys, unsigned long size, int pgsize)
{
/* Is it resident in main memory? */
if (range->flag & (VR_REMOTE | VR_IO_NOCACHE | VR_RESERVED)) {
return 0;
}
/* Is it anonymous and pre-paging? */
if (!range->memobj) {
memory_stat_rss_add(size, pgsize);
return 1;
}
/* Is it devobj or (fileobj and pre-map) or xpmem attachment? */
if ((range->memobj->flags & MF_DEV_FILE) ||
(range->memobj->flags & MF_PREMAP) ||
(range->memobj->flags & MF_XPMEM)
) {
return 0;
}
/* Is it anonymous and demand-paging? */
if (range->memobj->flags & MF_ZEROOBJ) {
memory_stat_rss_add(size, pgsize);
return 1;
}
struct page *page = phys_to_page(phys);
/* Is it a file map with a copy-on-write (CoW) page? */
if ((range->memobj->flags & (MF_DEV_FILE | MF_REG_FILE)) &&
!page) {
//kprintf("%s: cow,phys=%lx\n", __FUNCTION__, phys);
memory_stat_rss_add(size, pgsize);
return 1;
}
/* Is it a sharable page? */
if (!page) {
kprintf("%s: WARNING !page,phys=%lx\n", __FUNCTION__, phys);
return 0;
}
/* Is this the first attempt to map the sharable page? */
if(__sync_bool_compare_and_swap(&page->mapped.counter64, 0, 1)) {
if(range->memobj->flags & MF_SHM) {
memory_stat_rss_add(size, pgsize);
} else {
rusage_memory_stat_mapped_file_add(size, pgsize);
}
return 1;
} else {
return 0;
}
return 0;
}
static inline void rusage_memory_stat_sub(struct memobj *memobj, unsigned long size, int pgsize)
{
if(memobj->flags & MF_SHM) {
memory_stat_rss_sub(size, pgsize);
} else {
rusage_memory_stat_mapped_file_sub(size, pgsize);
}
}
static inline void
rusage_kmem_add(unsigned long size)
{
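
The call sites of rusage_memory_stat_add()/rusage_memory_stat_sub() live in the page-table and memory-range code, not in this header. As a hedged illustration of how the pair is meant to be used (function names and arguments below are made up for the example):

/* Illustrative only -- not the commit's actual call sites. */
static void example_map_one_page(struct vm_range *range, uintptr_t phys,
				 size_t pgsize)
{
	/* Charges RSS or mapped_file for this page when it qualifies;
	 * returns non-zero if a charge was actually taken. */
	int charged = rusage_memory_stat_add(range, phys, pgsize, pgsize);

	(void)charged;	/* a real caller would remember this for unmap time */
}

static void example_unmap_one_page(struct memobj *obj, size_t pgsize)
{
	/* Releases the charge taken at map time. */
	rusage_memory_stat_sub(obj, pgsize, pgsize);
}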
@@ -47,11 +132,11 @@ rusage_kmem_add(unsigned long size)
unsigned long oldval;
unsigned long retval;
- newval = __sync_add_and_fetch(&monitor->rusage_kmem_usage, size);
- oldval = monitor->rusage_kmem_max_usage;
+ newval = __sync_add_and_fetch(&monitor->rusage_memory_kmem_usage, size);
+ oldval = monitor->rusage_memory_kmem_max_usage;
while (newval > oldval) {
retval = __sync_val_compare_and_swap(
- &monitor->rusage_kmem_max_usage,
+ &monitor->rusage_memory_kmem_max_usage,
oldval, newval);
if (retval == oldval) {
break;
@@ -63,13 +148,13 @@ rusage_kmem_add(unsigned long size)
static inline void
rusage_kmem_sub(unsigned long size)
{
- __sync_sub_and_fetch(&monitor->rusage_kmem_usage, size);
+ __sync_sub_and_fetch(&monitor->rusage_memory_kmem_usage, size);
}
static inline void
rusage_numa_add(int numa_id, unsigned long size)
{
- __sync_add_and_fetch(monitor->rusage_numa_stat + numa_id, size);
+ __sync_add_and_fetch(monitor->rusage_memory_numa_stat + numa_id, size);
rusage_rss_add(size);
}
@@ -77,7 +162,7 @@ static inline void
rusage_numa_sub(int numa_id, unsigned long size)
{
rusage_rss_sub(size);
- __sync_sub_and_fetch(monitor->rusage_numa_stat + numa_id, size);
+ __sync_sub_and_fetch(monitor->rusage_memory_numa_stat + numa_id, size);
}
static inline void
@@ -163,6 +248,31 @@ rusage_rss_sub(unsigned long size)
{
}
static inline void memory_stat_rss_add(unsigned long size, size_t pgsize)
{
}
static inline void memory_stat_rss_sub(unsigned long size, size_t pgsize)
{
}
static inline void rusage_memory_stat_mapped_file_add(unsigned long size, int pgsize)
{
}
static inline void rusage_memory_stat_mapped_file_sub(unsigned long size, int pgsize)
{
}
static inline int rusage_memory_stat_add(struct vm_range *range, uintptr_t phys, unsigned long size, int pgsize)
{
return 0;
}
static inline void rusage_memory_stat_sub(struct memobj *memobj, unsigned long size, int pgsize)
{
}
static inline void
rusage_numa_add(int numa_id, unsigned long size)
{

View File

@@ -32,7 +32,7 @@ enum {
SHM_EXEC = 0100000,
/* for shm_mode */
- SHM_DEST = 01000,
+ SHM_DEST = 01000, /* Marked for destruction */
SHM_LOCKED = 02000,
/* for cmd of shmctl() */
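
SHM_DEST follows the usual System V semantics: shmctl(IPC_RMID) marks the segment, and the segment is actually torn down once its last attachment is detached. A trivial illustrative check (helper name made up):

/* A segment whose shm_mode carries SHM_DEST is destroyed after the last
 * detach rather than immediately. */
static inline int shmseg_pending_destroy(int shm_mode)
{
	return (shm_mode & SHM_DEST) != 0;
}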

View File

@@ -318,6 +318,8 @@ static inline struct xpmem_thread_group *__xpmem_tg_ref_by_tgid(
tgid, return_destroying);
index = xpmem_tg_hashtable_index(tgid);
XPMEM_DEBUG("xpmem_my_part=%p\n", xpmem_my_part);
XPMEM_DEBUG("xpmem_my_part->tg_hashtable=%p\n", xpmem_my_part->tg_hashtable);
mcs_rwlock_reader_lock(&xpmem_my_part->tg_hashtable[index].lock, &lock);
tg = __xpmem_tg_ref_by_tgid_nolock_internal(tgid, index,
return_destroying);