/*
 * Background (change I6bc8438a98b1d8edddc91c4ac33c11b88e097ebb):
 * We had a deadlock between:
 *  - free_process_memory_range (take lock) -> ihk_mc_pt_free_range -> ...
 *    -> remote_flush_tlb_array_cpumask -> "wait for all cores", and
 *  - obj_list_lookup() under fileobj_list_lock, which disabled irqs and
 *    thus never ack'd the remote flush.
 * The rework removes the need for the big lock; devobj and shmobj gained
 * smaller, more locally-used locks instead. Moving refcounting to the
 * memobj level also let the per-object-type refcounting be removed.
 */
/* memobj.h COPYRIGHT FUJITSU LIMITED 2015-2016 */
/**
 * \file memobj.h
 *  License details are found in the file LICENSE.
 * \brief
 *  defines and declares for memory object
 * \author Gou Nakamura <go.nakamura.yw@hitachi-solutions.com> \par
 *  Copyright (C) 2013 Hitachi, Ltd.
 */
/*
 * HISTORY:
 */

#ifndef HEADER_MEMOBJ_H
#define HEADER_MEMOBJ_H

#include <ihk/types.h>
#include <ihk/atomic.h>
#include <ihk/lock.h>
#include <errno.h>
#include <list.h>

#ifdef POSTK_DEBUG_ARCH_DEP_18 /* coredump arch separation. */
#else /* POSTK_DEBUG_ARCH_DEP_18 */
/*
 * Fallback POSIX-style typedefs for builds where the arch-dependent
 * headers do not provide them (mirrors a subset of types.h).
 */
/* begin types.h */
typedef int32_t key_t;
typedef uint32_t uid_t;
typedef uint32_t gid_t;
typedef int64_t time_t;
typedef int32_t pid_t;
/* end types.h */
#endif /* POSTK_DEBUG_ARCH_DEP_18 */

enum {
	/* for memobj.flags */
	MF_HAS_PAGER     = 0x0001,
	MF_SHMDT_OK      = 0x0002,
	MF_IS_REMOVABLE  = 0x0004,
	MF_PREFETCH      = 0x0008,
	MF_ZEROFILL      = 0x0010,
	MF_REG_FILE      = 0x1000,
	MF_DEV_FILE      = 0x2000,
	MF_PREMAP        = 0x8000,
	MF_XPMEM         = 0x10000, /* To identify XPMEM attachment pages for rusage accounting */
	MF_ZEROOBJ       = 0x20000, /* To identify pages of anonymous, on-demand paging ranges for rusage accounting */
	MF_SHM           = 0x40000,
	MF_END
};

/* Values for memobj.status */
#define MEMOBJ_READY			0
#define MEMOBJ_TO_BE_PREFETCHED	1

struct memobj {
|
|
struct memobj_ops *ops;
|
|
uint32_t flags;
|
|
uint32_t status;
|
|
size_t size;
|
|
ihk_atomic_t refcnt;
|
|
|
|
/* For pre-mapped memobjects */
|
|
void **pages;
|
|
int nr_pages;
|
|
char *path;
|
|
};
|
|
|
|
/* Operation signatures implemented by each memory object type. */
typedef void memobj_free_func_t(struct memobj *obj);
typedef int memobj_get_page_func_t(struct memobj *obj, off_t off, int p2align, uintptr_t *physp, unsigned long *flag, uintptr_t virt_addr);
typedef uintptr_t memobj_copy_page_func_t(struct memobj *obj, uintptr_t orgphys, int p2align);
typedef int memobj_flush_page_func_t(struct memobj *obj, uintptr_t phys, size_t pgsize);
typedef int memobj_invalidate_page_func_t(struct memobj *obj, uintptr_t phys, size_t pgsize);
typedef int memobj_lookup_page_func_t(struct memobj *obj, off_t off, int p2align, uintptr_t *physp, unsigned long *flag);

struct memobj_ops {
|
|
memobj_free_func_t *free;
|
|
memobj_get_page_func_t *get_page;
|
|
memobj_copy_page_func_t *copy_page;
|
|
memobj_flush_page_func_t *flush_page;
|
|
memobj_invalidate_page_func_t *invalidate_page;
|
|
memobj_lookup_page_func_t *lookup_page;
|
|
};
|
|
|
|
static inline int memobj_ref(struct memobj *obj)
|
|
{
|
|
return ihk_atomic_inc_return(&obj->refcnt);
|
|
}
|
|
|
|
static inline void memobj_unref(struct memobj *obj)
|
|
{
|
|
if (ihk_atomic_dec_return(&obj->refcnt) == 0) {
|
|
(*obj->ops->free)(obj);
|
|
}
|
|
}
|
|
|
|
/*
 * Resolve (and possibly fault in) the page backing @off in @obj.
 * Delegates to the type-specific get_page op; returns -ENXIO when the
 * object type does not provide one.
 */
static inline int memobj_get_page(struct memobj *obj, off_t off,
		int p2align, uintptr_t *physp, unsigned long *pflag, uintptr_t virt_addr)
{
	if (obj->ops->get_page) {
		return (*obj->ops->get_page)(obj, off, p2align, physp, pflag, virt_addr);
	}
	return -ENXIO;
}

/*
 * Copy the page at @orgphys (e.g. for copy-on-write) via the
 * type-specific copy_page op.
 *
 * NOTE(review): on failure this returns -ENXIO converted to uintptr_t;
 * callers presumably detect errors via the IS_ERR-style negative-value
 * convention -- confirm against call sites.
 */
static inline uintptr_t memobj_copy_page(struct memobj *obj,
		uintptr_t orgphys, int p2align)
{
	if (obj->ops->copy_page) {
		return (*obj->ops->copy_page)(obj, orgphys, p2align);
	}
	return -ENXIO;
}

/*
 * Flush the page at @phys back to the object's backing store, if the
 * object type supports it. A missing op is not an error (returns 0):
 * objects without a flush_page op have nothing to write back.
 */
static inline int memobj_flush_page(struct memobj *obj, uintptr_t phys, size_t pgsize)
{
	if (obj->ops->flush_page) {
		return (*obj->ops->flush_page)(obj, phys, pgsize);
	}
	return 0;
}

static inline int memobj_invalidate_page(struct memobj *obj, uintptr_t phys,
|
|
size_t pgsize)
|
|
{
|
|
if (obj->ops->invalidate_page) {
|
|
return (*obj->ops->invalidate_page)(obj, phys, pgsize);
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Look up (without faulting in) the page backing @off in @obj.
 * Returns -ENXIO when the object type provides no lookup_page op.
 */
static inline int memobj_lookup_page(struct memobj *obj, off_t off,
		int p2align, uintptr_t *physp, unsigned long *pflag)
{
	if (obj->ops->lookup_page) {
		return (*obj->ops->lookup_page)(obj, off, p2align, physp, pflag);
	}
	return -ENXIO;
}

static inline int memobj_has_pager(struct memobj *obj)
|
|
{
|
|
return !!(obj->flags & MF_HAS_PAGER);
|
|
}
|
|
|
|
static inline int memobj_is_removable(struct memobj *obj)
|
|
{
|
|
return !!(obj->flags & MF_IS_REMOVABLE);
|
|
}
|
|
|
|
int fileobj_create(int fd, struct memobj **objp, int *maxprotp, uintptr_t virt_addr);
|
|
struct shmid_ds;
|
|
int shmobj_create(struct shmid_ds *ds, struct memobj **objp);
|
|
int zeroobj_create(struct memobj **objp);
|
|
int devobj_create(int fd, size_t len, off_t off, struct memobj **objp, int *maxprotp,
|
|
int prot, int populate_flags);
|
|
|
|
#endif /* HEADER_MEMOBJ_H */
|