VM: use RW spinlock for vm_range_lock

Change-Id: Id4654084207d55bf77cc9f8b42795e0f9873cfa0
Author:    Balazs Gerofi
Date:      2020-04-30 13:32:32 +09:00
Committer: Masamichi Takagi
Commit:    d5aafca1ae (parent 54b529c82d)

7 changed files with 94 additions and 94 deletions
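
The change converts memory_range_lock, which protects each process' VM range tree, from an exclusive spinlock into a reader/writer spinlock: read-mostly paths such as page fault handling and fork-time range copying now take the lock in read mode and can run concurrently on several CPUs, while paths that modify or tear down the range tree (flush_process_memory, free_process_memory_ranges, free_all_process_memory_range) take it in write mode. Below is a minimal sketch of that pattern, not McKernel code: the ihk_rwspinlock_* init/lock/unlock calls are the ones appearing in the diff, while the ihk_rwspinlock_t type name and every other identifier are assumptions made for illustration only.

/* Sketch only: ihk_rwspinlock_t and the vm_sketch helpers are assumed
 * names; the init and read/write lock calls match those used in the
 * diff below. */
struct vm_sketch {
	ihk_rwspinlock_t memory_range_lock;	/* guards the fields below */
	unsigned long range_start;
	unsigned long range_end;
};

static void vm_sketch_init(struct vm_sketch *vm)
{
	ihk_rwspinlock_init(&vm->memory_range_lock);
	vm->range_start = 0;
	vm->range_end = 0;
}

/* Read-mostly path (page fault, fork-time copy): several CPUs may hold
 * the lock in read mode at the same time. */
static int vm_sketch_contains(struct vm_sketch *vm, unsigned long addr)
{
	int hit;

	ihk_rwspinlock_read_lock_noirq(&vm->memory_range_lock);
	hit = (addr >= vm->range_start && addr < vm->range_end);
	ihk_rwspinlock_read_unlock_noirq(&vm->memory_range_lock);

	return hit;
}

/* Modifying path (unmap, process teardown): the write lock excludes
 * both readers and other writers while the ranges change. */
static void vm_sketch_clear(struct vm_sketch *vm)
{
	ihk_rwspinlock_write_lock_noirq(&vm->memory_range_lock);
	vm->range_start = 0;
	vm->range_end = 0;
	ihk_rwspinlock_write_unlock_noirq(&vm->memory_range_lock);
}

Note that the page-fault hunks keep the existing is_memory_range_lock_taken check, so a fault raised while the same CPU already holds the lock still skips re-acquiring it.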


@@ -243,7 +243,7 @@ static int
 init_process_vm(struct process *owner, struct address_space *asp, struct process_vm *vm)
 {
 	int i;
-	ihk_mc_spinlock_init(&vm->memory_range_lock);
+	ihk_rwspinlock_init(&vm->memory_range_lock);
 	ihk_mc_spinlock_init(&vm->page_table_lock);
 	ihk_atomic_set(&vm->refcount, 1);
@@ -792,7 +792,7 @@ static int copy_user_ranges(struct process_vm *vm, struct process_vm *orgvm)
 	struct vm_range *last_insert;
 	struct copy_args args;
-	ihk_mc_spinlock_lock_noirq(&orgvm->memory_range_lock);
+	ihk_rwspinlock_read_lock_noirq(&orgvm->memory_range_lock);
 	/* Iterate original process' vm_range list and take a copy one-by-one */
 	last_insert = NULL;
@@ -854,7 +854,7 @@ static int copy_user_ranges(struct process_vm *vm, struct process_vm *orgvm)
 		// memory_stat_rss_add() is called in child-node, i.e. copy_user_pte()
 	}
-	ihk_mc_spinlock_unlock_noirq(&orgvm->memory_range_lock);
+	ihk_rwspinlock_read_unlock_noirq(&orgvm->memory_range_lock);
 	return 0;
@@ -881,7 +881,7 @@ err_rollback:
 		}
 	}
-	ihk_mc_spinlock_unlock_noirq(&orgvm->memory_range_lock);
+	ihk_rwspinlock_read_unlock_noirq(&orgvm->memory_range_lock);
 	return -1;
 }
@@ -2190,7 +2190,7 @@ static int do_page_fault_process_vm(struct process_vm *vm, void *fault_addr0, ui
 	if (thread->vm->is_memory_range_lock_taken == -1 ||
 	    thread->vm->is_memory_range_lock_taken != ihk_mc_get_processor_id()) {
-		ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
+		ihk_rwspinlock_read_lock_noirq(&vm->memory_range_lock);
 		locked = 1;
 	} else {
 		dkprintf("%s: INFO: skip locking of memory_range_lock,pid=%d,tid=%d\n",
@@ -2305,7 +2305,7 @@ static int do_page_fault_process_vm(struct process_vm *vm, void *fault_addr0, ui
 	error = 0;
 out:
 	if (locked) {
-		ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
+		ihk_rwspinlock_read_unlock_noirq(&vm->memory_range_lock);
 	}
 	dkprintf("[%d]do_page_fault_process_vm(%p,%lx,%lx): %d\n",
 		ihk_mc_get_processor_id(), vm, fault_addr0,
@@ -2609,7 +2609,7 @@ void flush_process_memory(struct process_vm *vm)
 	int error;
 	dkprintf("flush_process_memory(%p)\n", vm);
-	ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
+	ihk_rwspinlock_write_lock_noirq(&vm->memory_range_lock);
 	/* Let concurrent page faults know the VM will be gone */
 	vm->exiting = 1;
 	while ((node = next)) {
@@ -2627,7 +2627,7 @@ void flush_process_memory(struct process_vm *vm)
 			}
 		}
 	}
-	ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
+	ihk_rwspinlock_write_unlock_noirq(&vm->memory_range_lock);
 	dkprintf("flush_process_memory(%p):\n", vm);
 	return;
 }
@@ -2642,7 +2642,7 @@ void free_process_memory_ranges(struct process_vm *vm)
 		return;
 	}
-	ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
+	ihk_rwspinlock_write_lock_noirq(&vm->memory_range_lock);
 	while ((node = next)) {
 		range = rb_entry(node, struct vm_range, vm_rb_node);
 		next = rb_next(node);
@@ -2655,7 +2655,7 @@ void free_process_memory_ranges(struct process_vm *vm)
 			/* through */
 		}
 	}
-	ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
+	ihk_rwspinlock_write_unlock_noirq(&vm->memory_range_lock);
 }
 static void free_thread_pages(struct thread *thread)
@@ -2745,7 +2745,7 @@ free_all_process_memory_range(struct process_vm *vm)
 	struct rb_node *node, *next = rb_first(&vm->vm_range_tree);
 	int error;
-	ihk_mc_spinlock_lock_noirq(&vm->memory_range_lock);
+	ihk_rwspinlock_write_lock_noirq(&vm->memory_range_lock);
 	while ((node = next)) {
 		range = rb_entry(node, struct vm_range, vm_rb_node);
 		next = rb_next(node);
@@ -2758,7 +2758,7 @@ free_all_process_memory_range(struct process_vm *vm)
 			/* through */
 		}
 	}
-	ihk_mc_spinlock_unlock_noirq(&vm->memory_range_lock);
+	ihk_rwspinlock_write_unlock_noirq(&vm->memory_range_lock);
 }
 void
@@ -3189,7 +3189,7 @@ void sched_init(void)
 		&idle_thread->proc->children_list);
 	ihk_mc_init_context(&idle_thread->ctx, NULL, idle);
-	ihk_mc_spinlock_init(&idle_thread->vm->memory_range_lock);
+	ihk_rwspinlock_init(&idle_thread->vm->memory_range_lock);
 	idle_thread->vm->vm_range_tree = RB_ROOT;
 	idle_thread->vm->vm_range_numa_policy_tree = RB_ROOT;
 	idle_thread->proc->pid = 0;