remote_flush_tlb_array_cpumask(): bundle remote TLB invalidations
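Summary (as read from the diff below): instead of issuing one remote TLB-shootdown per cleared PTE via remote_flush_tlb_cpumask(), clear_range() now records the virtual addresses in a page-backed array inside struct clear_range_args, a new helper remote_flush_tlb_add_addr() sends the batch only when the array fills up, and the new remote_flush_tlb_array_cpumask() / tlb_flush_handler() walk the whole address array on each target core. As a reading aid, here is a standalone toy sketch of that batching pattern; it is not part of the commit, and flush_batch_send()/flush_batch_add()/BATCH_CAPACITY are illustrative stand-ins for remote_flush_tlb_array_cpumask(), remote_flush_tlb_add_addr() and the TLB_INVALID_ARRAY_PAGES-sized array:

#include <stdio.h>

#define BATCH_CAPACITY 8	/* stand-in for the page-sized address array */

struct flush_batch {
	unsigned long addr[BATCH_CAPACITY];
	int nr_addr;
};

/* Stand-in for remote_flush_tlb_array_cpumask(): one "IPI" per batch. */
static void flush_batch_send(struct flush_batch *b)
{
	printf("flush IPI covering %d address(es)\n", b->nr_addr);
	b->nr_addr = 0;
}

/* Same shape as remote_flush_tlb_add_addr() in the diff: buffer the
 * address and only send a flush once the buffer is full. */
static void flush_batch_add(struct flush_batch *b, unsigned long addr)
{
	if (b->nr_addr < BATCH_CAPACITY) {
		b->addr[b->nr_addr++] = addr;
		return;
	}
	flush_batch_send(b);
	b->addr[0] = addr;
	b->nr_addr = 1;
}

int main(void)
{
	struct flush_batch b = { .nr_addr = 0 };
	unsigned long page;

	/* Clearing 20 pages costs three batched flushes (8 + 8 + 4)
	 * instead of 20 individual shootdowns. */
	for (page = 0; page < 20; ++page)
		flush_batch_add(&b, page << 12);

	if (b.nr_addr)
		flush_batch_send(&b);	/* flush the remainder, as clear_range() does */

	return 0;
}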
@@ -1077,8 +1077,27 @@ struct clear_range_args {
 	int free_physical;
 	struct memobj *memobj;
 	struct process_vm *vm;
+	unsigned long *addr;
+	int nr_addr;
+	int max_nr_addr;
 };
 
+static void remote_flush_tlb_add_addr(struct clear_range_args *args,
+		unsigned long addr)
+{
+	if (args->nr_addr < args->max_nr_addr) {
+		args->addr[args->nr_addr] = addr;
+		++args->nr_addr;
+		return;
+	}
+
+	remote_flush_tlb_array_cpumask(args->vm, args->addr, args->nr_addr,
+			ihk_mc_get_processor_id());
+
+	args->addr[0] = addr;
+	args->nr_addr = 1;
+}
+
 static int clear_range_l1(void *args0, pte_t *ptep, uint64_t base,
 		uint64_t start, uint64_t end)
 {
@@ -1092,7 +1111,7 @@ static int clear_range_l1(void *args0, pte_t *ptep, uint64_t base,
 	}
 
 	old = xchg(ptep, PTE_NULL);
-	remote_flush_tlb_cpumask(args->vm, base, ihk_mc_get_processor_id());
+	remote_flush_tlb_add_addr(args, base);
 
 	page = NULL;
 	if (!pte_is_fileoff(&old, PTL1_SIZE)) {
@@ -1141,8 +1160,7 @@ static int clear_range_l2(void *args0, pte_t *ptep, uint64_t base,
 
 	if (*ptep & PFL2_SIZE) {
 		old = xchg(ptep, PTE_NULL);
-		remote_flush_tlb_cpumask(args->vm, base,
-				ihk_mc_get_processor_id());
+		remote_flush_tlb_add_addr(args, base);
 
 		page = NULL;
 		if (!pte_is_fileoff(&old, PTL2_SIZE)) {
@@ -1174,8 +1192,7 @@ static int clear_range_l2(void *args0, pte_t *ptep, uint64_t base,
 
 	if ((start <= base) && ((base + PTL2_SIZE) <= end)) {
 		*ptep = PTE_NULL;
-		remote_flush_tlb_cpumask(args->vm, base,
-				ihk_mc_get_processor_id());
+		remote_flush_tlb_add_addr(args, base);
 		ihk_mc_free_pages(pt, 1);
 	}
 
@@ -1207,8 +1224,7 @@ static int clear_range_l3(void *args0, pte_t *ptep, uint64_t base,
 
 	if (*ptep & PFL3_SIZE) {
 		old = xchg(ptep, PTE_NULL);
-		remote_flush_tlb_cpumask(args->vm, base,
-				ihk_mc_get_processor_id());
+		remote_flush_tlb_add_addr(args, base);
 
 		page = NULL;
 		if (!pte_is_fileoff(&old, PTL3_SIZE)) {
@@ -1239,8 +1255,7 @@ static int clear_range_l3(void *args0, pte_t *ptep, uint64_t base,
 
 	if (use_1gb_page && (start <= base) && ((base + PTL3_SIZE) <= end)) {
 		*ptep = PTE_NULL;
-		remote_flush_tlb_cpumask(args->vm, base,
-				ihk_mc_get_processor_id());
+		remote_flush_tlb_add_addr(args, base);
 		ihk_mc_free_pages(pt, 1);
 	}
 
@@ -1260,6 +1275,8 @@ static int clear_range_l4(void *args0, pte_t *ptep, uint64_t base,
 	return walk_pte_l3(pt, base, start, end, &clear_range_l3, args0);
 }
 
+#define TLB_INVALID_ARRAY_PAGES (4)
+
 static int clear_range(struct page_table *pt, struct process_vm *vm,
 		uintptr_t start, uintptr_t end, int free_physical,
 		struct memobj *memobj)
@@ -1276,6 +1293,17 @@ static int clear_range(struct page_table *pt, struct process_vm *vm,
 		return -EINVAL;
 	}
 
+	/* TODO: embedd this in tlb_flush_entry? */
+	args.addr = (unsigned long *)ihk_mc_alloc_pages(
+			TLB_INVALID_ARRAY_PAGES, IHK_MC_AP_CRITICAL);
+	if (!args.addr) {
+		ekprintf("%s: error: allocating address array\n", __FUNCTION__);
+		return -ENOMEM;
+	}
+	args.nr_addr = 0;
+	args.max_nr_addr = (TLB_INVALID_ARRAY_PAGES * PAGE_SIZE /
+			sizeof(uint64_t));
+
 	args.free_physical = free_physical;
 	if (memobj && (memobj->flags & MF_DEV_FILE)) {
 		args.free_physical = 0;
@@ -1287,6 +1315,13 @@ static int clear_range(struct page_table *pt, struct process_vm *vm,
 	args.vm = vm;
 
 	error = walk_pte_l4(pt, 0, start, end, &clear_range_l4, &args);
+	if (args.nr_addr) {
+		remote_flush_tlb_array_cpumask(vm, args.addr, args.nr_addr,
+				ihk_mc_get_processor_id());
+	}
+
+	ihk_mc_free_pages(args.addr, TLB_INVALID_ARRAY_PAGES);
+
 	return error;
 }
kernel/mem.c
@@ -887,14 +887,23 @@ void coredump(struct thread *thread, void *regs)
 
 void remote_flush_tlb_cpumask(struct process_vm *vm,
 		unsigned long addr, int cpu_id)
 {
+	unsigned long __addr = addr;
+	return remote_flush_tlb_array_cpumask(vm, &__addr, 1, cpu_id);
+}
+
+void remote_flush_tlb_array_cpumask(struct process_vm *vm,
+		unsigned long *addr,
+		int nr_addr,
+		int cpu_id)
+{
 	unsigned long cpu;
 	int flush_ind;
 	struct tlb_flush_entry *flush_entry;
 	cpu_set_t _cpu_set;
 
-	if (addr) {
-		flush_ind = (addr >> PAGE_SHIFT) % IHK_TLB_FLUSH_IRQ_VECTOR_SIZE;
+	if (addr[0]) {
+		flush_ind = (addr[0] >> PAGE_SHIFT) % IHK_TLB_FLUSH_IRQ_VECTOR_SIZE;
 	}
 	/* Zero address denotes full TLB flush */
 	else {
@@ -916,6 +925,7 @@ void remote_flush_tlb_cpumask(struct process_vm *vm,
 
 	flush_entry->vm = vm;
 	flush_entry->addr = addr;
+	flush_entry->nr_addr = nr_addr;
 	ihk_atomic_set(&flush_entry->pending, 0);
 
 	dkprintf("lock aquired, iterating cpu mask.. flush_ind: %d\n", flush_ind);
@@ -940,16 +950,18 @@ void remote_flush_tlb_cpumask(struct process_vm *vm,
 	unsigned long tsc;
 	tsc = rdtsc() + 12884901888; /* 1.2GHz =>10 sec */
 #endif
-	if (flush_entry->addr) {
-		flush_tlb_single(flush_entry->addr & PAGE_MASK);
+	if (flush_entry->addr[0]) {
+		int i;
+
+		for (i = 0; i < flush_entry->nr_addr; ++i) {
+			flush_tlb_single(flush_entry->addr[i] & PAGE_MASK);
+		}
 	}
 	/* Zero address denotes full TLB flush */
 	else {
 		flush_tlb();
 	}
 
-	/* Flush on this core */
-	flush_tlb_single(addr & PAGE_MASK);
 	/* Wait for all cores */
 	while (ihk_atomic_read(&flush_entry->pending) != 0) {
 		cpu_pause();
@@ -978,22 +990,24 @@ void tlb_flush_handler(int vector)
 	struct tlb_flush_entry *flush_entry = &tlb_flush_vector[vector -
 		IHK_TLB_FLUSH_IRQ_VECTOR_START];
 
-	dkprintf("decreasing pending cnt for %d\n",
-		vector - IHK_TLB_FLUSH_IRQ_VECTOR_START);
-
-	/* Decrease counter */
-	ihk_atomic_dec(&flush_entry->pending);
-
-	dkprintf("flusing TLB for addr: 0x%lX\n", flush_entry->addr);
-
-	if (flush_entry->addr) {
-		flush_tlb_single(flush_entry->addr & PAGE_MASK);
+	if (flush_entry->addr[0]) {
+		int i;
+
+		for (i = 0; i < flush_entry->nr_addr; ++i) {
+			flush_tlb_single(flush_entry->addr[i] & PAGE_MASK);
+			dkprintf("flusing TLB for addr: 0x%lX\n", flush_entry->addr[i]);
+		}
 	}
 	/* Zero address denotes full TLB flush */
 	else {
 		flush_tlb();
 	}
 
+	/* Decrease counter */
+	dkprintf("decreasing pending cnt for %d\n",
+		vector - IHK_TLB_FLUSH_IRQ_VECTOR_START);
+	ihk_atomic_dec(&flush_entry->pending);
+
 	cpu_restore_interrupt(flags);
 #ifdef PROFILE_ENABLE
 	{
@@ -215,6 +215,10 @@ int ihk_mc_get_memory_chunk(int id,
 
 void remote_flush_tlb_cpumask(struct process_vm *vm,
 		unsigned long addr, int cpu_id);
+void remote_flush_tlb_array_cpumask(struct process_vm *vm,
+		unsigned long *addr,
+		int nr_addr,
+		int cpu_id);
 
 int ihk_set_kmsg(unsigned long addr, unsigned long size);
 char *ihk_get_kargs();
@@ -226,7 +230,8 @@ extern void (*__tlb_flush_handler)(int vector);
 
 struct tlb_flush_entry {
 	struct process_vm *vm;
-	unsigned long addr;
+	unsigned long *addr;
+	int nr_addr;
 	ihk_atomic_t pending;
 	ihk_spinlock_t lock;
 } __attribute__((aligned(64)));