| author | John Baldwin <jhb@FreeBSD.org> | 2001-08-21 23:15:25 +0000 |
|---|---|---|
| committer | John Baldwin <jhb@FreeBSD.org> | 2001-08-21 23:15:25 +0000 |
| commit | 3a9e0f5bd17f78e6fed188a0f2e23af658ee9aaf | |
| tree | aa7f73bcb980af873a1da06907267cd9a1f8c903 | |
| parent | b285782b29175fdf365b66d4bb448b7d215a5399 | |
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | sys/amd64/amd64/trap.c | 9 |
| -rw-r--r-- | sys/i386/i386/trap.c | 9 |

2 files changed, 10 insertions, 8 deletions
```diff
diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
index e3f45edf62e2..a84a4f24302d 100644
--- a/sys/amd64/amd64/trap.c
+++ b/sys/amd64/amd64/trap.c
@@ -286,9 +286,7 @@ restart:
 			 */
 			eva = rcr2();
 			enable_intr();
-			mtx_lock(&Giant);
 			i = trap_pfault(&frame, TRUE, eva);
-			mtx_unlock(&Giant);
 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
 			if (i == -2) {
 				/*
@@ -404,9 +402,7 @@ restart:
 			 */
 			eva = rcr2();
 			enable_intr();
-			mtx_lock(&Giant);
 			(void) trap_pfault(&frame, FALSE, eva);
-			mtx_unlock(&Giant);
 			goto out;
 
 		case T_DNA:
@@ -682,6 +678,7 @@ trap_pfault(frame, usermode, eva)
 		if (vm == NULL)
 			goto nogo;
 
+		mtx_lock(&Giant);
 		map = &vm->vm_map;
 
 		/*
@@ -719,6 +716,7 @@ trap_pfault(frame, usermode, eva)
 		if (usermode)
 			goto nogo;
 
+		mtx_lock(&Giant);
 		/*
 		 * Since we know that kernel virtual address addresses
 		 * always have pte pages mapped, we just have to fault
@@ -726,6 +724,7 @@ trap_pfault(frame, usermode, eva)
 		 */
 		rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
 	}
+	mtx_unlock(&Giant);
 
 	if (rv == KERN_SUCCESS)
 		return (0);
@@ -799,6 +798,7 @@ trap_pfault(frame, usermode, eva)
 	else
 		ftype = VM_PROT_READ;
 
+	mtx_lock(&Giant);
 	if (map != kernel_map) {
 		/*
 		 * Keep swapout from messing with us during this
@@ -835,6 +835,7 @@ trap_pfault(frame, usermode, eva)
 		 */
 		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
 	}
+	mtx_unlock(&Giant);
 
 	if (rv == KERN_SUCCESS)
 		return (0);
diff --git a/sys/i386/i386/trap.c b/sys/i386/i386/trap.c
index e3f45edf62e2..a84a4f24302d 100644
--- a/sys/i386/i386/trap.c
+++ b/sys/i386/i386/trap.c
@@ -286,9 +286,7 @@ restart:
 			 */
 			eva = rcr2();
 			enable_intr();
-			mtx_lock(&Giant);
 			i = trap_pfault(&frame, TRUE, eva);
-			mtx_unlock(&Giant);
 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
 			if (i == -2) {
 				/*
@@ -404,9 +402,7 @@ restart:
 			 */
 			eva = rcr2();
 			enable_intr();
-			mtx_lock(&Giant);
 			(void) trap_pfault(&frame, FALSE, eva);
-			mtx_unlock(&Giant);
 			goto out;
 
 		case T_DNA:
@@ -682,6 +678,7 @@ trap_pfault(frame, usermode, eva)
 		if (vm == NULL)
 			goto nogo;
 
+		mtx_lock(&Giant);
 		map = &vm->vm_map;
 
 		/*
@@ -719,6 +716,7 @@ trap_pfault(frame, usermode, eva)
 		if (usermode)
 			goto nogo;
 
+		mtx_lock(&Giant);
 		/*
 		 * Since we know that kernel virtual address addresses
 		 * always have pte pages mapped, we just have to fault
@@ -726,6 +724,7 @@ trap_pfault(frame, usermode, eva)
 		 */
 		rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
 	}
+	mtx_unlock(&Giant);
 
 	if (rv == KERN_SUCCESS)
 		return (0);
@@ -799,6 +798,7 @@ trap_pfault(frame, usermode, eva)
 	else
 		ftype = VM_PROT_READ;
 
+	mtx_lock(&Giant);
 	if (map != kernel_map) {
 		/*
 		 * Keep swapout from messing with us during this
@@ -835,6 +835,7 @@ trap_pfault(frame, usermode, eva)
 		 */
 		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
 	}
+	mtx_unlock(&Giant);
 
 	if (rv == KERN_SUCCESS)
 		return (0);
```
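The hunks above move the Giant acquisition out of the page-fault cases in trap() and into trap_pfault(), so the lock is taken only around the vm_fault() calls and is never taken on the early `goto nogo` exits. Below is a minimal userspace sketch of that "push the lock down into the callee" pattern, offered only as an illustration: a pthread mutex stands in for Giant, and `trap()`, `handle_fault()`, and `do_fault_locked()` are hypothetical stand-ins for the kernel's trap(), trap_pfault(), and vm_fault(), not FreeBSD interfaces.

```c
/*
 * Illustrative sketch only -- not FreeBSD kernel code.  The caller used
 * to wrap the whole call in the lock; now the callee acquires it only
 * around the work that actually needs it, so early error returns never
 * touch the lock at all.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for Giant */

static int do_fault_locked(void);

/* After the change: the callee takes the lock only around the fault. */
static int
handle_fault(int usermode)
{
	int rv;

	if (!usermode)
		return (-1);		/* early exit: lock never taken */

	pthread_mutex_lock(&giant);	/* previously done by the caller */
	rv = do_fault_locked();		/* the work that still needs the lock */
	pthread_mutex_unlock(&giant);
	return (rv);
}

/* The caller no longer brackets the call with lock/unlock. */
static int
trap(int usermode)
{
	return (handle_fault(usermode));
}

/* Pretend fault resolution; must be called with the lock held. */
static int
do_fault_locked(void)
{
	return (0);
}

int
main(void)
{
	printf("user fault -> %d, kernel fault -> %d\n", trap(1), trap(0));
	return (0);
}
```

The design point, as far as the diff shows it, is simply narrower lock scope: the lock is held for less of the fault path, and paths that bail out before reaching vm_fault() avoid it entirely.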
