37 files changed, 271 insertions, 275 deletions
diff --git a/sys/arm/arm/bcopyinout_xscale.S b/sys/arm/arm/bcopyinout_xscale.S
index d1017c3eb8c1..68c2701f5940 100644
--- a/sys/arm/arm/bcopyinout_xscale.S
+++ b/sys/arm/arm/bcopyinout_xscale.S
@@ -333,10 +333,10 @@ ENTRY(copyin)
 	str	r6, [r1], #0x04
 	str	r7, [r1], #0x04
 .Lcopyin_bad1:
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lcopyin_bad1_loop16
 
-	adds	r2, r2, #0x10
+	adds	r2, r2, #0x10
 	ldmeqfd	sp!, {r4-r7}
 	RETeq			/* Return now if done */
 	subs	r2, r2, #0x04
@@ -394,10 +394,10 @@ ENTRY(copyin)
 	str	r6, [r1], #0x04
 	str	r7, [r1], #0x04
 .Lcopyin_bad2:
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lcopyin_bad2_loop16
 
-	adds	r2, r2, #0x10
+	adds	r2, r2, #0x10
 	ldmeqfd	sp!, {r4-r7}
 	RETeq			/* Return now if done */
 	subs	r2, r2, #0x04
@@ -455,10 +455,10 @@ ENTRY(copyin)
 	str	r6, [r1], #0x04
 	str	r7, [r1], #0x04
 .Lcopyin_bad3:
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lcopyin_bad3_loop16
 
-	adds	r2, r2, #0x10
+	adds	r2, r2, #0x10
 	ldmeqfd	sp!, {r4-r7}
 	RETeq			/* Return now if done */
 	subs	r2, r2, #0x04
@@ -785,10 +785,10 @@ ENTRY(copyout)
 	strt	r6, [r1], #0x04
 	strt	r7, [r1], #0x04
 .Lcopyout_bad1:
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lcopyout_bad1_loop16
 
-	adds	r2, r2, #0x10
+	adds	r2, r2, #0x10
 	ldmeqfd	sp!, {r4-r7}
 	RETeq			/* Return now if done */
 	subs	r2, r2, #0x04
@@ -846,10 +846,10 @@ ENTRY(copyout)
 	strt	r6, [r1], #0x04
 	strt	r7, [r1], #0x04
 .Lcopyout_bad2:
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lcopyout_bad2_loop16
 
-	adds	r2, r2, #0x10
+	adds	r2, r2, #0x10
 	ldmeqfd	sp!, {r4-r7}
 	RETeq			/* Return now if done */
 	subs	r2, r2, #0x04
@@ -907,10 +907,10 @@ ENTRY(copyout)
 	strt	r6, [r1], #0x04
 	strt	r7, [r1], #0x04
 .Lcopyout_bad3:
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lcopyout_bad3_loop16
 
-	adds	r2, r2, #0x10
+	adds	r2, r2, #0x10
 	ldmeqfd	sp!, {r4-r7}
 	RETeq			/* Return now if done */
 	subs	r2, r2, #0x04
diff --git a/sys/arm/arm/bootconfig.c b/sys/arm/arm/bootconfig.c
index 0a5d3057a2c0..3e9397e6667a 100644
--- a/sys/arm/arm/bootconfig.c
+++ b/sys/arm/arm/bootconfig.c
@@ -46,7 +46,7 @@ __FBSDID("$FreeBSD$");
 
 #include <machine/bootconfig.h>
 
-/*
+/*
 * Function to identify and process different types of boot argument
 */
 
diff --git a/sys/arm/arm/busdma_machdep.c b/sys/arm/arm/busdma_machdep.c
index f2fe377fc154..7deffed2406f 100644
--- a/sys/arm/arm/busdma_machdep.c
+++ b/sys/arm/arm/busdma_machdep.c
@@ -156,7 +156,7 @@ struct bus_dmamap {
 static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
 static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
 
-static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
+static TAILQ_HEAD(,bus_dmamap) dmamap_freelist =
 	TAILQ_HEAD_INITIALIZER(dmamap_freelist);
 
 #define BUSDMA_STATIC_MAPS 500
@@ -210,7 +210,7 @@ arm_dmamap_freelist_init(void *dummy)
 {
 	int i;
 
-	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
+	for (i = 0; i < BUSDMA_STATIC_MAPS; i++)
 		TAILQ_INSERT_HEAD(&dmamap_freelist, &map_pool[i], freelist);
 }
 
@@ -231,7 +231,7 @@ _bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr)
 	int i;
 	for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) {
 		if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1])
-		    || (lowaddr < phys_avail[i] &&
+		    || (lowaddr < phys_avail[i] &&
 		    highaddr > phys_avail[i]))
 			return (1);
 	}
@@ -313,7 +313,7 @@ _busdma_alloc_dmamap(void)
 	return (map);
 }
 
-static __inline void
+static __inline void
 _busdma_free_dmamap(bus_dmamap_t map)
 {
 	if (map->flags & DMAMAP_ALLOCATED)
@@ -633,7 +633,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
 			*vaddr = tmpaddr;
 		} else
 			newmap->origbuffer = newmap->allocbuffer = NULL;
-	} else
+	} else
 		newmap->origbuffer = newmap->allocbuffer = NULL;
 	return (0);
 }
@@ -844,7 +844,7 @@ bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t *segs,
 		if (seg >= 0 && curaddr == lastaddr &&
 		    (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
 		    (dmat->boundary == 0 ||
-		    (segs[seg].ds_addr & bmask) ==
+		    (segs[seg].ds_addr & bmask) ==
 		    (curaddr & bmask))) {
 			segs[seg].ds_len += sgsize;
 			goto segdone;
@@ -941,7 +941,7 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
 	for (m = m0; m != NULL && error == 0; m = m->m_next) {
 		if (m->m_len > 0) {
 			error = bus_dmamap_load_buffer(dmat,
-			    dm_segments, map, m->m_data, m->m_len,
+			    dm_segments, map, m->m_data, m->m_len,
 			    pmap_kernel(), flags, &lastaddr, &nsegs);
 			map->len += m->m_len;
 		}
@@ -951,7 +951,7 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m0,
 	}
 
 	if (error) {
-		/*
+		/*
 		 * force "no valid mappings" on error in callback.
 		 */
 		(*callback)(callback_arg, dm_segments, 0, 0, error);
@@ -1057,7 +1057,7 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map, struct uio *uio,
 	}
 
 	if (error) {
-		/*
+		/*
 		 * force "no valid mappings" on error in callback.
 		 */
 		(*callback)(callback_arg, dm_segments, 0, 0, error);
@@ -1092,7 +1092,7 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
 {
 	char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align];
 	register_t s;
-	int partial;
+	int partial;
 
 	if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) {
 		cpu_dcache_wb_range((vm_offset_t)buf, len);
@@ -1116,7 +1116,7 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
 			    ~arm_dcache_align_mask),
 			    (vm_offset_t)buf & arm_dcache_align_mask);
 			if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
-				memcpy(_tmp_clend,
+				memcpy(_tmp_clend,
 				    (void *)((vm_offset_t)buf + len),
 				    arm_dcache_align - (((vm_offset_t)(buf) +
 				    len) & arm_dcache_align_mask));
@@ -1126,11 +1126,11 @@ bus_dmamap_sync_buf(void *buf, int len, bus_dmasync_op_t op)
 		if (partial) {
 			if ((vm_offset_t)buf & arm_dcache_align_mask)
 				memcpy((void *)((vm_offset_t)buf &
-				    ~arm_dcache_align_mask), _tmp_cl,
+				    ~arm_dcache_align_mask), _tmp_cl,
 				    (vm_offset_t)buf & arm_dcache_align_mask);
 			if (((vm_offset_t)buf + len) & arm_dcache_align_mask)
-				memcpy((void *)((vm_offset_t)buf + len),
-				    _tmp_clend, arm_dcache_align -
+				memcpy((void *)((vm_offset_t)buf + len),
+				    _tmp_clend, arm_dcache_align -
 				    (((vm_offset_t)(buf) + len) &
 				    arm_dcache_align_mask));
 			intr_restore(s);
@@ -1146,7 +1146,7 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 	STAILQ_FOREACH(bpage, &map->bpages, links) {
 		if (op & BUS_DMASYNC_PREWRITE) {
 			bcopy((void *)bpage->datavaddr,
-			    (void *)(bpage->vaddr_nocache != 0 ?
+			    (void *)(bpage->vaddr_nocache != 0 ?
 			    bpage->vaddr_nocache : bpage->vaddr),
 			    bpage->datacount);
 			if (bpage->vaddr_nocache == 0) {
@@ -1164,7 +1164,7 @@ _bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
 				cpu_l2cache_inv_range(bpage->vaddr,
 				    bpage->datacount);
 			}
-			bcopy((void *)(bpage->vaddr_nocache != 0 ?
+			bcopy((void *)(bpage->vaddr_nocache != 0 ?
 			    bpage->vaddr_nocache : bpage->vaddr),
 			    (void *)bpage->datavaddr, bpage->datacount);
 			dmat->bounce_zone->total_bounced++;
@@ -1179,7 +1179,7 @@ _bus_dma_buf_is_in_bp(bus_dmamap_t map, void *buf, int len)
 
 	STAILQ_FOREACH(bpage, &map->bpages, links) {
 		if ((vm_offset_t)buf >= bpage->datavaddr &&
-		    (vm_offset_t)buf + len <= bpage->datavaddr +
+		    (vm_offset_t)buf + len <= bpage->datavaddr +
 		    bpage->datacount)
 			return (1);
 	}
diff --git a/sys/arm/arm/cpufunc.c b/sys/arm/arm/cpufunc.c
index 4b62e9846dfb..b29dfd9d227a 100644
--- a/sys/arm/arm/cpufunc.c
+++ b/sys/arm/arm/cpufunc.c
@@ -222,7 +222,7 @@ struct cpu_functions arm8_cpufuncs = {
 	arm8_context_switch,		/* context_switch */
 
 	arm8_setup			/* cpu setup */
-};
+};
 #endif	/* CPU_ARM8 */
 
 #ifdef CPU_ARM9
@@ -328,7 +328,7 @@ struct cpu_functions armv5_ec_cpufuncs = {
 	(void *)cpufunc_nullop,		/* l2cache_wbinv_range */
 	(void *)cpufunc_nullop,		/* l2cache_inv_range */
 	(void *)cpufunc_nullop,		/* l2cache_wb_range */
-
+
 	/* Other functions */
 
 	cpufunc_nullop,			/* flush_prefetchbuf */
@@ -530,7 +530,7 @@ struct cpu_functions sa110_cpufuncs = {
 	sa110_context_switch,		/* context_switch */
 
 	sa110_setup			/* cpu setup */
-};
+};
 #endif	/* CPU_SA110 */
 
 #if defined(CPU_SA1100) || defined(CPU_SA1110)
@@ -591,7 +591,7 @@ struct cpu_functions sa11x0_cpufuncs = {
 	sa11x0_context_switch,		/* context_switch */
 
 	sa11x0_setup			/* cpu setup */
-};
+};
 #endif	/* CPU_SA1100 || CPU_SA1110 */
 
 #ifdef CPU_IXP12X0
@@ -652,7 +652,7 @@ struct cpu_functions ixp12x0_cpufuncs = {
 	ixp12x0_context_switch,		/* context_switch */
 
 	ixp12x0_setup			/* cpu setup */
-};
+};
 #endif	/* CPU_IXP12X0 */
 
 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
@@ -841,7 +841,7 @@ struct cpu_functions fa526_cpufuncs = {
 	fa526_context_switch,		/* context_switch */
 
 	fa526_setup			/* cpu setup */
-};
+};
 #endif	/* CPU_FA526 || CPU_FA626TE */
 
 
@@ -1099,7 +1099,7 @@ set_cpufuncs()
 		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
 		get_cachetype_cp15();
 		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
-		arm10_dcache_sets_max =
+		arm10_dcache_sets_max =
 		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
 		    arm10_dcache_sets_inc;
 		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
@@ -1353,7 +1353,7 @@ early_abort_fixup(arg)
 		int loop;
 		int count;
 		int *registers = &frame->tf_r0;
-
+
 		DFC_PRINTF(("LDM/STM\n"));
 		DFC_DISASSEMBLE(fault_pc);
 		if (fault_instruction & (1 << 21)) {
@@ -1533,7 +1533,7 @@ late_abort_fixup(arg)
 			offset = fault_instruction & 0x0f;
 			if (offset == base)
 				return ABORT_FIXUP_FAILED;
-
+
 			/*
 			 * Register offset - hard we have to
 			 * cope with shifts !
@@ -1647,8 +1647,8 @@ static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
 
 static u_int
 parse_cpu_options(args, optlist, cpuctrl)
 	char *args;
-	struct cpu_option *optlist;
-	u_int cpuctrl;
+	struct cpu_option *optlist;
+	u_int cpuctrl;
 {
 	int integer;
@@ -1811,7 +1811,7 @@ arm8_setup(args)
 	ctrl = cpuctrl;
 	cpu_control(0xffffffff, cpuctrl);
 
-	/* Set the clock/test register */
+	/* Set the clock/test register */
 	if (setclock)
 		arm8_clock_config(0x7f, clocktest);
 }
@@ -1891,7 +1891,7 @@ arm10_setup(args)
 	int cpuctrl, cpuctrlmask;
 
 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
-	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
+	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
 	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
 	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
@@ -2031,7 +2031,7 @@ sa110_setup(args)
 /*	cpu_control(cpuctrlmask, cpuctrl);*/
 	cpu_control(0xffffffff, cpuctrl);
 
-	/*
+	/*
 	 * enable clockswitching, note that this doesn't read or write to r0,
 	 * r0 is just to make it valid asm
 	 */
@@ -2089,7 +2089,7 @@ sa11x0_setup(args)
 		cpuctrl |= CPU_CONTROL_VECRELOC;
 	/* Clear out the cache */
 	cpu_idcache_wbinv_all();
-	/* Set the control register */
+	/* Set the control register */
 	ctrl = cpuctrl;
 	cpu_control(0xffffffff, cpuctrl);
 }
@@ -2198,7 +2198,7 @@ ixp12x0_setup(args)
 	/* Clear out the cache */
 	cpu_idcache_wbinv_all();
 
-	/* Set the control register */
+	/* Set the control register */
 	ctrl = cpuctrl;
 	/* cpu_control(0xffffffff, cpuctrl); */
 	cpu_control(cpuctrlmask, cpuctrl);
@@ -2292,5 +2292,5 @@ xscale_setup(args)
 	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
 		: : "r" (auxctl));
 }
-#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
+#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
 	   CPU_XSCALE_80219 */
diff --git a/sys/arm/arm/cpufunc_asm.S b/sys/arm/arm/cpufunc_asm.S
index d9cc70d5495f..99b40f4fc37d 100644
--- a/sys/arm/arm/cpufunc_asm.S
+++ b/sys/arm/arm/cpufunc_asm.S
@@ -34,14 +34,14 @@
 *
 * RiscBSD kernel project
 *
- * cpufunc.S
+ * cpufunc.S
 *
 * Assembly functions for CPU / MMU / TLB specific operations
 *
 * Created      : 30/01/97
 *
 */
-
+
 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");
@@ -86,13 +86,13 @@ ENTRY(cpufunc_faultaddress)
 * Generic functions to write the internal coprocessor registers
 *
 *
- * Currently these registers are
+ * Currently these registers are
 *	c1 - CPU Control
 *	c3 - Domain Access Control
 *
 * All other registers are CPU architecture specific
 */
-
+
 #if 0 /* See below. */
 ENTRY(cpufunc_control)
 	mcr	p15, 0, r0, c1, c0, 0
@@ -107,12 +107,12 @@ ENTRY(cpufunc_domains)
 * Generic functions to read/modify/write the internal coprocessor registers
 *
 *
- * Currently these registers are
+ * Currently these registers are
 *	c1 - CPU Control
 *
 * All other registers are CPU architecture specific
 */
-
+
 ENTRY(cpufunc_control)
 	mrc	p15, 0, r3, c1, c0, 0	/* Read the control register */
 	bic	r2, r3, r0		/* Clear bits */
diff --git a/sys/arm/arm/cpufunc_asm_arm10.S b/sys/arm/arm/cpufunc_asm_arm10.S
index d1d55ff254aa..22da6aadc950 100644
--- a/sys/arm/arm/cpufunc_asm_arm10.S
+++ b/sys/arm/arm/cpufunc_asm_arm10.S
@@ -31,7 +31,7 @@
 *
 * ARM10 assembly functions for CPU / MMU / TLB specific operations
 *
 */
-
+
 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");
@@ -255,7 +255,7 @@ ENTRY(arm10_context_switch)
 
 /*
 * Parameters for the cache cleaning code.  Note that the order of these
- * four variables is assumed in the code above.  Hence the reason for
+ * four variables is assumed in the code above.  Hence the reason for
 * declaring them in the assembler file.
 */
 	.align 0
diff --git a/sys/arm/arm/cpufunc_asm_arm11.S b/sys/arm/arm/cpufunc_asm_arm11.S
index 1ea495767a78..81914db2a5e2 100644
--- a/sys/arm/arm/cpufunc_asm_arm11.S
+++ b/sys/arm/arm/cpufunc_asm_arm11.S
@@ -33,7 +33,7 @@
 * XXX We make no attempt at present to take advantage of the v6 memroy
 * architecture or physically tagged cache.
 */
-
+
 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");
 
diff --git a/sys/arm/arm/cpufunc_asm_arm7tdmi.S b/sys/arm/arm/cpufunc_asm_arm7tdmi.S
index d301efc1f7f0..fed6f16f9237 100644
--- a/sys/arm/arm/cpufunc_asm_arm7tdmi.S
+++ b/sys/arm/arm/cpufunc_asm_arm7tdmi.S
@@ -34,7 +34,7 @@
 * ARM7TDMI assembly functions for CPU / MMU / TLB specific operations
 *
 */
-
+
 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");
 
diff --git a/sys/arm/arm/cpufunc_asm_arm8.S b/sys/arm/arm/cpufunc_asm_arm8.S
index febe8f0a545e..9f23548ffdca 100644
--- a/sys/arm/arm/cpufunc_asm_arm8.S
+++ b/sys/arm/arm/cpufunc_asm_arm8.S
@@ -35,7 +35,7 @@
 * ARM8 assembly functions for CPU / MMU / TLB specific operations
 *
 */
-
+
 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");
 
diff --git a/sys/arm/arm/cpufunc_asm_arm9.S b/sys/arm/arm/cpufunc_asm_arm9.S
index 0734b03ee68a..291d3f7bdbfc 100644
--- a/sys/arm/arm/cpufunc_asm_arm9.S
+++ b/sys/arm/arm/cpufunc_asm_arm9.S
@@ -30,7 +30,7 @@
 *
 * ARM9 assembly functions for CPU / MMU / TLB specific operations
 */
-
+
 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");
 
@@ -242,7 +242,7 @@ ENTRY(arm9_context_switch)
 
 /*
 * Parameters for the cache cleaning code.  Note that the order of these
- * four variables is assumed in the code above.  Hence the reason for
+ * four variables is assumed in the code above.  Hence the reason for
 * declaring them in the assembler file.
 */
 	.align 0
diff --git a/sys/arm/arm/cpufunc_asm_armv4.S b/sys/arm/arm/cpufunc_asm_armv4.S
index ccdcce213c89..1b8797dc97fc 100644
--- a/sys/arm/arm/cpufunc_asm_armv4.S
+++ b/sys/arm/arm/cpufunc_asm_armv4.S
@@ -36,7 +36,7 @@
 * ARM9 assembly functions for CPU / MMU / TLB specific operations
 *
 */
-
+
 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");
 
diff --git a/sys/arm/arm/cpufunc_asm_armv5.S b/sys/arm/arm/cpufunc_asm_armv5.S
index 53e38e0516ce..2faa5f48f93c 100644
--- a/sys/arm/arm/cpufunc_asm_armv5.S
+++ b/sys/arm/arm/cpufunc_asm_armv5.S
@@ -32,7 +32,7 @@
 * These routines can be used by any core that supports the set/index
 * operations.
 */
-
+
 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");
 
@@ -224,7 +224,7 @@ ENTRY(armv5_dcache_wbinv_all)
 
 /*
 * Parameters for the cache cleaning code.  Note that the order of these
- * four variables is assumed in the code above.  Hence the reason for
+ * four variables is assumed in the code above.  Hence the reason for
 * declaring them in the assembler file.
 */
 	.align 0
diff --git a/sys/arm/arm/cpufunc_asm_sa1.S b/sys/arm/arm/cpufunc_asm_sa1.S
index 7279e65077e7..0bdd6e7a768a 100644
--- a/sys/arm/arm/cpufunc_asm_sa1.S
+++ b/sys/arm/arm/cpufunc_asm_sa1.S
@@ -35,7 +35,7 @@
 * SA-1 assembly functions for CPU / MMU / TLB specific operations
 *
 */
-
+
 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");
 
@@ -58,7 +58,7 @@ ENTRY(sa1_setttb)
 #else
 	ldr	r3, .Lblock_userspace_access
 	ldr	r2, [r3]
-	orr	r1, r2, #1
+	orr	r1, r2, #1
 	str	r1, [r3]
 #endif
 	stmfd	sp!, {r0-r3, lr}
@@ -67,7 +67,7 @@ ENTRY(sa1_setttb)
 	mcr	p15, 0, r0, c7, c5, 0	/* invalidate I$ and BTB */
 	mcr	p15, 0, r0, c7, c10, 4	/* drain write and fill buffer */
 
-	/* Write the TTB */
+	/* Write the TTB */
 	mcr	p15, 0, r0, c2, c0, 0
 
 	/* If we have updated the TTB we must flush the TLB */
diff --git a/sys/arm/arm/cpufunc_asm_xscale.S b/sys/arm/arm/cpufunc_asm_xscale.S
index b4700e4a4810..3601b9a3059f 100644
--- a/sys/arm/arm/cpufunc_asm_xscale.S
+++ b/sys/arm/arm/cpufunc_asm_xscale.S
@@ -71,7 +71,7 @@
 *
 * XScale assembly functions for CPU / MMU / TLB specific operations
 */
-
+
 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");
 
@@ -138,7 +138,7 @@ ENTRY(xscale_setttb)
 #else
 	ldr	r3, .Lblock_userspace_access
 	ldr	r2, [r3]
-	orr	r1, r2, #1
+	orr	r1, r2, #1
 	str	r1, [r3]
 #endif
 	stmfd	sp!, {r0-r3, lr}
@@ -150,7 +150,7 @@ ENTRY(xscale_setttb)
 
 	ldmfd	sp!, {r0-r3, lr}
 
-	/* Write the TTB */
+	/* Write the TTB */
 	mcr	p15, 0, r0, c2, c0, 0
 
 	/* If we have updated the TTB we must flush the TLB */
diff --git a/sys/arm/arm/cpufunc_asm_xscale_c3.S b/sys/arm/arm/cpufunc_asm_xscale_c3.S
index 8975fb5c06c3..9a003d0e4aad 100644
--- a/sys/arm/arm/cpufunc_asm_xscale_c3.S
+++ b/sys/arm/arm/cpufunc_asm_xscale_c3.S
@@ -72,7 +72,7 @@
 *
 * XScale core 3 assembly functions for CPU / MMU / TLB specific operations
 */
-
+
 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");
 
@@ -339,7 +339,7 @@ ENTRY(xscalec3_setttb)
 #else
 	ldr	r3, .Lblock_userspace_access
 	ldr	r2, [r3]
-	orr	r1, r2, #1
+	orr	r1, r2, #1
 	str	r1, [r3]
 #endif
 	stmfd	sp!, {r0-r3, lr}
@@ -354,7 +354,7 @@ ENTRY(xscalec3_setttb)
 #ifdef ARM_USE_L2_CACHE
 	orr	r0, r0, #0x18	/* cache the page table in L2 */
 #endif
-	/* Write the TTB */
+	/* Write the TTB */
 	mcr	p15, 0, r0, c2, c0, 0
 
 	/* If we have updated the TTB we must flush the TLB */
diff --git a/sys/arm/arm/db_disasm.c b/sys/arm/arm/db_disasm.c
index 52ff7d700235..d7581fd9c008 100644
--- a/sys/arm/arm/db_disasm.c
+++ b/sys/arm/arm/db_disasm.c
@@ -50,7 +50,7 @@ static u_int db_disasm_read_word(u_int);
 static void db_disasm_printaddr(u_int);
 
 static const disasm_interface_t db_disasm_interface = {
-	db_disasm_read_word,
+	db_disasm_read_word,
 	db_disasm_printaddr,
 	db_printf
 };
diff --git a/sys/arm/arm/db_interface.c b/sys/arm/arm/db_interface.c
index 5ec9110282e8..690665087678 100644
--- a/sys/arm/arm/db_interface.c
+++ b/sys/arm/arm/db_interface.c
@@ -6,24 +6,24 @@
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
- *
+ *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
- *
+ *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
 * Carnegie Mellon requests users of this software to return to
- *
+ *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
- *
+ *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
@@ -325,7 +325,7 @@ branch_taken(u_int insn, db_addr_t pc)
 			break;
 		default:
 			break; /* XXX */
-		}
+		}
 	}
 
 	return (addr + offset);
diff --git a/sys/arm/arm/db_trace.c b/sys/arm/arm/db_trace.c
index 5a0f599e9987..d6e1c3a0f4b9 100644
--- a/sys/arm/arm/db_trace.c
+++ b/sys/arm/arm/db_trace.c
@@ -7,24 +7,24 @@
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
- *
+ *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
- *
+ *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
 * Carnegie Mellon requests users of this software to return to
- *
+ *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
- *
+ *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
@@ -55,7 +55,7 @@ __FBSDID("$FreeBSD$");
 * a structure to represent them is a good idea.
 *
 * Here's the diagram from the APCS.  Increasing address is _up_ the page.
- *
+ *
 *          save code pointer       [fp]        <- fp points to here
 *          return link value       [fp, #-4]
 *          return sp value         [fp, #-8]
@@ -72,9 +72,9 @@ __FBSDID("$FreeBSD$");
 *          [saved a2 value]
 *          [saved a1 value]
 *
- * The save code pointer points twelve bytes beyond the start of the
- * code sequence (usually a single STM) that created the stack frame.
- * We have to disassemble it if we want to know which of the optional
+ * The save code pointer points twelve bytes beyond the start of the
+ * code sequence (usually a single STM) that created the stack frame.
+ * We have to disassemble it if we want to know which of the optional
 * fields are actually present.
 */
 
diff --git a/sys/arm/arm/disassem.c b/sys/arm/arm/disassem.c
index 9aa63d886eb8..f05fb3fbdfc7 100644
--- a/sys/arm/arm/disassem.c
+++ b/sys/arm/arm/disassem.c
@@ -131,9 +131,9 @@ static const struct arm32_insn arm32_i[] = {
 	{ 0x0c500000, 0x04400000, "strb",	"daW" },
 	{ 0x0c500000, 0x04500000, "ldrb",	"daW" },
 	{ 0x0e1f0000, 0x080d0000, "stm",	"YnWl" },/* separate out r13 base */
-	{ 0x0e1f0000, 0x081d0000, "ldm",	"YnWl" },/* separate out r13 base */
+	{ 0x0e1f0000, 0x081d0000, "ldm",	"YnWl" },/* separate out r13 base */
 	{ 0x0e100000, 0x08000000, "stm",	"XnWl" },
-	{ 0x0e100000, 0x08100000, "ldm",	"XnWl" },
+	{ 0x0e100000, 0x08100000, "ldm",	"XnWl" },
 	{ 0x0e1000f0, 0x00100090, "ldrb",	"de" },
 	{ 0x0e1000f0, 0x00000090, "strb",	"de" },
 	{ 0x0e1000f0, 0x001000d0, "ldrsb",	"de" },
@@ -329,7 +329,7 @@ disasm(const disasm_interface_t *di, vm_offset_t loc, int altfmt)
 			di->di_printf("#0x%08x",
 				      (insn & 0xff) << (32 - rotate) |
 				      (insn & 0xff) >> rotate);
-		} else {
+		} else {
 			disasm_register_shift(di, insn);
 		}
 		break;
diff --git a/sys/arm/arm/dump_machdep.c b/sys/arm/arm/dump_machdep.c
index cbff96b996ea..e8ba5768f9ab 100644
--- a/sys/arm/arm/dump_machdep.c
+++ b/sys/arm/arm/dump_machdep.c
@@ -197,7 +197,7 @@ cb_dumpdata(struct md_pa *mdp, int seqnr, void *arg)
 #ifdef SW_WATCHDOG
 		wdog_kern_pat(WD_LASTVAL);
 #endif
-		error = dump_write(di,
+		error = dump_write(di,
 		    (void *)(pa - (pa & L1_ADDR_BITS)),0, dumplo, sz);
 		if (error)
 			break;
diff --git a/sys/arm/arm/elf_trampoline.c b/sys/arm/arm/elf_trampoline.c
index 403c012e49b3..121bd56bdb7f 100644
--- a/sys/arm/arm/elf_trampoline.c
+++ b/sys/arm/arm/elf_trampoline.c
@@ -202,7 +202,7 @@ _startC(void)
 	    "orr %0, %0, %1\n"
 	    "mrc p15, 0, %1, c1, c0, 0\n"
 	    "bic %1, %1, #1\n" /* Disable MMU */
-	    "orr %1, %1, #(4 | 8)\n" /* Add DC enable,
+	    "orr %1, %1, #(4 | 8)\n" /* Add DC enable,
 	       WBUF enable */
 	    "orr %1, %1, #0x1000\n" /* Add IC enable */
 	    "orr %1, %1, #(0x800)\n" /* BPRD enable */
@@ -397,7 +397,7 @@ inflate_kernel(void *kernel, void *startaddr)
 #endif
 
 void *
-load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end,
+load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end,
     int d)
 {
 	Elf32_Ehdr *eh;
@@ -436,7 +436,7 @@ load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end,
 				if (phdr[j].p_type == PT_LOAD &&
 				    shdr[i].sh_offset >=
 				    phdr[j].p_offset &&
-				    (shdr[i].sh_offset +
+				    (shdr[i].sh_offset +
 				    shdr[i].sh_size <=
 				    phdr[j].p_offset +
 				    phdr[j].p_filesz)) {
@@ -445,7 +445,7 @@ load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end,
 					j = eh->e_phnum;
 				}
 			}
-			if (shdr[i].sh_offset != 0 &&
+			if (shdr[i].sh_offset != 0 &&
 			    shdr[i].sh_size != 0) {
 				symtabindex = i;
 				symstrindex = shdr[i].sh_link;
@@ -457,7 +457,7 @@ load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end,
 		ssym = lastaddr;
 		if (d) {
 			memcpy((void *)func_end, (void *)(
-			    shdr[symtabindex].sh_offset + kstart),
+			    shdr[symtabindex].sh_offset + kstart),
 			    shdr[symtabindex].sh_size);
 			memcpy((void *)(func_end +
 			    shdr[symtabindex].sh_size),
@@ -469,7 +469,7 @@ load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end,
 			    sizeof(shdr[symtabindex].sh_size));
 			lastaddr += sizeof(shdr[symstrindex].sh_size);
 			lastaddr += shdr[symstrindex].sh_size;
-			lastaddr = roundup(lastaddr,
+			lastaddr = roundup(lastaddr,
 			    sizeof(shdr[symstrindex].sh_size));
 		}
 
@@ -488,13 +488,13 @@ load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end,
 			    (void*)(kstart + phdr[i].p_offset),
 			    phdr[i].p_filesz);
 			/* Clean space from oversized segments, eg: bss. */
 			if (phdr[i].p_filesz < phdr[i].p_memsz)
-				bzero((void *)(phdr[i].p_vaddr - KERNVIRTADDR +
+				bzero((void *)(phdr[i].p_vaddr - KERNVIRTADDR +
 				    curaddr + phdr[i].p_filesz),
 				    phdr[i].p_memsz - phdr[i].p_filesz);
 		}
 	}
 	/* Now grab the symbol tables. */
 	if (symtabindex >= 0 && symstrindex >= 0) {
-		*(Elf_Size *)lastaddr =
+		*(Elf_Size *)lastaddr =
 			shdr[symtabindex].sh_size;
 		lastaddr += sizeof(shdr[symtabindex].sh_size);
 		memcpy((void*)lastaddr,
@@ -511,7 +511,7 @@ load_kernel(unsigned int kstart, unsigned int curaddr,unsigned int func_end,
 			    shdr[symtabindex].sh_size),
 			    shdr[symstrindex].sh_size);
 			lastaddr += shdr[symstrindex].sh_size;
-			lastaddr = roundup(lastaddr,
+			lastaddr = roundup(lastaddr,
 			    sizeof(shdr[symstrindex].sh_size));
 			*(Elf_Addr *)curaddr = MAGIC_TRAMP_NUMBER;
 			*((Elf_Addr *)curaddr + 1) = ssym - curaddr + KERNVIRTADDR;
@@ -572,10 +572,10 @@ setup_pagetables(unsigned int pt_addr, vm_paddr_t physstart, vm_paddr_t physend,
 	    "sub pc, pc, #4\n" :
 	    "=r" (tmp) : "r" (pd), "r" (domain));
 
-	/*
+	/*
 	 * XXX: This is the most stupid workaround I've ever wrote.
 	 * For some reason, the KB9202 won't boot the kernel unless
-	 * we access an address which is not in the
+	 * we access an address which is not in the
 	 * 0x20000000 - 0x20ffffff range. I hope I'll understand
 	 * what's going on later.
 	 */
@@ -596,7 +596,7 @@ __start(void)
 	curaddr = (void*)((unsigned int)curaddr & 0xfff00000);
#ifdef KZIP
 	if (*kernel == 0x1f && kernel[1] == 0x8b) {
-		pt_addr = (((int)&_end + KERNSIZE + 0x100) &
+		pt_addr = (((int)&_end + KERNSIZE + 0x100) &
 		    ~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE;
 
#ifdef CPU_ARM9
@@ -609,7 +609,7 @@ __start(void)
 		/* Gzipped kernel */
 		dst = inflate_kernel(kernel, &_end);
 		kernel = (char *)&_end;
-		altdst = 4 + load_kernel((unsigned int)kernel,
+		altdst = 4 + load_kernel((unsigned int)kernel,
 		    (unsigned int)curaddr,
 		    (unsigned int)&func_end + 800 , 0);
 		if (altdst > dst)
@@ -627,8 +627,8 @@ __start(void)
 		    :"=r" (pt_addr));
 	} else
#endif
-		dst = 4 + load_kernel((unsigned int)&kernel_start,
-		    (unsigned int)curaddr,
+		dst = 4 + load_kernel((unsigned int)&kernel_start,
+		    (unsigned int)curaddr,
 		    (unsigned int)&func_end, 0);
 	dst = (void *)(((vm_offset_t)dst & ~3));
 	pt_addr = ((unsigned int)dst &~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE;
@@ -637,8 +637,8 @@ __start(void)
 	sp = pt_addr + L1_TABLE_SIZE + 8192;
 	sp = sp &~3;
 	dst = (void *)(sp + 4);
-	memcpy((void *)dst, (void *)&load_kernel, (unsigned int)&func_end -
+	memcpy((void *)dst, (void *)&load_kernel, (unsigned int)&func_end -
 	    (unsigned int)&load_kernel + 800);
-	do_call(dst, kernel, dst + (unsigned int)(&func_end) -
+	do_call(dst, kernel, dst + (unsigned int)(&func_end) -
 	    (unsigned int)(&load_kernel) + 800, sp);
 }
diff --git a/sys/arm/arm/exception.S b/sys/arm/arm/exception.S
index 30df0b19e552..23c6dca84bc1 100644
--- a/sys/arm/arm/exception.S
+++ b/sys/arm/arm/exception.S
@@ -189,7 +189,7 @@ Laddress_exception_msg:
 * This function uses PULLFRAMEFROMSVCANDEXIT and
 * DO_AST
 * only be called if the exception handler used PUSHFRAMEINSVC
- *
+ *
 */
 
exception_exit:
diff --git a/sys/arm/arm/gdb_machdep.c b/sys/arm/arm/gdb_machdep.c
index 2cccb7e60341..11b9c0d2d912 100644
--- a/sys/arm/arm/gdb_machdep.c
+++ b/sys/arm/arm/gdb_machdep.c
@@ -74,8 +74,8 @@ gdb_cpu_getreg(int regnum, size_t *regsz)
 	case 12: return (&kdb_thrctx->un_32.pcb32_r12);
 	case 13: stacktest = kdb_thrctx->un_32.pcb32_sp + 5 * 4;
 		 return (&stacktest);
-	case 15:
-		/*
+	case 15:
+		/*
 		 * On context switch, the PC is not put in the PCB, but
 		 * we can retrieve it from the stack.
 		 */
diff --git a/sys/arm/arm/in_cksum.c b/sys/arm/arm/in_cksum.c
index 4222bc893c3b..3bce65018397 100644
--- a/sys/arm/arm/in_cksum.c
+++ b/sys/arm/arm/in_cksum.c
@@ -149,4 +149,4 @@ u_int in_cksum_hdr(const struct ip *ip)
 	union l_util l_util;
 	REDUCE16;
 	return (~sum & 0xffff);
-}
+}
diff --git a/sys/arm/arm/intr.c b/sys/arm/arm/intr.c
index 181606489679..e79e44c70aa7 100644
--- a/sys/arm/arm/intr.c
+++ b/sys/arm/arm/intr.c
@@ -40,7 +40,7 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/systm.h>
-#include <sys/syslog.h>
+#include <sys/syslog.h>
 #include <sys/malloc.h>
 #include <sys/proc.h>
 #include <sys/bus.h>
@@ -62,7 +62,7 @@ void	arm_handler_execute(struct trapframe *, int);
 
 void (*arm_post_filter)(void *) = NULL;
 
 void
-arm_setup_irqhandler(const char *name, driver_filter_t *filt,
+arm_setup_irqhandler(const char *name, driver_filter_t *filt,
     void (*hand)(void*), void *arg, int irq, int flags, void **cookiep)
 {
 	struct intr_event *event;
@@ -78,7 +78,7 @@ arm_setup_irqhandler(const char *name, driver_filter_t *filt,
 		if (error)
 			return;
 		intr_events[irq] = event;
-		last_printed +=
+		last_printed +=
 		    snprintf(intrnames + last_printed,
 		    MAXCOMLEN + 1,
 		    "irq%d: %s", irq, name);
diff --git a/sys/arm/arm/irq_dispatch.S b/sys/arm/arm/irq_dispatch.S
index 3995731d84c9..6e510dd33d74 100644
--- a/sys/arm/arm/irq_dispatch.S
+++ b/sys/arm/arm/irq_dispatch.S
@@ -103,7 +103,7 @@ ASENTRY_NP(irq_entry)
 	.global _C_LABEL(intrnames), _C_LABEL(sintrnames)
 	.global _C_LABEL(intrcnt), _C_LABEL(sintrcnt)
 
-_C_LABEL(intrnames):
+_C_LABEL(intrnames):
 	.space	NIRQ * (MAXCOMLEN + 1)
_C_LABEL(intrcnt):
 	.space	NIRQ * 4
diff --git a/sys/arm/arm/machdep.c b/sys/arm/arm/machdep.c
index 0cceab0f4cae..7f5cdfecec21 100644
--- a/sys/arm/arm/machdep.c
+++ b/sys/arm/arm/machdep.c
@@ -141,14 +141,14 @@ sendsig(catcher, ksi, mask)
 	/* Allocate and validate space for the signal handler context. */
 	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) &&
 	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
-		fp = (struct sigframe *)(td->td_sigstk.ss_sp +
+		fp = (struct sigframe *)(td->td_sigstk.ss_sp +
 		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
 		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
 	} else
 		fp = (struct sigframe *)td->td_frame->tf_usr_sp;
-
+
 	/* make room on the stack */
 	fp--;
 
@@ -158,7 +158,7 @@ sendsig(catcher, ksi, mask)
 	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
 	frame.sf_si = ksi->ksi_info;
 	frame.sf_uc.uc_sigmask = *mask;
-	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK )
+	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK )
 	    ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
 	frame.sf_uc.uc_stack = td->td_sigstk;
 	mtx_unlock(&psp->ps_mtx);
@@ -451,7 +451,7 @@ ptrace_single_step(struct thread *td)
 	    ("Didn't clear single step"));
 	p = td->td_proc;
 	PROC_UNLOCK(p);
-	error = ptrace_read_int(td, td->td_frame->tf_pc + 4,
+	error = ptrace_read_int(td, td->td_frame->tf_pc + 4,
 	    &td->td_md.md_ptrace_instr);
 	if (error)
 		goto out;
@@ -726,4 +726,3 @@ init_proc0(vm_offset_t kstack)
 	thread0.td_frame = &proc0_tf;
 	pcpup->pc_curpcb = thread0.td_pcb;
 }
-
diff --git a/sys/arm/arm/mem.c b/sys/arm/arm/mem.c
index 2a710d496290..e2e8588903f4 100644
--- a/sys/arm/arm/mem.c
+++ b/sys/arm/arm/mem.c
@@ -101,7 +101,7 @@ memrw(struct cdev *dev, struct uio *uio, int flags)
 			v &= ~PAGE_MASK;
 			for (i = 0; dump_avail[i] || dump_avail[i + 1];
 			    i += 2) {
-				if (v >= dump_avail[i] &&
+				if (v >= dump_avail[i] &&
 				    v < dump_avail[i + 1]) {
 					address_valid = 1;
 					break;
@@ -129,11 +129,11 @@ memrw(struct cdev *dev, struct uio *uio, int flags)
 			addr = trunc_page(uio->uio_offset);
 			eaddr = round_page(uio->uio_offset + c);
 
-			for (; addr < eaddr; addr += PAGE_SIZE)
+			for (; addr < eaddr; addr += PAGE_SIZE)
 				if (pmap_extract(kernel_pmap, addr) == 0)
 					return (EFAULT);
 			if (!kernacc((caddr_t)(int)uio->uio_offset, c,
-			    uio->uio_rw == UIO_READ ?
+			    uio->uio_rw == UIO_READ ?
 			    VM_PROT_READ : VM_PROT_WRITE))
#ifdef ARM_USE_SMALL_ALLOC
 			if (addr <= VM_MAXUSER_ADDRESS ||
diff --git a/sys/arm/arm/nexus.c b/sys/arm/arm/nexus.c
index 422889e23acb..04cf1bfa8f6f 100644
--- a/sys/arm/arm/nexus.c
+++ b/sys/arm/arm/nexus.c
@@ -121,7 +121,7 @@ nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags,
 	if ((rman_get_flags(res) & RF_SHAREABLE) == 0)
 		flags |= INTR_EXCL;
 
-	arm_setup_irqhandler(device_get_nameunit(child),
+	arm_setup_irqhandler(device_get_nameunit(child),
 	    filt, intr, arg, rman_get_start(res), flags, cookiep);
 	return (0);
 }
diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 5e49a440a06b..6a7ebfd0feba 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -177,7 +177,7 @@ __FBSDID("$FreeBSD$");
#define dprintf printf
 
int pmap_debug_level = 0;
-#define PMAP_INLINE
+#define PMAP_INLINE
#else   /* PMAP_DEBUG */
#define PDEBUG(_lev_,_stat_) /* Nothing */
#define dprintf(x, arg...)
@@ -852,7 +852,7 @@ pmap_get_l2_bucket(pmap_t pm, vm_offset_t va)
 * bucket/page table in place.
 *
 * Note that if a new L2 bucket/page was allocated, the caller *must*
- * increment the bucket occupancy counter appropriately *before*
+ * increment the bucket occupancy counter appropriately *before*
 * releasing the pmap's lock to ensure no other thread or cpu deallocates
 * the bucket/page in the meantime.
 */
@@ -903,7 +903,7 @@ again_l2table:
 			 */
 			pm->pm_l2[L2_IDX(l1idx)] = l2;
 		}
-	}
+	}
 
 	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
 
@@ -1100,7 +1100,7 @@ pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
 
 		if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
 			/*
-			 * Page tables must have the cache-mode set to
+			 * Page tables must have the cache-mode set to
 			 * Write-Thru.
 			 */
 			*ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
@@ -1415,7 +1415,7 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
 * We use `maskbits' rather than `clearbits' because we're always passing
 * constants and the latter would require an extra inversion at run-time.
 */
-static int
+static int
pmap_clearbit(struct vm_page *pg, u_int maskbits)
{
 	struct l2_bucket *l2b;
@@ -1473,10 +1473,10 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 
 		if (maskbits & (PVF_WRITE|PVF_MOD)) {
 			if ((pv->pv_flags & PVF_NC)) {
-				/*
+				/*
 				 * Entry is not cacheable:
 				 *
-				 * Don't turn caching on again if this is a
+				 * Don't turn caching on again if this is a
 				 * modified emulation. This would be
 				 * inconsitent with the settings created by
 				 * pmap_fix_cache(). Otherwise, it's safe
@@ -1493,7 +1493,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 			} else
 			if (opte & L2_S_PROT_W) {
 				vm_page_dirty(pg);
-				/*
+				/*
 				 * Entry is writable/cacheable: check if pmap
 				 * is current if it is flush it, otherwise it
 				 * won't be in the cache
@@ -1760,7 +1760,7 @@ pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should NOT adjust pmap's wire_count
 * => we return the old flags
- *
+ *
 * Modify a physical-virtual mapping in the pv table
 */
static u_int
@@ -1845,7 +1845,7 @@ pmap_init(void)
 	/*
 	 * init the pv free list
 	 */
-	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
+	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
 	/*
 	 * Now it is safe to enable pv_table recording.
@@ -1951,7 +1951,7 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
 		vm_page_dirty(pg);
 		pv->pv_flags |= PVF_REF | PVF_MOD;
 
-		/*
+		/*
 		 * Re-enable write permissions for the page.  No need to call
 		 * pmap_fix_cache(), since this is just a
 		 * modified-emulation fault, and the PVF_WRITE bit isn't
@@ -2249,7 +2249,7 @@ pmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va)
 }
 
 static void
-pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap,
+pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap,
     pt_entry_t **ptep)
 {
 	vm_offset_t va = *availp;
@@ -2338,7 +2338,7 @@ pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt
 		if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
 			if (l2next == PMAP_STATIC_L2_SIZE)
 				panic("pmap_bootstrap: out of static L2s");
-			kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 =
+			kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 =
 			    &static_l2[l2next++];
 		}
 
@@ -2407,7 +2407,7 @@ pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt
 	 */
#define SYSMAP(c, p, v, n)						\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
-
+
 	pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte);
 	pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte);
 	pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte);
@@ -2561,7 +2561,7 @@ pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va)
 			l2 = (struct l2_dtable *)nva;
 			nva += sizeof(struct l2_dtable);
 
-			if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva &
+			if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva &
 			    PAGE_MASK)) {
 				/*
 				 * The new l2_dtable straddles a page boundary.
@@ -2809,7 +2809,7 @@ pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
 		if (opte == 0)
 			l2b->l2b_occupancy++;
 	}
-	*pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL,
+	*pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL,
 	    VM_PROT_READ | VM_PROT_WRITE);
 	if (flags & KENTER_CACHE)
 		*pte |= pte_l2_s_cache_mode;
@@ -2822,7 +2822,7 @@ pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
 	 *
 	 * The pvzone is used to delay the recording of kernel
 	 * mappings until the VM is running.
-	 *
+	 *
 	 * This expects the physical memory to have vm_page_array entry.
 	 */
 	if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa))) {
@@ -2906,7 +2906,7 @@ pmap_kremove(vm_offset_t va)
 	PMAP_LOCK(pmap_kernel());
 	if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) &&
 	    (pve = pmap_remove_pv(m, pmap_kernel(), va)))
-		pmap_free_pv_entry(pve);
+		pmap_free_pv_entry(pve);
 	PMAP_UNLOCK(pmap_kernel());
 	vm_page_unlock_queues();
 	va = va & ~PAGE_MASK;
@@ -2943,7 +2943,7 @@ pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
 	PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, "
 	    "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end,
 	    prot));
-
+
 	while (start < end) {
 		pmap_kenter(va, start);
 		va += PAGE_SIZE;
@@ -2987,7 +2987,7 @@ pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
 
 	for (i = 0; i < count; i++) {
 		pmap_wb_page(m[i]);
-		pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]),
+		pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]),
 		    KENTER_CACHE);
 		va += PAGE_SIZE;
 	}
@@ -3343,7 +3343,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		nflags |= PVF_WIRED;
 	PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, "
 	    "wired = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, wired));
-
+
 	if (pmap == pmap_kernel()) {
 		l2b = pmap_get_l2_bucket(pmap, va);
 		if (l2b == NULL)
@@ -3365,7 +3365,7 @@ do_l2b_alloc:
 	}
 
 	ptep = &l2b->l2b_kva[l2pte_index(va)];
-
+
 	opte = *ptep;
 	npte = pa;
 	oflags = 0;
@@ -3528,8 +3528,7 @@ do_l2b_alloc:
 	if (opte == 0) {
 		l2b->l2b_occupancy++;
 		pmap->pm_stats.resident_count++;
-	}
-
+	}
 
 	/*
 	 * If this is just a wiring change, the two PTEs will be
@@ -3545,7 +3544,7 @@ do_l2b_alloc:
 	 * is current
 	 */
 	PTE_SYNC(ptep);
-	if (L1_IDX(va) != L1_IDX(vector_page) &&
+	if (L1_IDX(va) != L1_IDX(vector_page) &&
 	    l2pte_valid(npte)) {
 		/*
 		 * This mapping is likely to be accessed as
@@ -3650,7 +3649,7 @@ pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
 	ptep = &l2b->l2b_kva[l2pte_index(va)];
 	pte = *ptep;
 	pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
-	if (pg)
+	if (pg)
 		pmap_modify_pv(pg, pmap, va, PVF_WIRED, wired ? PVF_WIRED : 0);
 	vm_page_unlock_queues();
 	PMAP_UNLOCK(pmap);
@@ -3695,7 +3694,7 @@ pmap_extract(pmap_t pm, vm_offset_t va)
 		 */
 		KASSERT(pm == pmap_kernel(), ("huh"));
 		/* XXX: what to do about the bits > 32 ? */
-		if (l1pd & L1_S_SUPERSEC)
+		if (l1pd & L1_S_SUPERSEC)
 			pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
 		else
 			pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
@@ -3763,7 +3762,7 @@ retry:
 		 */
 		KASSERT(pmap == pmap_kernel(), ("huh"));
 		/* XXX: what to do about the bits > 32 ? */
-		if (l1pd & L1_S_SUPERSEC)
+		if (l1pd & L1_S_SUPERSEC)
 			pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
 		else
 			pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
@@ -3840,7 +3839,7 @@ pmap_pinit(pmap_t pmap)
 		pmap_enter(pmap, vector_page,
 		    VM_PROT_READ, PHYS_TO_VM_PAGE(systempage.pv_pa),
 		    VM_PROT_READ, 1);
-	}
+	}
 	return (1);
 }
 
@@ -4000,7 +3999,7 @@ pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 
/*
 * pmap_zero_page()
- *
+ *
 * Zero a given physical page by mapping it at a page hook point.
 * In doing the zero page op, the page we zero is mapped cachable, as with
 * StrongARM accesses to non-cached pages are non-burst making writing
@@ -4137,7 +4136,7 @@ pmap_use_minicache(vm_offset_t va, vm_size_t size)
#endif /* ARM_MMU_XSCALE == 1 */
 
/*
- * pmap_zero_page zeros the specified hardware page by mapping
+ * pmap_zero_page zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.
 */
void
@@ -4148,7 +4147,7 @@ pmap_zero_page(vm_page_t m)
 
 
/*
- * pmap_zero_page_area zeros the specified hardware page by mapping
+ * pmap_zero_page_area zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.
 *
 * off and size may not cover an area beyond a single hardware page.
@@ -4162,7 +4161,7 @@ pmap_zero_page_area(vm_page_t m, int off, int size)
 
 
/*
- * pmap_zero_page_idle zeros the specified hardware page by mapping
+ * pmap_zero_page_idle zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.  This
 * is intended to be called from the vm_pagezero process only and
 * outside of Giant.
@@ -4225,7 +4224,7 @@ pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
 		if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) {
 			flags |= npv->pv_flags;
 			/*
-			 * The page is mapped non-cacheable in
+			 * The page is mapped non-cacheable in
 			 * this map.  No need to flush the cache.
 			 */
 			if (npv->pv_flags & PVF_NC) {
@@ -4377,7 +4376,7 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
 	cpu_dcache_wbinv_all();
 	cpu_l2cache_wbinv_all();
 	if (_arm_memcpy && PAGE_SIZE >= _min_memcpy_size &&
-	    _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst),
+	    _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst),
 	    (void *)VM_PAGE_TO_PHYS(src), PAGE_SIZE, IS_PHYSICAL) == 0)
 		return;
#ifdef ARM_USE_SMALL_ALLOC
@@ -4524,7 +4523,7 @@ pmap_clear_reference(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
-	if (m->md.pvh_attrs & PVF_REF)
+	if (m->md.pvh_attrs & PVF_REF)
 		pmap_clearbit(m, PVF_REF);
 }
 
@@ -4710,7 +4709,7 @@ pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv)
 
 	proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;
 
-#ifdef VERBOSE_INIT_ARM
+#ifdef VERBOSE_INIT_ARM
 	printf("pmap_link_l2pt: pa=0x%x va=0x%x\n", l2pv->pv_pa, l2pv->pv_va);
#endif
 
@@ -4779,7 +4778,7 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 {
 	pd_entry_t *pde = (pd_entry_t *) l1pt;
 	pt_entry_t *pte, f1, f2s, f2l;
-	vm_size_t resid;
+	vm_size_t resid;
 	int i;
 
 	resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
@@ -4787,7 +4786,7 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 	if (l1pt == 0)
 		panic("pmap_map_chunk: no L1 table provided");
 
-#ifdef VERBOSE_INIT_ARM
+#ifdef VERBOSE_INIT_ARM
 	printf("pmap_map_chunk: pa=0x%x va=0x%x size=0x%x resid=0x%x "
 	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
#endif
diff --git a/sys/arm/arm/support.S b/sys/arm/arm/support.S
index 2c88f229946f..d4c6fb45748f 100644
--- a/sys/arm/arm/support.S
+++ b/sys/arm/arm/support.S
@@ -389,9 +389,9 @@ ENTRY(bcmp)
 ENTRY(bcopy)
 	/* switch the source and destination registers */
-	eor	r0, r1, r0
-	eor	r1, r0, r1
-	eor	r0, r1, r0
+	eor	r0, r1, r0
+	eor	r1, r0, r1
+	eor	r0, r1, r0
 
 ENTRY(memmove)
 	/* Do the buffers overlap? */
 	cmp	r0, r1
@@ -420,7 +420,7 @@ ENTRY(memmove)
 	/* We have aligned source and destination */
 	subs	r2, r2, #8
 	blt	.Lmemmove_fl12		/* less than 12 bytes (4 from above) */
-	subs	r2, r2, #0x14
+	subs	r2, r2, #0x14
 	blt	.Lmemmove_fl32		/* less than 32 bytes (12 from above) */
 	stmdb	sp!, {r4}		/* borrow r4 */
 
@@ -431,23 +431,23 @@ ENTRY(memmove)
 	stmia	r0!, {r3, r4, r12, lr}
 	ldmia	r1!, {r3, r4, r12, lr}
 	stmia	r0!, {r3, r4, r12, lr}
-	subs	r2, r2, #0x20
+	subs	r2, r2, #0x20
 	bge	.Lmemmove_floop32
 
 	cmn	r2, #0x10
 	ldmgeia	r1!, {r3, r4, r12, lr}	/* blat a remaining 16 bytes */
 	stmgeia	r0!, {r3, r4, r12, lr}
-	subge	r2, r2, #0x10
+	subge	r2, r2, #0x10
 	ldmia	sp!, {r4}		/* return r4 */
 
.Lmemmove_fl32:
-	adds	r2, r2, #0x14
+	adds	r2, r2, #0x14
 
 	/* blat 12 bytes at a time */
.Lmemmove_floop12:
 	ldmgeia	r1!, {r3, r12, lr}
 	stmgeia	r0!, {r3, r12, lr}
-	subges	r2, r2, #0x0c
+	subges	r2, r2, #0x0c
 	bge	.Lmemmove_floop12
 
.Lmemmove_fl12:
@@ -502,9 +502,9 @@ ENTRY(memmove)
 	cmp	r12, #2
 	bgt	.Lmemmove_fsrcul3
 	beq	.Lmemmove_fsrcul2
-	cmp	r2, #0x0c
+	cmp	r2, #0x0c
 	blt	.Lmemmove_fsrcul1loop4
-	sub	r2, r2, #0x0c
+	sub	r2, r2, #0x0c
 	stmdb	sp!, {r4, r5}
 
.Lmemmove_fsrcul1loop16:
@@ -532,10 +532,10 @@ ENTRY(memmove)
 	orr	r12, r12, lr, lsl #24
#endif
 	stmia	r0!, {r3-r5, r12}
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lmemmove_fsrcul1loop16
 	ldmia	sp!, {r4, r5}
-	adds	r2, r2, #0x0c
+	adds	r2, r2, #0x0c
 	blt	.Lmemmove_fsrcul1l4
 
.Lmemmove_fsrcul1loop4:
@@ -559,9 +559,9 @@ ENTRY(memmove)
 	b	.Lmemmove_fl4
 
.Lmemmove_fsrcul2:
-	cmp	r2, #0x0c
+	cmp	r2, #0x0c
 	blt	.Lmemmove_fsrcul2loop4
-	sub	r2, r2, #0x0c
+	sub	r2, r2, #0x0c
 	stmdb	sp!, {r4, r5}
 
.Lmemmove_fsrcul2loop16:
@@ -589,10 +589,10 @@ ENTRY(memmove)
 	orr	r12, r12, lr, lsl #16
#endif
 	stmia	r0!, {r3-r5, r12}
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lmemmove_fsrcul2loop16
 	ldmia	sp!, {r4, r5}
-	adds	r2, r2, #0x0c
+	adds	r2, r2, #0x0c
 	blt	.Lmemmove_fsrcul2l4
 
.Lmemmove_fsrcul2loop4:
@@ -616,9 +616,9 @@ ENTRY(memmove)
 	b	.Lmemmove_fl4
 
.Lmemmove_fsrcul3:
-	cmp	r2, #0x0c
+	cmp	r2, #0x0c
 	blt	.Lmemmove_fsrcul3loop4
-	sub	r2, r2, #0x0c
+	sub	r2, r2, #0x0c
 	stmdb	sp!, {r4, r5}
 
.Lmemmove_fsrcul3loop16:
@@ -646,10 +646,10 @@ ENTRY(memmove)
 	orr	r12, r12, lr, lsl #8
#endif
 	stmia	r0!, {r3-r5, r12}
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lmemmove_fsrcul3loop16
 	ldmia	sp!, {r4, r5}
-	adds	r2, r2, #0x0c
+	adds	r2, r2, #0x0c
 	blt	.Lmemmove_fsrcul3l4
 
.Lmemmove_fsrcul3loop4:
@@ -697,18 +697,18 @@ ENTRY(memmove)
 	stmdb	r0!, {r3, r4, r12, lr}
 	ldmdb	r1!, {r3, r4, r12, lr}
 	stmdb	r0!, {r3, r4, r12, lr}
-	subs	r2, r2, #0x20
+	subs	r2, r2, #0x20
 	bge	.Lmemmove_bloop32
 
.Lmemmove_bl32:
-	cmn	r2, #0x10
+	cmn	r2, #0x10
 	ldmgedb	r1!, {r3, r4, r12, lr}	/* blat a remaining 16 bytes */
 	stmgedb	r0!, {r3, r4, r12, lr}
-	subge	r2, r2, #0x10
-	adds	r2, r2, #0x14
+	subge	r2, r2, #0x10
+	adds	r2, r2, #0x14
 	ldmgedb	r1!, {r3, r12, lr}	/* blat a remaining 12 bytes */
 	stmgedb	r0!, {r3, r12, lr}
-	subge	r2, r2, #0x0c
+	subge	r2, r2, #0x0c
 	ldmia	sp!, {r4, lr}
 
.Lmemmove_bl12:
@@ -760,9 +760,9 @@ ENTRY(memmove)
 	cmp	r12, #2
 	blt	.Lmemmove_bsrcul1
 	beq	.Lmemmove_bsrcul2
-	cmp	r2, #0x0c
+	cmp	r2, #0x0c
 	blt	.Lmemmove_bsrcul3loop4
-	sub	r2, r2, #0x0c
+	sub	r2, r2, #0x0c
 	stmdb	sp!, {r4, r5, lr}
 
.Lmemmove_bsrcul3loop16:
@@ -790,10 +790,10 @@ ENTRY(memmove)
 	orr	r4, r4, r3, lsr #24
#endif
 	stmdb	r0!, {r4, r5, r12, lr}
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lmemmove_bsrcul3loop16
 	ldmia	sp!, {r4, r5, lr}
-	adds	r2, r2, #0x0c
+	adds	r2, r2, #0x0c
 	blt	.Lmemmove_bsrcul3l4
 
.Lmemmove_bsrcul3loop4:
@@ -817,9 +817,9 @@ ENTRY(memmove)
 	b	.Lmemmove_bl4
 
.Lmemmove_bsrcul2:
-	cmp	r2, #0x0c
+	cmp	r2, #0x0c
 	blt	.Lmemmove_bsrcul2loop4
-	sub	r2, r2, #0x0c
+	sub	r2, r2, #0x0c
 	stmdb	sp!, {r4, r5, lr}
 
.Lmemmove_bsrcul2loop16:
@@ -847,10 +847,10 @@ ENTRY(memmove)
 	orr	r4, r4, r3, lsr #16
#endif
 	stmdb	r0!, {r4, r5, r12, lr}
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lmemmove_bsrcul2loop16
 	ldmia	sp!, {r4, r5, lr}
-	adds	r2, r2, #0x0c
+	adds	r2, r2, #0x0c
 	blt	.Lmemmove_bsrcul2l4
 
.Lmemmove_bsrcul2loop4:
@@ -874,9 +874,9 @@ ENTRY(memmove)
 	b	.Lmemmove_bl4
 
.Lmemmove_bsrcul1:
-	cmp	r2, #0x0c
+	cmp	r2, #0x0c
 	blt	.Lmemmove_bsrcul1loop4
-	sub	r2, r2, #0x0c
+	sub	r2, r2, #0x0c
 	stmdb	sp!, {r4, r5, lr}
 
.Lmemmove_bsrcul1loop32:
@@ -904,10 +904,10 @@ ENTRY(memmove)
 	orr	r4, r4, r3, lsr #8
#endif
 	stmdb	r0!, {r4, r5, r12, lr}
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lmemmove_bsrcul1loop32
 	ldmia	sp!, {r4, r5, lr}
-	adds	r2, r2, #0x0c
+	adds	r2, r2, #0x0c
 	blt	.Lmemmove_bsrcul1l4
 
.Lmemmove_bsrcul1loop4:
@@ -976,7 +976,7 @@ ENTRY(memcpy)
 	/* We have aligned source and destination */
 	subs	r2, r2, #8
 	blt	.Lmemcpy_l12		/* less than 12 bytes (4 from above) */
-	subs	r2, r2, #0x14
+	subs	r2, r2, #0x14
 	blt	.Lmemcpy_l32		/* less than 32 bytes (12 from above) */
 	stmdb	sp!, {r4}		/* borrow r4 */
 
@@ -987,23 +987,23 @@ ENTRY(memcpy)
 	stmia	r0!, {r3, r4, r12, lr}
 	ldmia	r1!, {r3, r4, r12, lr}
 	stmia	r0!, {r3, r4, r12, lr}
-	subs	r2, r2, #0x20
+	subs	r2, r2, #0x20
 	bge	.Lmemcpy_loop32
 
 	cmn	r2, #0x10
 	ldmgeia	r1!, {r3, r4, r12, lr}	/* blat a remaining 16 bytes */
 	stmgeia	r0!, {r3, r4, r12, lr}
-	subge	r2, r2, #0x10
+	subge	r2, r2, #0x10
 	ldmia	sp!, {r4}		/* return r4 */
 
.Lmemcpy_l32:
-	adds	r2, r2, #0x14
+	adds	r2, r2, #0x14
 
 	/* blat 12 bytes at a time */
.Lmemcpy_loop12:
 	ldmgeia	r1!, {r3, r12, lr}
 	stmgeia	r0!, {r3, r12, lr}
-	subges	r2, r2, #0x0c
+	subges	r2, r2, #0x0c
 	bge	.Lmemcpy_loop12
 
.Lmemcpy_l12:
@@ -1061,9 +1061,9 @@ ENTRY(memcpy)
 	cmp	r12, #2
 	bgt	.Lmemcpy_srcul3
 	beq	.Lmemcpy_srcul2
-	cmp	r2, #0x0c
+	cmp	r2, #0x0c
 	blt	.Lmemcpy_srcul1loop4
-	sub	r2, r2, #0x0c
+	sub	r2, r2, #0x0c
 	stmdb	sp!, {r4, r5}
 
.Lmemcpy_srcul1loop16:
@@ -1077,10 +1077,10 @@ ENTRY(memcpy)
 	mov	r12, r12, lsr #8
 	orr	r12, r12, lr, lsl #24
 	stmia	r0!, {r3-r5, r12}
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lmemcpy_srcul1loop16
 	ldmia	sp!, {r4, r5}
-	adds	r2, r2, #0x0c
+	adds	r2, r2, #0x0c
 	blt	.Lmemcpy_srcul1l4
 
.Lmemcpy_srcul1loop4:
@@ -1096,9 +1096,9 @@ ENTRY(memcpy)
 	b	.Lmemcpy_l4
 
.Lmemcpy_srcul2:
-	cmp	r2, #0x0c
+	cmp	r2, #0x0c
 	blt	.Lmemcpy_srcul2loop4
-	sub	r2, r2, #0x0c
+	sub	r2, r2, #0x0c
 	stmdb	sp!, {r4, r5}
 
.Lmemcpy_srcul2loop16:
@@ -1112,10 +1112,10 @@ ENTRY(memcpy)
 	mov	r12, r12, lsr #16
 	orr	r12, r12, lr, lsl #16
 	stmia	r0!, {r3-r5, r12}
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lmemcpy_srcul2loop16
 	ldmia	sp!, {r4, r5}
-	adds	r2, r2, #0x0c
+	adds	r2, r2, #0x0c
 	blt	.Lmemcpy_srcul2l4
 
.Lmemcpy_srcul2loop4:
@@ -1131,9 +1131,9 @@ ENTRY(memcpy)
 	b	.Lmemcpy_l4
 
.Lmemcpy_srcul3:
-	cmp	r2, #0x0c
+	cmp	r2, #0x0c
 	blt	.Lmemcpy_srcul3loop4
-	sub	r2, r2, #0x0c
+	sub	r2, r2, #0x0c
 	stmdb	sp!, {r4, r5}
 
.Lmemcpy_srcul3loop16:
@@ -1147,10 +1147,10 @@ ENTRY(memcpy)
 	mov	r12, r12, lsr #24
 	orr	r12, r12, lr, lsl #8
 	stmia	r0!, {r3-r5, r12}
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lmemcpy_srcul3loop16
 	ldmia	sp!, {r4, r5}
-	adds	r2, r2, #0x0c
+	adds	r2, r2, #0x0c
 	blt	.Lmemcpy_srcul3l4
 
.Lmemcpy_srcul3loop4:
@@ -1404,10 +1404,10 @@ ENTRY(memcpy)
 	str	r6, [r3], #0x04
 	str	r7, [r3], #0x04
.Lmemcpy_bad1:
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lmemcpy_bad1_loop16
 
-	adds	r2, r2, #0x10
+	adds	r2, r2, #0x10
 	ldmeqfd	sp!, {r4-r7}
 	RETeq			/* Return now if done */
 	subs	r2, r2, #0x04
@@ -1465,10 +1465,10 @@ ENTRY(memcpy)
 	str	r6, [r3], #0x04
 	str	r7, [r3], #0x04
.Lmemcpy_bad2:
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lmemcpy_bad2_loop16
 
-	adds	r2, r2, #0x10
+	adds	r2, r2, #0x10
 	ldmeqfd	sp!, {r4-r7}
 	RETeq			/* Return now if done */
 	subs	r2, r2, #0x04
@@ -1526,10 +1526,10 @@ ENTRY(memcpy)
 	str	r6, [r3], #0x04
 	str	r7, [r3], #0x04
.Lmemcpy_bad3:
-	subs	r2, r2, #0x10
+	subs	r2, r2, #0x10
 	bge	.Lmemcpy_bad3_loop16
 
-	adds	r2, r2, #0x10
+	adds	r2, r2, #0x10
 	ldmeqfd	sp!, {r4-r7}
 	RETeq			/* Return now if done */
 	subs	r2, r2, #0x04
diff --git a/sys/arm/arm/swtch.S b/sys/arm/arm/swtch.S
index 762fc28a0147..a293c5cc96fa 100644
--- a/sys/arm/arm/swtch.S
+++ b/sys/arm/arm/swtch.S
@@ -262,7 +262,7 @@ ENTRY(cpu_switch)
 	strd	r12, [r2, #(PCB_R12)]
#endif
 	str	pc, [r2, #(PCB_PC)]
-
+
 	/*
 	 * NOTE: We can now use r8-r13 until it is time to restore
 	 * them for the new process.
@@ -291,7 +291,7 @@ ENTRY(cpu_switch)
 	mrs	r3, cpsr
 
 	/*
-	 * We can do that, since
+	 * We can do that, since
 	 * PSR_SVC32_MODE|PSR_UND32_MODE == MSR_UND32_MODE
 	 */
 	orr	r8, r3, #(PSR_UND32_MODE)
@@ -399,7 +399,7 @@ ENTRY(cpu_switch)
 	movne	lr, pc
 	ldrne	pc, [r10, #CF_TLB_FLUSHID_SE]
 	/*
-	 * We can do that, since
+	 * We can do that, since
 	 * PSR_SVC32_MODE|PSR_UND32_MODE == MSR_UND32_MODE
 	 */
 
@@ -422,7 +422,7 @@ ENTRY(cpu_switch)
 	mrs	r3, cpsr
 
 	/*
-	 * We can do that, since
+	 * We can do that, since
 	 * PSR_SVC32_MODE|PSR_UND32_MODE == MSR_UND32_MODE
 	 */
 	orr	r2, r3, #(PSR_UND32_MODE)
@@ -451,7 +451,7 @@ ENTRY(cpu_switch)
 
#ifdef ARMFPE
 	add	r0, r7, #(USER_SIZE) & 0x00ff
-	add	r0, r0, #(USER_SIZE) & 0xff00
+	add	r0, r0, #(USER_SIZE) & 0xff00
 	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif
 
diff --git a/sys/arm/arm/sys_machdep.c b/sys/arm/arm/sys_machdep.c
index 5fe7b5c03d74..f673dc6135ea 100644
--- a/sys/arm/arm/sys_machdep.c
+++ b/sys/arm/arm/sys_machdep.c
@@ -132,10 +132,10 @@ sysarch(td, uap)
#endif
 
 	switch (uap->op) {
-	case ARM_SYNC_ICACHE :
+	case ARM_SYNC_ICACHE:
 		error = arm32_sync_icache(td, uap->parms);
 		break;
-	case ARM_DRAIN_WRITEBUF :
+	case ARM_DRAIN_WRITEBUF:
 		error = arm32_drain_writebuf(td, uap->parms);
 		break;
 	case ARM_SET_TP:
diff --git a/sys/arm/arm/trap.c b/sys/arm/arm/trap.c
index e56f0ffded86..0d915dae0018 100644
--- a/sys/arm/arm/trap.c
+++ b/sys/arm/arm/trap.c
@@ -128,7 +128,7 @@ void undefinedinstruction(trapframe_t *);
 
#include <machine/disassem.h>
#include <machine/machdep.h>
-
+
extern char fusubailout[];
 
#ifdef DEBUG
@@ -388,7 +388,7 @@ data_abort_handler(trapframe_t *tf)
 	 * responsible to determine if it was a write.
 	 */
 	if (IS_PERMISSION_FAULT(fsr)) {
-		ftype = VM_PROT_WRITE;
+		ftype = VM_PROT_WRITE;
 	} else {
 		u_int insn = ReadWord(tf->tf_pc);
 
@@ -396,13 +396,13 @@ data_abort_handler(trapframe_t *tf)
 		if (((insn & 0x0c100000) == 0x04000000) ||	/* STR/STRB */
 		    ((insn & 0x0e1000b0) == 0x000000b0) ||	/* STRH/STRD */
 		    ((insn & 0x0a100000) == 0x08000000))	/* STM/CDT */
 		{
-			ftype = VM_PROT_WRITE;
+			ftype = VM_PROT_WRITE;
 		}
 		else
 		if ((insn & 0x0fb00ff0) == 0x01000090)		/* SWP */
-			ftype = VM_PROT_READ | VM_PROT_WRITE;
+			ftype = VM_PROT_READ | VM_PROT_WRITE;
 		else
-			ftype = VM_PROT_READ;
+			ftype = VM_PROT_READ;
 	}
 
 	/*
@@ -734,9 +734,7 @@ prefetch_abort_handler(trapframe_t *tf)
 		if (__predict_true(tf->tf_spsr & F32_bit) == 0)
 			enable_interrupts(F32_bit);
 	}
-
-
 
 	/* See if the cpu state needs to be fixed up */
 	switch (prefetch_abort_fixup(tf, &ksig)) {
 	case ABORT_FIXUP_RETURN:
@@ -947,15 +945,15 @@ swi_handler(trapframe_t *frame)
 	/*
 	 * Enable interrupts if they were enabled before the exception.
 	 * Since all syscalls *should* come from user mode it will always
-	 * be safe to enable them, but check anyway.
-	 */
+	 * be safe to enable them, but check anyway.
+	 */
 	if (td->td_md.md_spinlock_count == 0) {
 		if (__predict_true(frame->tf_spsr & I32_bit) == 0)
 			enable_interrupts(I32_bit);
 		if (__predict_true(frame->tf_spsr & F32_bit) == 0)
 			enable_interrupts(F32_bit);
 	}
-
+
 	syscall(td, frame, insn);
 }
 
diff --git a/sys/arm/arm/undefined.c b/sys/arm/arm/undefined.c
index fa02023c656b..fcb612d1b212 100644
--- a/sys/arm/arm/undefined.c
+++ b/sys/arm/arm/undefined.c
@@ -194,8 +194,8 @@ undefinedinstruction(trapframe_t *frame)
 
 	fault_pc = frame->tf_pc;
 
-	/*
-	 * Get the current thread/proc structure or thread0/proc0 if there is
+	/*
+	 * Get the current thread/proc structure or thread0/proc0 if there is
 	 * none.
 	 */
 	td = curthread == NULL ? &thread0 : curthread;
diff --git a/sys/arm/arm/vectors.S b/sys/arm/arm/vectors.S
index c91b437e8b81..95ee5d190101 100644
--- a/sys/arm/arm/vectors.S
+++ b/sys/arm/arm/vectors.S
@@ -3,7 +3,7 @@
/*-
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
- * All rights reserved.
+ * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@@ -18,10 +18,10 @@
 *    This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
- *
+ *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
diff --git a/sys/arm/arm/vm_machdep.c b/sys/arm/arm/vm_machdep.c
index 54e561e872e2..114c68329346 100644
--- a/sys/arm/arm/vm_machdep.c
+++ b/sys/arm/arm/vm_machdep.c
@@ -152,11 +152,11 @@ cpu_fork(register struct thread *td1, register struct proc *p2,
void
cpu_thread_swapin(struct thread *td)
{
-}
+}
 
-void
+void
cpu_thread_swapout(struct thread *td)
-{
+{
}
 
/*
@@ -177,7 +177,7 @@ sf_buf_free(struct sf_buf *sf)
 		if (sf_buf_alloc_want > 0)
 			wakeup(&sf_buf_freelist);
 	}
-	mtx_unlock(&sf_buf_lock);
+	mtx_unlock(&sf_buf_lock);
#endif
}
 
@@ -187,11 +187,11 @@ sf_buf_free(struct sf_buf *sf)
 */
static void
sf_buf_init(void *arg)
-{
+{
 	struct sf_buf *sf_bufs;
 	vm_offset_t sf_base;
 	int i;
-
+
 	nsfbufs = NSFBUFS;
 	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);
 
@@ -204,7 +204,7 @@ sf_buf_init(void *arg)
 		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
 		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
 	}
-	sf_buf_alloc_want = 0;
+	sf_buf_alloc_want = 0;
 	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
#endif
@@ -246,7 +246,7 @@ sf_buf_alloc(struct vm_page *m, int flags)
 
 
		/*
-		 * If we got a signal, don't risk going back to sleep.
+		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
@@ -319,7 +319,7 @@ cpu_set_syscall_retval(struct thread *td, int error)
 
/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
- * upcall. Put enough state in the new thread's PCB to get it to go back
+ * upcall. Put enough state in the new thread's PCB to get it to go back
 * userret(), where we can intercept it again to set the return (upcall)
 * Address and stack, along with those from upcals that are from other sources
 * such as those generated in thread_userret() itself.
@@ -387,7 +387,7 @@ cpu_thread_exit(struct thread *td)
void
cpu_thread_alloc(struct thread *td)
{
-	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
+	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
 	    PAGE_SIZE) - 1;
 	td->td_frame = (struct trapframe *)
 	    ((u_int)td->td_kstack + USPACE_SVC_STACK_TOP - sizeof(struct pcb)) - 1;
@@ -395,7 +395,7 @@ cpu_thread_alloc(struct thread *td)
#ifndef CPU_XSCALE_CORE3
 	pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE);
#endif
-#endif
+#endif
}
 
void
@@ -429,8 +429,8 @@ cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
 
/*
 * Software interrupt handler for queued VM system processing.
- */
-void
+ */
+void
swi_vm(void *dummy)
{
 
@@ -445,14 +445,14 @@ cpu_exit(struct thread *td)
 
#define BITS_PER_INT	(8 * sizeof(int))
vm_offset_t arm_nocache_startaddr;
-static int arm_nocache_allocated[ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE *
+static int arm_nocache_allocated[ARM_NOCACHE_KVA_SIZE / (PAGE_SIZE *
    BITS_PER_INT)];
 
/*
- * Functions to map and unmap memory non-cached into KVA the kernel won't try
+ * Functions to map and unmap memory non-cached into KVA the kernel won't try
 * to allocate. The goal is to provide uncached memory to busdma, to honor
- * BUS_DMA_COHERENT.
- * We can allocate at most ARM_NOCACHE_KVA_SIZE bytes.
+ * BUS_DMA_COHERENT.
+ * We can allocate at most ARM_NOCACHE_KVA_SIZE bytes.
 * The allocator is rather dummy, each page is represented by a bit in
 * a bitfield, 0 meaning the page is not allocated, 1 meaning it is.
 * As soon as it finds enough contiguous pages to satisfy the request,
@@ -465,7 +465,7 @@ arm_remap_nocache(void *addr, vm_size_t size)
 
 	size = round_page(size);
 	for (i = 0; i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE; i++) {
-		if (!(arm_nocache_allocated[i / BITS_PER_INT] & (1 << (i %
+		if (!(arm_nocache_allocated[i / BITS_PER_INT] & (1 << (i %
 		    BITS_PER_INT)))) {
 			for (j = i; j < i + (size / (PAGE_SIZE)); j++)
 				if (arm_nocache_allocated[j / BITS_PER_INT] &
@@ -488,7 +488,7 @@ arm_remap_nocache(void *addr, vm_size_t size)
 			cpu_l2cache_wbinv_range(vaddr, PAGE_SIZE);
 			pmap_kenter_nocache(tomap, physaddr);
 			cpu_tlb_flushID_SE(vaddr);
-			arm_nocache_allocated[i / BITS_PER_INT] |= 1 << (i %
+			arm_nocache_allocated[i / BITS_PER_INT] |= 1 << (i %
 			    BITS_PER_INT);
 		}
 		return (ret);
@@ -506,7 +506,7 @@ arm_unmap_nocache(void *addr, vm_size_t size)
 	size = round_page(size);
 	i = (raddr - arm_nocache_startaddr) / (PAGE_SIZE);
 	for (; size > 0; size -= PAGE_SIZE, i++) {
-		arm_nocache_allocated[i / BITS_PER_INT] &= ~(1 << (i %
+		arm_nocache_allocated[i / BITS_PER_INT] &= ~(1 << (i %
 		    BITS_PER_INT));
 		pmap_kremove(raddr);
 		raddr += PAGE_SIZE;
@@ -515,9 +515,9 @@ arm_unmap_nocache(void *addr, vm_size_t size)
 
#ifdef ARM_USE_SMALL_ALLOC
 
-static TAILQ_HEAD(,arm_small_page) pages_normal =
+static TAILQ_HEAD(,arm_small_page) pages_normal =
	TAILQ_HEAD_INITIALIZER(pages_normal);
-static TAILQ_HEAD(,arm_small_page) pages_wt =
+static TAILQ_HEAD(,arm_small_page) pages_wt =
	TAILQ_HEAD_INITIALIZER(pages_wt);
 static TAILQ_HEAD(,arm_small_page) free_pgdesc =
	TAILQ_HEAD_INITIALIZER(free_pgdesc);
@@ -561,12 +561,12 @@ arm_init_smallalloc(void)
	vm_offset_t to_map = 0, mapaddr;
	int i;
 
-	/*
+	/*
	 * We need to use dump_avail and not phys_avail, since we want to
	 * map the whole memory and not just the memory available to the VM
	 * to be able to do a pa => va association for any address.
	 */
-
+
	for (i = 0; dump_avail[i + 1]; i+= 2) {
		to_map += (dump_avail[i + 1] & S_FRAME) + S_SIZE -
		    (dump_avail[i] & S_FRAME);
@@ -579,10 +579,10 @@ arm_init_smallalloc(void)
		while (size > 0) {
#ifdef ARM_HAVE_SUPERSECTIONS
			pmap_kenter_supersection(mapaddr,
-			    (dump_avail[i] & L1_SUP_FRAME) + did,
+			    (dump_avail[i] & L1_SUP_FRAME) + did,
			    SECTION_CACHE);
#else
-			pmap_kenter_section(mapaddr,
+			pmap_kenter_section(mapaddr,
			    (dump_avail[i] & L1_S_FRAME) + did, SECTION_CACHE);
#endif
			mapaddr += S_SIZE;
@@ -661,7 +661,7 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
		if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
			bzero(ret, PAGE_SIZE);
		return (ret);
-	}
+	}
	TAILQ_REMOVE(head, sp, pg_list);
	TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
	ret = sp->addr;
@@ -690,7 +690,7 @@ uma_small_free(void *mem, int size, u_int8_t flags)
		sp->addr = mem;
		pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd,
		    &pt);
-		if ((*pd & pte_l1_s_cache_mask) ==
+		if ((*pd & pte_l1_s_cache_mask) ==
		    pte_l1_s_cache_mode_pt &&
		    pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
			TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);