-rw-r--r--   Makefile                        |  11
-rw-r--r--   sbin/savecore/tests/Makefile    |   3
-rw-r--r--   share/misc/committers-src.dot   |   3
-rw-r--r--   sys/conf/files                  |   2
-rw-r--r--   sys/dev/md/md.c                 | 263
-rw-r--r--   sys/fs/nfsclient/nfs_clport.c   |   1
-rw-r--r--   sys/kern/link_elf_obj.c         |   4
-rw-r--r--   sys/net/if_ethersubr.c          |   6
8 files changed, 167 insertions, 126 deletions
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -376,22 +376,13 @@ buildworld: upgrade_checks
 kernel-toolchain: upgrade_checks
 .endif
 
-# we need the system time(1) command, not from the shell
-time_cmd=	/usr/bin/time
-
-# mktemp(1) is not portable
-mktemp_cmd=	mktemp /tmp/_time-logging-XXXXXXXXX
-
 #
 # Handle the user-driven targets, using the source relative mk files.
 #
 tinderbox toolchains kernel-toolchains: .MAKE
 ${TGTS}: .PHONY .MAKE
-	${_+_}@cd ${.CURDIR}; _time_tmp=$$(${mktemp_cmd}); \
-	${time_cmd} -o $${_time_tmp} -p env ${_MAKE} ${.TARGET}; \
-	echo ">>> Time spent on target ${.TARGET}: $$(tr '\n' ' ' < $${_time_tmp})"; \
-	rm -f $${_time_tmp}
+	${_+_}@cd ${.CURDIR}; ${_MAKE} ${.TARGET}
 
 # The historic default "all" target creates files which may cause stale
 # or (in the cross build case) unlinkable results. Fail with an error
diff --git a/sbin/savecore/tests/Makefile b/sbin/savecore/tests/Makefile
index 81e6b8ce2667..7ec3c9aeedcc 100644
--- a/sbin/savecore/tests/Makefile
+++ b/sbin/savecore/tests/Makefile
@@ -1,3 +1,6 @@
 ATF_TESTS_SH=	livedump_test log_test
 
+# The test fails if any kernel modules are loaded while the test is running.
+TEST_METADATA.livedump_test+=	is_exclusive=true
+
 .include <bsd.test.mk>
diff --git a/share/misc/committers-src.dot b/share/misc/committers-src.dot
index 60ef52278a53..313f40ad8e51 100644
--- a/share/misc/committers-src.dot
+++ b/share/misc/committers-src.dot
@@ -114,6 +114,7 @@ andre [label="Andre Oppermann\nandre@FreeBSD.org\n2003/11/12"]
 andreast [label="Andreas Tobler\nandreast@FreeBSD.org\n2010/09/05"]
 andrew [label="Andrew Turner\nandrew@FreeBSD.org\n2010/07/19"]
 antoine [label="Antoine Brodin\nantoine@FreeBSD.org\n2008/02/03"]
+aokblast [label="ShengYi Hung\naokblast@FreeBSD.org\n2025/07/02"]
 araujo [label="Marcelo Araujo\naraujo@FreeBSD.org\n2015/08/04"]
 arichardson [label="Alex Richardson\narichardson@FreeBSD.org\n2017/10/30"]
 ariff [label="Ariff Abdullah\nariff@FreeBSD.org\n2005/11/14"]
@@ -764,6 +765,7 @@ kp -> nick
 kp -> rcm
 kp -> zlei
 
+lwhsu -> aokblast
 lwhsu -> khng
 
 manu -> corvink
@@ -777,6 +779,7 @@ marcel -> nwhitehorn
 marcel -> sjg
 
 markj -> "0mp"
+markj -> aokblast
 markj -> bnovkov
 markj -> cem
 markj -> christos
diff --git a/sys/conf/files b/sys/conf/files
index 84fae3c00ea6..75ee10be5896 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -3865,7 +3865,7 @@ kern/sched_ule.c		optional sched_ule
 kern/serdev_if.m		standard
 kern/stack_protector.c		standard \
 	compile-with "${NORMAL_C:N-fstack-protector*}"
-kern/subr_acl_nfs4.c		optional ufs_acl | zfs
+kern/subr_acl_nfs4.c		optional ufs_acl | zfs | nfscl
 kern/subr_acl_posix1e.c		optional ufs_acl
 kern/subr_asan.c		optional kasan \
 	compile-with "${NOSAN_C:N-fstack-protector*}"
diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
index 800684e9e63b..b842d4f2fd8e 100644
--- a/sys/dev/md/md.c
+++ b/sys/dev/md/md.c
@@ -264,25 +264,40 @@ struct md_s {
 	struct g_provider *pp;
 	int (*start)(struct md_s *sc, struct bio *bp);
 	struct devstat *devstat;
-	bool candelete;
-
-	/* MD_MALLOC related fields */
-	struct indir *indir;
-	uma_zone_t uma;
-
-	/* MD_PRELOAD related fields */
-	u_char *pl_ptr;
-	size_t pl_len;
-
-	/* MD_VNODE related fields */
-	struct vnode *vnode;
-	char file[PATH_MAX];
-	char label[PATH_MAX];
 	struct ucred *cred;
-	vm_offset_t kva;
+	char label[PATH_MAX];
+	bool candelete;
 
-	/* MD_SWAP related fields */
-	vm_object_t object;
+	union {
+		/* MD_MALLOC related fields */
+		struct {
+			struct indir *indir;
+			uma_zone_t uma;
+		} s_malloc;
+
+		/* MD_PRELOAD related fields */
+		struct {
+			u_char *pl_ptr;
+			size_t pl_len;
+			char name[PATH_MAX];
+		} s_preload;
+
+		/* MD_VNODE related fields */
+		struct {
+			struct vnode *vnode;
+			char file[PATH_MAX];
+			vm_offset_t kva;
+		} s_vnode;
+
+		/* MD_SWAP related fields */
+		struct {
+			vm_object_t object;
+		} s_swap;
+
+		/* MD_NULL */
+		struct {
+		} s_null;
+	};
 };
 
 static struct indir *
@@ -324,7 +339,7 @@ destroy_indir(struct md_s *sc, struct indir *ip)
 		if (ip->shift)
 			destroy_indir(sc, (struct indir*)(ip->array[i]));
 		else if (ip->array[i] > 255)
-			uma_zfree(sc->uma, (void *)(ip->array[i]));
+			uma_zfree(sc->s_malloc.uma, (void *)(ip->array[i]));
 	}
 	del_indir(ip);
 }
@@ -675,10 +690,10 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
 	secno = bp->bio_offset / sc->sectorsize;
 	error = 0;
 	while (nsec--) {
-		osp = s_read(sc->indir, secno);
+		osp = s_read(sc->s_malloc.indir, secno);
 		if (bp->bio_cmd == BIO_DELETE) {
 			if (osp != 0)
-				error = s_write(sc->indir, secno, 0);
+				error = s_write(sc->s_malloc.indir, secno, 0);
 		} else if (bp->bio_cmd == BIO_READ) {
 			if (osp == 0) {
 				if (notmapped) {
@@ -743,10 +758,12 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
 			}
 			if (i == sc->sectorsize) {
 				if (osp != uc)
-					error = s_write(sc->indir, secno, uc);
+					error = s_write(sc->s_malloc.indir,
+					    secno, uc);
 			} else {
 				if (osp <= 255) {
-					sp = (uintptr_t)uma_zalloc(sc->uma,
+					sp = (uintptr_t)uma_zalloc(
+					    sc->s_malloc.uma,
 					    md_malloc_wait ? M_WAITOK :
 					    M_NOWAIT);
 					if (sp == 0) {
@@ -767,7 +784,8 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
 						bcopy(dst, (void *)sp,
 						    sc->sectorsize);
 					}
-					error = s_write(sc->indir, secno, sp);
+					error = s_write(sc->s_malloc.indir,
+					    secno, sp);
 				} else {
 					if (notmapped) {
 						error = md_malloc_move_ma(&m,
@@ -790,7 +808,7 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
 			error = EOPNOTSUPP;
 		}
 		if (osp > 255)
-			uma_zfree(sc->uma, (void*)osp);
+			uma_zfree(sc->s_malloc.uma, (void*)osp);
 		if (error != 0)
 			break;
 		secno++;
@@ -848,7 +866,7 @@ mdstart_preload(struct md_s *sc, struct bio *bp)
 {
 	uint8_t *p;
 
-	p = sc->pl_ptr + bp->bio_offset;
+	p = sc->s_preload.pl_ptr + bp->bio_offset;
 	switch (bp->bio_cmd) {
 	case BIO_READ:
 		if ((bp->bio_flags & BIO_VLIST) != 0) {
@@ -888,7 +906,7 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
 	bool mapped;
 
 	td = curthread;
-	vp = sc->vnode;
+	vp = sc->s_vnode.vnode;
 	piov = NULL;
 	ma_offs = bp->bio_ma_offset;
 	off = bp->bio_offset;
@@ -922,8 +940,8 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
 	case BIO_DELETE:
 		if (sc->candelete) {
 			error = vn_deallocate(vp, &off, &len, 0,
-			    sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred,
-			    NOCRED);
+			    sc->flags & MD_ASYNC ? 0 : IO_SYNC,
+			    sc->cred, NOCRED);
 			bp->bio_resid = len;
 			return (error);
 		}
@@ -963,8 +981,10 @@ unmapped_step:
 		KASSERT(iolen > 0, ("zero iolen"));
 		KASSERT(npages <= atop(maxphys + PAGE_SIZE),
 		    ("npages %d too large", npages));
-		pmap_qenter(sc->kva, &bp->bio_ma[atop(ma_offs)], npages);
-		aiov.iov_base = (void *)(sc->kva + (ma_offs & PAGE_MASK));
+		pmap_qenter(sc->s_vnode.kva, &bp->bio_ma[atop(ma_offs)],
+		    npages);
+		aiov.iov_base = (void *)(sc->s_vnode.kva + (ma_offs &
+		    PAGE_MASK));
 		aiov.iov_len = iolen;
 		auio.uio_iov = &aiov;
 		auio.uio_iovcnt = 1;
@@ -998,7 +1018,7 @@ unmapped_step:
 		    POSIX_FADV_DONTNEED);
 
 	if (mapped) {
-		pmap_qremove(sc->kva, npages);
+		pmap_qremove(sc->s_vnode.kva, npages);
 		if (error == 0) {
 			len -= iolen;
 			bp->bio_resid -= iolen;
@@ -1052,20 +1072,21 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
 	lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
 
 	rv = VM_PAGER_OK;
-	vm_object_pip_add(sc->object, 1);
+	vm_object_pip_add(sc->s_swap.object, 1);
 	for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
 		len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
-		m = vm_page_grab_unlocked(sc->object, i, VM_ALLOC_SYSTEM);
+		m = vm_page_grab_unlocked(sc->s_swap.object, i,
+		    VM_ALLOC_SYSTEM);
 		if (bp->bio_cmd == BIO_READ) {
 			if (vm_page_all_valid(m))
 				rv = VM_PAGER_OK;
 			else
-				rv = vm_pager_get_pages(sc->object, &m, 1,
-				    NULL, NULL);
+				rv = vm_pager_get_pages(sc->s_swap.object,
+				    &m, 1, NULL, NULL);
 			if (rv == VM_PAGER_ERROR) {
-				VM_OBJECT_WLOCK(sc->object);
+				VM_OBJECT_WLOCK(sc->s_swap.object);
 				vm_page_free(m);
-				VM_OBJECT_WUNLOCK(sc->object);
+				VM_OBJECT_WUNLOCK(sc->s_swap.object);
 				break;
 			} else if (rv == VM_PAGER_FAIL) {
 				/*
@@ -1092,12 +1113,12 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
 			if (len == PAGE_SIZE || vm_page_all_valid(m))
 				rv = VM_PAGER_OK;
 			else
-				rv = vm_pager_get_pages(sc->object, &m, 1,
-				    NULL, NULL);
+				rv = vm_pager_get_pages(sc->s_swap.object,
+				    &m, 1, NULL, NULL);
 			if (rv == VM_PAGER_ERROR) {
-				VM_OBJECT_WLOCK(sc->object);
+				VM_OBJECT_WLOCK(sc->s_swap.object);
 				vm_page_free(m);
-				VM_OBJECT_WUNLOCK(sc->object);
+				VM_OBJECT_WUNLOCK(sc->s_swap.object);
 				break;
 			} else if (rv == VM_PAGER_FAIL)
 				pmap_zero_page(m);
@@ -1118,12 +1139,12 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
 			if (len == PAGE_SIZE || vm_page_all_valid(m))
 				rv = VM_PAGER_OK;
 			else
-				rv = vm_pager_get_pages(sc->object, &m, 1,
-				    NULL, NULL);
-			VM_OBJECT_WLOCK(sc->object);
+				rv = vm_pager_get_pages(sc->s_swap.object,
+				    &m, 1, NULL, NULL);
+			VM_OBJECT_WLOCK(sc->s_swap.object);
 			if (rv == VM_PAGER_ERROR) {
 				vm_page_free(m);
-				VM_OBJECT_WUNLOCK(sc->object);
+				VM_OBJECT_WUNLOCK(sc->s_swap.object);
 				break;
 			} else if (rv == VM_PAGER_FAIL) {
 				vm_page_free(m);
@@ -1139,7 +1160,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
 					m = NULL;
 				}
 			}
-			VM_OBJECT_WUNLOCK(sc->object);
+			VM_OBJECT_WUNLOCK(sc->s_swap.object);
 		}
 		if (m != NULL) {
 			/*
@@ -1159,7 +1180,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
 		offs = 0;
 		ma_offs += len;
 	}
-	vm_object_pip_wakeup(sc->object);
+	vm_object_pip_wakeup(sc->s_swap.object);
 	return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
 }
 
@@ -1354,18 +1375,18 @@ mdcreate_malloc(struct md_s *sc, struct md_req *mdr)
 	if (mdr->md_fwheads != 0)
 		sc->fwheads = mdr->md_fwheads;
 	sc->flags = mdr->md_options & (MD_COMPRESS | MD_FORCE | MD_RESERVE);
-	sc->indir = dimension(sc->mediasize / sc->sectorsize);
-	sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
-	    0x1ff, 0);
+	sc->s_malloc.indir = dimension(sc->mediasize / sc->sectorsize);
+	sc->s_malloc.uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL,
+	    NULL, NULL, 0x1ff, 0);
 	if (mdr->md_options & MD_RESERVE) {
 		off_t nsectors;
 
 		nsectors = sc->mediasize / sc->sectorsize;
 		for (u = 0; u < nsectors; u++) {
-			sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
-			    M_WAITOK : M_NOWAIT) | M_ZERO);
+			sp = (uintptr_t)uma_zalloc(sc->s_malloc.uma,
+			    (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
 			if (sp != 0)
-				error = s_write(sc->indir, u, sp);
+				error = s_write(sc->s_malloc.indir, u, sp);
 			else
 				error = ENOMEM;
 			if (error != 0)
@@ -1393,7 +1414,7 @@ mdsetcred(struct md_s *sc, struct ucred *cred)
 	 * Horrible kludge to establish credentials for NFS XXX.
 	 */
 
-	if (sc->vnode) {
+	if (sc->type == MD_VNODE && sc->s_vnode.vnode != NULL) {
 		struct uio auio;
 		struct iovec aiov;
@@ -1408,9 +1429,9 @@ mdsetcred(struct md_s *sc, struct ucred *cred)
 		auio.uio_rw = UIO_READ;
 		auio.uio_segflg = UIO_SYSSPACE;
 		auio.uio_resid = aiov.iov_len;
-		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
-		error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
-		VOP_UNLOCK(sc->vnode);
+		vn_lock(sc->s_vnode.vnode, LK_EXCLUSIVE | LK_RETRY);
+		error = VOP_READ(sc->s_vnode.vnode, &auio, 0, sc->cred);
+		VOP_UNLOCK(sc->s_vnode.vnode);
 		free(tmpbuf, M_TEMP);
 	}
 	return (error);
@@ -1427,11 +1448,12 @@ mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
 	fname = mdr->md_file;
 	if (mdr->md_file_seg == UIO_USERSPACE) {
-		error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
+		error = copyinstr(fname, sc->s_vnode.file,
+		    sizeof(sc->s_vnode.file), NULL);
 		if (error != 0)
 			return (error);
 	} else if (mdr->md_file_seg == UIO_SYSSPACE)
-		strlcpy(sc->file, fname, sizeof(sc->file));
+		strlcpy(sc->s_vnode.file, fname, sizeof(sc->s_vnode.file));
 	else
 		return (EDOOFUS);
 
@@ -1441,7 +1463,7 @@ mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
 	 */
 	flags = FREAD | ((mdr->md_options & MD_READONLY) ? 0 : FWRITE) \
 	    | ((mdr->md_options & MD_VERIFY) ? O_VERIFY : 0);
-	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file);
+	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->s_vnode.file);
 	error = vn_open(&nd, &flags, 0, NULL);
 	if (error != 0)
 		return (error);
@@ -1481,17 +1503,17 @@ mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
 	    MD_VERIFY | MD_MUSTDEALLOC);
 	if (!(flags & FWRITE))
 		sc->flags |= MD_READONLY;
-	sc->vnode = nd.ni_vp;
+	sc->s_vnode.vnode = nd.ni_vp;
 
 	error = mdsetcred(sc, td->td_ucred);
 	if (error != 0) {
-		sc->vnode = NULL;
+		sc->s_vnode.vnode = NULL;
 		vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
 		nd.ni_vp->v_vflag &= ~VV_MD;
 		goto bad;
 	}
 
-	sc->kva = kva_alloc(maxphys + PAGE_SIZE);
+	sc->s_vnode.kva = kva_alloc(maxphys + PAGE_SIZE);
 	return (0);
 bad:
 	VOP_UNLOCK(nd.ni_vp);
@@ -1535,23 +1557,30 @@ mddestroy(struct md_s *sc, struct thread *td)
 		msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
 	mtx_unlock(&sc->queue_mtx);
 	mtx_destroy(&sc->queue_mtx);
-	if (sc->vnode != NULL) {
-		vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
-		sc->vnode->v_vflag &= ~VV_MD;
-		VOP_UNLOCK(sc->vnode);
-		(void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
+	switch (sc->type) {
+	case MD_VNODE:
+		vn_lock(sc->s_vnode.vnode, LK_EXCLUSIVE | LK_RETRY);
+		sc->s_vnode.vnode->v_vflag &= ~VV_MD;
+		VOP_UNLOCK(sc->s_vnode.vnode);
+		(void)vn_close(sc->s_vnode.vnode, sc->flags & MD_READONLY ?
 		    FREAD : (FREAD|FWRITE), sc->cred, td);
+		kva_free(sc->s_vnode.kva, maxphys + PAGE_SIZE);
+		break;
+	case MD_SWAP:
+		vm_object_deallocate(sc->s_swap.object);
+		break;
+	case MD_MALLOC:
+		destroy_indir(sc, sc->s_malloc.indir);
+		uma_zdestroy(sc->s_malloc.uma);
+		break;
+	case MD_PRELOAD:
+	case MD_NULL:
+		break;
+	default:
+		__assert_unreachable();
 	}
 	if (sc->cred != NULL)
 		crfree(sc->cred);
-	if (sc->object != NULL)
-		vm_object_deallocate(sc->object);
-	if (sc->indir)
-		destroy_indir(sc, sc->indir);
-	if (sc->uma)
-		uma_zdestroy(sc->uma);
-	if (sc->kva)
-		kva_free(sc->kva, maxphys + PAGE_SIZE);
 
 	LIST_REMOVE(sc, list);
 	free_unr(md_uh, sc->unit);
@@ -1576,13 +1605,14 @@ mdresize(struct md_s *sc, struct md_req *mdr)
 		oldpages = OFF_TO_IDX(sc->mediasize);
 		newpages = OFF_TO_IDX(mdr->md_mediasize);
 		if (newpages < oldpages) {
-			VM_OBJECT_WLOCK(sc->object);
-			vm_object_page_remove(sc->object, newpages, 0, 0);
+			VM_OBJECT_WLOCK(sc->s_swap.object);
+			vm_object_page_remove(sc->s_swap.object, newpages,
+			    0, 0);
 			swap_release_by_cred(IDX_TO_OFF(oldpages - newpages),
 			    sc->cred);
-			sc->object->charge = IDX_TO_OFF(newpages);
-			sc->object->size = newpages;
-			VM_OBJECT_WUNLOCK(sc->object);
+			sc->s_swap.object->charge = IDX_TO_OFF(newpages);
+			sc->s_swap.object->size = newpages;
+			VM_OBJECT_WUNLOCK(sc->s_swap.object);
 		} else if (newpages > oldpages) {
 			res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
 			    oldpages), sc->cred);
@@ -1590,7 +1620,7 @@ mdresize(struct md_s *sc, struct md_req *mdr)
 				return (ENOMEM);
 			if ((mdr->md_options & MD_RESERVE) ||
 			    (sc->flags & MD_RESERVE)) {
-				error = swap_pager_reserve(sc->object,
+				error = swap_pager_reserve(sc->s_swap.object,
 				    oldpages, newpages - oldpages);
 				if (error < 0) {
 					swap_release_by_cred(
@@ -1599,10 +1629,10 @@ mdresize(struct md_s *sc, struct md_req *mdr)
 					return (EDOM);
 				}
 			}
-			VM_OBJECT_WLOCK(sc->object);
-			sc->object->charge = IDX_TO_OFF(newpages);
-			sc->object->size = newpages;
-			VM_OBJECT_WUNLOCK(sc->object);
+			VM_OBJECT_WLOCK(sc->s_swap.object);
+			sc->s_swap.object->charge = IDX_TO_OFF(newpages);
+			sc->s_swap.object->size = newpages;
+			VM_OBJECT_WUNLOCK(sc->s_swap.object);
 		}
 		break;
 	default:
@@ -1643,13 +1673,13 @@ mdcreate_swap(struct md_s *sc, struct md_req *mdr, struct thread *td)
 		sc->fwsectors = mdr->md_fwsectors;
 	if (mdr->md_fwheads != 0)
 		sc->fwheads = mdr->md_fwheads;
-	sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
-	    VM_PROT_DEFAULT, 0, td->td_ucred);
-	if (sc->object == NULL)
+	sc->s_swap.object = vm_pager_allocate(OBJT_SWAP, NULL,
+	    PAGE_SIZE * npage, VM_PROT_DEFAULT, 0, td->td_ucred);
+	if (sc->s_swap.object == NULL)
 		return (ENOMEM);
 	sc->flags = mdr->md_options & (MD_FORCE | MD_RESERVE);
 	if (mdr->md_options & MD_RESERVE) {
-		if (swap_pager_reserve(sc->object, 0, npage) < 0) {
+		if (swap_pager_reserve(sc->s_swap.object, 0, npage) < 0) {
 			error = EDOM;
 			goto finish;
 		}
@@ -1657,8 +1687,8 @@ mdcreate_swap(struct md_s *sc, struct md_req *mdr, struct thread *td)
 	error = mdsetcred(sc, td->td_ucred);
 finish:
 	if (error != 0) {
-		vm_object_deallocate(sc->object);
-		sc->object = NULL;
+		vm_object_deallocate(sc->s_swap.object);
+		sc->s_swap.object = NULL;
 	}
 	return (error);
 }
@@ -1856,10 +1886,13 @@ kern_mdquery_locked(struct md_req *mdr)
 		if (error != 0)
 			return (error);
 	}
-	if (sc->type == MD_VNODE ||
-	    (sc->type == MD_PRELOAD && mdr->md_file != NULL))
-		error = copyout(sc->file, mdr->md_file,
-		    strlen(sc->file) + 1);
+	if (sc->type == MD_VNODE) {
+		error = copyout(sc->s_vnode.file, mdr->md_file,
+		    strlen(sc->s_vnode.file) + 1);
+	} else if (sc->type == MD_PRELOAD && mdr->md_file != NULL) {
+		error = copyout(sc->s_preload.name, mdr->md_file,
+		    strlen(sc->s_preload.name) + 1);
+	}
 	return (error);
 }
@@ -2011,11 +2044,12 @@ md_preloaded(u_char *image, size_t length, const char *name)
 		return;
 	sc->mediasize = length;
 	sc->sectorsize = DEV_BSIZE;
-	sc->pl_ptr = image;
-	sc->pl_len = length;
+	sc->s_preload.pl_ptr = image;
+	sc->s_preload.pl_len = length;
 	sc->start = mdstart_preload;
 	if (name != NULL)
-		strlcpy(sc->file, name, sizeof(sc->file));
+		strlcpy(sc->s_preload.name, name,
+		    sizeof(sc->s_preload.name));
 #ifdef MD_ROOT
 	if (sc->unit == 0) {
 #ifndef ROOTDEVNAME
@@ -2127,9 +2161,14 @@ g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
 		sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
 		sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
 		sbuf_printf(sb, " t %s", type);
-		if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
-		    (mp->type == MD_PRELOAD && mp->file[0] != '\0'))
-			sbuf_printf(sb, " file %s", mp->file);
+		if (mp->type == MD_VNODE &&
+		    mp->s_vnode.vnode != NULL)
+			sbuf_printf(sb, " file %s", mp->s_vnode.file);
+		if (mp->type == MD_PRELOAD &&
+		    mp->s_preload.name[0] != '\0') {
+			sbuf_printf(sb, " file %s",
+			    mp->s_preload.name);
+		}
 		sbuf_printf(sb, " label %s", mp->label);
 	} else {
 		sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
@@ -2154,15 +2193,23 @@ g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
 		    "read-only");
 		sbuf_printf(sb, "%s<type>%s</type>\n", indent,
 		    type);
-		if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
-		    (mp->type == MD_PRELOAD && mp->file[0] != '\0')) {
+		if (mp->type == MD_VNODE) {
+			if (mp->s_vnode.vnode != NULL) {
+				sbuf_printf(sb, "%s<file>", indent);
+				g_conf_printf_escaped(sb, "%s",
+				    mp->s_vnode.file);
+				sbuf_printf(sb, "</file>\n");
+			}
+			sbuf_printf(sb, "%s<cache>%s</cache>\n", indent,
+			    (mp->flags & MD_CACHE) == 0 ? "off": "on");
+		}
+		if (mp->type == MD_PRELOAD &&
+		    mp->s_preload.name[0] != '\0') {
 			sbuf_printf(sb, "%s<file>", indent);
-			g_conf_printf_escaped(sb, "%s", mp->file);
+			g_conf_printf_escaped(sb, "%s",
+			    mp->s_preload.name);
 			sbuf_printf(sb, "</file>\n");
 		}
-		if (mp->type == MD_VNODE)
-			sbuf_printf(sb, "%s<cache>%s</cache>\n", indent,
-			    (mp->flags & MD_CACHE) == 0 ? "off": "on");
 		sbuf_printf(sb, "%s<label>", indent);
 		g_conf_printf_escaped(sb, "%s", mp->label);
 		sbuf_printf(sb, "</label>\n");
diff --git a/sys/fs/nfsclient/nfs_clport.c b/sys/fs/nfsclient/nfs_clport.c
index ce7221c133cf..b25d967982a1 100644
--- a/sys/fs/nfsclient/nfs_clport.c
+++ b/sys/fs/nfsclient/nfs_clport.c
@@ -1489,3 +1489,4 @@ MODULE_DEPEND(nfscl, nfscommon, 1, 1, 1);
 MODULE_DEPEND(nfscl, krpc, 1, 1, 1);
 MODULE_DEPEND(nfscl, nfssvc, 1, 1, 1);
 MODULE_DEPEND(nfscl, xdr, 1, 1, 1);
+MODULE_DEPEND(nfscl, acl_nfs4, 1, 1, 1);
diff --git a/sys/kern/link_elf_obj.c b/sys/kern/link_elf_obj.c
index 3d18aed2b1c0..151aab96f9be 100644
--- a/sys/kern/link_elf_obj.c
+++ b/sys/kern/link_elf_obj.c
@@ -1514,19 +1514,17 @@ static void
 link_elf_ifunc_symbol_value(linker_file_t lf, caddr_t *valp, size_t *sizep)
 {
 	c_linker_sym_t sym;
-	elf_file_t ef;
 	const Elf_Sym *es;
 	caddr_t val;
 	long off;
 
 	val = *valp;
-	ef = (elf_file_t)lf;
 
 	/* Provide the value and size of the target symbol, if available. */
 	val = ((caddr_t (*)(void))val)();
 	if (link_elf_search_symbol(lf, val, &sym, &off) == 0 && off == 0) {
 		es = (const Elf_Sym *)sym;
-		*valp = (caddr_t)ef->address + es->st_value;
+		*valp = (caddr_t)es->st_value;
 		*sizep = es->st_size;
 	} else {
 		*valp = val;
diff --git a/sys/net/if_ethersubr.c b/sys/net/if_ethersubr.c
index 66a4724a786c..7be4dfac23e7 100644
--- a/sys/net/if_ethersubr.c
+++ b/sys/net/if_ethersubr.c
@@ -1486,7 +1486,7 @@ ether_gen_addr_byname(const char *nameunit, struct ether_addr *hwaddr)
 	char uuid[HOSTUUIDLEN + 1];
 	uint64_t addr;
 	int i, sz;
-	char digest[SHA1_RESULTLEN];
+	unsigned char digest[SHA1_RESULTLEN];
 	char jailname[MAXHOSTNAMELEN];
 
 	getcredhostuuid(curthread->td_ucred, uuid, sizeof(uuid));
@@ -1510,9 +1510,7 @@ ether_gen_addr_byname(const char *nameunit, struct ether_addr *hwaddr)
 	SHA1Final(digest, &ctx);
 	free(buf, M_TEMP);
 
-	addr = ((digest[0] << 16) | (digest[1] << 8) | digest[2]) &
-	    OUI_FREEBSD_GENERATED_MASK;
-	addr = OUI_FREEBSD(addr);
+	addr = (digest[0] << 8) | digest[1] | OUI_FREEBSD_GENERATED_LOW;
 	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
 		hwaddr->octet[i] = addr >> ((ETHER_ADDR_LEN - i - 1) * 8) &
 		    0xFF;
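
Note on the md.c change above: struct md_s used to keep the per-backend fields (malloc, preload, vnode, swap) side by side, and mddestroy() probed each one for NULL. The refactor moves them into an anonymous union of per-type sub-structs and switches on sc->type instead. The fragment below is a minimal, self-contained sketch of that tagged-union pattern; the names (struct disk, disk_type, disk_destroy) are hypothetical illustrations, not FreeBSD interfaces.

/*
 * Sketch of the tagged-union layout adopted in md.c above.
 * Hypothetical names; C11 anonymous unions assumed.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

enum disk_type { DT_MALLOC, DT_FILE, DT_NULL };

struct disk {
	enum disk_type type;		/* discriminates the union below */
	union {
		struct {
			void *buf;	/* valid only when type == DT_MALLOC */
		} s_malloc;
		struct {
			char path[256];	/* valid only when type == DT_FILE */
		} s_file;
	};
};

static void
disk_destroy(struct disk *d)
{
	/* Release only the resources owned by the active union member. */
	switch (d->type) {
	case DT_MALLOC:
		free(d->s_malloc.buf);
		break;
	case DT_FILE:
		printf("closing %s\n", d->s_file.path);
		break;
	case DT_NULL:
		break;
	default:
		assert(0 && "unreachable disk type");
	}
}

int
main(void)
{
	struct disk d = { .type = DT_MALLOC };

	d.s_malloc.buf = malloc(16);
	disk_destroy(&d);
	return (0);
}

Compared with the old chain of per-field NULL checks, the switch makes the ownership explicit: only the fields of the active union member are ever consulted, an unknown type is caught immediately, and the overlapping storage also shrinks the structure since only one backend's state is live at a time.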