author     Pyun YongHyeon <yongari@FreeBSD.org>    2008-08-29 01:30:26 +0000
committer  Pyun YongHyeon <yongari@FreeBSD.org>    2008-08-29 01:30:26 +0000
commit     c8979f2ae090d922a756da69e07c5eca42a31cba
tree       38f00ff5ffba3129088dda48db77820817fe4395
parent     55c0e52f9ab883e26fdc7b86f42ac1bf27048182
-rw-r--r--  sys/dev/bfe/if_bfe.c    | 829
-rw-r--r--  sys/dev/bfe/if_bfereg.h |  37
2 files changed, 495 insertions(+), 371 deletions(-)
diff --git a/sys/dev/bfe/if_bfe.c b/sys/dev/bfe/if_bfe.c
index c197ec12e9a2..2647f294c2ba 100644
--- a/sys/dev/bfe/if_bfe.c
+++ b/sys/dev/bfe/if_bfe.c
@@ -30,41 +30,32 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/systm.h>
-#include <sys/sockio.h>
-#include <sys/mbuf.h>
-#include <sys/malloc.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
 #include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
 #include <sys/module.h>
+#include <sys/rman.h>
 #include <sys/socket.h>
-#include <sys/queue.h>
+#include <sys/sockio.h>
 
+#include <net/bpf.h>
 #include <net/if.h>
-#include <net/if_arp.h>
 #include <net/ethernet.h>
 #include <net/if_dl.h>
 #include <net/if_media.h>
-
-#include <net/bpf.h>
-
 #include <net/if_types.h>
 #include <net/if_vlan_var.h>
 
-#include <netinet/in_systm.h>
-#include <netinet/in.h>
-#include <netinet/ip.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-#include <sys/bus.h>
-#include <sys/rman.h>
-
 #include <dev/mii/mii.h>
 #include <dev/mii/miivar.h>
-#include "miidevs.h"
 
 #include <dev/pci/pcireg.h>
 #include <dev/pci/pcivar.h>
 
+#include <machine/bus.h>
+
 #include <dev/bfe/if_bfereg.h>
 
 MODULE_DEPEND(bfe, pci, 1, 1, 1);
@@ -91,6 +82,7 @@ static int  bfe_suspend			(device_t);
 static int  bfe_resume				(device_t);
 static void bfe_release_resources		(struct bfe_softc *);
 static void bfe_intr				(void *);
+static int  bfe_encap				(struct bfe_softc *, struct mbuf **);
 static void bfe_start				(struct ifnet *);
 static void bfe_start_locked			(struct ifnet *);
 static int  bfe_ioctl				(struct ifnet *, u_long, caddr_t);
@@ -104,7 +96,9 @@ static void bfe_txeof				(struct bfe_softc *);
 static void bfe_rxeof				(struct bfe_softc *);
 static void bfe_set_rx_mode			(struct bfe_softc *);
 static int  bfe_list_rx_init			(struct bfe_softc *);
-static int  bfe_list_newbuf			(struct bfe_softc *, int, struct mbuf*);
+static void bfe_list_tx_init			(struct bfe_softc *);
+static void bfe_discard_buf			(struct bfe_softc *, int);
+static int  bfe_list_newbuf			(struct bfe_softc *, int);
 static void bfe_rx_ring_free			(struct bfe_softc *);
 
 static void bfe_pci_setup			(struct bfe_softc *, u_int32_t);
@@ -127,8 +121,8 @@ static void bfe_chip_reset			(struct bfe_softc *);
 static void bfe_chip_halt			(struct bfe_softc *);
 static void bfe_core_reset			(struct bfe_softc *);
 static void bfe_core_disable			(struct bfe_softc *);
-static int  bfe_dma_alloc			(device_t);
-static void bfe_dma_map_desc			(void *, bus_dma_segment_t *, int, int);
+static int  bfe_dma_alloc			(struct bfe_softc *);
+static void bfe_dma_free			(struct bfe_softc *sc);
 static void bfe_dma_map				(void *, bus_dma_segment_t *, int, int);
 
 static void bfe_cam_write			(struct bfe_softc *, u_char *, int);
@@ -186,137 +180,252 @@ bfe_probe(device_t dev)
 	return (ENXIO);
 }
 
+struct bfe_dmamap_arg {
+	bus_addr_t	bfe_busaddr;
+};
+
 static int
-bfe_dma_alloc(device_t dev)
+bfe_dma_alloc(struct bfe_softc *sc)
 {
-	struct bfe_softc *sc;
+	struct bfe_dmamap_arg ctx;
+	struct bfe_rx_data *rd;
+	struct bfe_tx_data *td;
 	int error, i;
 
-	sc = device_get_softc(dev);
-
 	/*
 	 * parent tag. Apparently the chip cannot handle any DMA address
 	 * greater than 1GB.
 	 */
-	error = bus_dma_tag_create(NULL,  /* parent */
-			4096, 0, /* alignment, boundary */
-			0x3FFFFFFF, /* lowaddr */
-			BUS_SPACE_MAXADDR, /* highaddr */
-			NULL, NULL, /* filter, filterarg */
-			MAXBSIZE, /* maxsize */
-			BUS_SPACE_UNRESTRICTED, /* num of segments */
-			BUS_SPACE_MAXSIZE_32BIT, /* max segment size */
-			0, /* flags */
-			NULL, NULL, /* lockfunc, lockarg */
-			&sc->bfe_parent_tag);
-
-	/* tag for TX ring */
-	error = bus_dma_tag_create(sc->bfe_parent_tag,
-			4096, 0,
-			BUS_SPACE_MAXADDR,
-			BUS_SPACE_MAXADDR,
-			NULL, NULL,
-			BFE_TX_LIST_SIZE,
-			1,
-			BUS_SPACE_MAXSIZE_32BIT,
-			0,
-			NULL, NULL,
-			&sc->bfe_tx_tag);
+	error = bus_dma_tag_create(bus_get_dma_tag(sc->bfe_dev), /* parent */
+	    1, 0,			/* alignment, boundary */
+	    BFE_DMA_MAXADDR,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,		/* highaddr */
+	    NULL, NULL,			/* filter, filterarg */
+	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
+	    0,				/* nsegments */
+	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
+	    0,				/* flags */
+	    NULL, NULL,			/* lockfunc, lockarg */
+	    &sc->bfe_parent_tag);
+	if (error != 0) {
+		device_printf(sc->bfe_dev, "cannot create parent DMA tag.\n");
+		goto fail;
+	}
 
-	if (error) {
-		device_printf(dev, "could not allocate dma tag\n");
-		return (ENOMEM);
+	/* Create tag for Tx ring. */
+	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
+	    BFE_TX_RING_ALIGN, 0,	/* alignment, boundary */
+	    BUS_SPACE_MAXADDR,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,		/* highaddr */
+	    NULL, NULL,			/* filter, filterarg */
+	    BFE_TX_LIST_SIZE,		/* maxsize */
+	    1,				/* nsegments */
+	    BFE_TX_LIST_SIZE,		/* maxsegsize */
+	    0,				/* flags */
+	    NULL, NULL,			/* lockfunc, lockarg */
+	    &sc->bfe_tx_tag);
+	if (error != 0) {
+		device_printf(sc->bfe_dev, "cannot create Tx ring DMA tag.\n");
+		goto fail;
 	}
 
-	/* tag for RX ring */
-	error = bus_dma_tag_create(sc->bfe_parent_tag,
-			4096, 0,
-			BUS_SPACE_MAXADDR,
-			BUS_SPACE_MAXADDR,
-			NULL, NULL,
-			BFE_RX_LIST_SIZE,
-			1,
-			BUS_SPACE_MAXSIZE_32BIT,
-			0,
-			NULL, NULL,
-			&sc->bfe_rx_tag);
+	/* Create tag for Rx ring. */
+	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
+	    BFE_RX_RING_ALIGN, 0,	/* alignment, boundary */
+	    BUS_SPACE_MAXADDR,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,		/* highaddr */
+	    NULL, NULL,			/* filter, filterarg */
+	    BFE_RX_LIST_SIZE,		/* maxsize */
+	    1,				/* nsegments */
+	    BFE_RX_LIST_SIZE,		/* maxsegsize */
+	    0,				/* flags */
+	    NULL, NULL,			/* lockfunc, lockarg */
+	    &sc->bfe_rx_tag);
+	if (error != 0) {
+		device_printf(sc->bfe_dev, "cannot create Rx ring DMA tag.\n");
+		goto fail;
+	}
 
-	if (error) {
-		device_printf(dev, "could not allocate dma tag\n");
-		return (ENOMEM);
+	/* Create tag for Tx buffers. */
+	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
+	    1, 0,			/* alignment, boundary */
+	    BUS_SPACE_MAXADDR,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,		/* highaddr */
+	    NULL, NULL,			/* filter, filterarg */
+	    MCLBYTES * BFE_MAXTXSEGS,	/* maxsize */
+	    BFE_MAXTXSEGS,		/* nsegments */
+	    MCLBYTES,			/* maxsegsize */
+	    0,				/* flags */
+	    NULL, NULL,			/* lockfunc, lockarg */
+	    &sc->bfe_txmbuf_tag);
+	if (error != 0) {
+		device_printf(sc->bfe_dev,
+		    "cannot create Tx buffer DMA tag.\n");
+		goto fail;
 	}
 
-	/* tag for mbufs */
-	error = bus_dma_tag_create(sc->bfe_parent_tag,
-			ETHER_ALIGN, 0,
-			BUS_SPACE_MAXADDR,
-			BUS_SPACE_MAXADDR,
-			NULL, NULL,
-			MCLBYTES,
-			1,
-			BUS_SPACE_MAXSIZE_32BIT,
-			BUS_DMA_ALLOCNOW,
-			NULL, NULL,
-			&sc->bfe_tag);
+	/* Create tag for Rx buffers. */
+	error = bus_dma_tag_create(sc->bfe_parent_tag, /* parent */
+	    1, 0,			/* alignment, boundary */
+	    BUS_SPACE_MAXADDR,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,		/* highaddr */
+	    NULL, NULL,			/* filter, filterarg */
+	    MCLBYTES,			/* maxsize */
+	    1,				/* nsegments */
+	    MCLBYTES,			/* maxsegsize */
+	    0,				/* flags */
+	    NULL, NULL,			/* lockfunc, lockarg */
+	    &sc->bfe_rxmbuf_tag);
+	if (error != 0) {
+		device_printf(sc->bfe_dev,
+		    "cannot create Rx buffer DMA tag.\n");
+		goto fail;
+	}
 
-	if (error) {
-		device_printf(dev, "could not allocate dma tag\n");
-		return (ENOMEM);
+	/* Allocate DMA'able memory and load DMA map. */
+	error = bus_dmamem_alloc(sc->bfe_tx_tag, (void *)&sc->bfe_tx_list,
+	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->bfe_tx_map);
+	if (error != 0) {
+		device_printf(sc->bfe_dev,
+		    "cannot allocate DMA'able memory for Tx ring.\n");
+		goto fail;
 	}
+	ctx.bfe_busaddr = 0;
+	error = bus_dmamap_load(sc->bfe_tx_tag, sc->bfe_tx_map,
+	    sc->bfe_tx_list, BFE_TX_LIST_SIZE, bfe_dma_map, &ctx,
+	    BUS_DMA_NOWAIT);
+	if (error != 0 || ctx.bfe_busaddr == 0) {
+		device_printf(sc->bfe_dev,
+		    "cannot load DMA'able memory for Tx ring.\n");
+		goto fail;
+	}
+	sc->bfe_tx_dma = BFE_ADDR_LO(ctx.bfe_busaddr);
 
-	/* pre allocate dmamaps for RX list */
-	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
-		error = bus_dmamap_create(sc->bfe_tag, 0,
-		    &sc->bfe_rx_ring[i].bfe_map);
-		if (error) {
-			device_printf(dev, "cannot create DMA map for RX\n");
-			return (ENOMEM);
-		}
+	error = bus_dmamem_alloc(sc->bfe_rx_tag, (void *)&sc->bfe_rx_list,
+	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->bfe_rx_map);
+	if (error != 0) {
+		device_printf(sc->bfe_dev,
+		    "cannot allocate DMA'able memory for Rx ring.\n");
+		goto fail;
+	}
+	ctx.bfe_busaddr = 0;
+	error = bus_dmamap_load(sc->bfe_rx_tag, sc->bfe_rx_map,
+	    sc->bfe_rx_list, BFE_RX_LIST_SIZE, bfe_dma_map, &ctx,
+	    BUS_DMA_NOWAIT);
+	if (error != 0 || ctx.bfe_busaddr == 0) {
+		device_printf(sc->bfe_dev,
+		    "cannot load DMA'able memory for Rx ring.\n");
+		goto fail;
 	}
+	sc->bfe_rx_dma = BFE_ADDR_LO(ctx.bfe_busaddr);
 
-	/* pre allocate dmamaps for TX list */
+	/* Create DMA maps for Tx buffers. */
 	for (i = 0; i < BFE_TX_LIST_CNT; i++) {
-		error = bus_dmamap_create(sc->bfe_tag, 0,
-		    &sc->bfe_tx_ring[i].bfe_map);
-		if (error) {
-			device_printf(dev, "cannot create DMA map for TX\n");
-			return (ENOMEM);
+		td = &sc->bfe_tx_ring[i];
+		td->bfe_mbuf = NULL;
+		td->bfe_map = NULL;
+		error = bus_dmamap_create(sc->bfe_txmbuf_tag, 0, &td->bfe_map);
+		if (error != 0) {
+			device_printf(sc->bfe_dev,
+			    "cannot create DMA map for Tx.\n");
+			goto fail;
 		}
 	}
 
-	/* Alloc dma for rx ring */
-	error = bus_dmamem_alloc(sc->bfe_rx_tag, (void *)&sc->bfe_rx_list,
-	    BUS_DMA_NOWAIT, &sc->bfe_rx_map);
-
-	if (error)
-		return (ENOMEM);
-
-	bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE);
-	error = bus_dmamap_load(sc->bfe_rx_tag, sc->bfe_rx_map,
-	    sc->bfe_rx_list, sizeof(struct bfe_desc),
-	    bfe_dma_map, &sc->bfe_rx_dma, BUS_DMA_NOWAIT);
+	/* Create spare DMA map for Rx buffers. */
+	error = bus_dmamap_create(sc->bfe_rxmbuf_tag, 0, &sc->bfe_rx_sparemap);
+	if (error != 0) {
+		device_printf(sc->bfe_dev, "cannot create spare DMA map for Rx.\n");
+		goto fail;
+	}
+	/* Create DMA maps for Rx buffers. */
+	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
+		rd = &sc->bfe_rx_ring[i];
+		rd->bfe_mbuf = NULL;
+		rd->bfe_map = NULL;
+		rd->bfe_ctrl = 0;
+		error = bus_dmamap_create(sc->bfe_rxmbuf_tag, 0, &rd->bfe_map);
+		if (error != 0) {
+			device_printf(sc->bfe_dev,
+			    "cannot create DMA map for Rx.\n");
+			goto fail;
+		}
+	}
 
-	if (error)
-		return (ENOMEM);
+fail:
+	return (error);
+}
 
-	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREWRITE);
+static void
+bfe_dma_free(struct bfe_softc *sc)
+{
+	struct bfe_tx_data *td;
+	struct bfe_rx_data *rd;
+	int i;
 
-	error = bus_dmamem_alloc(sc->bfe_tx_tag, (void *)&sc->bfe_tx_list,
-	    BUS_DMA_NOWAIT, &sc->bfe_tx_map);
-	if (error)
-		return (ENOMEM);
+	/* Tx ring. */
+	if (sc->bfe_tx_tag != NULL) {
+		if (sc->bfe_tx_map != NULL)
+			bus_dmamap_unload(sc->bfe_tx_tag, sc->bfe_tx_map);
+		if (sc->bfe_tx_map != NULL && sc->bfe_tx_list != NULL)
+			bus_dmamem_free(sc->bfe_tx_tag, sc->bfe_tx_list,
+			    sc->bfe_tx_map);
+		sc->bfe_tx_map = NULL;
+		sc->bfe_tx_list = NULL;
+		bus_dma_tag_destroy(sc->bfe_tx_tag);
+		sc->bfe_tx_tag = NULL;
+	}
+	/* Rx ring. */
+	if (sc->bfe_rx_tag != NULL) {
+		if (sc->bfe_rx_map != NULL)
+			bus_dmamap_unload(sc->bfe_rx_tag, sc->bfe_rx_map);
+		if (sc->bfe_rx_map != NULL && sc->bfe_rx_list != NULL)
+			bus_dmamem_free(sc->bfe_rx_tag, sc->bfe_rx_list,
+			    sc->bfe_rx_map);
+		sc->bfe_rx_map = NULL;
+		sc->bfe_rx_list = NULL;
+		bus_dma_tag_destroy(sc->bfe_rx_tag);
+		sc->bfe_rx_tag = NULL;
+	}
 
-	error = bus_dmamap_load(sc->bfe_tx_tag, sc->bfe_tx_map,
-	    sc->bfe_tx_list, sizeof(struct bfe_desc),
-	    bfe_dma_map, &sc->bfe_tx_dma, BUS_DMA_NOWAIT);
-	if (error)
-		return (ENOMEM);
+	/* Tx buffers. */
+	if (sc->bfe_txmbuf_tag != NULL) {
+		for (i = 0; i < BFE_TX_LIST_CNT; i++) {
+			td = &sc->bfe_tx_ring[i];
+			if (td->bfe_map != NULL) {
+				bus_dmamap_destroy(sc->bfe_txmbuf_tag,
+				    td->bfe_map);
+				td->bfe_map = NULL;
+			}
+		}
+		bus_dma_tag_destroy(sc->bfe_txmbuf_tag);
+		sc->bfe_txmbuf_tag = NULL;
+	}
 
-	bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE);
-	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREWRITE);
+	/* Rx buffers. */
+	if (sc->bfe_rxmbuf_tag != NULL) {
+		for (i = 0; i < BFE_RX_LIST_CNT; i++) {
+			rd = &sc->bfe_rx_ring[i];
+			if (rd->bfe_map != NULL) {
+				bus_dmamap_destroy(sc->bfe_rxmbuf_tag,
+				    rd->bfe_map);
+				rd->bfe_map = NULL;
+			}
+		}
+		if (sc->bfe_rx_sparemap != NULL) {
+			bus_dmamap_destroy(sc->bfe_rxmbuf_tag,
+			    sc->bfe_rx_sparemap);
+			sc->bfe_rx_sparemap = NULL;
+		}
+		bus_dma_tag_destroy(sc->bfe_rxmbuf_tag);
+		sc->bfe_rxmbuf_tag = NULL;
+	}
 
-	return (0);
+	if (sc->bfe_parent_tag != NULL) {
+		bus_dma_tag_destroy(sc->bfe_parent_tag);
+		sc->bfe_parent_tag = NULL;
+	}
 }
 
 static int
@@ -338,7 +447,7 @@ bfe_attach(device_t dev)
 	 */
 	pci_enable_busmaster(dev);
 
-	rid = BFE_PCI_MEMLO;
+	rid = PCIR_BAR(0);
 	sc->bfe_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 	    RF_ACTIVE);
 	if (sc->bfe_res == NULL) {
@@ -347,10 +456,6 @@
 		goto fail;
 	}
 
-	sc->bfe_btag = rman_get_bustag(sc->bfe_res);
-	sc->bfe_bhandle = rman_get_bushandle(sc->bfe_res);
-	sc->bfe_vhandle = (vm_offset_t)rman_get_virtual(sc->bfe_res);
-
 	/* Allocate interrupt */
 	rid = 0;
 
@@ -362,7 +467,7 @@
 		goto fail;
 	}
 
-	if (bfe_dma_alloc(dev)) {
+	if (bfe_dma_alloc(sc) != 0) {
 		device_printf(dev, "failed to allocate DMA resources\n");
 		error = ENXIO;
 		goto fail;
@@ -420,8 +525,8 @@
 		goto fail;
 	}
 fail:
-	if (error)
-		bfe_release_resources(sc);
+	if (error != 0)
+		bfe_detach(dev);
 
 	return (error);
 }
@@ -433,8 +538,6 @@ bfe_detach(device_t dev)
 
 	sc = device_get_softc(dev);
 
-	KASSERT(mtx_initialized(&sc->bfe_mtx), ("bfe mutex not initialized"));
-
 	ifp = sc->bfe_ifp;
 
 	if (device_is_attached(dev)) {
@@ -446,13 +549,16 @@
 		ether_ifdetach(ifp);
 	}
 
+	BFE_LOCK(sc);
 	bfe_chip_reset(sc);
+	BFE_UNLOCK(sc);
 
 	bus_generic_detach(dev);
 	if (sc->bfe_miibus != NULL)
 		device_delete_child(dev, sc->bfe_miibus);
 
 	bfe_release_resources(sc);
+	bfe_dma_free(sc);
 	mtx_destroy(&sc->bfe_mtx);
 
 	return (0);
@@ -585,14 +691,17 @@ bfe_tx_ring_free(struct bfe_softc *sc)
 
 	for(i = 0; i < BFE_TX_LIST_CNT; i++) {
 		if (sc->bfe_tx_ring[i].bfe_mbuf != NULL) {
+			bus_dmamap_sync(sc->bfe_txmbuf_tag,
+			    sc->bfe_tx_ring[i].bfe_map, BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(sc->bfe_txmbuf_tag,
+			    sc->bfe_tx_ring[i].bfe_map);
 			m_freem(sc->bfe_tx_ring[i].bfe_mbuf);
 			sc->bfe_tx_ring[i].bfe_mbuf = NULL;
-			bus_dmamap_unload(sc->bfe_tag,
-			    sc->bfe_tx_ring[i].bfe_map);
 		}
 	}
 	bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE);
-	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREWRITE);
+	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 }
 
 static void
@@ -602,85 +711,117 @@ bfe_rx_ring_free(struct bfe_softc *sc)
 
 	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
 		if (sc->bfe_rx_ring[i].bfe_mbuf != NULL) {
+			bus_dmamap_sync(sc->bfe_rxmbuf_tag,
+			    sc->bfe_rx_ring[i].bfe_map, BUS_DMASYNC_POSTREAD);
+			bus_dmamap_unload(sc->bfe_rxmbuf_tag,
+			    sc->bfe_rx_ring[i].bfe_map);
 			m_freem(sc->bfe_rx_ring[i].bfe_mbuf);
 			sc->bfe_rx_ring[i].bfe_mbuf = NULL;
-			bus_dmamap_unload(sc->bfe_tag,
-			    sc->bfe_rx_ring[i].bfe_map);
 		}
 	}
 	bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE);
-	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREWRITE);
+	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 }
 
 static int
 bfe_list_rx_init(struct bfe_softc *sc)
{
+	struct bfe_rx_data *rd;
 	int i;
 
-	for(i = 0; i < BFE_RX_LIST_CNT; i++) {
-		if (bfe_list_newbuf(sc, i, NULL) == ENOBUFS)
+	sc->bfe_rx_prod = sc->bfe_rx_cons = 0;
+	bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE);
+	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
+		rd = &sc->bfe_rx_ring[i];
+		rd->bfe_mbuf = NULL;
+		rd->bfe_ctrl = 0;
+		if (bfe_list_newbuf(sc, i) != 0)
 			return (ENOBUFS);
 	}
 
-	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREWRITE);
+	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 	CSR_WRITE_4(sc, BFE_DMARX_PTR, (i * sizeof(struct bfe_desc)));
 
-	sc->bfe_rx_cons = 0;
-
 	return (0);
 }
 
+static void
+bfe_list_tx_init(struct bfe_softc *sc)
+{
+	int i;
+
+	sc->bfe_tx_cnt = sc->bfe_tx_prod = sc->bfe_tx_cons = 0;
+	bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE);
+	for (i = 0; i < BFE_TX_LIST_CNT; i++)
+		sc->bfe_tx_ring[i].bfe_mbuf = NULL;
+
+	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+}
+
+static void
+bfe_discard_buf(struct bfe_softc *sc, int c)
+{
+	struct bfe_rx_data *r;
+	struct bfe_desc *d;
+
+	r = &sc->bfe_rx_ring[c];
+	d = &sc->bfe_rx_list[c];
+	d->bfe_ctrl = htole32(r->bfe_ctrl);
+}
+
 static int
-bfe_list_newbuf(struct bfe_softc *sc, int c, struct mbuf *m)
+bfe_list_newbuf(struct bfe_softc *sc, int c)
 {
 	struct bfe_rxheader *rx_header;
 	struct bfe_desc *d;
-	struct bfe_data *r;
+	struct bfe_rx_data *r;
+	struct mbuf *m;
+	bus_dma_segment_t segs[1];
+	bus_dmamap_t map;
 	u_int32_t ctrl;
-	int allocated, error;
+	int nsegs;
 
-	if ((c < 0) || (c >= BFE_RX_LIST_CNT))
-		return (EINVAL);
+	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+	m->m_len = m->m_pkthdr.len = MCLBYTES;
 
-	allocated = 0;
-	if (m == NULL) {
-		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
-		if (m == NULL)
-			return (ENOBUFS);
-		m->m_len = m->m_pkthdr.len = MCLBYTES;
-		allocated++;
+	if (bus_dmamap_load_mbuf_sg(sc->bfe_rxmbuf_tag, sc->bfe_rx_sparemap,
+	    m, segs, &nsegs, 0) != 0) {
+		m_freem(m);
+		return (ENOBUFS);
 	}
-	else
-		m->m_data = m->m_ext.ext_buf;
 
-	rx_header = mtod(m, struct bfe_rxheader *);
-	rx_header->len = 0;
-	rx_header->flags = 0;
-
-	/* Map the mbuf into DMA */
-	sc->bfe_rx_cnt = c;
-	d = &sc->bfe_rx_list[c];
+	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
 	r = &sc->bfe_rx_ring[c];
-	error = bus_dmamap_load(sc->bfe_tag, r->bfe_map, mtod(m, void *),
-	    MCLBYTES, bfe_dma_map_desc, d, BUS_DMA_NOWAIT);
-	if (error != 0) {
-		if (allocated != 0)
-			m_free(m);
-		if (error != ENOMEM)
-			device_printf(sc->bfe_dev,
-			    "failed to map RX buffer, error %d\n", error);
-		return (ENOBUFS);
+	if (r->bfe_mbuf != NULL) {
+		bus_dmamap_sync(sc->bfe_rxmbuf_tag, r->bfe_map,
+		    BUS_DMASYNC_POSTREAD);
+		bus_dmamap_unload(sc->bfe_rxmbuf_tag, r->bfe_map);
 	}
-	bus_dmamap_sync(sc->bfe_tag, r->bfe_map, BUS_DMASYNC_PREWRITE);
-
-	ctrl = ETHER_MAX_LEN + 32;
+	map = r->bfe_map;
+	r->bfe_map = sc->bfe_rx_sparemap;
+	sc->bfe_rx_sparemap = map;
+	r->bfe_mbuf = m;
+	rx_header = mtod(m, struct bfe_rxheader *);
+	rx_header->len = 0;
+	rx_header->flags = 0;
+	bus_dmamap_sync(sc->bfe_rxmbuf_tag, r->bfe_map, BUS_DMASYNC_PREREAD);
+
+	ctrl = segs[0].ds_len & BFE_DESC_LEN;
+	KASSERT(ctrl > ETHER_MAX_LEN + 32, ("%s: buffer size too small(%d)!",
+	    __func__, ctrl));
 	if (c == BFE_RX_LIST_CNT - 1)
 		ctrl |= BFE_DESC_EOT;
+	r->bfe_ctrl = ctrl;
 
-	d->bfe_ctrl = ctrl;
-	r->bfe_mbuf = m;
-	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREWRITE);
+	d = &sc->bfe_rx_list[c];
+	d->bfe_ctrl = htole32(ctrl);
+	/* The chip needs all addresses to be added to BFE_PCI_DMA. */
+	d->bfe_addr = htole32(BFE_ADDR_LO(segs[0].ds_addr) + BFE_PCI_DMA);
 
 	return (0);
 }
@@ -789,12 +930,10 @@ bfe_chip_reset(struct bfe_softc *sc)
 		CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE);
 		bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 100, 1);
 		CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0);
-		sc->bfe_tx_cnt = sc->bfe_tx_prod = sc->bfe_tx_cons = 0;
 
 		if (CSR_READ_4(sc, BFE_DMARX_STAT) & BFE_STAT_EMASK)
 			bfe_wait_bit(sc, BFE_DMARX_STAT, BFE_STAT_SIDLE, 100, 0);
 		CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0);
-		sc->bfe_rx_prod = sc->bfe_rx_cons = 0;
 	}
 
 	bfe_core_reset(sc);
@@ -972,81 +1111,33 @@ bfe_set_rx_mode(struct bfe_softc *sc)
 static void
 bfe_dma_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
 {
-	u_int32_t *ptr;
+	struct bfe_dmamap_arg *ctx;
 
-	ptr = arg;
-	*ptr = segs->ds_addr;
-}
+	if (error != 0)
+		return;
 
-static void
-bfe_dma_map_desc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
-{
-	struct bfe_desc *d;
+	KASSERT(nseg == 1, ("%s : %d segments returned!", __func__, nseg));
 
-	d = arg;
-	/* The chip needs all addresses to be added to BFE_PCI_DMA */
-	d->bfe_addr = segs->ds_addr + BFE_PCI_DMA;
+	ctx = (struct bfe_dmamap_arg *)arg;
+	ctx->bfe_busaddr = segs[0].ds_addr;
 }
 
 static void
 bfe_release_resources(struct bfe_softc *sc)
 {
-	device_t dev;
-	int i;
-
-	dev = sc->bfe_dev;
-
-	if (sc->bfe_vpd_prodname != NULL)
-		free(sc->bfe_vpd_prodname, M_DEVBUF);
-
-	if (sc->bfe_vpd_readonly != NULL)
-		free(sc->bfe_vpd_readonly, M_DEVBUF);
 
 	if (sc->bfe_intrhand != NULL)
-		bus_teardown_intr(dev, sc->bfe_irq, sc->bfe_intrhand);
+		bus_teardown_intr(sc->bfe_dev, sc->bfe_irq, sc->bfe_intrhand);
 
 	if (sc->bfe_irq != NULL)
-		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bfe_irq);
+		bus_release_resource(sc->bfe_dev, SYS_RES_IRQ, 0, sc->bfe_irq);
 
 	if (sc->bfe_res != NULL)
-		bus_release_resource(dev, SYS_RES_MEMORY, 0x10, sc->bfe_res);
+		bus_release_resource(sc->bfe_dev, SYS_RES_MEMORY, PCIR_BAR(0),
+		    sc->bfe_res);
 
 	if (sc->bfe_ifp != NULL)
 		if_free(sc->bfe_ifp);
-
-	if (sc->bfe_tx_tag != NULL) {
-		bus_dmamap_unload(sc->bfe_tx_tag, sc->bfe_tx_map);
-		bus_dmamem_free(sc->bfe_tx_tag, sc->bfe_tx_list,
-		    sc->bfe_tx_map);
-		bus_dma_tag_destroy(sc->bfe_tx_tag);
-		sc->bfe_tx_tag = NULL;
-	}
-
-	if (sc->bfe_rx_tag != NULL) {
-		bus_dmamap_unload(sc->bfe_rx_tag, sc->bfe_rx_map);
-		bus_dmamem_free(sc->bfe_rx_tag, sc->bfe_rx_list,
-		    sc->bfe_rx_map);
-		bus_dma_tag_destroy(sc->bfe_rx_tag);
-		sc->bfe_rx_tag = NULL;
-	}
-
-	if (sc->bfe_tag != NULL) {
-		for(i = 0; i < BFE_TX_LIST_CNT; i++) {
-			bus_dmamap_destroy(sc->bfe_tag,
-			    sc->bfe_tx_ring[i].bfe_map);
-		}
-		for(i = 0; i < BFE_RX_LIST_CNT; i++) {
-			bus_dmamap_destroy(sc->bfe_tag,
-			    sc->bfe_rx_ring[i].bfe_map);
-		}
-		bus_dma_tag_destroy(sc->bfe_tag);
-		sc->bfe_tag = NULL;
-	}
-
-	if (sc->bfe_parent_tag != NULL)
-		bus_dma_tag_destroy(sc->bfe_parent_tag);
-
-	return;
 }
 
 static void
@@ -1158,6 +1249,7 @@ bfe_stats_update(struct bfe_softc *sc)
 static void
 bfe_txeof(struct bfe_softc *sc)
 {
+	struct bfe_tx_data *r;
 	struct ifnet *ifp;
 	int i, chipidx;
 
@@ -1169,17 +1261,23 @@
 	chipidx /= sizeof(struct bfe_desc);
 
 	i = sc->bfe_tx_cons;
+	if (i == chipidx)
+		return;
+	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
+	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 	/* Go through the mbufs and free those that have been transmitted */
-	while (i != chipidx) {
-		struct bfe_data *r = &sc->bfe_tx_ring[i];
-		if (r->bfe_mbuf != NULL) {
-			ifp->if_opackets++;
-			m_freem(r->bfe_mbuf);
-			r->bfe_mbuf = NULL;
-		}
-		bus_dmamap_unload(sc->bfe_tag, r->bfe_map);
+	for (; i != chipidx; BFE_INC(i, BFE_TX_LIST_CNT)) {
+		r = &sc->bfe_tx_ring[i];
 		sc->bfe_tx_cnt--;
-		BFE_INC(i, BFE_TX_LIST_CNT);
+		if (r->bfe_mbuf == NULL)
+			continue;
+		bus_dmamap_sync(sc->bfe_txmbuf_tag, r->bfe_map,
+		    BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(sc->bfe_txmbuf_tag, r->bfe_map);
+
+		ifp->if_opackets++;
+		m_freem(r->bfe_mbuf);
+		r->bfe_mbuf = NULL;
 	}
 
 	if (i != sc->bfe_tx_cons) {
@@ -1199,8 +1297,8 @@
 	struct mbuf *m;
 	struct ifnet *ifp;
 	struct bfe_rxheader *rxheader;
-	struct bfe_data *r;
-	int cons;
+	struct bfe_rx_data *r;
+	int cons, prog;
 	u_int32_t status, current, len, flags;
 
 	BFE_LOCK_ASSERT(sc);
@@ -1210,17 +1308,29 @@
 
 	ifp = sc->bfe_ifp;
 
-	while (current != cons) {
+	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
+	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+	for (prog = 0; current != cons; prog++,
+	    BFE_INC(cons, BFE_RX_LIST_CNT)) {
 		r = &sc->bfe_rx_ring[cons];
 		m = r->bfe_mbuf;
+		/*
+		 * Rx status should be read from mbuf such that we can't
+		 * delay bus_dmamap_sync(9). This hardware limiation
+		 * results in inefficent mbuf usage as bfe(4) couldn't
+		 * reuse mapped buffer from errored frame.
+		 */
+		if (bfe_list_newbuf(sc, cons) != 0) {
+			ifp->if_iqdrops++;
+			bfe_discard_buf(sc, cons);
+			continue;
+		}
 		rxheader = mtod(m, struct bfe_rxheader*);
-		bus_dmamap_sync(sc->bfe_tag, r->bfe_map, BUS_DMASYNC_POSTREAD);
-		len = rxheader->len;
-		r->bfe_mbuf = NULL;
-
-		bus_dmamap_unload(sc->bfe_tag, r->bfe_map);
-		flags = rxheader->flags;
+		len = le16toh(rxheader->len);
+		flags = le16toh(rxheader->flags);
 
+		/* Remove CRC bytes. */
 		len -= ETHER_CRC_LEN;
@@ -1228,31 +1338,26 @@
 			ifp->if_ierrors++;
 			if (flags & BFE_RX_FLAG_SERR)
 				ifp->if_collisions++;
-			bfe_list_newbuf(sc, cons, m);
-			BFE_INC(cons, BFE_RX_LIST_CNT);
+			m_freem(m);
 			continue;
 		}
 
-		/* Go past the rx header */
-		if (bfe_list_newbuf(sc, cons, NULL) == 0) {
-			m_adj(m, BFE_RX_OFFSET);
-			m->m_len = m->m_pkthdr.len = len;
-		} else {
-			bfe_list_newbuf(sc, cons, m);
-			ifp->if_ierrors++;
-			BFE_INC(cons, BFE_RX_LIST_CNT);
-			continue;
-		}
+		/* Make sure to skip header bytes written by hardware. */
+		m_adj(m, BFE_RX_OFFSET);
+		m->m_len = m->m_pkthdr.len = len;
 
 		ifp->if_ipackets++;
 		m->m_pkthdr.rcvif = ifp;
 		BFE_UNLOCK(sc);
 		(*ifp->if_input)(ifp, m);
 		BFE_LOCK(sc);
+	}
 
-		BFE_INC(cons, BFE_RX_LIST_CNT);
+	if (prog > 0) {
+		sc->bfe_rx_cons = cons;
+		bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
+		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 	}
-	sc->bfe_rx_cons = cons;
 }
 
 static void
@@ -1330,87 +1435,89 @@ bfe_intr(void *xsc)
 }
 
 static int
-bfe_encap(struct bfe_softc *sc, struct mbuf **m_head, u_int32_t *txidx)
+bfe_encap(struct bfe_softc *sc, struct mbuf **m_head)
 {
-	struct bfe_desc *d = NULL;
-	struct bfe_data *r = NULL;
-	struct mbuf *m;
-	u_int32_t frag, cur, cnt = 0;
-	int chainlen = 0;
-	int error;
-
-	if (BFE_TX_LIST_CNT - sc->bfe_tx_cnt < 2)
-		return (ENOBUFS);
-
-	/*
-	 * Count the number of frags in this chain to see if
-	 * we need to m_defrag. Since the descriptor list is shared
-	 * by all packets, we'll m_defrag long chains so that they
-	 * do not use up the entire list, even if they would fit.
-	 */
-	for(m = *m_head; m != NULL; m = m->m_next)
-		chainlen++;
+	struct bfe_desc *d;
+	struct bfe_tx_data *r, *r1;
+	struct mbuf *m;
+	bus_dmamap_t map;
+	bus_dma_segment_t txsegs[BFE_MAXTXSEGS];
+	uint32_t cur, si;
+	int error, i, nsegs;
 
+	BFE_LOCK_ASSERT(sc);
 
-	if ((chainlen > BFE_TX_LIST_CNT / 4) ||
-	    ((BFE_TX_LIST_CNT - (chainlen + sc->bfe_tx_cnt)) < 2)) {
-		m = m_defrag(*m_head, M_DONTWAIT);
-		if (m == NULL)
-			return (ENOBUFS);
+	M_ASSERTPKTHDR((*m_head));
+
+	si = cur = sc->bfe_tx_prod;
+	r = &sc->bfe_tx_ring[cur];
+	error = bus_dmamap_load_mbuf_sg(sc->bfe_txmbuf_tag, r->bfe_map, *m_head,
+	    txsegs, &nsegs, 0);
+	if (error == EFBIG) {
+		m = m_collapse(*m_head, M_DONTWAIT, BFE_MAXTXSEGS);
+		if (m == NULL) {
+			m_freem(*m_head);
+			*m_head = NULL;
+			return (ENOMEM);
+		}
 		*m_head = m;
-	}
-
-	/*
-	 * Start packing the mbufs in this chain into
-	 * the fragment pointers. Stop when we run out
-	 * of fragments or hit the end of the mbuf chain.
-	 */
-	cur = frag = *txidx;
-	cnt = 0;
-
-	for(m = *m_head; m != NULL; m = m->m_next) {
-		if (m->m_len != 0) {
-			if ((BFE_TX_LIST_CNT - (sc->bfe_tx_cnt + cnt)) < 2)
-				return (ENOBUFS);
-
-			d = &sc->bfe_tx_list[cur];
-			r = &sc->bfe_tx_ring[cur];
-			d->bfe_ctrl = BFE_DESC_LEN & m->m_len;
-			/* always intterupt on completion */
-			d->bfe_ctrl |= BFE_DESC_IOC;
-			if (cnt == 0)
-				/* Set start of frame */
-				d->bfe_ctrl |= BFE_DESC_SOF;
-			if (cur == BFE_TX_LIST_CNT - 1)
-				/*
-				 * Tell the chip to wrap to the start of
-				 * the descriptor list
-				 */
-				d->bfe_ctrl |= BFE_DESC_EOT;
-
-			error = bus_dmamap_load(sc->bfe_tag,
-			    r->bfe_map, mtod(m, void*), m->m_len,
-			    bfe_dma_map_desc, d, BUS_DMA_NOWAIT);
-			if (error)
-				return (ENOBUFS);
-			bus_dmamap_sync(sc->bfe_tag, r->bfe_map,
-			    BUS_DMASYNC_PREWRITE);
-
-			frag = cur;
-			BFE_INC(cur, BFE_TX_LIST_CNT);
-			cnt++;
+		error = bus_dmamap_load_mbuf_sg(sc->bfe_txmbuf_tag, r->bfe_map,
+		    *m_head, txsegs, &nsegs, 0);
+		if (error != 0) {
+			m_freem(*m_head);
+			*m_head = NULL;
+			return (error);
 		}
+	} else if (error != 0)
+		return (error);
+	if (nsegs == 0) {
+		m_freem(*m_head);
+		*m_head = NULL;
+		return (EIO);
 	}
 
-	if (m != NULL)
+	if (sc->bfe_tx_cnt + nsegs > BFE_TX_LIST_CNT - 1) {
+		bus_dmamap_unload(sc->bfe_txmbuf_tag, r->bfe_map);
 		return (ENOBUFS);
+	}
+
+	for (i = 0; i < nsegs; i++) {
+		d = &sc->bfe_tx_list[cur];
+		d->bfe_ctrl = htole32(txsegs[i].ds_len & BFE_DESC_LEN);
+		d->bfe_ctrl |= htole32(BFE_DESC_IOC);
+		if (cur == BFE_TX_LIST_CNT - 1)
+			/*
+			 * Tell the chip to wrap to the start of
+			 * the descriptor list.
+			 */
+			d->bfe_ctrl |= htole32(BFE_DESC_EOT);
+		/* The chip needs all addresses to be added to BFE_PCI_DMA. */
+		d->bfe_addr = htole32(BFE_ADDR_LO(txsegs[i].ds_addr) +
+		    BFE_PCI_DMA);
+		BFE_INC(cur, BFE_TX_LIST_CNT);
+	}
+
+	/* Update producer index. */
+	sc->bfe_tx_prod = cur;
+
+	/* Set EOF on the last descriptor. */
+	cur = (cur + BFE_TX_LIST_CNT - 1) % BFE_TX_LIST_CNT;
+	d = &sc->bfe_tx_list[cur];
+	d->bfe_ctrl |= htole32(BFE_DESC_EOF);
 
-	sc->bfe_tx_list[frag].bfe_ctrl |= BFE_DESC_EOF;
-	sc->bfe_tx_ring[frag].bfe_mbuf = *m_head;
-	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREWRITE);
+	/* Lastly set SOF on the first descriptor to avoid races. */
+	d = &sc->bfe_tx_list[si];
+	d->bfe_ctrl |= htole32(BFE_DESC_SOF);
+
+	r1 = &sc->bfe_tx_ring[cur];
+	map = r->bfe_map;
+	r->bfe_map = r1->bfe_map;
+	r1->bfe_map = map;
+	r1->bfe_mbuf = *m_head;
+	sc->bfe_tx_cnt += nsegs;
+
+	bus_dmamap_sync(sc->bfe_txmbuf_tag, map, BUS_DMASYNC_PREWRITE);
 
-	*txidx = cur;
-	sc->bfe_tx_cnt += cnt;
 	return (0);
 }
@@ -1432,11 +1539,10 @@ static void
 bfe_start_locked(struct ifnet *ifp)
 {
 	struct bfe_softc *sc;
-	struct mbuf *m_head = NULL;
-	int idx, queued = 0;
+	struct mbuf *m_head;
+	int queued;
 
 	sc = ifp->if_softc;
-	idx = sc->bfe_tx_prod;
 
 	BFE_LOCK_ASSERT(sc);
@@ -1444,14 +1550,12 @@ bfe_start_locked(struct ifnet *ifp)
 	 * Not much point trying to send if the link is down
 	 * or we have nothing to send.
 	 */
-	if (!sc->bfe_link && ifp->if_snd.ifq_len < 10)
-		return;
-
 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
-	    IFF_DRV_RUNNING)
+	    IFF_DRV_RUNNING || sc->bfe_link == 0)
 		return;
 
-	while (sc->bfe_tx_ring[idx].bfe_mbuf == NULL) {
+	for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
+	    sc->bfe_tx_cnt < BFE_TX_LIST_CNT - 1;) {
 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 		if (m_head == NULL)
 			break;
@@ -1460,7 +1564,9 @@
 		 * Pack the data into the tx ring. If we dont have
 		 * enough room, let the chip drain the ring.
 		 */
-		if (bfe_encap(sc, &m_head, &idx)) {
+		if (bfe_encap(sc, &m_head)) {
+			if (m_head == NULL)
+				break;
 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 			break;
@@ -1476,10 +1582,24 @@ bfe_start_locked(struct ifnet *ifp)
 	}
 
 	if (queued) {
-		sc->bfe_tx_prod = idx;
+		bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
+		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 		/* Transmit - twice due to apparent hardware bug */
-		CSR_WRITE_4(sc, BFE_DMATX_PTR, idx * sizeof(struct bfe_desc));
-		CSR_WRITE_4(sc, BFE_DMATX_PTR, idx * sizeof(struct bfe_desc));
+		CSR_WRITE_4(sc, BFE_DMATX_PTR,
+		    sc->bfe_tx_prod * sizeof(struct bfe_desc));
+		/*
+		 * XXX It seems the following write is not necessary
+		 * to kick Tx command. What might be required would be
+		 * a way flushing PCI posted write. Reading the register
+		 * back ensures the flush operation. In addition,
+		 * hardware will execute PCI posted write in the long
+		 * run and watchdog timer for the kick command was set
+		 * to 5 seconds. Therefore I think the second write
+		 * access is not necessary or could be replaced with
+		 * read operation.
+		 */
+		CSR_WRITE_4(sc, BFE_DMATX_PTR,
+		    sc->bfe_tx_prod * sizeof(struct bfe_desc));
 
 		/*
 		 * Set a timeout in case the chip goes out to lunch.
@@ -1519,6 +1639,7 @@ bfe_init_locked(void *xsc)
 		bfe_stop(sc);
 		return;
 	}
+	bfe_list_tx_init(sc);
 
 	bfe_set_rx_mode(sc);
 
diff --git a/sys/dev/bfe/if_bfereg.h b/sys/dev/bfe/if_bfereg.h
index 65be5922d7fc..7ece5dc1070c 100644
--- a/sys/dev/bfe/if_bfereg.h
+++ b/sys/dev/bfe/if_bfereg.h
@@ -428,9 +428,6 @@
 #define PCI_CLRBIT(dev, reg, x, s) \
 	pci_write_config(dev, reg, (pci_read_config(dev, reg, s) & ~(x)), s)
 
-#define BFE_RX_RING_SIZE	512
-#define BFE_TX_RING_SIZE	512
-#define BFE_LINK_DOWN		5
 #define BFE_TX_LIST_CNT		128
 #define BFE_RX_LIST_CNT		128
 #define BFE_TX_LIST_SIZE	BFE_TX_LIST_CNT * sizeof(struct bfe_desc)
@@ -438,11 +435,15 @@
 #define BFE_RX_OFFSET		30
 #define BFE_TX_QLEN		256
 
-#define CSR_READ_4(sc, reg) \
-	bus_space_read_4(sc->bfe_btag, sc->bfe_bhandle, reg)
+#define BFE_RX_RING_ALIGN	4096
+#define BFE_TX_RING_ALIGN	4096
+#define BFE_MAXTXSEGS		16
+#define BFE_DMA_MAXADDR		0x3FFFFFFF	/* 1GB DMA address limit. */
+#define BFE_ADDR_LO(x)		((uint64_t)(x) & 0xFFFFFFFF)
 
-#define CSR_WRITE_4(sc, reg, val) \
-	bus_space_write_4(sc->bfe_btag, sc->bfe_bhandle, reg, val)
+#define CSR_READ_4(sc, reg)	bus_read_4(sc->bfe_res, reg)
+
+#define CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->bfe_res, reg, val)
 
 #define BFE_OR(sc, name, val) \
 	CSR_WRITE_4(sc, name, CSR_READ_4(sc, name) | val)
@@ -456,9 +457,15 @@
 
 #define BFE_INC(x, y) (x) = ((x) == ((y)-1)) ? 0 : (x)+1
 
-struct bfe_data {
+struct bfe_tx_data {
+	struct mbuf	*bfe_mbuf;
+	bus_dmamap_t	bfe_map;
+};
+
+struct bfe_rx_data {
 	struct mbuf	*bfe_mbuf;
 	bus_dmamap_t	bfe_map;
+	u_int32_t	bfe_ctrl;
 };
 
 struct bfe_desc {
@@ -498,38 +505,34 @@ struct bfe_softc
 	struct ifnet		*bfe_ifp;	/* interface info */
 	device_t		bfe_dev;
 	device_t		bfe_miibus;
-	bus_space_handle_t	bfe_bhandle;
-	vm_offset_t		bfe_vhandle;
-	bus_space_tag_t		bfe_btag;
 	bus_dma_tag_t		bfe_tag;
 	bus_dma_tag_t		bfe_parent_tag;
 	bus_dma_tag_t		bfe_tx_tag, bfe_rx_tag;
 	bus_dmamap_t		bfe_tx_map, bfe_rx_map;
+	bus_dma_tag_t		bfe_txmbuf_tag, bfe_rxmbuf_tag;
+	bus_dmamap_t		bfe_rx_sparemap;
 	void			*bfe_intrhand;
 	struct resource		*bfe_irq;
 	struct resource		*bfe_res;
 	struct callout		bfe_stat_co;
 	struct bfe_hw_stats	bfe_hwstats;
 	struct bfe_desc		*bfe_tx_list, *bfe_rx_list;
-	struct bfe_data		bfe_tx_ring[BFE_TX_LIST_CNT];	/* XXX */
-	struct bfe_data		bfe_rx_ring[BFE_RX_LIST_CNT];	/* XXX */
+	struct bfe_tx_data	bfe_tx_ring[BFE_TX_LIST_CNT];	/* XXX */
+	struct bfe_rx_data	bfe_rx_ring[BFE_RX_LIST_CNT];	/* XXX */
 	struct mtx		bfe_mtx;
 	u_int32_t		bfe_flags;
 	u_int32_t		bfe_imask;
 	u_int32_t		bfe_dma_offset;
 	u_int32_t		bfe_tx_cnt, bfe_tx_cons, bfe_tx_prod;
-	u_int32_t		bfe_rx_cnt, bfe_rx_prod, bfe_rx_cons;
+	u_int32_t		bfe_rx_prod, bfe_rx_cons;
 	u_int32_t		bfe_tx_dma, bfe_rx_dma;
 	u_int32_t		bfe_link;
 	int			bfe_watchdog_timer;
 	u_int8_t		bfe_phyaddr;	/* Address of the card's PHY */
 	u_int8_t		bfe_mdc_port;
 	u_int8_t		bfe_core_unit;
-	u_int8_t		bfe_up;
 	u_char			bfe_enaddr[6];
 	int			bfe_if_flags;
-	char			*bfe_vpd_prodname;
-	char			*bfe_vpd_readonly;
 };
 
 struct bfe_type
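
The central idiom in the new bfe_dma_alloc() is the bus_dma(9) address-capture callback: bus_dmamap_load(9) reports the bus address of a loaded buffer only through a callback function, so the driver passes a one-field context structure (struct bfe_dmamap_arg) and lets bfe_dma_map() copy the single segment's address into it. A minimal, self-contained sketch of that pattern follows; the foo_* names are illustrative stand-ins, not symbols from bfe(4):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <machine/bus.h>

/* One-field context filled in by the load callback (cf. bfe_dmamap_arg). */
struct foo_dmamap_arg {
	bus_addr_t	foo_busaddr;
};

/* bus_dmamap_load(9) callback: record the lone segment's bus address. */
static void
foo_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct foo_dmamap_arg *ctx;

	if (error != 0)
		return;
	KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
	ctx = arg;
	ctx->foo_busaddr = segs[0].ds_addr;
}

/* Load a descriptor ring and hand back its bus address. */
static int
foo_load_ring(bus_dma_tag_t tag, bus_dmamap_t map, void *ring,
    bus_size_t size, bus_addr_t *paddr)
{
	struct foo_dmamap_arg ctx;
	int error;

	ctx.foo_busaddr = 0;
	error = bus_dmamap_load(tag, map, ring, size, foo_dmamap_cb,
	    &ctx, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);
	/* Defensive: make sure the callback actually stored an address. */
	if (ctx.foo_busaddr == 0)
		return (ENOMEM);
	*paddr = ctx.foo_busaddr;
	return (0);
}

The "error != 0 || ctx.bfe_busaddr == 0" test in bfe_dma_alloc() is the same defensive check: with BUS_DMA_NOWAIT the load either completes or fails immediately, so a zero address means the callback never ran.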
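The Rx refill path adds a second idiom, the spare DMA map: bfe_list_newbuf() loads the replacement cluster into bfe_rx_sparemap first and swaps maps with the ring slot only after the load succeeds, so an mbuf shortage never leaves a descriptor without a mapped buffer. A sketch of that idiom under the same hypothetical foo_* naming:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

#define FOO_RX_LIST_CNT	128

struct foo_rx_data {
	struct mbuf	*foo_mbuf;
	bus_dmamap_t	foo_map;
};

struct foo_softc {
	bus_dma_tag_t	foo_rxmbuf_tag;
	bus_dmamap_t	foo_rx_sparemap;
	struct foo_rx_data foo_rx_ring[FOO_RX_LIST_CNT];
};

static int
foo_newbuf(struct foo_softc *sc, int idx)
{
	struct foo_rx_data *rd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/* Load into the spare map; the ring slot is still intact. */
	if (bus_dmamap_load_mbuf_sg(sc->foo_rxmbuf_tag, sc->foo_rx_sparemap,
	    m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Success: tear down the old buffer and swap in the spare map. */
	rd = &sc->foo_rx_ring[idx];
	if (rd->foo_mbuf != NULL) {
		bus_dmamap_sync(sc->foo_rxmbuf_tag, rd->foo_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->foo_rxmbuf_tag, rd->foo_map);
	}
	map = rd->foo_map;
	rd->foo_map = sc->foo_rx_sparemap;
	sc->foo_rx_sparemap = map;
	rd->foo_mbuf = m;
	bus_dmamap_sync(sc->foo_rxmbuf_tag, rd->foo_map, BUS_DMASYNC_PREREAD);

	/* The caller would program descriptor idx with segs[0].ds_addr. */
	return (0);
}

This ordering is why bfe_rxeof() can call bfe_list_newbuf() before handing the old mbuf to the stack: if the refill fails, bfe_discard_buf() simply rearms the existing descriptor and the frame is counted as an input-queue drop instead of corrupting the ring.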