
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (32 commits)
  wimax: fix oops in wimax_dev_get_by_genl_info() when looking up non-wimax iface
  net: 4 bytes kernel memory disclosure in SO_BSDCOMPAT gsopt try #2
  netxen: fix compile waring "label ‘set_32_bit_mask’ defined but not used" on IA64 platform
  bnx2: Update version to 1.9.2 and copyright.
  bnx2: Fix jumbo frames error handling.
  bnx2: Update 5709 firmware.
  bnx2: Update 5706/5708 firmware.
  3c505: do not set pcb->data.raw beyond its size
  Documentation/connector/cn_test.c: don't use gfp_any()
  net: don't use in_atomic() in gfp_any()
  IRDA: cnt is off by 1
  netxen: remove pcie workaround
  sun3: print when lance_open() fails
  qlge: bugfix: Add missing rx buf clean index on early exit.
  qlge: bugfix: Fix RX scaling values.
  qlge: bugfix: Fix TSO breakage.
  qlge: bugfix: Add missing dev_kfree_skb_any() call.
  qlge: bugfix: Add missing put_page() call.
  qlge: bugfix: Fix fatal error recovery hang.
  qlge: bugfix: Use netif_receive_skb() and vlan_hwaccel_receive_skb().
  ...
Linus Torvalds 2009-02-12 17:47:15 -08:00
commit 37bed90094
26 changed files with 8767 additions and 8901 deletions


@@ -137,7 +137,7 @@ static void cn_test_timer_func(unsigned long __data)
 memcpy(m + 1, data, m->len);
-cn_netlink_send(m, 0, gfp_any());
+cn_netlink_send(m, 0, GFP_ATOMIC);
 kfree(m);
 }
@@ -160,10 +160,8 @@ static int cn_test_init(void)
 goto err_out;
 }
-init_timer(&cn_test_timer);
-cn_test_timer.function = cn_test_timer_func;
+setup_timer(&cn_test_timer, cn_test_timer_func, 0);
 cn_test_timer.expires = jiffies + HZ;
-cn_test_timer.data = 0;
 add_timer(&cn_test_timer);
 return 0;


@@ -2519,8 +2519,8 @@ fore200e_load_and_start_fw(struct fore200e* fore200e)
 return err;
 sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
-if (request_firmware(&firmware, buf, device) == 1) {
-printk(FORE200E "missing %s firmware image\n", fore200e->bus->model_name);
+if ((err = request_firmware(&firmware, buf, device)) < 0) {
+printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
 return err;
 }


@@ -493,21 +493,27 @@ static bool receive_pcb(struct net_device *dev, pcb_struct * pcb)
 }
 /* read the data */
 spin_lock_irqsave(&adapter->lock, flags);
-i = 0;
-do {
-j = 0;
-while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && j++ < 20000);
-pcb->data.raw[i++] = inb_command(dev->base_addr);
-if (i > MAX_PCB_DATA)
-INVALID_PCB_MSG(i);
-} while ((stat & ASF_PCB_MASK) != ASF_PCB_END && j < 20000);
+for (i = 0; i < MAX_PCB_DATA; i++) {
+for (j = 0; j < 20000; j++) {
+stat = get_status(dev->base_addr);
+if (stat & ACRF)
+break;
+}
+pcb->data.raw[i] = inb_command(dev->base_addr);
+if ((stat & ASF_PCB_MASK) == ASF_PCB_END || j >= 20000)
+break;
+}
 spin_unlock_irqrestore(&adapter->lock, flags);
+if (i >= MAX_PCB_DATA) {
+INVALID_PCB_MSG(i);
+return false;
+}
 if (j >= 20000) {
 TIMEOUT_MSG(__LINE__);
 return false;
 }
-/* woops, the last "data" byte was really the length! */
-total_length = pcb->data.raw[--i];
+/* the last "data" byte was really the length! */
+total_length = pcb->data.raw[i];
 /* safety check total length vs data length */
 if (total_length != (pcb->length + 2)) {


@@ -1,6 +1,6 @@
 /* bnx2.c: Broadcom NX2 network driver.
 *
-* Copyright (c) 2004-2008 Broadcom Corporation
+* Copyright (c) 2004-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -57,8 +57,8 @@
 #define DRV_MODULE_NAME "bnx2"
 #define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.9.0"
-#define DRV_MODULE_RELDATE "Dec 16, 2008"
+#define DRV_MODULE_VERSION "1.9.2"
+#define DRV_MODULE_RELDATE "Feb 11, 2009"
 #define RUN_AT(x) (jiffies + (x))
@@ -2910,18 +2910,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 rx_hdr = (struct l2_fhdr *) skb->data;
 len = rx_hdr->l2_fhdr_pkt_len;
+status = rx_hdr->l2_fhdr_status;
-if ((status = rx_hdr->l2_fhdr_status) &
-(L2_FHDR_ERRORS_BAD_CRC |
-L2_FHDR_ERRORS_PHY_DECODE |
-L2_FHDR_ERRORS_ALIGNMENT |
-L2_FHDR_ERRORS_TOO_SHORT |
-L2_FHDR_ERRORS_GIANT_FRAME)) {
-bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
-sw_ring_prod);
-goto next_rx;
-}
 hdr_len = 0;
 if (status & L2_FHDR_STATUS_SPLIT) {
 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
@@ -2931,6 +2921,24 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 pg_ring_used = 1;
 }
+if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
+L2_FHDR_ERRORS_PHY_DECODE |
+L2_FHDR_ERRORS_ALIGNMENT |
+L2_FHDR_ERRORS_TOO_SHORT |
+L2_FHDR_ERRORS_GIANT_FRAME))) {
+bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+sw_ring_prod);
+if (pg_ring_used) {
+int pages;
+pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
+bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
+}
+goto next_rx;
+}
 len -= 4;
 if (len <= bp->rx_copy_thresh) {


@@ -1,6 +1,6 @@
 /* bnx2.h: Broadcom NX2 network driver.
 *
-* Copyright (c) 2004-2007 Broadcom Corporation
+* Copyright (c) 2004-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -585,7 +585,7 @@ static int mcs_speed_change(struct mcs_cb *mcs)
 mcs_get_reg(mcs, MCS_RESV_REG, &rval);
 } while(cnt++ < 100 && (rval & MCS_IRINTX));
-if(cnt >= 100) {
+if (cnt > 100) {
 IRDA_ERROR("unable to change speed\n");
 ret = -EIO;
 goto error;


@@ -201,9 +201,9 @@ static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
 adapter->pci_using_dac = 1;
 return 0;
 }
+set_32_bit_mask:
 #endif /* CONFIG_IA64 */
-set_32_bit_mask:
 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 if (!err)
 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
@@ -372,67 +372,6 @@ static void netxen_set_port_mode(struct netxen_adapter *adapter)
 }
 }
-#define PCI_CAP_ID_GEN 0x10
-static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
-{
-u32 pdevfuncsave;
-u32 c8c9value = 0;
-u32 chicken = 0;
-u32 control = 0;
-int i, pos;
-struct pci_dev *pdev;
-pdev = adapter->pdev;
-adapter->hw_read_wx(adapter,
-NETXEN_PCIE_REG(PCIE_CHICKEN3), &chicken, 4);
-/* clear chicken3.25:24 */
-chicken &= 0xFCFFFFFF;
-/*
-* if gen1 and B0, set F1020 - if gen 2, do nothing
-* if gen2 set to F1000
-*/
-pos = pci_find_capability(pdev, PCI_CAP_ID_GEN);
-if (pos == 0xC0) {
-pci_read_config_dword(pdev, pos + 0x10, &control);
-if ((control & 0x000F0000) != 0x00020000) {
-/* set chicken3.24 if gen1 */
-chicken |= 0x01000000;
-}
-printk(KERN_INFO "%s Gen2 strapping detected\n",
-netxen_nic_driver_name);
-c8c9value = 0xF1000;
-} else {
-/* set chicken3.24 if gen1 */
-chicken |= 0x01000000;
-printk(KERN_INFO "%s Gen1 strapping detected\n",
-netxen_nic_driver_name);
-if (adapter->ahw.revision_id == NX_P3_B0)
-c8c9value = 0xF1020;
-else
-c8c9value = 0;
-}
-adapter->hw_write_wx(adapter,
-NETXEN_PCIE_REG(PCIE_CHICKEN3), &chicken, 4);
-if (!c8c9value)
-return;
-pdevfuncsave = pdev->devfn;
-if (pdevfuncsave & 0x07)
-return;
-for (i = 0; i < 8; i++) {
-pci_read_config_dword(pdev, pos + 8, &control);
-pci_read_config_dword(pdev, pos + 8, &control);
-pci_write_config_dword(pdev, pos + 8, c8c9value);
-pdev->devfn++;
-}
-pdev->devfn = pdevfuncsave;
-}
 static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
 {
 u32 control;
@@ -812,9 +751,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 }
 netxen_load_firmware(adapter);
-if (NX_IS_REVISION_P3(revision_id))
-netxen_pcie_strap_init(adapter);
 if (NX_IS_REVISION_P2(revision_id)) {
 /* Initialize multicast addr pool owners */


@@ -125,6 +125,8 @@ static int __devinit mdio_gpio_bus_init(struct device *dev,
 if (gpio_request(bitbang->mdio, "mdio"))
 goto out_free_mdc;
 gpio_direction_output(bitbang->mdc, 0);
+dev_set_drvdata(dev, new_bus);
 ret = mdiobus_register(new_bus);


@@ -898,6 +898,7 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 lbq_desc->index);
 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
 if (lbq_desc->p.lbq_page == NULL) {
+rx_ring->lbq_clean_idx = clean_idx;
 QPRINTK(qdev, RX_STATUS, ERR,
 "Couldn't get a page.\n");
 return;
@@ -907,6 +908,9 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 0, PAGE_SIZE,
 PCI_DMA_FROMDEVICE);
 if (pci_dma_mapping_error(qdev->pdev, map)) {
+rx_ring->lbq_clean_idx = clean_idx;
+put_page(lbq_desc->p.lbq_page);
+lbq_desc->p.lbq_page = NULL;
 QPRINTK(qdev, RX_STATUS, ERR,
 "PCI mapping failed.\n");
 return;
@@ -968,6 +972,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 if (pci_dma_mapping_error(qdev->pdev, map)) {
 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
 rx_ring->sbq_clean_idx = clean_idx;
+dev_kfree_skb_any(sbq_desc->p.skb);
+sbq_desc->p.skb = NULL;
 return;
 }
 pci_unmap_addr_set(sbq_desc, mapaddr, map);
@@ -1449,12 +1455,12 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
 if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
 QPRINTK(qdev, RX_STATUS, DEBUG,
 "Passing a VLAN packet upstream.\n");
-vlan_hwaccel_rx(skb, qdev->vlgrp,
+vlan_hwaccel_receive_skb(skb, qdev->vlgrp,
 le16_to_cpu(ib_mac_rsp->vlan_id));
 } else {
 QPRINTK(qdev, RX_STATUS, DEBUG,
 "Passing a normal packet upstream.\n");
-netif_rx(skb);
+netif_receive_skb(skb);
 }
 }
@@ -1511,6 +1517,11 @@ void ql_queue_asic_error(struct ql_adapter *qdev)
 netif_stop_queue(qdev->ndev);
 netif_carrier_off(qdev->ndev);
 ql_disable_interrupts(qdev);
+/* Clear adapter up bit to signal the recovery
+* process that it shouldn't kill the reset worker
+* thread
+*/
+clear_bit(QL_ADAPTER_UP, &qdev->flags);
 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
 }
@@ -1927,10 +1938,6 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
 mac_iocb_ptr = tx_ring_desc->queue_entry;
 memset((void *)mac_iocb_ptr, 0, sizeof(mac_iocb_ptr));
-if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
-QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
-return NETDEV_TX_BUSY;
-}
 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
 mac_iocb_ptr->tid = tx_ring_desc->index;
@@ -1956,6 +1963,12 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
 ql_hw_csum_setup(skb,
 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
 }
+if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
+NETDEV_TX_OK) {
+QPRINTK(qdev, TX_QUEUED, ERR,
+"Could not map the segments.\n");
+return NETDEV_TX_BUSY;
+}
 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
 tx_ring->prod_idx++;
 if (tx_ring->prod_idx == tx_ring->wq_len)
@@ -2873,8 +2886,8 @@ static int ql_start_rss(struct ql_adapter *qdev)
 /*
 * Fill out the Indirection Table.
 */
-for (i = 0; i < 32; i++)
-hash_id[i] = i & 1;
+for (i = 0; i < 256; i++)
+hash_id[i] = i & (qdev->rss_ring_count - 1);
 /*
 * Random values for the IPv6 and IPv4 Hash Keys.
@@ -3100,7 +3113,11 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 netif_stop_queue(ndev);
 netif_carrier_off(ndev);
-cancel_delayed_work_sync(&qdev->asic_reset_work);
+/* Don't kill the reset worker thread if we
+* are in the process of recovery.
+*/
+if (test_bit(QL_ADAPTER_UP, &qdev->flags))
+cancel_delayed_work_sync(&qdev->asic_reset_work);
 cancel_delayed_work_sync(&qdev->mpi_reset_work);
 cancel_delayed_work_sync(&qdev->mpi_work);
@@ -3501,7 +3518,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
 static void qlge_tx_timeout(struct net_device *ndev)
 {
 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
-queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
+ql_queue_asic_error(qdev);
 }
 static void ql_asic_reset_work(struct work_struct *work)


@@ -428,7 +428,7 @@ static int lance_open( struct net_device *dev )
 while (--i > 0)
 if (DREG & CSR0_IDON)
 break;
-if (i < 0 || (DREG & CSR0_ERR)) {
+if (i <= 0 || (DREG & CSR0_ERR)) {
 DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
 dev->name, i, DREG ));
 DREG = CSR0_STOP;


@@ -2543,25 +2543,36 @@ static struct quattro * __devinit quattro_sbus_find(struct of_device *child)
 }
 /* After all quattro cards have been probed, we call these functions
-* to register the IRQ handlers.
+* to register the IRQ handlers for the cards that have been
+* successfully probed and skip the cards that failed to initialize
 */
-static void __init quattro_sbus_register_irqs(void)
+static int __init quattro_sbus_register_irqs(void)
 {
 struct quattro *qp;
 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
 struct of_device *op = qp->quattro_dev;
-int err;
+int err, qfe_slot, skip = 0;
+for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
+if (!qp->happy_meals[qfe_slot])
+skip = 1;
+}
+if (skip)
+continue;
 err = request_irq(op->irqs[0],
 quattro_sbus_interrupt,
 IRQF_SHARED, "Quattro",
 qp);
 if (err != 0) {
-printk(KERN_ERR "Quattro: Fatal IRQ registery error %d.\n", err);
-panic("QFE request irq");
+printk(KERN_ERR "Quattro HME: IRQ registration "
+"error %d.\n", err);
+return err;
 }
 }
+return 0;
 }
static void quattro_sbus_free_irqs(void)
@@ -2570,6 +2581,14 @@ static void quattro_sbus_free_irqs(void)
 for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
 struct of_device *op = qp->quattro_dev;
+int qfe_slot, skip = 0;
+for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
+if (!qp->happy_meals[qfe_slot])
+skip = 1;
+}
+if (skip)
+continue;
 free_irq(op->irqs[0], qp);
 }
@@ -2828,6 +2847,9 @@ err_out_iounmap:
 if (hp->tcvregs)
 of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
+if (qp)
+qp->happy_meals[qfe_slot] = NULL;
 err_out_free_netdev:
 free_netdev(dev);
@@ -3285,7 +3307,7 @@ static int __init happy_meal_sbus_init(void)
 err = of_register_driver(&hme_sbus_driver, &of_bus_type);
 if (!err)
-quattro_sbus_register_irqs();
+err = quattro_sbus_register_irqs();
 return err;
 }


@@ -852,7 +852,7 @@ static int tg3_bmcr_reset(struct tg3 *tp)
 }
 udelay(10);
 }
-if (limit <= 0)
+if (limit < 0)
 return -EBUSY;
 return 0;
@@ -1603,7 +1603,7 @@ static int tg3_wait_macro_done(struct tg3 *tp)
 break;
 }
 }
-if (limit <= 0)
+if (limit < 0)
 return -EBUSY;
 return 0;


@@ -1098,6 +1098,42 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
 * Buffers setup *
 \***************/
+static
+struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
+{
+struct sk_buff *skb;
+unsigned int off;
+/*
+* Allocate buffer with headroom_needed space for the
+* fake physical layer header at the start.
+*/
+skb = dev_alloc_skb(sc->rxbufsize + sc->cachelsz - 1);
+if (!skb) {
+ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
+sc->rxbufsize + sc->cachelsz - 1);
+return NULL;
+}
+/*
+* Cache-line-align. This is important (for the
+* 5210 at least) as not doing so causes bogus data
+* in rx'd frames.
+*/
+off = ((unsigned long)skb->data) % sc->cachelsz;
+if (off != 0)
+skb_reserve(skb, sc->cachelsz - off);
+*skb_addr = pci_map_single(sc->pdev,
+skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
+if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) {
+ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
+dev_kfree_skb(skb);
+return NULL;
+}
+return skb;
+}
 static int
 ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
 {
@@ -1105,37 +1141,11 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
 struct sk_buff *skb = bf->skb;
 struct ath5k_desc *ds;
-if (likely(skb == NULL)) {
-unsigned int off;
-/*
-* Allocate buffer with headroom_needed space for the
-* fake physical layer header at the start.
-*/
-skb = dev_alloc_skb(sc->rxbufsize + sc->cachelsz - 1);
-if (unlikely(skb == NULL)) {
-ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
-sc->rxbufsize + sc->cachelsz - 1);
+if (!skb) {
+skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
+if (!skb)
 return -ENOMEM;
-}
-/*
-* Cache-line-align. This is important (for the
-* 5210 at least) as not doing so causes bogus data
-* in rx'd frames.
-*/
-off = ((unsigned long)skb->data) % sc->cachelsz;
-if (off != 0)
-skb_reserve(skb, sc->cachelsz - off);
 bf->skb = skb;
-bf->skbaddr = pci_map_single(sc->pdev,
-skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
-if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) {
-ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
-dev_kfree_skb(skb);
-bf->skb = NULL;
-return -ENOMEM;
-}
 }
 /*
@@ -1664,7 +1674,8 @@
 {
 struct ieee80211_rx_status rxs = {};
 struct ath5k_rx_status rs = {};
-struct sk_buff *skb;
+struct sk_buff *skb, *next_skb;
+dma_addr_t next_skb_addr;
 struct ath5k_softc *sc = (void *)data;
 struct ath5k_buf *bf, *bf_last;
 struct ath5k_desc *ds;
@@ -1749,10 +1760,17 @@
 goto next;
 }
 accept:
+next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
+/*
+* If we can't replace bf->skb with a new skb under memory
+* pressure, just skip this packet
+*/
+if (!next_skb)
+goto next;
 pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
 PCI_DMA_FROMDEVICE);
-bf->skb = NULL;
 skb_put(skb, rs.rs_datalen);
 /* The MAC header is padded to have 32-bit boundary if the
@@ -1825,6 +1843,9 @@ accept:
 ath5k_check_ibss_tsf(sc, skb, &rxs);
 __ieee80211_rx(sc->hw, skb, &rxs);
+bf->skb = next_skb;
+bf->skbaddr = next_skb_addr;
 next:
 list_move_tail(&bf->list, &sc->rxbuf);
 } while (ath5k_rxbuf_setup(sc, bf) == 0);


@@ -4042,7 +4042,19 @@ static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 priv->is_open = 1;
 }
-pci_save_state(pdev);
+/* pci driver assumes state will be saved in this function.
+* pci state is saved and device disabled when interface is
+* stopped, so at this time pci device will always be disabled -
+* whether interface was started or not. saving pci state now will
+* cause saved state be that of a disabled device, which will cause
+* problems during resume in that we will end up with a disabled device.
+*
+* indicate that the current saved state (from when interface was
+* stopped) is valid. if interface was never up at time of suspend
+* then the saved state will still be valid as it was saved during
+* .probe. */
+pdev->state_saved = true;
 pci_set_power_state(pdev, PCI_D3hot);
 return 0;
@@ -4053,7 +4065,6 @@ static int iwl_pci_resume(struct pci_dev *pdev)
 struct iwl_priv *priv = pci_get_drvdata(pdev);
 pci_set_power_state(pdev, PCI_D0);
-pci_restore_state(pdev);
 if (priv->is_open)
 iwl_mac_start(priv->hw);
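For illustration only, not part of the diff above: the comment added to the suspend path describes a driver whose PCI state was already saved, and the device already disabled, when the interface went down or during .probe. A minimal sketch of that suspend shape follows; the function name is made up and this is an assumption about usage, not the driver's actual code.

#include <linux/pci.h>

/* Hypothetical suspend hook: trust the state saved earlier (at ifdown or
 * in .probe) instead of re-saving a disabled device, then power down. */
static int example_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pdev->state_saved = true;	/* mark the earlier saved state as valid */
	return pci_set_power_state(pdev, PCI_D3hot);
}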


@@ -8143,7 +8143,19 @@ static int iwl3945_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 priv->is_open = 1;
 }
-pci_save_state(pdev);
+/* pci driver assumes state will be saved in this function.
+* pci state is saved and device disabled when interface is
+* stopped, so at this time pci device will always be disabled -
+* whether interface was started or not. saving pci state now will
+* cause saved state be that of a disabled device, which will cause
+* problems during resume in that we will end up with a disabled device.
+*
+* indicate that the current saved state (from when interface was
+* stopped) is valid. if interface was never up at time of suspend
+* then the saved state will still be valid as it was saved during
+* .probe. */
+pdev->state_saved = true;
 pci_set_power_state(pdev, PCI_D3hot);
 return 0;
@@ -8154,7 +8166,6 @@ static int iwl3945_pci_resume(struct pci_dev *pdev)
 struct iwl3945_priv *priv = pci_get_drvdata(pdev);
 pci_set_power_state(pdev, PCI_D0);
-pci_restore_state(pdev);
 if (priv->is_open)
 iwl3945_mac_start(priv->hw);


@@ -86,6 +86,7 @@ int zd_rf_init_hw(struct zd_rf *rf, u8 type)
 case AL7230B_RF:
 r = zd_rf_init_al7230b(rf);
 break;
+case MAXIM_NEW_RF:
 case UW2453_RF:
 r = zd_rf_init_uw2453(rf);
 break;


@@ -37,6 +37,7 @@
 static struct usb_device_id usb_ids[] = {
 /* ZD1211 */
 { USB_DEVICE(0x0ace, 0x1211), .driver_info = DEVICE_ZD1211 },
+{ USB_DEVICE(0x0ace, 0xa211), .driver_info = DEVICE_ZD1211 },
 { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211 },
 { USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 },
 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },


@@ -515,7 +515,7 @@ enum
 struct tc_drr_stats
 {
-u32 deficit;
+__u32 deficit;
 };
 #endif


@@ -1308,7 +1308,7 @@ static inline int sock_writeable(const struct sock *sk)
 static inline gfp_t gfp_any(void)
 {
-return in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
 }
 static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
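A minimal sketch, not from this patch, of the kind of caller gfp_any() serves: it picks a non-sleeping allocation only when running in softirq context. in_atomic() was dropped because, without CONFIG_PREEMPT, it cannot see spinlock-held sections anyway; callers holding locks must pass GFP_ATOMIC explicitly, as cn_test.c above now does for its timer path. The helper name here is made up.

#include <linux/hardirq.h>
#include <linux/slab.h>

/* Hypothetical helper: allocate for an event that may be queued from
 * either process or softirq context. */
static void *queue_event_alloc(size_t len)
{
	return kmalloc(len, in_softirq() ? GFP_ATOMIC : GFP_KERNEL);
}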


@@ -696,6 +696,8 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 if (len < 0)
 return -EINVAL;
+v.val = 0;
 switch(optname) {
 case SO_DEBUG:
 v.val = sock_flag(sk, SOCK_DBG);


@@ -1343,6 +1343,8 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
 list) {
 if (!netif_running(sdata->dev))
 continue;
+if (sdata->vif.type != NL80211_IFTYPE_AP)
+continue;
 if (compare_ether_addr(sdata->dev->dev_addr,
 hdr->addr2)) {
 dev_hold(sdata->dev);


@@ -207,7 +207,6 @@ static int gprs_xmit(struct sk_buff *skb, struct net_device *dev)
 dev->name, err);
 dev->stats.tx_aborted_errors++;
 dev->stats.tx_errors++;
-dev_kfree_skb(skb);
 } else {
 dev->stats.tx_packets++;
 dev->stats.tx_bytes += len;


@@ -553,7 +553,7 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
 {
 struct pep_sock *pn = pep_sk(sk);
 struct sock *sknode;
-struct pnpipehdr *hdr = pnp_hdr(skb);
+struct pnpipehdr *hdr;
 struct sockaddr_pn dst;
 int err = NET_RX_SUCCESS;
 u8 pipe_handle;


@@ -94,12 +94,13 @@ struct wimax_dev *wimax_dev_get_by_genl_info(
 list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
 if (wimax_dev->net_dev->ifindex == ifindex) {
 dev_hold(wimax_dev->net_dev);
-break;
+goto found;
 }
 }
-if (wimax_dev == NULL)
-d_printf(1, NULL, "wimax: no devices found with ifindex %d\n",
-ifindex);
+wimax_dev = NULL;
+d_printf(1, NULL, "wimax: no devices found with ifindex %d\n",
+ifindex);
+found:
 spin_unlock(&wimax_id_table_lock);
 d_fnend(3, NULL, "(info %p ifindex %d) = %p\n",
 info, ifindex, wimax_dev);