Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking update from David Miller:

 1) Fix RX sequence number handling in mwifiex, from Stone Piao.

 2) Netfilter ipset mis-compares device names, fix from Florian
    Westphal.

 3) Fix route leak in ipv6 IPVS, from Eric Dumazet.

 4) NFC fixes.  Several buffer overflows in NCI layer from Dan
    Rosenberg, and release sock OOPS'er fix from Eric Dumazet.

 5) Fix WEP handling in ath9k; we started using a bit the chip provides to
    indicate undecrypted packets but that bit turns out to be unreliable
    in certain configurations.  Fix from Felix Fietkau.

 6) Fix Kconfig dependency bug in wlcore, from Randy Dunlap.

 7) New USB IDs for rtlwifi driver from Larry Finger.

 8) Fix crashes in qmi_wwan usbnet driver when disconnecting, from Bjørn
    Mork.

 9) Gianfar driver programs coalescing settings properly in single queue
    mode, but does not do so in multi-queue mode.  Fix from Claudiu
    Manoil.

10) Missing module.h include in davinci_cpdma.c, from Daniel Mack.

11) Need dummy handler for IPSET_CMD_NONE otherwise we crash in ipset if
    we get this via nfnetlink, fix from Tomasz Bursztyka.

12) Missing RCU unlock in nfnetlink error path, also from Tomasz.

13) Fix divide by zero in igbvf when the user tries to set an RX
    coalescing value of 0 usecs, from Mitch A Williams.

14) We can process SCTP sacks for the wrong transport, oops.  Fix from
    Neil Horman.

15) Remove hw IP payload checksumming from e1000e driver.  This has zero
    value in our stack, and turning it on creates a very unintuitive
    restriction for users when using jumbo MTUs.

    Specifically, when IP payload checksums are on you cannot use both
    receive hashing offload and jumbo MTU.  Fix from Bruce Allan.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (27 commits)
  e1000e: remove use of IP payload checksum
  sctp: be more restrictive in transport selection on bundled sacks
  igbvf: fix divide by zero
  netfilter: nfnetlink: fix missing rcu_read_unlock in nfnetlink_rcv_msg
  netfilter: ipset: fix crash if IPSET_CMD_NONE command is sent
  davinci_cpdma: include linux/module.h
  gianfar: Fix RXICr/TXICr programming for multi-queue mode
  net: Downgrade CAP_SYS_MODULE deprecated message from error to warning.
  net: qmi_wwan: fix Oops while disconnecting
  mwifiex: fix memory leak associated with IE management
  ath9k: fix panic caused by returning a descriptor we have queued for reuse
  mac80211: correct behaviour on unrecognised action frames
  ath9k: enable serialize_regmode for non-PCIE AR9287
  rtlwifi: rtl8192cu: New USB IDs
  NFC: Return from rawsock_release when sk is NULL
  iwlwifi: fix activating inactive stations
  wlcore: drop INET dependency
  ath9k: fix dynamic WEP related regression
  NFC: Prevent multiple buffer overflows in NCI
  netfilter: update location of my trees
  ...
Linus Torvalds committed on 2012-07-03 18:01:54 -07:00
commit 5672874889
40 changed files with 197 additions and 161 deletions

View File

@ -4654,8 +4654,8 @@ L: netfilter@vger.kernel.org
L: coreteam@netfilter.org
W: http://www.netfilter.org/
W: http://www.iptables.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
T: git git://1984.lsi.us.es/nf
T: git git://1984.lsi.us.es/nf-next
S: Supported
F: include/linux/netfilter*
F: include/linux/netfilter/

View File

@ -1804,18 +1804,16 @@ void gfar_configure_coalescing(struct gfar_private *priv,
if (priv->mode == MQ_MG_MODE) {
baddr = &regs->txic0;
for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
if (likely(priv->tx_queue[i]->txcoalescing)) {
gfar_write(baddr + i, 0);
gfar_write(baddr + i, 0);
if (likely(priv->tx_queue[i]->txcoalescing))
gfar_write(baddr + i, priv->tx_queue[i]->txic);
}
}
baddr = &regs->rxic0;
for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
if (likely(priv->rx_queue[i]->rxcoalescing)) {
gfar_write(baddr + i, 0);
gfar_write(baddr + i, 0);
if (likely(priv->rx_queue[i]->rxcoalescing))
gfar_write(baddr + i, priv->rx_queue[i]->rxic);
}
}
}
}
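
The net effect of the gianfar hunk above: each per-queue TXICr/RXICr register is cleared unconditionally and only then reprogrammed when coalescing is enabled for that queue, so queues with coalescing off no longer keep stale values. A minimal standalone sketch of that pattern (the register array and queue struct are illustrative stand-ins, not the driver's):

#include <stdio.h>
#include <stdbool.h>

#define NUM_QUEUES 8

/* Hypothetical stand-ins for the per-queue registers and driver state. */
static unsigned int txic_regs[NUM_QUEUES];

struct queue {
    bool coalescing;
    unsigned int itr_value;
};

static void program_coalescing(const struct queue *queues)
{
    /* Mirrors the fixed loop: clear the register first, then program it
     * only when coalescing is enabled for that queue. */
    for (int i = 0; i < NUM_QUEUES; i++) {
        txic_regs[i] = 0;                        /* gfar_write(baddr + i, 0) */
        if (queues[i].coalescing)
            txic_regs[i] = queues[i].itr_value;  /* gfar_write(baddr + i, txic) */
    }
}

int main(void)
{
    struct queue queues[NUM_QUEUES] = {
        [0] = { .coalescing = true, .itr_value = 0x400 },
        [3] = { .coalescing = true, .itr_value = 0x200 },
    };
    program_coalescing(queues);
    for (int i = 0; i < NUM_QUEUES; i++)
        printf("queue %d: TXICr = 0x%x\n", i, txic_regs[i]);
    return 0;
}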

View File

@ -103,6 +103,7 @@
#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */

View File

@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
* @sk_buff: socket buffer with received data
**/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
__le16 csum, struct sk_buff *skb)
struct sk_buff *skb)
{
u16 status = (u16)status_err;
u8 errors = (u8)(status_err >> 24);
@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
if (status & E1000_RXD_STAT_IXSM)
return;
/* TCP/UDP checksum error bit is set */
if (errors & E1000_RXD_ERR_TCPE) {
/* TCP/UDP checksum error bit or IP checksum error bit is set */
if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
/* let the stack verify checksum errors */
adapter->hw_csum_err++;
return;
@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
return;
/* It must be a TCP or UDP packet with a valid checksum */
if (status & E1000_RXD_STAT_TCPCS) {
/* TCP checksum is good */
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
/*
* IP fragment with UDP payload
* Hardware complements the payload checksum, so we undo it
* and then put the value in host order for further stack use.
*/
__sum16 sum = (__force __sum16)swab16((__force u16)csum);
skb->csum = csum_unfold(~sum);
skb->ip_summed = CHECKSUM_COMPLETE;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_good++;
}
@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
skb_put(skb, length);
/* Receive Checksum Offload */
e1000_rx_checksum(adapter, staterr,
rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@ -1341,8 +1328,7 @@ copydone:
total_rx_bytes += skb->len;
total_rx_packets++;
e1000_rx_checksum(adapter, staterr,
rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
}
}
/* Receive Checksum Offload XXX recompute due to CRC strip? */
e1000_rx_checksum(adapter, staterr,
rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
/* Receive Checksum Offload */
e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@ -3098,19 +3083,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
/* Enable Receive Checksum Offload for TCP and UDP */
rxcsum = er32(RXCSUM);
if (adapter->netdev->features & NETIF_F_RXCSUM) {
if (adapter->netdev->features & NETIF_F_RXCSUM)
rxcsum |= E1000_RXCSUM_TUOFL;
/*
* IPv4 payload checksum for UDP fragments must be
* used in conjunction with packet-split.
*/
if (adapter->rx_ps_pages)
rxcsum |= E1000_RXCSUM_IPPCSE;
} else {
else
rxcsum &= ~E1000_RXCSUM_TUOFL;
/* no need to clear IPPCSE as it defaults to 0 */
}
ew32(RXCSUM, rxcsum);
if (adapter->hw.mac.type == e1000_pch2lan) {
@ -5241,22 +5217,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* Jumbo frame support */
if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}
/*
* IP payload checksum (enabled with jumbos/packet-split when
* Rx checksum is enabled) and generation of RSS hash is
* mutually exclusive in the hardware.
*/
if ((netdev->features & NETIF_F_RXCSUM) &&
(netdev->features & NETIF_F_RXHASH)) {
e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n");
return -EINVAL;
}
if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
e_err("Jumbo Frames not supported.\n");
return -EINVAL;
}
/* Supported frame sizes */
@ -6030,17 +5994,6 @@ static int e1000_set_features(struct net_device *netdev,
NETIF_F_RXALL)))
return 0;
/*
* IP payload checksum (enabled with jumbos/packet-split when Rx
* checksum is enabled) and generation of RSS hash is mutually
* exclusive in the hardware.
*/
if (adapter->rx_ps_pages &&
(features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n");
return -EINVAL;
}
if (changed & NETIF_F_RXFCS) {
if (features & NETIF_F_RXFCS) {
adapter->flags2 &= ~FLAG2_CRC_STRIPPING;

View File

@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev,
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 3) &&
(ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
(ec->rx_coalesce_usecs == 2))
return -EINVAL;
/* convert to rate of irq's per second */
if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
adapter->current_itr = IGBVF_START_ITR;
adapter->requested_itr = ec->rx_coalesce_usecs;
} else {
if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
(ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
adapter->current_itr = ec->rx_coalesce_usecs << 2;
adapter->requested_itr = 1000000000 /
(adapter->current_itr * 256);
}
} else if ((ec->rx_coalesce_usecs == 3) ||
(ec->rx_coalesce_usecs == 2)) {
adapter->current_itr = IGBVF_START_ITR;
adapter->requested_itr = ec->rx_coalesce_usecs;
} else if (ec->rx_coalesce_usecs == 0) {
/*
* The user's desire is to turn off interrupt throttling
* altogether, but due to HW limitations, we can't do that.
* Instead we set a very small value in EITR, which would
* allow ~967k interrupts per second, but allow the adapter's
* internal clocking to still function properly.
*/
adapter->current_itr = 4;
adapter->requested_itr = 1000000000 /
(adapter->current_itr * 256);
} else
return -EINVAL;
writel(adapter->current_itr,
hw->hw_addr + adapter->rx_ring->itr_register);
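
The 0-usecs branch in the hunk above avoids the divide by zero by programming a small non-zero EITR value (4) instead of 0. A standalone sketch of the arithmetic, using only constants visible in the diff:

#include <stdio.h>

/* Floor value the fix programs when the user asks for rx-usecs 0. */
#define ITR_MIN_HW 4

int main(void)
{
    unsigned int current_itr = ITR_MIN_HW;
    /* The old code could reach this division with current_itr == 0, which
     * faulted.  With the floor of 4 the result is well defined and on the
     * order of the ~967k interrupts per second quoted in the driver
     * comment above. */
    unsigned int irqs_per_sec = 1000000000u / (current_itr * 256u);
    printf("requested_itr = %u interrupts/s\n", irqs_per_sec);
    return 0;
}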

View File

@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>

View File

@ -197,6 +197,10 @@ err:
static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
{
struct usbnet *dev = usb_get_intfdata(intf);
/* can be called while disconnecting */
if (!dev)
return 0;
return qmi_wwan_manage_power(dev, on);
}

View File

@ -143,6 +143,7 @@ struct ath_common {
u32 keymax;
DECLARE_BITMAP(keymap, ATH_KEYMAX);
DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
enum ath_crypt_caps crypt_caps;
unsigned int clockrate;

View File

@ -622,7 +622,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
!ah->is_pciexpress)) {
ah->config.serialize_regmode =
SER_REG_MODE_ON;

View File

@ -695,9 +695,9 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
__skb_unlink(skb, &rx_edma->rx_fifo);
list_add_tail(&bf->list, &sc->rx.rxbuf);
ath_rx_edma_buf_link(sc, qtype);
} else {
bf = NULL;
}
bf = NULL;
}
*dest = bf;
@ -822,7 +822,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
* descriptor does contain a valid key index. This has been observed
* mostly with CCMP encryption.
*/
if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
!test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
if (!rx_stats->rs_datalen) {

View File

@ -556,6 +556,9 @@ int ath_key_config(struct ath_common *common,
return -EIO;
set_bit(idx, common->keymap);
if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
set_bit(idx, common->ccmp_keymap);
if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
set_bit(idx + 64, common->keymap);
set_bit(idx, common->tkip_keymap);
@ -582,6 +585,7 @@ void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
return;
clear_bit(key->hw_key_idx, common->keymap);
clear_bit(key->hw_key_idx, common->ccmp_keymap);
if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
return;
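
The ath9k hunks keep a second bitmap, ccmp_keymap, next to keymap so the receive path trusts the hardware key-miss indication only for slots that actually hold CCMP keys. A rough standalone model of that bookkeeping (plain bool arrays stand in for the kernel's DECLARE_BITMAP/set_bit/test_bit helpers):

#include <stdio.h>
#include <stdbool.h>

#define ATH_KEYMAX 128

static bool keymap[ATH_KEYMAX];
static bool ccmp_keymap[ATH_KEYMAX];

enum cipher { CIPHER_WEP, CIPHER_TKIP, CIPHER_CCMP };

static void key_config(int idx, enum cipher c)
{
    keymap[idx] = true;
    if (c == CIPHER_CCMP)
        ccmp_keymap[idx] = true;   /* mirrors set_bit(idx, common->ccmp_keymap) */
}

/* The RX path only honours the hardware "key miss" bit when the reported
 * key index refers to a CCMP key we actually installed. */
static bool trust_keymiss(int rs_keyix)
{
    return rs_keyix >= 0 && rs_keyix < ATH_KEYMAX && ccmp_keymap[rs_keyix];
}

int main(void)
{
    key_config(4, CIPHER_WEP);
    key_config(7, CIPHER_CCMP);
    printf("keyix 4 (WEP): trust keymiss? %d\n", trust_keymiss(4));  /* 0 */
    printf("keyix 7 (CCMP): trust keymiss? %d\n", trust_keymiss(7)); /* 1 */
    return 0;
}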

View File

@ -796,6 +796,18 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
switch (op) {
case ADD:
ret = iwlagn_mac_sta_add(hw, vif, sta);
if (ret)
break;
/*
* Clear the in-progress flag, the AP station entry was added
* but we'll initialize LQ only when we've associated (which
* would also clear the in-progress flag). This is necessary
* in case we never initialize LQ because association fails.
*/
spin_lock_bh(&priv->sta_lock);
priv->stations[iwl_sta_id(sta)].used &=
~IWL_STA_UCODE_INPROGRESS;
spin_unlock_bh(&priv->sta_lock);
break;
case REMOVE:
ret = iwlagn_mac_sta_remove(hw, vif, sta);

View File

@ -256,7 +256,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
else
last_seq = priv->rx_seq[tid];
if (last_seq >= new_node->start_win)
if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
last_seq >= new_node->start_win)
new_node->start_win = last_seq + 1;
new_node->win_size = win_size;
@ -596,5 +597,5 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
mwifiex_reset_11n_rx_seq_num(priv);
}

View File

@ -37,6 +37,13 @@
#define ADDBA_RSP_STATUS_ACCEPT 0
#define MWIFIEX_DEF_11N_RX_SEQ_NUM 0xffff
static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
{
memset(priv->rx_seq, 0xff, sizeof(priv->rx_seq));
}
int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
u16 seqNum,
u16 tid, u8 *ta,
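
The header hunk above introduces 0xffff as a "no sequence number seen yet" sentinel, set by memset()ing the per-TID table with 0xff; the reorder-table code then skips the start_win adjustment when it sees the sentinel. A small standalone sketch, with illustrative sizes:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MAX_NUM_TID 8
#define DEF_11N_RX_SEQ_NUM 0xffff  /* sentinel: no sequence number recorded yet */

static uint16_t rx_seq[MAX_NUM_TID];

static void reset_rx_seq(void)
{
    /* memset with 0xff sets every 16-bit entry to the 0xffff sentinel,
     * mirroring mwifiex_reset_11n_rx_seq_num(). */
    memset(rx_seq, 0xff, sizeof(rx_seq));
}

int main(void)
{
    reset_rx_seq();
    uint16_t last_seq = rx_seq[0];
    uint16_t start_win = 10;
    /* The reorder-table fix: only bump start_win from last_seq when a real
     * sequence number has been recorded for this TID. */
    if (last_seq != DEF_11N_RX_SEQ_NUM && last_seq >= start_win)
        start_win = last_seq + 1;
    printf("start_win = %u (sentinel left it untouched)\n", (unsigned)start_win);
    return 0;
}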

View File

@ -213,6 +213,7 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
/* save assoc resp ie index after auto-indexing */
*assoc_idx = *((u16 *)pos);
kfree(ap_custom_ie);
return ret;
}

View File

@ -978,10 +978,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
adapter->event_cause = *(u32 *) skb->data;
skb_pull(skb, MWIFIEX_EVENT_HEADER_LEN);
if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE))
memcpy(adapter->event_body, skb->data, skb->len);
memcpy(adapter->event_body,
skb->data + MWIFIEX_EVENT_HEADER_LEN,
skb->len);
/* event cause has been saved to adapter->event_cause */
adapter->event_received = true;

View File

@ -406,9 +406,9 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_UAP_STA_ASSOC:
skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
memset(&sinfo, 0, sizeof(sinfo));
event = (struct mwifiex_assoc_event *)adapter->event_skb->data;
event = (struct mwifiex_assoc_event *)
(adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
len = -1;
@ -433,9 +433,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
GFP_KERNEL);
break;
case EVENT_UAP_STA_DEAUTH:
skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
cfg80211_del_sta(priv->netdev, adapter->event_skb->data,
GFP_KERNEL);
cfg80211_del_sta(priv->netdev, adapter->event_body +
MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
break;
case EVENT_UAP_BSS_IDLE:
priv->media_connected = false;

View File

@ -49,6 +49,7 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
struct device *dev = adapter->dev;
u32 recv_type;
__le32 tmp;
int ret;
if (adapter->hs_activated)
mwifiex_process_hs_config(adapter);
@ -69,16 +70,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
case MWIFIEX_USB_TYPE_CMD:
if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
dev_err(dev, "CMD: skb->len too large\n");
return -1;
ret = -1;
goto exit_restore_skb;
} else if (!adapter->curr_cmd) {
dev_dbg(dev, "CMD: no curr_cmd\n");
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
mwifiex_process_sleep_confirm_resp(
adapter, skb->data,
skb->len);
return 0;
ret = 0;
goto exit_restore_skb;
}
return -1;
ret = -1;
goto exit_restore_skb;
}
adapter->curr_cmd->resp_skb = skb;
@ -87,20 +91,22 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
case MWIFIEX_USB_TYPE_EVENT:
if (skb->len < sizeof(u32)) {
dev_err(dev, "EVENT: skb->len too small\n");
return -1;
ret = -1;
goto exit_restore_skb;
}
skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
adapter->event_cause = le32_to_cpu(tmp);
skb_pull(skb, sizeof(u32));
dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
if (skb->len > MAX_EVENT_SIZE) {
dev_err(dev, "EVENT: event body too large\n");
return -1;
ret = -1;
goto exit_restore_skb;
}
skb_copy_from_linear_data(skb, adapter->event_body,
skb->len);
memcpy(adapter->event_body, skb->data +
MWIFIEX_EVENT_HEADER_LEN, skb->len);
adapter->event_received = true;
adapter->event_skb = skb;
break;
@ -124,6 +130,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
}
return -EINPROGRESS;
exit_restore_skb:
/* The buffer will be reused for further cmds/events */
skb_push(skb, INTF_HEADER_LEN);
return ret;
}
static void mwifiex_usb_rx_complete(struct urb *urb)

View File

@ -404,6 +404,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
mwifiex_reset_11n_rx_seq_num(priv);
atomic_set(&priv->wmm.tx_pkts_queued, 0);
atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}
@ -1221,6 +1223,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
if (!ptr->is_11n_enabled ||
mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
priv->wps.session_enable ||
((priv->sec_info.wpa_enabled ||
priv->sec_info.wpa2_enabled) &&
!priv->wpa_is_gtk_set)) {

View File

@ -301,9 +301,11 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
{RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
/* HP - Lite-On ,8188CUS Slim Combo */
{RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
{RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
@ -346,6 +348,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
{RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
{RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
{RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
{RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/

View File

@ -1,7 +1,6 @@
config WLCORE
tristate "TI wlcore support"
depends on WL_TI && GENERIC_HARDIRQS && MAC80211
depends on INET
select FW_LOADER
---help---
This module contains the main code for TI WLAN chips. It abstracts

View File

@ -912,6 +912,9 @@ struct sctp_transport {
/* Is this structure kfree()able? */
malloced:1;
/* Has this transport moved the ctsn since we last sacked */
__u32 sack_generation;
struct flowi fl;
/* This is the peer's IP address and port. */
@ -1584,6 +1587,7 @@ struct sctp_association {
*/
__u8 sack_needed; /* Do we need to sack the peer? */
__u32 sack_cnt;
__u32 sack_generation;
/* These are capabilities which our peer advertised. */
__u8 ecn_capable:1, /* Can peer do ECN? */

View File

@ -117,7 +117,8 @@ void sctp_tsnmap_free(struct sctp_tsnmap *map);
int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
/* Mark this TSN as seen. */
int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn);
int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn,
struct sctp_transport *trans);
/* Mark this TSN and all lower as seen. */
void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);

View File

@ -1136,8 +1136,8 @@ void dev_load(struct net *net, const char *name)
no_module = request_module("netdev-%s", name);
if (no_module && capable(CAP_SYS_MODULE)) {
if (!request_module("%s", name))
pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
name);
pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
name);
}
}
EXPORT_SYMBOL(dev_load);

View File

@ -1342,7 +1342,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
u32 changed = 0;
u8 bssid[ETH_ALEN];
ASSERT_MGD_MTX(ifmgd);
@ -1354,10 +1353,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_stop_poll(sdata);
memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
ifmgd->associated = NULL;
memset(ifmgd->bssid, 0, ETH_ALEN);
/*
* we need to commit the associated = NULL change because the
@ -1377,7 +1373,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
netif_carrier_off(sdata->dev);
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, bssid);
sta = sta_info_get(sdata, ifmgd->bssid);
if (sta) {
set_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta, tx);
@ -1386,13 +1382,16 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
/* deauthenticate/disassociate now */
if (tx || frame_buf)
ieee80211_send_deauth_disassoc(sdata, bssid, stype, reason,
tx, frame_buf);
ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid, stype,
reason, tx, frame_buf);
/* flush out frame */
if (tx)
drv_flush(local, false);
/* clear bssid only after building the needed mgmt frames */
memset(ifmgd->bssid, 0, ETH_ALEN);
/* remove AP and TDLS peers */
sta_info_flush(local, sdata);

View File

@ -2455,7 +2455,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
* frames that we didn't handle, including returning unknown
* ones. For all other modes we will return them to the sender,
* setting the 0x80 bit in the action category, as required by
* 802.11-2007 7.3.1.11.
* 802.11-2012 9.24.4.
* Newer versions of hostapd shall also use the management frame
* registration mechanisms, but older ones still use cooked
* monitor interfaces so push all frames there.
@ -2465,6 +2465,9 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
return RX_DROP_MONITOR;
if (is_multicast_ether_addr(mgmt->da))
return RX_DROP_MONITOR;
/* do not return rejected action frames */
if (mgmt->u.action.category & 0x80)
return RX_DROP_UNUSABLE;
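
Per the hunk above, unhandled action frames addressed to a multicast destination are now dropped to monitor rather than being reflected back with the 0x80 category bit. A simplified standalone model of the decision (types and names are illustrative, not mac80211's):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

enum rx_result { RX_DROP_MONITOR, RX_DROP_UNUSABLE, RX_RETURN_TO_SENDER };

static bool is_multicast(const uint8_t *da)
{
    return da[0] & 0x01;   /* group bit of the destination address */
}

static enum rx_result handle_unrecognised_action(const uint8_t *da, uint8_t category)
{
    if (is_multicast(da))
        return RX_DROP_MONITOR;   /* never bounce a frame back to a group address */
    if (category & 0x80)
        return RX_DROP_UNUSABLE;  /* already a rejected action frame, don't loop */
    return RX_RETURN_TO_SENDER;   /* unicast: return with the 0x80 bit set (802.11-2012 9.24.4) */
}

int main(void)
{
    uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    uint8_t ucast[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
    printf("multicast -> %d, unicast -> %d\n",
           handle_unrecognised_action(mcast, 0x7f),
           handle_unrecognised_action(ucast, 0x7f));
    return 0;
}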

View File

@ -639,6 +639,14 @@ find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
return 0;
}
static int
ip_set_none(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[])
{
return -EOPNOTSUPP;
}
static int
ip_set_create(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
@ -1539,6 +1547,10 @@ nlmsg_failure:
}
static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
[IPSET_CMD_NONE] = {
.call = ip_set_none,
.attr_count = IPSET_ATTR_CMD_MAX,
},
[IPSET_CMD_CREATE] = {
.call = ip_set_create,
.attr_count = IPSET_ATTR_CMD_MAX,
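
The ipset hunk fills the previously empty IPSET_CMD_NONE slot in the nfnetlink callback table with a stub returning -EOPNOTSUPP, so dispatching that command no longer hits a NULL callback. A toy standalone illustration of the pattern:

#include <stdio.h>
#include <errno.h>

/* Toy callback table indexed by command number.  Slot 0 (CMD_NONE) used to
 * be left NULL, so dispatching it dereferenced a NULL function pointer; the
 * fix installs a stub that simply rejects the command. */
typedef int (*cmd_fn)(void);

static int cmd_none(void)   { return -EOPNOTSUPP; }
static int cmd_create(void) { return 0; }

static cmd_fn callbacks[] = {
    [0] = cmd_none,     /* IPSET_CMD_NONE: dummy handler from the fix */
    [1] = cmd_create,   /* IPSET_CMD_CREATE */
};

int main(void)
{
    for (unsigned int cmd = 0; cmd < 2; cmd++)
        printf("cmd %u -> %d\n", cmd, callbacks[cmd]());
    return 0;
}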

View File

@ -38,30 +38,6 @@ struct iface_node {
#define iface_data(n) (rb_entry(n, struct iface_node, node)->iface)
static inline long
ifname_compare(const char *_a, const char *_b)
{
const long *a = (const long *)_a;
const long *b = (const long *)_b;
BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
if (a[0] != b[0])
return a[0] - b[0];
if (IFNAMSIZ > sizeof(long)) {
if (a[1] != b[1])
return a[1] - b[1];
}
if (IFNAMSIZ > 2 * sizeof(long)) {
if (a[2] != b[2])
return a[2] - b[2];
}
if (IFNAMSIZ > 3 * sizeof(long)) {
if (a[3] != b[3])
return a[3] - b[3];
}
return 0;
}
static void
rbtree_destroy(struct rb_root *root)
{
@ -99,7 +75,7 @@ iface_test(struct rb_root *root, const char **iface)
while (n) {
const char *d = iface_data(n);
long res = ifname_compare(*iface, d);
int res = strcmp(*iface, d);
if (res < 0)
n = n->rb_left;
@ -121,7 +97,7 @@ iface_add(struct rb_root *root, const char **iface)
while (*n) {
char *ifname = iface_data(*n);
long res = ifname_compare(*iface, ifname);
int res = strcmp(*iface, ifname);
p = *n;
if (res < 0)
@ -366,7 +342,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
struct hash_netiface4_elem data = { .cidr = HOST_MASK };
u32 ip = 0, ip_to, last;
u32 timeout = h->timeout;
char iface[IFNAMSIZ] = {};
char iface[IFNAMSIZ];
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
@ -663,7 +639,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface6_elem data = { .cidr = HOST_MASK };
u32 timeout = h->timeout;
char iface[IFNAMSIZ] = {};
char iface[IFNAMSIZ];
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
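
The removed ifname_compare() compared the name buffers word by word, so any difference in the bytes past the terminating NUL made identical interface names compare unequal; strcmp() stops at the NUL, which is why the '= {}' zero-initializers can also go away. A standalone demonstration (memcmp over the full buffer stands in for the word-wise compare):

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

int main(void)
{
    /* Two buffers holding the same interface name but different garbage
     * after the terminating NUL. */
    char a[IFNAMSIZ] = "eth0";          /* remaining bytes are zero */
    char b[IFNAMSIZ];
    memset(b, 0x5a, sizeof(b));
    strcpy(b, "eth0");                  /* remaining bytes stay 0x5a */

    int word_cmp = memcmp(a, b, IFNAMSIZ);  /* trailing garbage makes these differ */
    int str_cmp  = strcmp(a, b);            /* the fix: compare only up to the NUL */
    printf("full-buffer compare: %d, strcmp: %d\n", word_cmp, str_cmp);
    return 0;
}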

View File

@ -76,19 +76,19 @@ static void __ip_vs_del_service(struct ip_vs_service *svc);
#ifdef CONFIG_IP_VS_IPV6
/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
static int __ip_vs_addr_is_local_v6(struct net *net,
const struct in6_addr *addr)
static bool __ip_vs_addr_is_local_v6(struct net *net,
const struct in6_addr *addr)
{
struct rt6_info *rt;
struct flowi6 fl6 = {
.daddr = *addr,
};
struct dst_entry *dst = ip6_route_output(net, NULL, &fl6);
bool is_local;
rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
if (rt && rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
return 1;
is_local = !dst->error && dst->dev && (dst->dev->flags & IFF_LOOPBACK);
return 0;
dst_release(dst);
return is_local;
}
#endif
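
The IPVS hunk works on the dst_entry returned by ip6_route_output() and, crucially, releases it on every path; the old code returned without dropping the reference, leaking the route. A toy refcounting model of why the unconditional release matters (names are illustrative, not the kernel's):

#include <stdio.h>

/* Toy refcounted "route" standing in for a dst_entry: every lookup must be
 * balanced by a release. */
struct route { int refcnt; };

static struct route cached = { .refcnt = 1 };

static struct route *route_output(void) { cached.refcnt++; return &cached; }
static void route_release(struct route *r) { r->refcnt--; }

static int addr_is_local(int loopback)
{
    struct route *rt = route_output();
    int is_local = loopback;      /* stands in for the IFF_LOOPBACK check */
    route_release(rt);            /* the fix: release on every return path */
    return is_local;
}

int main(void)
{
    addr_is_local(1);
    addr_is_local(0);
    printf("refcnt after lookups: %d (leak-free when it returns to 1)\n",
           cached.refcnt);
    return 0;
}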

View File

@ -169,8 +169,10 @@ replay:
err = nla_parse(cda, ss->cb[cb_id].attr_count,
attr, attrlen, ss->cb[cb_id].policy);
if (err < 0)
if (err < 0) {
rcu_read_unlock();
return err;
}
if (nc->call_rcu) {
err = nc->call_rcu(net->nfnl, skb, nlh,

View File

@ -106,7 +106,7 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
data += 2;
nfca_poll->nfcid1_len = *data++;
nfca_poll->nfcid1_len = min_t(__u8, *data++, NFC_NFCID1_MAXSIZE);
pr_debug("sens_res 0x%x, nfcid1_len %d\n",
nfca_poll->sens_res, nfca_poll->nfcid1_len);
@ -130,7 +130,7 @@ static __u8 *nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev,
struct rf_tech_specific_params_nfcb_poll *nfcb_poll,
__u8 *data)
{
nfcb_poll->sensb_res_len = *data++;
nfcb_poll->sensb_res_len = min_t(__u8, *data++, NFC_SENSB_RES_MAXSIZE);
pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len);
@ -145,7 +145,7 @@ static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev,
__u8 *data)
{
nfcf_poll->bit_rate = *data++;
nfcf_poll->sensf_res_len = *data++;
nfcf_poll->sensf_res_len = min_t(__u8, *data++, NFC_SENSF_RES_MAXSIZE);
pr_debug("bit_rate %d, sensf_res_len %d\n",
nfcf_poll->bit_rate, nfcf_poll->sensf_res_len);
@ -331,7 +331,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
switch (ntf->activation_rf_tech_and_mode) {
case NCI_NFC_A_PASSIVE_POLL_MODE:
nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
nfca_poll->rats_res_len = *data++;
nfca_poll->rats_res_len = min_t(__u8, *data++, 20);
pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len);
if (nfca_poll->rats_res_len > 0) {
memcpy(nfca_poll->rats_res,
@ -341,7 +341,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
case NCI_NFC_B_PASSIVE_POLL_MODE:
nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep;
nfcb_poll->attrib_res_len = *data++;
nfcb_poll->attrib_res_len = min_t(__u8, *data++, 50);
pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len);
if (nfcb_poll->attrib_res_len > 0) {
memcpy(nfcb_poll->attrib_res,
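
Each NCI length byte in these hunks is clamped with min_t() before it is used to size a copy into a fixed buffer, which is what closes the overflows. A standalone sketch of the clamp-before-copy pattern (NFC_NFCID1_MAXSIZE assumed to be 10, matching the NFC core; the notification bytes are made up):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NFC_NFCID1_MAXSIZE 10   /* assumed destination-buffer limit */

int main(void)
{
    /* Device-controlled notification: claims 200 bytes of NFCID1 data. */
    uint8_t data[32] = { 200 };
    uint8_t nfcid1[NFC_NFCID1_MAXSIZE];

    const uint8_t *p = data;
    uint8_t claimed = *p++;
    /* min_t(__u8, *data++, NFC_NFCID1_MAXSIZE) in the fix: never copy more
     * than the destination buffer can hold. */
    uint8_t len = claimed < NFC_NFCID1_MAXSIZE ? claimed : NFC_NFCID1_MAXSIZE;
    memcpy(nfcid1, p, len);

    printf("claimed %u bytes, copied %u\n", (unsigned)claimed, (unsigned)len);
    return 0;
}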

View File

@ -54,7 +54,10 @@ static int rawsock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
pr_debug("sock=%p\n", sock);
pr_debug("sock=%p sk=%p\n", sock, sk);
if (!sk)
return 0;
sock_orphan(sk);
sock_put(sk);

View File

@ -271,6 +271,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
*/
asoc->peer.sack_needed = 1;
asoc->peer.sack_cnt = 0;
asoc->peer.sack_generation = 1;
/* Assume that the peer will tell us if he recognizes ASCONF
* as part of INIT exchange.

View File

@ -248,6 +248,11 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
/* If the SACK timer is running, we have a pending SACK */
if (timer_pending(timer)) {
struct sctp_chunk *sack;
if (pkt->transport->sack_generation !=
pkt->transport->asoc->peer.sack_generation)
return retval;
asoc->a_rwnd = asoc->rwnd;
sack = sctp_make_sack(asoc);
if (sack) {

View File

@ -734,8 +734,10 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
int len;
__u32 ctsn;
__u16 num_gabs, num_dup_tsns;
struct sctp_association *aptr = (struct sctp_association *)asoc;
struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
struct sctp_gap_ack_block gabs[SCTP_MAX_GABS];
struct sctp_transport *trans;
memset(gabs, 0, sizeof(gabs));
ctsn = sctp_tsnmap_get_ctsn(map);
@ -805,6 +807,20 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
sctp_tsnmap_get_dups(map));
/* Once we have a sack generated, check to see what our sack
* generation is, if its 0, reset the transports to 0, and reset
* the association generation to 1
*
* The idea is that zero is never used as a valid generation for the
* association so no transport will match after a wrap event like this,
* Until the next sack
*/
if (++aptr->peer.sack_generation == 0) {
list_for_each_entry(trans, &asoc->peer.transport_addr_list,
transports)
trans->sack_generation = 0;
aptr->peer.sack_generation = 1;
}
nodata:
return retval;
}
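
Taken together, the SCTP hunks add a SACK "generation" so a SACK is only bundled on a transport that has actually received data since the last SACK was generated: marking a TSN copies the association's generation into the transport, bundling compares the two, and building a SACK advances the association's counter with the wrap handling shown above. A toy standalone model of that scheme (names are illustrative, not the kernel's):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct transport { uint32_t sack_generation; };
struct assoc     { uint32_t sack_generation; };

static void mark_data_received(struct assoc *a, struct transport *t)
{
    t->sack_generation = a->sack_generation;   /* sctp_tsnmap_mark(..., trans) */
}

static bool may_bundle_sack(const struct assoc *a, const struct transport *t)
{
    return t->sack_generation == a->sack_generation;
}

static void make_sack(struct assoc *a, struct transport *ts, int n)
{
    if (++a->sack_generation == 0) {           /* wrap: zero is never valid */
        for (int i = 0; i < n; i++)
            ts[i].sack_generation = 0;
        a->sack_generation = 1;
    }
}

int main(void)
{
    struct assoc a = { .sack_generation = 1 }; /* association starts at 1 */
    struct transport t[2] = { {0}, {0} };      /* transports start at 0 */

    mark_data_received(&a, &t[0]);             /* data arrived on transport 0 only */
    printf("bundle on t0: %d, on t1: %d\n",
           may_bundle_sack(&a, &t[0]), may_bundle_sack(&a, &t[1]));

    make_sack(&a, t, 2);                       /* SACK sent, generation advances */
    printf("after SACK, bundle on t0: %d\n", may_bundle_sack(&a, &t[0]));
    return 0;
}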

View File

@ -1268,7 +1268,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_REPORT_TSN:
/* Record the arrival of a TSN. */
error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
cmd->obj.u32);
cmd->obj.u32, NULL);
break;
case SCTP_CMD_REPORT_FWDTSN:

View File

@ -68,6 +68,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
memset(&peer->saddr, 0, sizeof(union sctp_addr));
peer->sack_generation = 0;
/* From 6.3.1 RTO Calculation:
*
* C1) Until an RTT measurement has been made for a packet sent to the

View File

@ -114,7 +114,8 @@ int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn)
/* Mark this TSN as seen. */
int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
struct sctp_transport *trans)
{
u16 gap;
@ -133,6 +134,9 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
*/
map->max_tsn_seen++;
map->cumulative_tsn_ack_point++;
if (trans)
trans->sack_generation =
trans->asoc->peer.sack_generation;
map->base_tsn++;
} else {
/* Either we already have a gap, or about to record a gap, so

View File

@ -715,7 +715,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
* can mark it as received so the tsn_map is updated correctly.
*/
if (sctp_tsnmap_mark(&asoc->peer.tsn_map,
ntohl(chunk->subh.data_hdr->tsn)))
ntohl(chunk->subh.data_hdr->tsn),
chunk->transport))
goto fail_mark;
/* First calculate the padding, so we don't inadvertently

View File

@ -1051,7 +1051,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
if (chunk && (freed >= needed)) {
__u32 tsn;
tsn = ntohl(chunk->subh.data_hdr->tsn);
sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
sctp_ulpq_tail_data(ulpq, chunk, gfp);
sctp_ulpq_partial_delivery(ulpq, chunk, gfp);