
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6 into for-davem

John W. Linville 2011-03-11 14:11:11 -05:00
commit 409ec36c32
78 changed files with 2592 additions and 1751 deletions

View File

@ -290,7 +290,7 @@ struct mib_mac_mgmt {
u8 res;
u8 multi_domain_capability_implemented;
u8 multi_domain_capability_enabled;
u8 country_string[3];
u8 country_string[IEEE80211_COUNTRY_STRING_LEN];
u8 reserved[3];
} __packed;
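A purely cosmetic substitution: IEEE80211_COUNTRY_STRING_LEN names the three-byte country code, so the structure layout is unchanged and only the intent becomes explicit. The define in include/linux/ieee80211.h is simply:

#define IEEE80211_COUNTRY_STRING_LEN	3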

View File

@ -93,7 +93,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
goto err_out;
}
mem = ioremap_nocache(res->start, res->end - res->start + 1);
mem = ioremap_nocache(res->start, resource_size(res));
if (mem == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -ENOMEM;
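The open-coded length and the helper are equivalent; resource_size() exists precisely to avoid this easy-to-get-wrong off-by-one. Its definition in include/linux/ioport.h is essentially:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}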

View File

@ -513,7 +513,7 @@ enum ath5k_tx_queue_id {
AR5K_TX_QUEUE_ID_NOQCU_DATA = 0,
AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1,
AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/
AR5K_TX_QUEUE_ID_DATA_MAX = 4, /*IEEE80211_TX_QUEUE_DATA4*/
AR5K_TX_QUEUE_ID_DATA_MAX = 3, /*IEEE80211_TX_QUEUE_DATA3*/
AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/
AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/
AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/
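With DATA_MAX corrected from 4 to 3, the DATA_MIN..DATA_MAX range covers exactly the four data queues labelled DATA0 through DATA3 in the comments, one per WME access category, while SVP, CAB and beacon traffic keep their dedicated queue IDs 5 to 7.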

View File

@ -442,19 +442,9 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
return ath5k_reset(sc, chan, true);
}
struct ath_vif_iter_data {
const u8 *hw_macaddr;
u8 mask[ETH_ALEN];
u8 active_mac[ETH_ALEN]; /* first active MAC */
bool need_set_hw_addr;
bool found_active;
bool any_assoc;
enum nl80211_iftype opmode;
};
static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
struct ath_vif_iter_data *iter_data = data;
struct ath5k_vif_iter_data *iter_data = data;
int i;
struct ath5k_vif *avf = (void *)vif->drv_priv;
@ -484,9 +474,12 @@ static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
*/
if (avf->opmode == NL80211_IFTYPE_AP)
iter_data->opmode = NL80211_IFTYPE_AP;
else
else {
if (avf->opmode == NL80211_IFTYPE_STATION)
iter_data->n_stas++;
if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
iter_data->opmode = avf->opmode;
}
}
void
@ -494,7 +487,8 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath5k_hw_common(sc->ah);
struct ath_vif_iter_data iter_data;
struct ath5k_vif_iter_data iter_data;
u32 rfilt;
/*
* Use the hardware MAC address as reference, the hardware uses it
@ -505,12 +499,13 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
iter_data.found_active = false;
iter_data.need_set_hw_addr = true;
iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
iter_data.n_stas = 0;
if (vif)
ath_vif_iter(&iter_data, vif->addr, vif);
ath5k_vif_iter(&iter_data, vif->addr, vif);
/* Get list of all active MAC addresses */
ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter,
ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
&iter_data);
memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN);
@ -528,20 +523,19 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
if (ath5k_hw_hasbssidmask(sc->ah))
ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
}
void
ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif)
{
struct ath5k_hw *ah = sc->ah;
u32 rfilt;
/* Set up RX Filter */
if (iter_data.n_stas > 1) {
/* If you have multiple STA interfaces connected to
* different APs, ARPs are not received (most of the time?)
* Enabling PROMISC appears to fix that problem.
*/
sc->filter_flags |= AR5K_RX_FILTER_PROM;
}
/* configure rx filter */
rfilt = sc->filter_flags;
ath5k_hw_set_rx_filter(ah, rfilt);
ath5k_hw_set_rx_filter(sc->ah, rfilt);
ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
ath5k_update_bssid_mask_and_opmode(sc, vif);
}
static inline int
@ -1117,7 +1111,7 @@ ath5k_rx_start(struct ath5k_softc *sc)
spin_unlock_bh(&sc->rxbuflock);
ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */
ath5k_mode_setup(sc, NULL); /* set filters, etc. */
ath5k_update_bssid_mask_and_opmode(sc, NULL); /* set filters, etc. */
ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */
return 0;
@ -2923,13 +2917,13 @@ ath5k_deinit_softc(struct ath5k_softc *sc)
bool
ath_any_vif_assoc(struct ath5k_softc *sc)
{
struct ath_vif_iter_data iter_data;
struct ath5k_vif_iter_data iter_data;
iter_data.hw_macaddr = NULL;
iter_data.any_assoc = false;
iter_data.need_set_hw_addr = false;
iter_data.found_active = true;
ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter,
ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
&iter_data);
return iter_data.any_assoc;
}

View File

@ -259,6 +259,19 @@ struct ath5k_softc {
struct survey_info survey; /* collected survey info */
};
struct ath5k_vif_iter_data {
const u8 *hw_macaddr;
u8 mask[ETH_ALEN];
u8 active_mac[ETH_ALEN]; /* first active MAC */
bool need_set_hw_addr;
bool found_active;
bool any_assoc;
enum nl80211_iftype opmode;
int n_stas;
};
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);
#define ath5k_hw_hasbssidmask(_ah) \
(ath5k_hw_get_capability(_ah, AR5K_CAP_BSSIDMASK, 0, NULL) == 0)
#define ath5k_hw_hasveol(_ah) \

View File

@ -158,8 +158,7 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
memcpy(&avf->lladdr, vif->addr, ETH_ALEN);
ath5k_mode_setup(sc, vif);
ath5k_update_bssid_mask_and_opmode(sc, vif);
ret = 0;
end:
mutex_unlock(&sc->lock);
@ -381,6 +380,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
struct ath5k_softc *sc = hw->priv;
struct ath5k_hw *ah = sc->ah;
u32 mfilt[2], rfilt;
struct ath5k_vif_iter_data iter_data; /* to count STA interfaces */
mutex_lock(&sc->lock);
@ -454,6 +454,21 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
break;
}
iter_data.hw_macaddr = NULL;
iter_data.n_stas = 0;
iter_data.need_set_hw_addr = false;
ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
&iter_data);
/* Set up RX Filter */
if (iter_data.n_stas > 1) {
/* If you have multiple STA interfaces connected to
* different APs, ARPs are not received (most of the time?)
* Enabling PROMISC appears to fix that problem.
*/
rfilt |= AR5K_RX_FILTER_PROM;
}
/* Set filters */
ath5k_hw_set_rx_filter(ah, rfilt);

View File

@ -75,7 +75,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
goto err_out;
}
mem = ioremap_nocache(res->start, res->end - res->start + 1);
mem = ioremap_nocache(res->start, resource_size(res));
if (mem == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -ENOMEM;

View File

@ -1020,28 +1020,29 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
static void ar9003_hw_do_getnf(struct ath_hw *ah,
int16_t nfarray[NUM_NF_READINGS])
{
#define AR_PHY_CH_MINCCA_PWR 0x1FF00000
#define AR_PHY_CH_MINCCA_PWR_S 20
#define AR_PHY_CH_EXT_MINCCA_PWR 0x01FF0000
#define AR_PHY_CH_EXT_MINCCA_PWR_S 16
int16_t nf;
int i;
nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR);
nfarray[0] = sign_extend32(nf, 8);
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (ah->rxchainmask & BIT(i)) {
nf = MS(REG_READ(ah, ah->nf_regs[i]),
AR_PHY_CH_MINCCA_PWR);
nfarray[i] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
nfarray[1] = sign_extend32(nf, 8);
if (IS_CHAN_HT40(ah->curchan)) {
u8 ext_idx = AR9300_MAX_CHAINS + i;
nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR);
nfarray[2] = sign_extend32(nf, 8);
if (!IS_CHAN_HT40(ah->curchan))
return;
nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
nfarray[3] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR);
nfarray[4] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR);
nfarray[5] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, ah->nf_regs[ext_idx]),
AR_PHY_CH_EXT_MINCCA_PWR);
nfarray[ext_idx] = sign_extend32(nf, 8);
}
}
}
}
static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
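The rewrite walks the receive chains through ah->nf_regs[] instead of hard-coding one register per chain, but keeps the same 9-bit sign extension of the minCCA readings. As a reminder of the helper's semantics, sign_extend32(v, 8) treats bit 8 as the sign bit; an illustrative value (not from the patch):

int16_t nf = sign_extend32(0x1B5, 8);	/* 0b1_1011_0101 sign-extends to -75 */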

View File

@ -15,6 +15,7 @@
*/
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include "ath9k.h"
@ -30,6 +31,19 @@ static int ath9k_debugfs_open(struct inode *inode, struct file *file)
return 0;
}
static ssize_t ath9k_debugfs_read_buf(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
u8 *buf = file->private_data;
return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}
static int ath9k_debugfs_release_buf(struct inode *inode, struct file *file)
{
vfree(file->private_data);
return 0;
}
#ifdef CONFIG_ATH_DEBUG
static ssize_t read_file_debug(struct file *file, char __user *user_buf,
@ -548,10 +562,10 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
PR("hw-tx-proc-desc: ", txprocdesc);
len += snprintf(buf + len, size - len,
"%s%11p%11p%10p%10p\n", "txq-memory-address:",
&(sc->tx.txq_map[WME_AC_BE]),
&(sc->tx.txq_map[WME_AC_BK]),
&(sc->tx.txq_map[WME_AC_VI]),
&(sc->tx.txq_map[WME_AC_VO]));
sc->tx.txq_map[WME_AC_BE],
sc->tx.txq_map[WME_AC_BK],
sc->tx.txq_map[WME_AC_VI],
sc->tx.txq_map[WME_AC_VO]);
if (len >= size)
goto done;
@ -1027,6 +1041,42 @@ static const struct file_operations fops_regval = {
.llseek = default_llseek,
};
#define REGDUMP_LINE_SIZE 20
static int open_file_regdump(struct inode *inode, struct file *file)
{
struct ath_softc *sc = inode->i_private;
unsigned int len = 0;
u8 *buf;
int i;
unsigned long num_regs, regdump_len, max_reg_offset;
max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x16bd4 : 0xb500;
num_regs = max_reg_offset / 4 + 1;
regdump_len = num_regs * REGDUMP_LINE_SIZE + 1;
buf = vmalloc(regdump_len);
if (!buf)
return -ENOMEM;
ath9k_ps_wakeup(sc);
for (i = 0; i < num_regs; i++)
len += scnprintf(buf + len, regdump_len - len,
"0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2));
ath9k_ps_restore(sc);
file->private_data = buf;
return 0;
}
static const struct file_operations fops_regdump = {
.open = open_file_regdump,
.read = ath9k_debugfs_read_buf,
.release = ath9k_debugfs_release_buf,
.owner = THIS_MODULE,
.llseek = default_llseek,/* read accesses f_pos */
};
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@ -1091,6 +1141,10 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca))
goto err;
if (!debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy,
sc, &fops_regdump))
goto err;
sc->debug.regidx = 0;
return 0;
err:
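A sizing note on the new regdump file: each line is emitted as "0x%06x 0x%08x\n", i.e. 8 + 1 + 10 + 1 = 20 characters, which is exactly REGDUMP_LINE_SIZE, and the extra byte in regdump_len leaves room for the NUL terminator that strlen() in ath9k_debugfs_read_buf() relies on. The buffer is vmalloc'd when the file is opened and freed by ath9k_debugfs_release_buf() on close, so a plain sequential read of the debugfs file returns the whole register dump.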

View File

@ -92,7 +92,7 @@ config B43_PHY_N
---help---
Support for the N-PHY.
This enables support for devices with N-PHY revision up to 2.
This enables support for devices with N-PHY.
Say N if you expect high stability and performance. Saying Y will not
affect other devices support and may provide support for basic needs.

View File

@ -1168,23 +1168,98 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
/* PHY rev 0, 1, 2 */
u8 i, j;
u8 code;
u16 tmp;
/* TODO: for PHY >= 3
s8 *lna1_gain, *lna2_gain;
u8 *gain_db, *gain_bits;
u16 *rfseq_init;
u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
*/
u8 rfseq_events[3] = { 6, 8, 7 };
u8 rfseq_delays[3] = { 10, 30, 1 };
/* PHY rev >= 3 */
bool ghz5;
bool ext_lna;
u16 rssi_gain;
struct nphy_gain_ctl_workaround_entry *e;
u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
if (dev->phy.rev >= 3) {
/* TODO */
/* Prepare values */
ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL)
& B43_NPHY_BANDCTL_5GHZ;
ext_lna = sprom->boardflags_lo & B43_BFL_EXTLNA;
e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
if (ghz5 && dev->phy.rev >= 5)
rssi_gain = 0x90;
else
rssi_gain = 0x50;
b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040);
/* Set Clip 2 detect */
b43_phy_set(dev, B43_NPHY_C1_CGAINI,
B43_NPHY_C1_CGAINI_CL2DETECT);
b43_phy_set(dev, B43_NPHY_C2_CGAINI,
B43_NPHY_C2_CGAINI_CL2DETECT);
b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC,
0x17);
b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC,
0x17);
b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0);
b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0);
b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00);
b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00);
b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN,
rssi_gain);
b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN,
rssi_gain);
b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC,
0x17);
b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC,
0x17);
b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF);
b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF);
b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain);
b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain);
b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain);
b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain);
b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db);
b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db);
b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits);
b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits);
b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain);
b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain);
b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits);
b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits);
b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
b43_phy_write(dev, 0x2A7, e->init_gain);
b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2,
e->rfseq_init);
b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
/* TODO: check defines. Do not match variables names */
b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain);
b43_phy_write(dev, 0x2A9, e->cliphi_gain);
b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain);
b43_phy_write(dev, 0x2AB, e->clipmd_gain);
b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain);
b43_phy_write(dev, 0x2AD, e->cliplo_gain);
b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin);
b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl);
b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu);
b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
} else {
/* Set Clip 2 detect */
b43_phy_set(dev, B43_NPHY_C1_CGAINI,
@ -1308,6 +1383,9 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
u16 tmp16;
u32 tmp32;
if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
b43_nphy_classifier(dev, 1, 0);
else
@ -1320,7 +1398,82 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
if (dev->phy.rev >= 3) {
tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
tmp32 &= 0xffffff;
b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
b43_phy_write(dev, 0x2AE, 0x000C);
/* TODO */
tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
0x2 : 0x9C40;
b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
b43_nphy_gain_ctrl_workarounds(dev);
b43_ntab_write(dev, B43_NTAB32(8, 0), 2);
b43_ntab_write(dev, B43_NTAB32(8, 16), 2);
/* TODO */
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
/* N PHY WAR TX Chain Update with hw_phytxchain as argument */
if ((bus->sprom.boardflags2_lo & B43_BFL2_APLL_WAR &&
b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
(bus->sprom.boardflags2_lo & B43_BFL2_GPLL_WAR &&
b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
tmp32 = 0x00088888;
else
tmp32 = 0x88888888;
b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
if (dev->phy.rev == 4 &&
b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
0x70);
b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
0x70);
}
b43_phy_write(dev, 0x224, 0x039C);
b43_phy_write(dev, 0x225, 0x0357);
b43_phy_write(dev, 0x226, 0x0317);
b43_phy_write(dev, 0x227, 0x02D7);
b43_phy_write(dev, 0x228, 0x039C);
b43_phy_write(dev, 0x229, 0x0357);
b43_phy_write(dev, 0x22A, 0x0317);
b43_phy_write(dev, 0x22B, 0x02D7);
b43_phy_write(dev, 0x22C, 0x039C);
b43_phy_write(dev, 0x22D, 0x0357);
b43_phy_write(dev, 0x22E, 0x0317);
b43_phy_write(dev, 0x22F, 0x02D7);
} else {
if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
nphy->band5g_pwrgain) {
@ -3878,10 +4031,14 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */
static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
{
b43_phy_write(dev, B43_NPHY_AFECTL_OVER,
on ? 0 : 0x7FFF);
u16 val = on ? 0 : 0x7FFF;
if (dev->phy.rev >= 3)
b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, val);
b43_phy_write(dev, B43_NPHY_AFECTL_OVER, val);
}
static int b43_nphy_op_switch_channel(struct b43_wldev *dev,

View File

@ -2709,6 +2709,79 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
{ 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */
};
struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
{ /* 2GHz */
{ /* PHY rev 3 */
{ 7, 11, 16, 23 },
{ -5, 6, 10, 14 },
{ 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
0x627E,
{ 0x613F, 0x613F, 0x613F, 0x613F },
0x107E, 0x0066, 0x0074,
0x18, 0x18, 0x18,
0x020D, 0x5,
},
{ /* PHY rev 4 */
{ 8, 12, 17, 25 },
{ -5, 6, 10, 14 },
{ 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
0x527E,
{ 0x513F, 0x513F, 0x513F, 0x513F },
0x007E, 0x0066, 0x0074,
0x18, 0x18, 0x18,
0x01A1, 0x5,
},
{ /* PHY rev 5+ */
{ 9, 13, 18, 26 },
{ -3, 7, 11, 16 },
{ 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
0x427E, /* invalid for external LNA! */
{ 0x413F, 0x413F, 0x413F, 0x413F }, /* invalid for external LNA! */
0x1076, 0x0066, 0x106A,
0xC, 0xC, 0xC,
0x01D0, 0x5,
},
},
{ /* 5GHz */
{ /* PHY rev 3 */
{ 7, 11, 17, 23 },
{ -6, 2, 6, 10 },
{ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 },
{ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 },
0x52DE,
{ 0x516F, 0x516F, 0x516F, 0x516F },
0x00DE, 0x00CA, 0x00CC,
0x1E, 0x1E, 0x1E,
0x01A1, 25,
},
{ /* PHY rev 4 */
{ 8, 12, 18, 23 },
{ -5, 2, 6, 10 },
{ 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
{ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 },
0x629E,
{ 0x614F, 0x614F, 0x614F, 0x614F },
0x029E, 0x1084, 0x0086,
0x24, 0x24, 0x24,
0x0107, 25,
},
{ /* PHY rev 5+ */
{ 6, 10, 16, 21 },
{ -7, 0, 4, 8 },
{ 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
{ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 },
0x729E,
{ 0x714F, 0x714F, 0x714F, 0x714F },
0x029E, 0x2084, 0x2086,
0x24, 0x24, 0x24,
0x00A9, 25,
},
},
};
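The table is indexed as nphy_gain_ctl_workaround[band][rev_idx], with band 0 for 2 GHz and 1 for 5 GHz, and rev_idx 0/1/2 selecting the PHY rev 3, rev 4 and rev 5+ rows, mirroring the lookup in b43_nphy_get_gain_ctl_workaround_ent() added below.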
static inline void assert_ntab_array_sizes(void)
{
#undef check
@ -2957,3 +3030,33 @@ void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
/* Volatile tables */
/* TODO */
}
struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
struct b43_wldev *dev, bool ghz5, bool ext_lna)
{
struct nphy_gain_ctl_workaround_entry *e;
u8 phy_idx;
B43_WARN_ON(dev->phy.rev < 3);
if (dev->phy.rev >= 5)
phy_idx = 2;
else if (dev->phy.rev == 4)
phy_idx = 1;
else
phy_idx = 0;
e = &nphy_gain_ctl_workaround[ghz5][phy_idx];
/* Only one entry differs for external LNA, so instead making whole
* table 2 times bigger, hack is here
*/
if (!ghz5 && dev->phy.rev >= 5 && ext_lna) {
e->rfseq_init[0] &= 0x0FFF;
e->rfseq_init[1] &= 0x0FFF;
e->rfseq_init[2] &= 0x0FFF;
e->rfseq_init[3] &= 0x0FFF;
e->init_gain &= 0x0FFF;
}
return e;
}
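Note that for the 2 GHz, rev 5+, external-LNA case the adjustment masks the entry of the shared static table in place rather than working on a copy, so the modified rfseq_init and init_gain values persist for later lookups; the in-function comment flags this as a deliberate space-saving hack.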

View File

@ -35,6 +35,31 @@ struct nphy_rf_control_override_rev3 {
u8 val_addr1;
};
struct nphy_gain_ctl_workaround_entry {
s8 lna1_gain[4];
s8 lna2_gain[4];
u8 gain_db[10];
u8 gain_bits[10];
u16 init_gain;
u16 rfseq_init[4];
u16 cliphi_gain;
u16 clipmd_gain;
u16 cliplo_gain;
u16 crsmin;
u16 crsminl;
u16 crsminu;
u16 nbclip;
u16 wlclip;
};
/* Get entry with workaround values for gain ctl. Does not return NULL. */
struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
struct b43_wldev *dev, bool ghz5, bool ext_lna);
/* Get the NPHY Channel Switch Table entry for a channel.
* Returns NULL on failure to find an entry. */
const struct b43_nphy_channeltab_entry_rev2 *

View File

@ -961,7 +961,7 @@ struct ipw_country_channel_info {
struct ipw_country_info {
u8 id;
u8 length;
u8 country_str[3];
u8 country_str[IEEE80211_COUNTRY_STRING_LEN];
struct ipw_country_channel_info groups[7];
} __packed;
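Same substitution as the country_string change in the first hunk above: IEEE80211_COUNTRY_STRING_LEN is 3, so the layout of the structure is unchanged.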

View File

@ -533,9 +533,10 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
void iwlagn_temperature(struct iwl_priv *priv)
{
/* store temperature from statistics (in Celsius) */
priv->temperature =
le32_to_cpu(priv->_agn.statistics.general.common.temperature);
/* store temperature from correct statistics (in Celsius) */
priv->temperature = le32_to_cpu((iwl_bt_statistics(priv)) ?
priv->_agn.statistics_bt.general.common.temperature :
priv->_agn.statistics.general.common.temperature);
iwl_tt_handler(priv);
}
@ -994,241 +995,6 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
return -1;
}
/* Calc max signal level (dBm) among 3 possible receivers */
static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp)
{
return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
}
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
u32 decrypt_out = 0;
if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
RX_RES_STATUS_STATION_FOUND)
decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
/* packet was not encrypted */
if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
RX_RES_STATUS_SEC_TYPE_NONE)
return decrypt_out;
/* packet was encrypted with unknown alg */
if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
RX_RES_STATUS_SEC_TYPE_ERR)
return decrypt_out;
/* decryption was not done in HW */
if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
RX_MPDU_RES_STATUS_DEC_DONE_MSK)
return decrypt_out;
switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
case RX_RES_STATUS_SEC_TYPE_CCMP:
/* alg is CCM: check MIC only */
if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
/* Bad MIC */
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
else
decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
break;
case RX_RES_STATUS_SEC_TYPE_TKIP:
if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
/* Bad TTAK */
decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
break;
}
/* fall through if TTAK OK */
default:
if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
else
decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
break;
}
IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
decrypt_in, decrypt_out);
return decrypt_out;
}
static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u16 len,
u32 ampdu_status,
struct iwl_rx_mem_buffer *rxb,
struct ieee80211_rx_status *stats)
{
struct sk_buff *skb;
__le16 fc = hdr->frame_control;
/* We only process data packets if the interface is open */
if (unlikely(!priv->is_open)) {
IWL_DEBUG_DROP_LIMIT(priv,
"Dropping packet while interface is not open.\n");
return;
}
/* In case of HW accelerated crypto and bad decryption, drop */
if (!priv->cfg->mod_params->sw_crypto &&
iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
return;
skb = dev_alloc_skb(128);
if (!skb) {
IWL_ERR(priv, "dev_alloc_skb failed\n");
return;
}
skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
iwl_update_stats(priv, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(priv->hw, skb);
priv->alloc_rxb_page--;
rxb->page = NULL;
}
/* Called for REPLY_RX (legacy ABG frames), or
* REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
void iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct ieee80211_hdr *header;
struct ieee80211_rx_status rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_phy_res *phy_res;
__le32 rx_pkt_status;
struct iwl_rx_mpdu_res_start *amsdu;
u32 len;
u32 ampdu_status;
u32 rate_n_flags;
/**
* REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
* REPLY_RX: physical layer info is in this buffer
* REPLY_RX_MPDU_CMD: physical layer info was sent in separate
* command and cached in priv->last_phy_res
*
* Here we set up local variables depending on which command is
* received.
*/
if (pkt->hdr.cmd == REPLY_RX) {
phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+ phy_res->cfg_phy_cnt);
len = le16_to_cpu(phy_res->byte_count);
rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
phy_res->cfg_phy_cnt + len);
ampdu_status = le32_to_cpu(rx_pkt_status);
} else {
if (!priv->_agn.last_phy_res_valid) {
IWL_ERR(priv, "MPDU frame without cached PHY data\n");
return;
}
phy_res = &priv->_agn.last_phy_res;
amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
len = le16_to_cpu(amsdu->byte_count);
rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
ampdu_status = iwlagn_translate_rx_status(priv,
le32_to_cpu(rx_pkt_status));
}
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
phy_res->cfg_phy_cnt);
return;
}
if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
!(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
le32_to_cpu(rx_pkt_status));
return;
}
/* This will be used in several places later */
rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
/* rx_status carries information about the packet to mac80211 */
rx_status.mactime = le64_to_cpu(phy_res->timestamp);
rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
rx_status.band);
rx_status.rate_idx =
iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
rx_status.flag = 0;
/* TSF isn't reliable. In order to allow smooth user experience,
* this W/A doesn't propagate it to the mac80211 */
/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
/* Find max signal strength (dBm) among 3 antenna/receiver chains */
rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
iwl_dbg_log_rx_data_frame(priv, len, header);
IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
rx_status.signal, (unsigned long long)rx_status.mactime);
/*
* "antenna number"
*
* It seems that the antenna field in the phy flags value
* is actually a bit field. This is undefined by radiotap,
* it wants an actual antenna number but I always get "7"
* for most legacy frames I receive indicating that the
* same frame was received on all three RX chains.
*
* I think this field should be removed in favor of a
* new 802.11n radiotap field "RX chains" that is defined
* as a bitmask.
*/
rx_status.antenna =
(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
>> RX_RES_PHY_FLAGS_ANTENNA_POS;
/* set the preamble flag if appropriate */
if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
rx_status.flag |= RX_FLAG_SHORTPRE;
/* Set up the HT phy flags */
if (rate_n_flags & RATE_MCS_HT_MSK)
rx_status.flag |= RX_FLAG_HT;
if (rate_n_flags & RATE_MCS_HT40_MSK)
rx_status.flag |= RX_FLAG_40MHZ;
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.flag |= RX_FLAG_SHORT_GI;
iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
rxb, &rx_status);
}
/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
* This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
priv->_agn.last_phy_res_valid = true;
memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
sizeof(struct iwl_rx_phy_res));
}
static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
struct ieee80211_vif *vif,
enum ieee80211_band band,

View File

@ -424,60 +424,6 @@ int iwl_hw_tx_queue_init(struct iwl_priv *priv,
return 0;
}
/******************************************************************************
*
* Generic RX handler implementations
*
******************************************************************************/
static void iwl_rx_reply_alive(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_alive_resp *palive;
struct delayed_work *pwork;
palive = &pkt->u.alive_frame;
IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
"0x%01X 0x%01X\n",
palive->is_valid, palive->ver_type,
palive->ver_subtype);
if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
memcpy(&priv->card_alive_init,
&pkt->u.alive_frame,
sizeof(struct iwl_init_alive_resp));
pwork = &priv->init_alive_start;
} else {
IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
memcpy(&priv->card_alive, &pkt->u.alive_frame,
sizeof(struct iwl_alive_resp));
pwork = &priv->alive_start;
}
/* We delay the ALIVE response by 5ms to
* give the HW RF Kill time to activate... */
if (palive->is_valid == UCODE_VALID_OK)
queue_delayed_work(priv->workqueue, pwork,
msecs_to_jiffies(5));
else {
IWL_WARN(priv, "%s uCode did not respond OK.\n",
(palive->ver_subtype == INITIALIZE_SUBTYPE) ?
"init" : "runtime");
/*
* If fail to load init uCode,
* let's try to load the init uCode again.
* We should not get into this situation, but if it
* does happen, we should not move on and loading "runtime"
* without proper calibrate the device.
*/
if (palive->ver_subtype == INITIALIZE_SUBTYPE)
priv->ucode_type = UCODE_NONE;
queue_work(priv->workqueue, &priv->restart);
}
}
static void iwl_bg_beacon_update(struct work_struct *work)
{
struct iwl_priv *priv =
@ -712,83 +658,6 @@ static void iwl_bg_ucode_trace(unsigned long data)
}
}
static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_DEBUG
u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
"tsf:0x%.8x%.8x rate:%d\n",
status & TX_STATUS_MSK,
beacon->beacon_notify_hdr.failure_frame,
le32_to_cpu(beacon->ibss_mgr_status),
le32_to_cpu(beacon->high_tsf),
le32_to_cpu(beacon->low_tsf), rate);
#endif
priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
queue_work(priv->workqueue, &priv->beacon_update);
}
/* Handle notification from uCode that card's power state is changing
* due to software, hardware, or critical temperature RFKILL */
static void iwl_rx_card_state_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
unsigned long status = priv->status;
IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
(flags & HW_CARD_DISABLED) ? "Kill" : "On",
(flags & SW_CARD_DISABLED) ? "Kill" : "On",
(flags & CT_CARD_DISABLED) ?
"Reached" : "Not reached");
if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
CT_CARD_DISABLED)) {
iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
iwl_write_direct32(priv, HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
if (!(flags & RXON_CARD_DISABLED)) {
iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
iwl_write_direct32(priv, HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
}
if (flags & CT_CARD_DISABLED)
iwl_tt_enter_ct_kill(priv);
}
if (!(flags & CT_CARD_DISABLED))
iwl_tt_exit_ct_kill(priv);
if (flags & HW_CARD_DISABLED)
set_bit(STATUS_RF_KILL_HW, &priv->status);
else
clear_bit(STATUS_RF_KILL_HW, &priv->status);
if (!(flags & RXON_CARD_DISABLED))
iwl_scan_cancel(priv);
if ((test_bit(STATUS_RF_KILL_HW, &status) !=
test_bit(STATUS_RF_KILL_HW, &priv->status)))
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
test_bit(STATUS_RF_KILL_HW, &priv->status));
else
wake_up_interruptible(&priv->wait_command_queue);
}
static void iwl_bg_tx_flush(struct work_struct *work)
{
struct iwl_priv *priv =
@ -807,51 +676,6 @@ static void iwl_bg_tx_flush(struct work_struct *work)
}
}
/**
* iwl_setup_rx_handlers - Initialize Rx handler callbacks
*
* Setup the RX handlers for each of the reply types sent from the uCode
* to the host.
*
* This function chains into the hardware specific files for them to setup
* any hardware specific handlers as well.
*/
static void iwl_setup_rx_handlers(struct iwl_priv *priv)
{
priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
iwl_rx_spectrum_measure_notif;
priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
iwl_rx_pm_debug_statistics_notif;
priv->rx_handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif;
/*
* The same handler is used for both the REPLY to a discrete
* statistics request from the host as well as for the periodic
* statistics notifications (after received beacons) from the uCode.
*/
priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics;
priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
iwl_setup_rx_scan_handlers(priv);
/* status change handler */
priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
iwl_rx_missed_beacon_notif;
/* Rx handlers */
priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
/* block ack */
priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
/* Set up hardware specific Rx handlers */
priv->cfg->ops->lib->rx_handler_setup(priv);
}
/**
* iwl_rx_handle - Main entry function for receiving responses from uCode
*
@ -3913,6 +3737,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->force_reset[IWL_FW_RESET].reset_duration =
IWL_DELAY_NEXT_FORCE_FW_RELOAD;
priv->rx_statistics_jiffies = jiffies;
/* Choose which receivers/antennas to use */
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv,

View File

@ -190,10 +190,7 @@ void iwlagn_rx_replenish_now(struct iwl_priv *priv);
void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
int iwlagn_rxq_stop(struct iwl_priv *priv);
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
void iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_setup_rx_handlers(struct iwl_priv *priv);
/* tx */
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
@ -243,14 +240,6 @@ static inline bool iwl_is_tx_success(u32 status)
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
/* rx */
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
/* scan */
int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
void iwlagn_post_scan(struct iwl_priv *priv);

View File

@ -869,33 +869,6 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
}
}
void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
/*
* MULTI-FIXME
* See iwl_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
if (priv->switch_rxon.switch_in_progress) {
if (!le32_to_cpu(csa->status) &&
(csa->channel == priv->switch_rxon.channel)) {
rxon->channel = csa->channel;
ctx->staging.channel = csa->channel;
IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
le16_to_cpu(csa->channel));
iwl_chswitch_done(priv, true);
} else {
IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
le16_to_cpu(csa->channel));
iwl_chswitch_done(priv, false);
}
}
}
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
@ -1245,42 +1218,6 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
&statistics_cmd);
}
void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
"notification for %s:\n", len,
get_cmd_string(pkt->hdr.cmd));
iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
}
void iwl_rx_reply_error(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
"seq 0x%04X ser 0x%08X\n",
le32_to_cpu(pkt->u.err_resp.error_type),
get_cmd_string(pkt->u.err_resp.cmd_id),
pkt->u.err_resp.cmd_id,
le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
le32_to_cpu(pkt->u.err_resp.error_info));
}
void iwl_clear_isr_stats(struct iwl_priv *priv)
{
memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));

View File

@ -441,10 +441,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
void iwl_connection_init_rx_config(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
void iwl_set_rate(struct iwl_priv *priv);
int iwl_set_decrypted_flag(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u32 decrypt_res,
struct ieee80211_rx_status *stats);
void iwl_irq_handle_error(struct iwl_priv *priv);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
@ -493,15 +489,6 @@ static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
{
}
#endif
/*****************************************************
* RX handlers.
* **************************************************/
void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_rx_reply_error(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
/*****************************************************
* RX
@ -513,11 +500,8 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
struct iwl_rx_queue *q);
int iwl_rx_queue_space(const struct iwl_rx_queue *q);
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/* Handlers */
void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/* TX helpers */

View File

@ -1261,8 +1261,8 @@ struct iwl_priv {
/* track IBSS manager (last beacon) status */
u32 ibss_manager;
/* storing the jiffies when the plcp error rate is received */
unsigned long plcp_jiffies;
/* jiffies when last recovery from statistics was performed */
unsigned long rx_statistics_jiffies;
/* force reset */
struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
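rx_statistics_jiffies replaces plcp_jiffies as the single timestamp behind the statistics-based recovery logic: it is initialised in iwl_init_drv() and refreshed in iwl_recover_from_statistics() whenever a statistics notification is stored, so the elapsed-time window handed to the PLCP health check is measured from the last stored statistics.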

View File

@ -29,6 +29,7 @@
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "iwl-eeprom.h"
@ -38,7 +39,14 @@
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-calib.h"
/************************** RX-FUNCTIONS ****************************/
#include "iwl-agn.h"
/******************************************************************************
*
* RX path functions
*
******************************************************************************/
/*
* Rx theory of operation
*
@ -211,7 +219,104 @@ err_bd:
return -ENOMEM;
}
void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
/******************************************************************************
*
* Generic RX handler implementations
*
******************************************************************************/
static void iwl_rx_reply_alive(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_alive_resp *palive;
struct delayed_work *pwork;
palive = &pkt->u.alive_frame;
IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
"0x%01X 0x%01X\n",
palive->is_valid, palive->ver_type,
palive->ver_subtype);
if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
memcpy(&priv->card_alive_init,
&pkt->u.alive_frame,
sizeof(struct iwl_init_alive_resp));
pwork = &priv->init_alive_start;
} else {
IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
memcpy(&priv->card_alive, &pkt->u.alive_frame,
sizeof(struct iwl_alive_resp));
pwork = &priv->alive_start;
}
/* We delay the ALIVE response by 5ms to
* give the HW RF Kill time to activate... */
if (palive->is_valid == UCODE_VALID_OK)
queue_delayed_work(priv->workqueue, pwork,
msecs_to_jiffies(5));
else {
IWL_WARN(priv, "%s uCode did not respond OK.\n",
(palive->ver_subtype == INITIALIZE_SUBTYPE) ?
"init" : "runtime");
/*
* If fail to load init uCode,
* let's try to load the init uCode again.
* We should not get into this situation, but if it
* does happen, we should not move on and loading "runtime"
* without proper calibrate the device.
*/
if (palive->ver_subtype == INITIALIZE_SUBTYPE)
priv->ucode_type = UCODE_NONE;
queue_work(priv->workqueue, &priv->restart);
}
}
static void iwl_rx_reply_error(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
"seq 0x%04X ser 0x%08X\n",
le32_to_cpu(pkt->u.err_resp.error_type),
get_cmd_string(pkt->u.err_resp.cmd_id),
pkt->u.err_resp.cmd_id,
le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
le32_to_cpu(pkt->u.err_resp.error_info));
}
static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
/*
* MULTI-FIXME
* See iwl_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
if (priv->switch_rxon.switch_in_progress) {
if (!le32_to_cpu(csa->status) &&
(csa->channel == priv->switch_rxon.channel)) {
rxon->channel = csa->channel;
ctx->staging.channel = csa->channel;
IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
le16_to_cpu(csa->channel));
iwl_chswitch_done(priv, true);
} else {
IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
le16_to_cpu(csa->channel));
iwl_chswitch_done(priv, false);
}
}
}
static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@ -227,6 +332,52 @@ void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
priv->measurement_status |= MEASUREMENT_READY;
}
static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
"notification for %s:\n", len,
get_cmd_string(pkt->hdr.cmd));
iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
}
static void iwl_rx_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_DEBUG
u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
"tsf:0x%.8x%.8x rate:%d\n",
status & TX_STATUS_MSK,
beacon->beacon_notify_hdr.failure_frame,
le32_to_cpu(beacon->ibss_mgr_status),
le32_to_cpu(beacon->high_tsf),
le32_to_cpu(beacon->low_tsf), rate);
#endif
priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
queue_work(priv->workqueue, &priv->beacon_update);
}
/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
#define ACK_CNT_RATIO (50)
#define BA_TIMEOUT_CNT (5)
@ -298,92 +449,72 @@ static bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt
* When the plcp error is exceeding the thresholds, reset the radio
* to improve the throughput.
*/
static bool iwl_good_plcp_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
static bool iwl_good_plcp_health(struct iwl_priv *priv,
struct iwl_rx_packet *pkt, unsigned int msecs)
{
bool rc = true;
int combined_plcp_delta;
unsigned int plcp_msec;
unsigned long plcp_received_jiffies;
int delta;
int threshold = priv->cfg->base_params->plcp_delta_threshold;
if (priv->cfg->base_params->plcp_delta_threshold ==
IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
return rc;
return true;
}
/*
* check for plcp_err and trigger radio reset if it exceeds
* the plcp error threshold plcp_delta.
*/
plcp_received_jiffies = jiffies;
plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
(long) priv->plcp_jiffies);
priv->plcp_jiffies = plcp_received_jiffies;
/*
* check to make sure plcp_msec is not 0 to prevent division
* by zero.
*/
if (plcp_msec) {
struct statistics_rx_phy *ofdm;
struct statistics_rx_ht_phy *ofdm_ht;
if (iwl_bt_statistics(priv)) {
struct statistics_rx_bt *cur, *old;
if (iwl_bt_statistics(priv)) {
ofdm = &pkt->u.stats_bt.rx.ofdm;
ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht;
combined_plcp_delta =
(le32_to_cpu(ofdm->plcp_err) -
le32_to_cpu(priv->_agn.statistics_bt.
rx.ofdm.plcp_err)) +
(le32_to_cpu(ofdm_ht->plcp_err) -
le32_to_cpu(priv->_agn.statistics_bt.
rx.ofdm_ht.plcp_err));
} else {
ofdm = &pkt->u.stats.rx.ofdm;
ofdm_ht = &pkt->u.stats.rx.ofdm_ht;
combined_plcp_delta =
(le32_to_cpu(ofdm->plcp_err) -
le32_to_cpu(priv->_agn.statistics.
rx.ofdm.plcp_err)) +
(le32_to_cpu(ofdm_ht->plcp_err) -
le32_to_cpu(priv->_agn.statistics.
rx.ofdm_ht.plcp_err));
}
cur = &pkt->u.stats_bt.rx;
old = &priv->_agn.statistics_bt.rx;
if ((combined_plcp_delta > 0) &&
((combined_plcp_delta * 100) / plcp_msec) >
priv->cfg->base_params->plcp_delta_threshold) {
/*
* if plcp_err exceed the threshold,
* the following data is printed in csv format:
* Text: plcp_err exceeded %d,
* Received ofdm.plcp_err,
* Current ofdm.plcp_err,
* Received ofdm_ht.plcp_err,
* Current ofdm_ht.plcp_err,
* combined_plcp_delta,
* plcp_msec
*/
IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
"%u, %u, %u, %u, %d, %u mSecs\n",
priv->cfg->base_params->plcp_delta_threshold,
le32_to_cpu(ofdm->plcp_err),
le32_to_cpu(ofdm->plcp_err),
le32_to_cpu(ofdm_ht->plcp_err),
le32_to_cpu(ofdm_ht->plcp_err),
combined_plcp_delta, plcp_msec);
delta = le32_to_cpu(cur->ofdm.plcp_err) -
le32_to_cpu(old->ofdm.plcp_err) +
le32_to_cpu(cur->ofdm_ht.plcp_err) -
le32_to_cpu(old->ofdm_ht.plcp_err);
} else {
struct statistics_rx *cur, *old;
rc = false;
}
cur = &pkt->u.stats.rx;
old = &priv->_agn.statistics.rx;
delta = le32_to_cpu(cur->ofdm.plcp_err) -
le32_to_cpu(old->ofdm.plcp_err) +
le32_to_cpu(cur->ofdm_ht.plcp_err) -
le32_to_cpu(old->ofdm_ht.plcp_err);
}
return rc;
/* Can be negative if the firmware reset its statistics */
if (delta <= 0)
return true;
if ((delta * 100 / msecs) > threshold) {
IWL_DEBUG_RADIO(priv,
"plcp health threshold %u delta %d msecs %u\n",
threshold, delta, msecs);
return false;
}
return true;
}
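A worked example with illustrative numbers only: 120 new OFDM plus OFDM-HT PLCP errors over a 200 ms statistics interval give delta * 100 / msecs = 120 * 100 / 200 = 60, so with a plcp_delta_threshold of 50 the check fails and the caller forces an RF reset; a negative delta (the firmware reset its counters) is now simply ignored.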
static void iwl_recover_from_statistics(struct iwl_priv *priv, struct iwl_rx_packet *pkt)
static void iwl_recover_from_statistics(struct iwl_priv *priv,
struct iwl_rx_packet *pkt)
{
const struct iwl_mod_params *mod_params = priv->cfg->mod_params;
unsigned int msecs;
unsigned long stamp;
if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
!iwl_is_any_associated(priv))
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
stamp = jiffies;
msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);
/* Only gather statistics and update time stamp when not associated */
if (!iwl_is_any_associated(priv))
goto out;
/* Do not check/recover when do not have enough statistics data */
if (msecs < 99)
return;
if (mod_params->ack_check && !iwl_good_ack_health(priv, pkt)) {
@ -392,8 +523,18 @@ static void iwl_recover_from_statistics(struct iwl_priv *priv, struct iwl_rx_pac
return;
}
if (mod_params->plcp_check && !iwl_good_plcp_health(priv, pkt))
if (mod_params->plcp_check && !iwl_good_plcp_health(priv, pkt, msecs))
iwl_force_reset(priv, IWL_RF_RESET, false);
out:
if (iwl_bt_statistics(priv))
memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
sizeof(priv->_agn.statistics_bt));
else
memcpy(&priv->_agn.statistics, &pkt->u.stats,
sizeof(priv->_agn.statistics));
priv->rx_statistics_jiffies = stamp;
}
/* Calculate noise level, based on measurements during network silence just
@ -442,7 +583,6 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
last_rx_noise);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
/*
* based on the assumption of all statistics counter are in DWORD
* FIXME: This function is for debugging, do not deal with
@ -451,6 +591,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
static void iwl_accumulative_statistics(struct iwl_priv *priv,
__le32 *stats)
{
#ifdef CONFIG_IWLWIFI_DEBUGFS
int i, size;
__le32 *prev_stats;
u32 *accum_stats;
@ -498,14 +639,13 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
accum_tx->tx_power.ant_a = tx->tx_power.ant_a;
accum_tx->tx_power.ant_b = tx->tx_power.ant_b;
accum_tx->tx_power.ant_c = tx->tx_power.ant_c;
}
#endif
}
#define REG_RECALIB_PERIOD (60)
void iwl_rx_statistics(struct iwl_priv *priv,
static void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
const int reg_recalib_period = 60;
int change;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@ -522,10 +662,8 @@ void iwl_rx_statistics(struct iwl_priv *priv,
STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
(pkt->u.stats_bt.flag &
STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
#endif
iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt);
} else {
IWL_DEBUG_RX(priv,
"Statistics notification received (%d vs %d).\n",
@ -539,29 +677,20 @@ void iwl_rx_statistics(struct iwl_priv *priv,
STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
(pkt->u.stats.flag &
STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif
iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
}
iwl_recover_from_statistics(priv, pkt);
if (iwl_bt_statistics(priv))
memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
sizeof(priv->_agn.statistics_bt));
else
memcpy(&priv->_agn.statistics, &pkt->u.stats,
sizeof(priv->_agn.statistics));
set_bit(STATUS_STATISTICS, &priv->status);
/* Reschedule the statistics timer to occur in
* REG_RECALIB_PERIOD seconds to ensure we get a
* reg_recalib_period seconds to ensure we get a
* thermal update even if the uCode doesn't give
* us one */
mod_timer(&priv->statistics_periodic, jiffies +
msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
msecs_to_jiffies(reg_recalib_period * 1000));
if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
(pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
@ -572,8 +701,8 @@ void iwl_rx_statistics(struct iwl_priv *priv,
priv->cfg->ops->lib->temp_ops.temperature(priv);
}
void iwl_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
static void iwl_rx_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@ -597,8 +726,61 @@ void iwl_reply_statistics(struct iwl_priv *priv,
iwl_rx_statistics(priv, rxb);
}
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
/* Handle notification from uCode that card's power state is changing
* due to software, hardware, or critical temperature RFKILL */
static void iwl_rx_card_state_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
unsigned long status = priv->status;
IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
(flags & HW_CARD_DISABLED) ? "Kill" : "On",
(flags & SW_CARD_DISABLED) ? "Kill" : "On",
(flags & CT_CARD_DISABLED) ?
"Reached" : "Not reached");
if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
CT_CARD_DISABLED)) {
iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
iwl_write_direct32(priv, HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
if (!(flags & RXON_CARD_DISABLED)) {
iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
iwl_write_direct32(priv, HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
}
if (flags & CT_CARD_DISABLED)
iwl_tt_enter_ct_kill(priv);
}
if (!(flags & CT_CARD_DISABLED))
iwl_tt_exit_ct_kill(priv);
if (flags & HW_CARD_DISABLED)
set_bit(STATUS_RF_KILL_HW, &priv->status);
else
clear_bit(STATUS_RF_KILL_HW, &priv->status);
if (!(flags & RXON_CARD_DISABLED))
iwl_scan_cancel(priv);
if ((test_bit(STATUS_RF_KILL_HW, &status) !=
test_bit(STATUS_RF_KILL_HW, &priv->status)))
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
test_bit(STATUS_RF_KILL_HW, &priv->status));
else
wake_up_interruptible(&priv->wait_command_queue);
}
static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@ -618,13 +800,25 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
}
}
/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
* This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
priv->_agn.last_phy_res_valid = true;
memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
sizeof(struct iwl_rx_phy_res));
}
/*
* returns non-zero if packet should be dropped
*/
int iwl_set_decrypted_flag(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u32 decrypt_res,
struct ieee80211_rx_status *stats)
static int iwl_set_decrypted_flag(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u32 decrypt_res,
struct ieee80211_rx_status *stats)
{
u16 fc = le16_to_cpu(hdr->frame_control);
@ -669,3 +863,264 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
}
return 0;
}
static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u16 len,
u32 ampdu_status,
struct iwl_rx_mem_buffer *rxb,
struct ieee80211_rx_status *stats)
{
struct sk_buff *skb;
__le16 fc = hdr->frame_control;
/* We only process data packets if the interface is open */
if (unlikely(!priv->is_open)) {
IWL_DEBUG_DROP_LIMIT(priv,
"Dropping packet while interface is not open.\n");
return;
}
/* In case of HW accelerated crypto and bad decryption, drop */
if (!priv->cfg->mod_params->sw_crypto &&
iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
return;
skb = dev_alloc_skb(128);
if (!skb) {
IWL_ERR(priv, "dev_alloc_skb failed\n");
return;
}
skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
iwl_update_stats(priv, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(priv->hw, skb);
priv->alloc_rxb_page--;
rxb->page = NULL;
}
static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
u32 decrypt_out = 0;
if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
RX_RES_STATUS_STATION_FOUND)
decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
/* packet was not encrypted */
if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
RX_RES_STATUS_SEC_TYPE_NONE)
return decrypt_out;
/* packet was encrypted with unknown alg */
if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
RX_RES_STATUS_SEC_TYPE_ERR)
return decrypt_out;
/* decryption was not done in HW */
if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
RX_MPDU_RES_STATUS_DEC_DONE_MSK)
return decrypt_out;
switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
case RX_RES_STATUS_SEC_TYPE_CCMP:
/* alg is CCM: check MIC only */
if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
/* Bad MIC */
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
else
decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
break;
case RX_RES_STATUS_SEC_TYPE_TKIP:
if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
/* Bad TTAK */
decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
break;
}
/* fall through if TTAK OK */
default:
if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
else
decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
break;
}
IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
decrypt_in, decrypt_out);
return decrypt_out;
}
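/*
 * Illustrative example (not part of this patch; the helper name is
 * hypothetical): how iwl_translate_rx_status() above maps the MPDU-style
 * status bits onto the legacy REPLY_RX-style bits for a CCMP frame whose
 * MIC checked out.
 */
static u32 example_translate_ccmp_ok(struct iwl_priv *priv)
{
	u32 in = RX_RES_STATUS_STATION_FOUND |
		 RX_RES_STATUS_SEC_TYPE_CCMP |
		 RX_MPDU_RES_STATUS_DEC_DONE_MSK |
		 RX_MPDU_RES_STATUS_MIC_OK;

	/* Expected result: STATION_FOUND | NO_STATION_INFO_MISMATCH |
	 *                  SEC_TYPE_CCMP | DECRYPT_OK */
	return iwl_translate_rx_status(priv, in);
}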
/* Called for REPLY_RX (legacy ABG frames), or
* REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
static void iwl_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct ieee80211_hdr *header;
struct ieee80211_rx_status rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_phy_res *phy_res;
__le32 rx_pkt_status;
struct iwl_rx_mpdu_res_start *amsdu;
u32 len;
u32 ampdu_status;
u32 rate_n_flags;
/**
* REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
* REPLY_RX: physical layer info is in this buffer
* REPLY_RX_MPDU_CMD: physical layer info was sent in separate
* command and cached in priv->last_phy_res
*
* Here we set up local variables depending on which command is
* received.
*/
if (pkt->hdr.cmd == REPLY_RX) {
phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+ phy_res->cfg_phy_cnt);
len = le16_to_cpu(phy_res->byte_count);
rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
phy_res->cfg_phy_cnt + len);
ampdu_status = le32_to_cpu(rx_pkt_status);
} else {
if (!priv->_agn.last_phy_res_valid) {
IWL_ERR(priv, "MPDU frame without cached PHY data\n");
return;
}
phy_res = &priv->_agn.last_phy_res;
amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
len = le16_to_cpu(amsdu->byte_count);
rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
ampdu_status = iwl_translate_rx_status(priv,
le32_to_cpu(rx_pkt_status));
}
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
phy_res->cfg_phy_cnt);
return;
}
if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
!(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
le32_to_cpu(rx_pkt_status));
return;
}
/* This will be used in several places later */
rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
/* rx_status carries information about the packet to mac80211 */
rx_status.mactime = le64_to_cpu(phy_res->timestamp);
rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
rx_status.band);
rx_status.rate_idx =
iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
rx_status.flag = 0;
/* TSF isn't reliable. In order to allow a smooth user experience,
* this W/A doesn't propagate it to mac80211 */
/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
/* Find max signal strength (dBm) among 3 antenna/receiver chains */
rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res);
iwl_dbg_log_rx_data_frame(priv, len, header);
IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
rx_status.signal, (unsigned long long)rx_status.mactime);
/*
* "antenna number"
*
* It seems that the antenna field in the phy flags value
* is actually a bit field. This is undefined by radiotap;
* it wants an actual antenna number but I always get "7"
* for most legacy frames I receive indicating that the
* same frame was received on all three RX chains.
*
* I think this field should be removed in favor of a
* new 802.11n radiotap field "RX chains" that is defined
* as a bitmask.
*/
rx_status.antenna =
(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
>> RX_RES_PHY_FLAGS_ANTENNA_POS;
/* set the preamble flag if appropriate */
if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
rx_status.flag |= RX_FLAG_SHORTPRE;
/* Set up the HT phy flags */
if (rate_n_flags & RATE_MCS_HT_MSK)
rx_status.flag |= RX_FLAG_HT;
if (rate_n_flags & RATE_MCS_HT40_MSK)
rx_status.flag |= RX_FLAG_40MHZ;
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.flag |= RX_FLAG_SHORT_GI;
iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
rxb, &rx_status);
}
/**
* iwl_setup_rx_handlers - Initialize Rx handler callbacks
*
* Set up the RX handlers for each of the reply types sent from the uCode
* to the host.
*/
void iwl_setup_rx_handlers(struct iwl_priv *priv)
{
void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
handlers = priv->rx_handlers;
handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
handlers[REPLY_ERROR] = iwl_rx_reply_error;
handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif;
handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif;
handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
/*
* The same handler is used for both the REPLY to a discrete
* statistics request from the host as well as for the periodic
* statistics notifications (after received beacons) from the uCode.
*/
handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics;
handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
iwl_setup_rx_scan_handlers(priv);
handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif;
/* Rx handlers */
handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
/* block ack */
handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
/* Set up hardware specific Rx handlers */
priv->cfg->ops->lib->rx_handler_setup(priv);
}
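/*
 * Illustrative sketch (not part of this patch; the function name is
 * hypothetical, simplified from the driver's RX dispatch loop): the table
 * filled in above is consumed by indexing priv->rx_handlers with the
 * uCode reply opcode.
 */
static void example_rx_dispatch(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	if (priv->rx_handlers[pkt->hdr.cmd])
		/* Run the handler registered for this notification */
		priv->rx_handlers[pkt->hdr.cmd](priv, rxb);
	else
		/* Opcodes without a registered handler are ignored */
		IWL_DEBUG_RX(priv, "Unhandled notification 0x%02x\n",
			     pkt->hdr.cmd);
}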

View File

@ -387,7 +387,7 @@ struct lbs_offset_value {
struct mrvl_ie_domain_param_set {
struct mrvl_ie_header header;
u8 country_code[3];
u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
struct ieee80211_country_ie_triplet triplet[MAX_11D_TRIPLETS];
} __packed;

View File

@ -1056,13 +1056,12 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
}
memset(rxq->rxd, 0, size);
rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL);
rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL);
if (rxq->buf == NULL) {
wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n");
pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
return -ENOMEM;
}
memset(rxq->buf, 0, MWL8K_RX_DESCS * sizeof(*rxq->buf));
for (i = 0; i < MWL8K_RX_DESCS; i++) {
int desc_size;
@ -1347,13 +1346,12 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
}
memset(txq->txd, 0, size);
txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL);
txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL);
if (txq->skb == NULL) {
wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n");
pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
return -ENOMEM;
}
memset(txq->skb, 0, MWL8K_TX_DESCS * sizeof(*txq->skb));
for (i = 0; i < MWL8K_TX_DESCS; i++) {
struct mwl8k_tx_desc *tx_desc;
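/*
 * Illustrative sketch (not part of this patch; the helper is hypothetical):
 * in both the RX and TX buffer lists above, kcalloc() replaces the open-coded
 * kmalloc()+memset() pair; it returns zeroed memory and also checks the
 * n * size multiplication for overflow.
 */
static void *example_zeroed_array(size_t n, size_t size)
{
	/* old pattern: p = kmalloc(n * size, GFP_KERNEL); memset(p, 0, n * size); */
	return kcalloc(n, size, GFP_KERNEL);
}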

View File

@ -43,9 +43,8 @@ config P54_SPI
tristate "Prism54 SPI (stlc45xx) support"
depends on P54_COMMON && SPI_MASTER && GENERIC_HARDIRQS
---help---
This driver is for stlc4550 or stlc4560 based wireless chips.
This driver is experimental, untested and will probably only work on
Nokia's N800/N810 Portable Internet Tablet.
This driver is for stlc4550 or stlc4560 based wireless chips
such as Nokia's N800/N810 Portable Internet Tablet.
If you choose to build a module, it'll be called p54spi.

View File

@ -779,7 +779,7 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg);
rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
@ -795,13 +795,13 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
entry_priv->desc_dma);
rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
entry_priv = rt2x00dev->bcn[1].entries[0].priv_data;
entry_priv = rt2x00dev->atim->entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
entry_priv->desc_dma);
rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
entry_priv = rt2x00dev->bcn[0].entries[0].priv_data;
entry_priv = rt2x00dev->bcn->entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
entry_priv->desc_dma);
@ -1131,19 +1131,21 @@ static void rt2400pci_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 3, &word);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_REGNUM, 5);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_BUSY, 1);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_REGNUM, 6);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_BUSY, 1);
rt2x00_desc_write(txd, 3, word);
rt2x00_desc_read(txd, 4, &word);
rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW, txdesc->length_low);
rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW,
txdesc->u.plcp.length_low);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_REGNUM, 8);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_BUSY, 1);
rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, txdesc->length_high);
rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH,
txdesc->u.plcp.length_high);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_REGNUM, 7);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1);
rt2x00_desc_write(txd, 4, word);
@ -1164,7 +1166,7 @@ static void rt2400pci_write_tx_desc(struct queue_entry *entry,
test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_RTS,
test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
rt2x00_desc_write(txd, 0, word);
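/*
 * Assumed layout (not taken from this patch; field widths are illustrative):
 * the txdesc->u.plcp.* accesses above suggest that the per-rate fields of the
 * TX entry descriptor were folded into a union, so legacy PLCP parameters and
 * 802.11n HT parameters no longer occupy separate members. Roughly:
 */
struct txentry_rate_params_sketch {
	union {
		struct {		/* legacy CCK/OFDM (PLCP) rates */
			u16 length_high;
			u16 length_low;
			u8 signal;
			u8 service;
			u8 ifs;
		} plcp;
		struct {		/* 802.11n HT rates */
			u16 ba_size;
			u8 mpdu_density;
			u8 txop;
			u8 mcs;
			u8 stbc;
		} ht;
	} u;
};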
@ -1276,7 +1278,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue_idx)
{
struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
struct queue_entry_priv_pci *entry_priv;
struct queue_entry *entry;
struct txdone_entry_desc txdesc;
@ -1315,27 +1317,25 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
static void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
struct rt2x00_field32 irq_field)
{
unsigned long flags;
u32 reg;
/*
* Enable a single interrupt. The interrupt mask register
* access needs locking.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock_irq(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
rt2x00_set_field32(&reg, irq_field, 0);
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
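/*
 * Illustrative sketch (not part of this patch; the helper is hypothetical):
 * the locking conversions above, and the matching ones in rt2500pci, drop the
 * irqsave/irqrestore variants because the calling context is known. Tasklets
 * run in softirq context with interrupts enabled, so spin_lock_irq() suffices,
 * while the hard-IRQ handler already runs with interrupts disabled and can use
 * a plain spin_lock().
 */
static void example_mask_from_tasklet(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	spin_lock_irq(&rt2x00dev->irqmask_lock);	/* softirq context */
	rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
	reg = 0xffffffff;				/* mask every interrupt source */
	rt2x00pci_register_write(rt2x00dev, CSR8, reg);
	spin_unlock_irq(&rt2x00dev->irqmask_lock);
}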
static void rt2400pci_txstatus_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
u32 reg;
unsigned long flags;
/*
* Handle all tx queues.
@ -1347,7 +1347,7 @@ static void rt2400pci_txstatus_tasklet(unsigned long data)
/*
* Enable all TXDONE interrupts again.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock_irq(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
@ -1355,7 +1355,7 @@ static void rt2400pci_txstatus_tasklet(unsigned long data)
rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
static void rt2400pci_tbtt_tasklet(unsigned long data)
@ -1376,7 +1376,6 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
u32 reg, mask;
unsigned long flags;
/*
* Get the interrupt sources & save them to a local variable.
@ -1418,13 +1417,13 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
* Disable all interrupts for which a tasklet was scheduled right now,
* the tasklet will reenable the appropriate interrupts.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
reg |= mask;
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock(&rt2x00dev->irqmask_lock);
@ -1641,6 +1640,7 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
*/
__set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags);
/*
* Set the rssi offset.

View File

@ -293,7 +293,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
struct rt2x00intf_conf *conf,
const unsigned int flags)
{
struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
struct data_queue *queue = rt2x00dev->bcn;
unsigned int bcn_preload;
u32 reg;
@ -865,7 +865,7 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg);
rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
@ -881,13 +881,13 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
entry_priv->desc_dma);
rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
entry_priv = rt2x00dev->bcn[1].entries[0].priv_data;
entry_priv = rt2x00dev->atim->entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
entry_priv->desc_dma);
rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
entry_priv = rt2x00dev->bcn[0].entries[0].priv_data;
entry_priv = rt2x00dev->bcn->entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
entry_priv->desc_dma);
@ -1287,10 +1287,12 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 3, &word);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, txdesc->length_low);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, txdesc->length_high);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW,
txdesc->u.plcp.length_low);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH,
txdesc->u.plcp.length_high);
rt2x00_desc_write(txd, 3, word);
rt2x00_desc_read(txd, 10, &word);
@ -1315,7 +1317,7 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry,
rt2x00_set_field32(&word, TXD_W0_OFDM,
(txdesc->rate_mode == RATE_MODE_OFDM));
rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1);
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
@ -1408,7 +1410,7 @@ static void rt2500pci_fill_rxdone(struct queue_entry *entry,
static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue_idx)
{
struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
struct queue_entry_priv_pci *entry_priv;
struct queue_entry *entry;
struct txdone_entry_desc txdesc;
@ -1447,27 +1449,25 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
static void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
struct rt2x00_field32 irq_field)
{
unsigned long flags;
u32 reg;
/*
* Enable a single interrupt. The interrupt mask register
* access needs locking.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock_irq(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
rt2x00_set_field32(&reg, irq_field, 0);
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
static void rt2500pci_txstatus_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
u32 reg;
unsigned long flags;
/*
* Handle all tx queues.
@ -1479,7 +1479,7 @@ static void rt2500pci_txstatus_tasklet(unsigned long data)
/*
* Enable all TXDONE interrupts again.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock_irq(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
@ -1487,7 +1487,7 @@ static void rt2500pci_txstatus_tasklet(unsigned long data)
rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
static void rt2500pci_tbtt_tasklet(unsigned long data)
@ -1508,7 +1508,6 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
u32 reg, mask;
unsigned long flags;
/*
* Get the interrupt sources & save them to a local variable.
@ -1550,13 +1549,13 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
* Disable all interrupts for which a tasklet was scheduled right now,
* the tasklet will reenable the appropriate interrupts.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
reg |= mask;
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock(&rt2x00dev->irqmask_lock);
return IRQ_HANDLED;
}
@ -1959,6 +1958,7 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
*/
__set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags);
/*
* Set the rssi offset.

View File

@ -1100,7 +1100,7 @@ static void rt2500usb_write_tx_desc(struct queue_entry *entry,
(txdesc->rate_mode == RATE_MODE_OFDM));
rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
@ -1114,10 +1114,12 @@ static void rt2500usb_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
txdesc->u.plcp.length_low);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
txdesc->u.plcp.length_high);
rt2x00_desc_write(txd, 2, word);
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
@ -1795,6 +1797,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
__set_bit(DRIVER_REQUIRE_COPY_IV, &rt2x00dev->flags);
}
__set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags);
/*
* Set the rssi offset.

View File

@ -66,7 +66,7 @@
#define RF3320 0x000b
#define RF3322 0x000c
#define RF3853 0x000d
#define RF5390 0x5390
#define RF5390 0x5390
/*
* Chipset revisions.
@ -79,7 +79,7 @@
#define REV_RT3071E 0x0211
#define REV_RT3090E 0x0211
#define REV_RT3390E 0x0211
#define REV_RT5390F 0x0502
#define REV_RT5390F 0x0502
/*
* Signal information.
@ -126,9 +126,9 @@
/*
* AUX_CTRL: Aux/PCI-E related configuration
*/
#define AUX_CTRL 0x10c
#define AUX_CTRL_WAKE_PCIE_EN FIELD32(0x00000002)
#define AUX_CTRL_FORCE_PCIE_CLK FIELD32(0x00000400)
#define AUX_CTRL 0x10c
#define AUX_CTRL_WAKE_PCIE_EN FIELD32(0x00000002)
#define AUX_CTRL_FORCE_PCIE_CLK FIELD32(0x00000400)
/*
* OPT_14: Unknown register used by rt3xxx devices.
@ -464,7 +464,7 @@
*/
#define RF_CSR_CFG 0x0500
#define RF_CSR_CFG_DATA FIELD32(0x000000ff)
#define RF_CSR_CFG_REGNUM FIELD32(0x00003f00)
#define RF_CSR_CFG_REGNUM FIELD32(0x00003f00)
#define RF_CSR_CFG_WRITE FIELD32(0x00010000)
#define RF_CSR_CFG_BUSY FIELD32(0x00020000)
@ -1746,13 +1746,13 @@ struct mac_iveiv_entry {
*/
#define BBP4_TX_BF FIELD8(0x01)
#define BBP4_BANDWIDTH FIELD8(0x18)
#define BBP4_MAC_IF_CTRL FIELD8(0x40)
#define BBP4_MAC_IF_CTRL FIELD8(0x40)
/*
* BBP 109
*/
#define BBP109_TX0_POWER FIELD8(0x0f)
#define BBP109_TX1_POWER FIELD8(0xf0)
#define BBP109_TX0_POWER FIELD8(0x0f)
#define BBP109_TX1_POWER FIELD8(0xf0)
/*
* BBP 138: Unknown
@ -1765,7 +1765,7 @@ struct mac_iveiv_entry {
/*
* BBP 152: Rx Ant
*/
#define BBP152_RX_DEFAULT_ANT FIELD8(0x80)
#define BBP152_RX_DEFAULT_ANT FIELD8(0x80)
/*
* RFCSR registers
@ -1776,7 +1776,7 @@ struct mac_iveiv_entry {
* RFCSR 1:
*/
#define RFCSR1_RF_BLOCK_EN FIELD8(0x01)
#define RFCSR1_PLL_PD FIELD8(0x02)
#define RFCSR1_PLL_PD FIELD8(0x02)
#define RFCSR1_RX0_PD FIELD8(0x04)
#define RFCSR1_TX0_PD FIELD8(0x08)
#define RFCSR1_RX1_PD FIELD8(0x10)
@ -1785,7 +1785,7 @@ struct mac_iveiv_entry {
/*
* RFCSR 2:
*/
#define RFCSR2_RESCAL_EN FIELD8(0x80)
#define RFCSR2_RESCAL_EN FIELD8(0x80)
/*
* RFCSR 6:
@ -1801,7 +1801,7 @@ struct mac_iveiv_entry {
/*
* RFCSR 11:
*/
#define RFCSR11_R FIELD8(0x03)
#define RFCSR11_R FIELD8(0x03)
/*
* RFCSR 12:
@ -1857,9 +1857,9 @@ struct mac_iveiv_entry {
/*
* RFCSR 30:
*/
#define RFCSR30_TX_H20M FIELD8(0x02)
#define RFCSR30_RX_H20M FIELD8(0x04)
#define RFCSR30_RX_VCM FIELD8(0x18)
#define RFCSR30_TX_H20M FIELD8(0x02)
#define RFCSR30_RX_H20M FIELD8(0x04)
#define RFCSR30_RX_VCM FIELD8(0x18)
#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
/*
@ -1871,17 +1871,17 @@ struct mac_iveiv_entry {
/*
* RFCSR 38:
*/
#define RFCSR38_RX_LO1_EN FIELD8(0x20)
#define RFCSR38_RX_LO1_EN FIELD8(0x20)
/*
* RFCSR 39:
*/
#define RFCSR39_RX_LO2_EN FIELD8(0x80)
#define RFCSR39_RX_LO2_EN FIELD8(0x80)
/*
* RFCSR 49:
*/
#define RFCSR49_TX FIELD8(0x3f)
#define RFCSR49_TX FIELD8(0x3f)
/*
* RF registers
@ -1918,7 +1918,7 @@ struct mac_iveiv_entry {
/*
* Chip ID
*/
#define EEPROM_CHIP_ID 0x0000
#define EEPROM_CHIP_ID 0x0000
/*
* EEPROM Version

View File

@ -400,15 +400,15 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
if (rt2800_wait_csr_ready(rt2x00dev))
return -EBUSY;
if (rt2x00_is_pci(rt2x00dev)) {
if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
}
if (rt2x00_is_pci(rt2x00dev)) {
if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
}
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
}
}
/*
* Disable DMA, will be reenabled later when enabling
@ -472,14 +472,15 @@ void rt2800_write_tx_data(struct queue_entry *entry,
test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXWI_W0_AMPDU,
test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->txop);
rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY,
txdesc->u.ht.mpdu_density);
rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->u.ht.txop);
rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->u.ht.mcs);
rt2x00_set_field32(&word, TXWI_W0_BW,
test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->u.ht.stbc);
rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
rt2x00_desc_write(txwi, 0, word);
@ -488,7 +489,7 @@ void rt2800_write_tx_data(struct queue_entry *entry,
test_bit(ENTRY_TXD_ACK, &txdesc->flags));
rt2x00_set_field32(&word, TXWI_W1_NSEQ,
test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->u.ht.ba_size);
rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
txdesc->key_idx : 0xff);
@ -681,7 +682,7 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status)
* confuse the rate control algorithm by providing clearly wrong
* data.
*/
if (aggr == 1 && ampdu == 0 && real_mcs != mcs) {
if (unlikely(aggr == 1 && ampdu == 0 && real_mcs != mcs)) {
skbdesc->tx_rate_idx = real_mcs;
mcs = real_mcs;
}
@ -751,7 +752,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
if (pid >= QID_RX)
continue;
queue = rt2x00queue_get_queue(rt2x00dev, pid);
queue = rt2x00queue_get_tx_queue(rt2x00dev, pid);
if (unlikely(!queue))
continue;
@ -1100,27 +1101,44 @@ int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
}
EXPORT_SYMBOL_GPL(rt2800_config_shared_key);
static inline int rt2800_find_pairwise_keyslot(struct rt2x00_dev *rt2x00dev)
{
int idx;
u32 offset, reg;
/*
* Search for the first free pairwise key entry and return the
* corresponding index.
*
* Make sure the WCID starts _after_ the last possible shared key
* entry (>32).
*
* Since parts of the pairwise key table might be shared with
* the beacon frame buffers 6 & 7 we should only write into the
* first 222 entries.
*/
for (idx = 33; idx <= 222; idx++) {
offset = MAC_WCID_ATTR_ENTRY(idx);
rt2800_register_read(rt2x00dev, offset, &reg);
if (!reg)
return idx;
}
return -1;
}
int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key)
{
struct hw_key_entry key_entry;
u32 offset;
int idx;
if (crypto->cmd == SET_KEY) {
/*
* 1 pairwise key is possible per AID, this means that the AID
* equals our hw_key_idx. Make sure the WCID starts _after_ the
* last possible shared key entry.
*
* Since parts of the pairwise key table might be shared with
* the beacon frame buffers 6 & 7 we should only write into the
* first 222 entries.
*/
if (crypto->aid > (222 - 32))
idx = rt2800_find_pairwise_keyslot(rt2x00dev);
if (idx < 0)
return -ENOSPC;
key->hw_key_idx = 32 + crypto->aid;
key->hw_key_idx = idx;
memcpy(key_entry.key, crypto->key,
sizeof(key_entry.key));
@ -1585,92 +1603,98 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
#define RT5390_FREQ_OFFSET_BOUND 0x5f
static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
struct rf_channel *rf,
struct channel_info *info)
struct ieee80211_conf *conf,
struct rf_channel *rf,
struct channel_info *info)
{
u8 rfcsr;
u16 eeprom;
u8 rfcsr;
u16 eeprom;
rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2);
rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2);
rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
if (info->default_power1 > RT5390_POWER_BOUND)
rt2x00_set_field8(&rfcsr, RFCSR49_TX, RT5390_POWER_BOUND);
else
rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
if (info->default_power1 > RT5390_POWER_BOUND)
rt2x00_set_field8(&rfcsr, RFCSR49_TX, RT5390_POWER_BOUND);
else
rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
if (rt2x00dev->freq_offset > RT5390_FREQ_OFFSET_BOUND)
rt2x00_set_field8(&rfcsr, RFCSR17_CODE, RT5390_FREQ_OFFSET_BOUND);
else
rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
if (rt2x00dev->freq_offset > RT5390_FREQ_OFFSET_BOUND)
rt2x00_set_field8(&rfcsr, RFCSR17_CODE,
RT5390_FREQ_OFFSET_BOUND);
else
rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
if (rf->channel <= 14) {
int idx = rf->channel-1;
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
if (rf->channel <= 14) {
int idx = rf->channel-1;
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
/* r55/r59 value array of channel 1~14 */
static const char r55_bt_rev[] = {0x83, 0x83,
0x83, 0x73, 0x73, 0x63, 0x53, 0x53,
0x53, 0x43, 0x43, 0x43, 0x43, 0x43};
static const char r59_bt_rev[] = {0x0e, 0x0e,
0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07};
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
/* r55/r59 value array of channel 1~14 */
static const char r55_bt_rev[] = {0x83, 0x83,
0x83, 0x73, 0x73, 0x63, 0x53, 0x53,
0x53, 0x43, 0x43, 0x43, 0x43, 0x43};
static const char r59_bt_rev[] = {0x0e, 0x0e,
0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07};
rt2800_rfcsr_write(rt2x00dev, 55, r55_bt_rev[idx]);
rt2800_rfcsr_write(rt2x00dev, 59, r59_bt_rev[idx]);
} else {
static const char r59_bt[] = {0x8b, 0x8b, 0x8b,
0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89,
0x88, 0x88, 0x86, 0x85, 0x84};
rt2800_rfcsr_write(rt2x00dev, 55,
r55_bt_rev[idx]);
rt2800_rfcsr_write(rt2x00dev, 59,
r59_bt_rev[idx]);
} else {
static const char r59_bt[] = {0x8b, 0x8b, 0x8b,
0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89,
0x88, 0x88, 0x86, 0x85, 0x84};
rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]);
}
} else {
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
static const char r55_nonbt_rev[] = {0x23, 0x23,
0x23, 0x23, 0x13, 0x13, 0x03, 0x03,
0x03, 0x03, 0x03, 0x03, 0x03, 0x03};
static const char r59_nonbt_rev[] = {0x07, 0x07,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x07, 0x07, 0x06, 0x05, 0x04, 0x04};
rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]);
}
} else {
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
static const char r55_nonbt_rev[] = {0x23, 0x23,
0x23, 0x23, 0x13, 0x13, 0x03, 0x03,
0x03, 0x03, 0x03, 0x03, 0x03, 0x03};
static const char r59_nonbt_rev[] = {0x07, 0x07,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x07, 0x07, 0x06, 0x05, 0x04, 0x04};
rt2800_rfcsr_write(rt2x00dev, 55, r55_nonbt_rev[idx]);
rt2800_rfcsr_write(rt2x00dev, 59, r59_nonbt_rev[idx]);
} else if (rt2x00_rt(rt2x00dev, RT5390)) {
static const char r59_non_bt[] = {0x8f, 0x8f,
0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
rt2800_rfcsr_write(rt2x00dev, 55,
r55_nonbt_rev[idx]);
rt2800_rfcsr_write(rt2x00dev, 59,
r59_nonbt_rev[idx]);
} else if (rt2x00_rt(rt2x00dev, RT5390)) {
static const char r59_non_bt[] = {0x8f, 0x8f,
0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
rt2800_rfcsr_write(rt2x00dev, 59, r59_non_bt[idx]);
}
}
}
rt2800_rfcsr_write(rt2x00dev, 59,
r59_non_bt[idx]);
}
}
}
rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
}
static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
@ -1697,8 +1721,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_rf(rt2x00dev, RF3052) ||
rt2x00_rf(rt2x00dev, RF3320))
rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
else if (rt2x00_rf(rt2x00dev, RF5390))
rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
else if (rt2x00_rf(rt2x00dev, RF5390))
rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
else
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
@ -1711,14 +1735,15 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 86, 0);
if (rf->channel <= 14) {
if (!rt2x00_rt(rt2x00dev, RT5390)) {
if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
} else {
rt2800_bbp_write(rt2x00dev, 82, 0x84);
rt2800_bbp_write(rt2x00dev, 75, 0x50);
}
if (!rt2x00_rt(rt2x00dev, RT5390)) {
if (test_bit(CONFIG_EXTERNAL_LNA_BG,
&rt2x00dev->flags)) {
rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
} else {
rt2800_bbp_write(rt2x00dev, 82, 0x84);
rt2800_bbp_write(rt2x00dev, 75, 0x50);
}
}
} else {
rt2800_bbp_write(rt2x00dev, 82, 0xf2);
@ -2097,8 +2122,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT5390))
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT5390))
return 0x1c + (2 * rt2x00dev->lna_gain);
else
return 0x2e + rt2x00dev->lna_gain;
@ -2230,10 +2255,10 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f);
} else if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
} else if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
} else {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@ -2450,7 +2475,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
wcid, sizeof(wcid));
rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1);
rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 0);
rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
}
@ -2609,31 +2634,31 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_wait_bbp_ready(rt2x00dev)))
return -EACCES;
if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_bbp_read(rt2x00dev, 4, &value);
rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
rt2800_bbp_write(rt2x00dev, 4, value);
}
if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_bbp_read(rt2x00dev, 4, &value);
rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
rt2800_bbp_write(rt2x00dev, 4, value);
}
if (rt2800_is_305x_soc(rt2x00dev) ||
rt2x00_rt(rt2x00dev, RT5390))
if (rt2800_is_305x_soc(rt2x00dev) ||
rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 31, 0x08);
rt2800_bbp_write(rt2x00dev, 65, 0x2c);
rt2800_bbp_write(rt2x00dev, 66, 0x38);
if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 68, 0x0b);
if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 68, 0x0b);
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
rt2800_bbp_write(rt2x00dev, 69, 0x16);
rt2800_bbp_write(rt2x00dev, 73, 0x12);
} else if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x13);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
rt2800_bbp_write(rt2x00dev, 76, 0x28);
rt2800_bbp_write(rt2x00dev, 77, 0x59);
} else if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x13);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
rt2800_bbp_write(rt2x00dev, 76, 0x28);
rt2800_bbp_write(rt2x00dev, 77, 0x59);
} else {
rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x10);
@ -2644,8 +2669,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT5390)) {
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_bbp_write(rt2x00dev, 79, 0x13);
rt2800_bbp_write(rt2x00dev, 80, 0x05);
rt2800_bbp_write(rt2x00dev, 81, 0x33);
@ -2657,62 +2682,62 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
}
rt2800_bbp_write(rt2x00dev, 82, 0x62);
if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 83, 0x7a);
else
rt2800_bbp_write(rt2x00dev, 83, 0x6a);
if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 83, 0x7a);
else
rt2800_bbp_write(rt2x00dev, 83, 0x6a);
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
rt2800_bbp_write(rt2x00dev, 84, 0x19);
else if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 84, 0x9a);
else if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 84, 0x9a);
else
rt2800_bbp_write(rt2x00dev, 84, 0x99);
if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 86, 0x38);
else
rt2800_bbp_write(rt2x00dev, 86, 0x00);
if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 86, 0x38);
else
rt2800_bbp_write(rt2x00dev, 86, 0x00);
rt2800_bbp_write(rt2x00dev, 91, 0x04);
if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 92, 0x02);
else
rt2800_bbp_write(rt2x00dev, 92, 0x00);
if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 92, 0x02);
else
rt2800_bbp_write(rt2x00dev, 92, 0x00);
if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2800_is_305x_soc(rt2x00dev))
rt2800_bbp_write(rt2x00dev, 103, 0xc0);
else
rt2800_bbp_write(rt2x00dev, 103, 0x00);
if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 104, 0x92);
if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 104, 0x92);
if (rt2800_is_305x_soc(rt2x00dev))
rt2800_bbp_write(rt2x00dev, 105, 0x01);
else if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 105, 0x3c);
else if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 105, 0x3c);
else
rt2800_bbp_write(rt2x00dev, 105, 0x05);
if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 106, 0x03);
else
rt2800_bbp_write(rt2x00dev, 106, 0x35);
if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 106, 0x03);
else
rt2800_bbp_write(rt2x00dev, 106, 0x35);
if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 128, 0x12);
if (rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 128, 0x12);
if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT5390)) {
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_bbp_read(rt2x00dev, 138, &value);
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
@ -2724,41 +2749,42 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 138, value);
}
if (rt2x00_rt(rt2x00dev, RT5390)) {
int ant, div_mode;
if (rt2x00_rt(rt2x00dev, RT5390)) {
int ant, div_mode;
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
ant = (div_mode == 3) ? 1 : 0;
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
div_mode = rt2x00_get_field16(eeprom,
EEPROM_NIC_CONF1_ANT_DIVERSITY);
ant = (div_mode == 3) ? 1 : 0;
/* check if this is a Bluetooth combo card */
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
u32 reg;
/* check if this is a Bluetooth combo card */
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) {
u32 reg;
rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT6, 0);
rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 0);
rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 0);
if (ant == 0)
rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 1);
else if (ant == 1)
rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 1);
rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
}
rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0);
rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT6, 0);
rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 0);
rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 0);
if (ant == 0)
rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 1);
else if (ant == 1)
rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 1);
rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
}
rt2800_bbp_read(rt2x00dev, 152, &value);
if (ant == 0)
rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
else
rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
rt2800_bbp_write(rt2x00dev, 152, value);
rt2800_bbp_read(rt2x00dev, 152, &value);
if (ant == 0)
rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
else
rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
rt2800_bbp_write(rt2x00dev, 152, value);
/* Init frequency calibration */
rt2800_bbp_write(rt2x00dev, 142, 1);
rt2800_bbp_write(rt2x00dev, 143, 57);
}
/* Init frequency calibration */
rt2800_bbp_write(rt2x00dev, 142, 1);
rt2800_bbp_write(rt2x00dev, 143, 57);
}
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
@ -2848,28 +2874,28 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
!rt2x00_rt(rt2x00dev, RT3071) &&
!rt2x00_rt(rt2x00dev, RT3090) &&
!rt2x00_rt(rt2x00dev, RT3390) &&
!rt2x00_rt(rt2x00dev, RT5390) &&
!rt2x00_rt(rt2x00dev, RT5390) &&
!rt2800_is_305x_soc(rt2x00dev))
return 0;
/*
* Init RF calibration.
*/
if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
msleep(1);
rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
} else {
rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
msleep(1);
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
}
if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
msleep(1);
rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
} else {
rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
msleep(1);
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
}
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
@ -2960,87 +2986,87 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
return 0;
} else if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
else
rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
rt2800_rfcsr_write(rt2x00dev, 12, 0xc6);
rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
} else if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
else
rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
rt2800_rfcsr_write(rt2x00dev, 12, 0xc6);
rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
else
rt2800_rfcsr_write(rt2x00dev, 25, 0xc0);
rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
else
rt2800_rfcsr_write(rt2x00dev, 25, 0xc0);
rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
else
rt2800_rfcsr_write(rt2x00dev, 40, 0x4b);
rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
rt2800_rfcsr_write(rt2x00dev, 42, 0xd2);
rt2800_rfcsr_write(rt2x00dev, 43, 0x9a);
rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
else
rt2800_rfcsr_write(rt2x00dev, 46, 0x7b);
rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
else
rt2800_rfcsr_write(rt2x00dev, 40, 0x4b);
rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
rt2800_rfcsr_write(rt2x00dev, 42, 0xd2);
rt2800_rfcsr_write(rt2x00dev, 43, 0x9a);
rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
else
rt2800_rfcsr_write(rt2x00dev, 46, 0x7b);
rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
else
rt2800_rfcsr_write(rt2x00dev, 53, 0x84);
rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
rt2800_rfcsr_write(rt2x00dev, 55, 0x44);
rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
else
rt2800_rfcsr_write(rt2x00dev, 53, 0x84);
rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
rt2800_rfcsr_write(rt2x00dev, 55, 0x44);
rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
else
rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
else
rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
}
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@ -3094,23 +3120,23 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
}
if (!rt2x00_rt(rt2x00dev, RT5390)) {
/*
* Set back to initial state
*/
rt2800_bbp_write(rt2x00dev, 24, 0);
if (!rt2x00_rt(rt2x00dev, RT5390)) {
/*
* Set back to initial state
*/
rt2800_bbp_write(rt2x00dev, 24, 0);
rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
/*
* Set BBP back to BW20
*/
rt2800_bbp_read(rt2x00dev, 4, &bbp);
rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
rt2800_bbp_write(rt2x00dev, 4, bbp);
}
/*
* Set BBP back to BW20
*/
rt2800_bbp_read(rt2x00dev, 4, &bbp);
rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
rt2800_bbp_write(rt2x00dev, 4, bbp);
}
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
@ -3122,23 +3148,24 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
if (!rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
if (!test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
}
rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
rt2x00_get_field16(eeprom,
EEPROM_TXMIXER_GAIN_BG_VAL));
rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
}
if (!rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
if (!test_bit(CONFIG_EXTERNAL_LNA_BG,
&rt2x00dev->flags))
rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
}
rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
rt2x00_get_field16(eeprom,
EEPROM_TXMIXER_GAIN_BG_VAL));
rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
}
if (rt2x00_rt(rt2x00dev, RT3090)) {
rt2800_bbp_read(rt2x00dev, 138, &bbp);
@ -3189,19 +3216,19 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
}
if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
}
rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
}
return 0;
}
@ -3467,15 +3494,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
/*
* Identify RF chipset by EEPROM value
* RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field
* RT53xx: defined in "EEPROM_CHIP_ID" field
* Identify RF chipset by EEPROM value
* RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field
* RT53xx: defined in "EEPROM_CHIP_ID" field
*/
rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390)
rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
else
value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390)
rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
else
value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
@ -3487,8 +3514,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
!rt2x00_rt(rt2x00dev, RT3071) &&
!rt2x00_rt(rt2x00dev, RT3090) &&
!rt2x00_rt(rt2x00dev, RT3390) &&
!rt2x00_rt(rt2x00dev, RT3572) &&
!rt2x00_rt(rt2x00dev, RT5390)) {
!rt2x00_rt(rt2x00dev, RT3572) &&
!rt2x00_rt(rt2x00dev, RT5390)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
@ -3502,8 +3529,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
!rt2x00_rf(rt2x00dev, RF3021) &&
!rt2x00_rf(rt2x00dev, RF3022) &&
!rt2x00_rf(rt2x00dev, RF3052) &&
!rt2x00_rf(rt2x00dev, RF3320) &&
!rt2x00_rf(rt2x00dev, RF5390)) {
!rt2x00_rf(rt2x00dev, RF3320) &&
!rt2x00_rf(rt2x00dev, RF5390)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@ -3800,8 +3827,8 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00_rf(rt2x00dev, RF2020) ||
rt2x00_rf(rt2x00dev, RF3021) ||
rt2x00_rf(rt2x00dev, RF3022) ||
rt2x00_rf(rt2x00dev, RF3320) ||
rt2x00_rf(rt2x00dev, RF5390)) {
rt2x00_rf(rt2x00dev, RF3320) ||
rt2x00_rf(rt2x00dev, RF5390)) {
spec->num_channels = 14;
spec->channels = rf_vals_3x;
} else if (rt2x00_rf(rt2x00dev, RF3052)) {
@ -3965,7 +3992,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
if (queue_idx >= 4)
return 0;
queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
/* Update WMM TXOP register */
offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2)));

View File

@ -493,12 +493,12 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
}
if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
}
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
@ -726,7 +726,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
if (qid >= QID_RX) {
if (unlikely(qid >= QID_RX)) {
/*
* Unknown queue, this shouldn't happen. Just drop
* this tx status.
@ -736,7 +736,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
break;
}
queue = rt2x00queue_get_queue(rt2x00dev, qid);
queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
if (unlikely(queue == NULL)) {
/*
* The queue is NULL, this shouldn't happen. Stop
@ -747,7 +747,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
break;
}
if (rt2x00queue_empty(queue)) {
if (unlikely(rt2x00queue_empty(queue))) {
/*
* The queue is empty. Stop processing here
* and drop the tx status.
@ -765,18 +765,17 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
static void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
struct rt2x00_field32 irq_field)
{
unsigned long flags;
u32 reg;
/*
* Enable a single interrupt. The interrupt mask register
* access needs locking.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock_irq(&rt2x00dev->irqmask_lock);
rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
rt2x00_set_field32(&reg, irq_field, 1);
rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
static void rt2800pci_txstatus_tasklet(unsigned long data)
@ -836,7 +835,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
*
* Furthermore we don't disable the TX_FIFO_STATUS
* interrupt here but leave it enabled so that the TX_STA_FIFO
* can also be read while the interrupt thread gets executed.
* can also be read while the tx status tasklet gets executed.
*
* Since we have only one producer and one consumer we don't
* need to lock the kfifo.
@ -862,7 +861,6 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
u32 reg, mask;
unsigned long flags;
/* Read status and ACK all interrupts */
rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
@ -905,11 +903,11 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
* Disable all interrupts for which a tasklet was scheduled right now,
* the tasklet will reenable the appropriate interrupts.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock(&rt2x00dev->irqmask_lock);
rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
reg &= mask;
rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock(&rt2x00dev->irqmask_lock);
return IRQ_HANDLED;
}
@ -979,6 +977,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
if (!modparam_nohwcrypt)
__set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
__set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags);
/*
* Set the rssi offset.
@ -1135,7 +1134,7 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) },
#endif
#ifdef CONFIG_RT2800PCI_RT53XX
{ PCI_DEVICE(0x1814, 0x5390), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x5390), PCI_DEVICE_DATA(&rt2800pci_ops) },
#endif
{ 0, }
};

View File

@ -565,6 +565,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
__set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
__set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
__set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags);
/*
* Set the rssi offset.

View File

@ -467,7 +467,6 @@ struct rt2x00lib_crypto {
const u8 *address;
u32 bssidx;
u32 aid;
u8 key[16];
u8 tx_mic[8];
@ -662,6 +661,8 @@ enum rt2x00_flags {
DRIVER_REQUIRE_L2PAD,
DRIVER_REQUIRE_TXSTATUS_FIFO,
DRIVER_REQUIRE_TASKLET_CONTEXT,
DRIVER_REQUIRE_SW_SEQNO,
DRIVER_REQUIRE_HT_TX_DESC,
/*
* Driver features
@ -886,14 +887,13 @@ struct rt2x00_dev {
struct work_struct txdone_work;
/*
* Data queue arrays for RX, TX and Beacon.
* The Beacon array also contains the Atim queue
* if that is supported by the device.
* Data queue arrays for RX, TX, Beacon and ATIM.
*/
unsigned int data_queues;
struct data_queue *rx;
struct data_queue *tx;
struct data_queue *bcn;
struct data_queue *atim;
/*
* Firmware image.
@ -1063,12 +1063,24 @@ void rt2x00queue_map_txskb(struct queue_entry *entry);
void rt2x00queue_unmap_skb(struct queue_entry *entry);
/**
* rt2x00queue_get_queue - Convert queue index to queue pointer
* rt2x00queue_get_tx_queue - Convert tx queue index to queue pointer
* @rt2x00dev: Pointer to &struct rt2x00_dev.
* @queue: rt2x00 queue index (see &enum data_queue_qid).
*
* Returns NULL for non-tx queues.
*/
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue);
static inline struct data_queue *
rt2x00queue_get_tx_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue)
{
if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
return &rt2x00dev->tx[queue];
if (queue == QID_ATIM)
return rt2x00dev->atim;
return NULL;
}
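A minimal usage sketch of the new inline helper (the caller below is hypothetical and not part of this diff): callers are expected to treat a NULL return as "not a tx queue", exactly as the conf_tx and txdone hunks elsewhere in this diff do.
/* Hypothetical caller: resolve a tx status report to its data queue and
 * drop it when the index is not a tx or ATIM queue. */
static void example_handle_txdone(struct rt2x00_dev *rt2x00dev,
				  enum data_queue_qid qid)
{
	struct data_queue *queue;

	queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
	if (unlikely(!queue))
		return;	/* QID_RX, QID_BEACON, ... are not tx queues */

	/* ... process completed entries on 'queue' ... */
}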
/**
* rt2x00queue_get_entry - Get queue entry where the given index points to.

View File

@ -38,12 +38,12 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
if (tx_info->control.sta)
txdesc->mpdu_density =
txdesc->u.ht.mpdu_density =
tx_info->control.sta->ht_cap.ampdu_density;
txdesc->ba_size = 7; /* FIXME: What value is needed? */
txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
txdesc->stbc =
txdesc->u.ht.stbc =
(tx_info->flags & IEEE80211_TX_CTL_STBC) >> IEEE80211_TX_CTL_STBC_SHIFT;
/*
@ -51,22 +51,22 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
* mcs rate to be used
*/
if (txrate->flags & IEEE80211_TX_RC_MCS) {
txdesc->mcs = txrate->idx;
txdesc->u.ht.mcs = txrate->idx;
/*
* MIMO PS should be set to 1 for STAs using dynamic SM PS
* when using more than one tx stream (>MCS7).
*/
if (tx_info->control.sta && txdesc->mcs > 7 &&
if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
((tx_info->control.sta->ht_cap.cap &
IEEE80211_HT_CAP_SM_PS) >>
IEEE80211_HT_CAP_SM_PS_SHIFT) ==
WLAN_HT_CAP_SM_PS_DYNAMIC)
__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
} else {
txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs);
txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
txdesc->mcs |= 0x08;
txdesc->u.ht.mcs |= 0x08;
}
/*
@ -77,14 +77,6 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
!(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
/*
* Determine HT Mix/Greenfield rate mode
*/
if (txrate->flags & IEEE80211_TX_RC_MCS)
txdesc->rate_mode = RATE_MODE_HT_MIX;
if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
/*
* Set 40 MHz mode if necessary (for legacy rates this will
* duplicate the frame to both channels).
@ -105,11 +97,11 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
* for frames not transmitted with TXOP_HTTXOP
*/
if (ieee80211_is_mgmt(hdr->frame_control))
txdesc->txop = TXOP_BACKOFF;
txdesc->u.ht.txop = TXOP_BACKOFF;
else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
txdesc->txop = TXOP_SIFS;
txdesc->u.ht.txop = TXOP_SIFS;
else
txdesc->txop = TXOP_HTTXOP;
txdesc->u.ht.txop = TXOP_HTTXOP;
}
u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,

View File

@ -116,13 +116,13 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
goto exit_fail;
/*
* Determine which queue to put packet on.
* Use the ATIM queue if appropriate and present.
*/
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
queue = rt2x00queue_get_queue(rt2x00dev, QID_ATIM);
else
queue = rt2x00queue_get_queue(rt2x00dev, qid);
qid = QID_ATIM;
queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
if (unlikely(!queue)) {
ERROR(rt2x00dev,
"Attempt to send packet over invalid queue %d.\n"
@ -149,7 +149,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
goto exit_fail;
}
if (rt2x00queue_write_tx_frame(queue, skb, false))
if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false)))
goto exit_fail;
if (rt2x00queue_threshold(queue))
@ -190,7 +190,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct rt2x00_intf *intf = vif_to_intf(vif);
struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
struct data_queue *queue = rt2x00dev->bcn;
struct queue_entry *entry = NULL;
unsigned int i;
@ -518,11 +518,9 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
crypto.cmd = cmd;
if (sta) {
/* some drivers need the AID */
crypto.aid = sta->aid;
if (sta)
crypto.address = sta->addr;
} else
else
crypto.address = bcast_addr;
if (crypto.cipher == CIPHER_TKIP)
@ -692,7 +690,7 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
struct rt2x00_dev *rt2x00dev = hw->priv;
struct data_queue *queue;
queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
if (unlikely(!queue))
return -EINVAL;

View File

@ -221,14 +221,17 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
unsigned long irqflags;
if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
unlikely(!tx_info->control.vif))
if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
return;
__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
if (!test_bit(DRIVER_REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->flags))
return;
/*
* Hardware should insert sequence counter.
* FIXME: We insert a software sequence counter first for
* hardware that doesn't support hardware sequence counting.
* The hardware is not able to insert a sequence number. Assign a
* software generated one here.
*
* This is wrong because beacons are not getting sequence
* numbers assigned properly.
@ -246,7 +249,6 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
spin_unlock_irqrestore(&intf->seqlock, irqflags);
__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}
static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
@ -260,6 +262,16 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
unsigned int duration;
unsigned int residual;
/*
* Determine with what IFS priority this frame should be sent.
* Set ifs to IFS_SIFS when this is not the first fragment,
* or this fragment came after RTS/CTS.
*/
if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
txdesc->u.plcp.ifs = IFS_BACKOFF;
else
txdesc->u.plcp.ifs = IFS_SIFS;
/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
data_length = entry->skb->len + 4;
data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);
@ -268,12 +280,12 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
* PLCP setup
* Length calculation depends on OFDM/CCK rate.
*/
txdesc->signal = hwrate->plcp;
txdesc->service = 0x04;
txdesc->u.plcp.signal = hwrate->plcp;
txdesc->u.plcp.service = 0x04;
if (hwrate->flags & DEV_RATE_OFDM) {
txdesc->length_high = (data_length >> 6) & 0x3f;
txdesc->length_low = data_length & 0x3f;
txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
txdesc->u.plcp.length_low = data_length & 0x3f;
} else {
/*
* Convert length to microseconds.
@ -288,18 +300,18 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
* Check if we need to set the Length Extension
*/
if (hwrate->bitrate == 110 && residual <= 30)
txdesc->service |= 0x80;
txdesc->u.plcp.service |= 0x80;
}
txdesc->length_high = (duration >> 8) & 0xff;
txdesc->length_low = duration & 0xff;
txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
txdesc->u.plcp.length_low = duration & 0xff;
/*
* When preamble is enabled we should set the
* preamble bit for the signal.
*/
if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
txdesc->signal |= 0x08;
txdesc->u.plcp.signal |= 0x08;
}
}
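A worked example of the PLCP length encoding above, with an illustrative frame length that is not taken from the patch: the OFDM branch splits the byte count into a 6-bit low field and a high field (23 * 64 + 28 reassembles the original 1500 bytes), while the CCK branch first converts the length into a duration in microseconds.
/* Illustrative sketch of the OFDM branch above, e.g. data_length = 1500 */
static void example_plcp_length(unsigned int data_length,
				u8 *length_high, u8 *length_low)
{
	*length_high = (data_length >> 6) & 0x3f;	/* 1500 -> 23 */
	*length_low = data_length & 0x3f;		/* 1500 -> 28 */
}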
@ -309,9 +321,9 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
struct ieee80211_rate *rate =
ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
const struct rt2x00_rate *hwrate;
struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
struct ieee80211_rate *rate;
const struct rt2x00_rate *hwrate = NULL;
memset(txdesc, 0, sizeof(*txdesc));
@ -371,33 +383,36 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
ieee80211_is_probe_resp(hdr->frame_control))
__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
/*
* Determine with what IFS priority this frame should be sent.
* Set ifs to IFS_SIFS when this is not the first fragment,
* or this fragment came after RTS/CTS.
*/
if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
!test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
!test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
txdesc->ifs = IFS_BACKOFF;
} else
txdesc->ifs = IFS_SIFS;
/*
* Determine rate modulation.
*/
hwrate = rt2x00_get_rate(rate->hw_value);
txdesc->rate_mode = RATE_MODE_CCK;
if (hwrate->flags & DEV_RATE_OFDM)
txdesc->rate_mode = RATE_MODE_OFDM;
if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
else if (txrate->flags & IEEE80211_TX_RC_MCS)
txdesc->rate_mode = RATE_MODE_HT_MIX;
else {
rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
hwrate = rt2x00_get_rate(rate->hw_value);
if (hwrate->flags & DEV_RATE_OFDM)
txdesc->rate_mode = RATE_MODE_OFDM;
else
txdesc->rate_mode = RATE_MODE_CCK;
}
/*
* Apply TX descriptor handling by components
*/
rt2x00crypto_create_tx_descriptor(entry, txdesc);
rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
if (test_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags))
rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
else
rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}
static int rt2x00queue_write_tx_data(struct queue_entry *entry,
@ -690,29 +705,6 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue)
{
int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
if (queue == QID_RX)
return rt2x00dev->rx;
if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
return &rt2x00dev->tx[queue];
if (!rt2x00dev->bcn)
return NULL;
if (queue == QID_BEACON)
return &rt2x00dev->bcn[0];
else if (queue == QID_ATIM && atim)
return &rt2x00dev->bcn[1];
return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
enum queue_index index)
{
@ -1088,7 +1080,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
goto exit;
if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
status = rt2x00queue_alloc_entries(rt2x00dev->atim,
rt2x00dev->ops->atim);
if (status)
goto exit;
@ -1162,6 +1154,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
rt2x00dev->rx = queue;
rt2x00dev->tx = &queue[1];
rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
/*
* Initialize queue parameters.
@ -1178,9 +1171,9 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
tx_queue_for_each(rt2x00dev, queue)
rt2x00queue_init(rt2x00dev, queue, qid++);
rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
if (req_atim)
rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);
rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);
return 0;
}

View File

@ -305,20 +305,27 @@ struct txentry_desc {
u16 length;
u16 header_length;
u16 length_high;
u16 length_low;
u16 signal;
u16 service;
union {
struct {
u16 length_high;
u16 length_low;
u16 signal;
u16 service;
enum ifs ifs;
} plcp;
u16 mcs;
u16 stbc;
u16 ba_size;
u16 rate_mode;
u16 mpdu_density;
struct {
u16 mcs;
u8 stbc;
u8 ba_size;
u8 mpdu_density;
enum txop txop;
} ht;
} u;
enum rate_modulation rate_mode;
short retry_limit;
short ifs;
short txop;
enum cipher cipher;
u16 key_idx;

View File

@ -1898,10 +1898,12 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
txdesc->u.plcp.length_low);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
txdesc->u.plcp.length_high);
rt2x00_desc_write(txd, 2, word);
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
@ -1946,7 +1948,7 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_OFDM,
(txdesc->rate_mode == RATE_MODE_OFDM));
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
@ -2190,7 +2192,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
* queue identification number.
*/
type = rt2x00_get_field32(reg, STA_CSR4_PID_TYPE);
queue = rt2x00queue_get_queue(rt2x00dev, type);
queue = rt2x00queue_get_tx_queue(rt2x00dev, type);
if (unlikely(!queue))
continue;
@ -2261,39 +2263,37 @@ static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
static void rt61pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
struct rt2x00_field32 irq_field)
{
unsigned long flags;
u32 reg;
/*
* Enable a single interrupt. The interrupt mask register
* access needs locking.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock_irq(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
rt2x00_set_field32(&reg, irq_field, 0);
rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
static void rt61pci_enable_mcu_interrupt(struct rt2x00_dev *rt2x00dev,
struct rt2x00_field32 irq_field)
{
unsigned long flags;
u32 reg;
/*
* Enable a single MCU interrupt. The interrupt mask register
* access needs locking.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock_irq(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
rt2x00_set_field32(&reg, irq_field, 0);
rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
static void rt61pci_txstatus_tasklet(unsigned long data)
@ -2331,7 +2331,6 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
struct rt2x00_dev *rt2x00dev = dev_instance;
u32 reg_mcu, mask_mcu;
u32 reg, mask;
unsigned long flags;
/*
* Get the interrupt sources and save them to a local variable.
@ -2376,7 +2375,7 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
* Disable all interrupts for which a tasklet was scheduled right now,
* the tasklet will reenable the appropriate interrupts.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
reg |= mask;
@ -2386,7 +2385,7 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
reg |= mask_mcu;
rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock(&rt2x00dev->irqmask_lock);
return IRQ_HANDLED;
}
@ -2917,7 +2916,7 @@ static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
if (queue_idx >= 4)
return 0;
queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
/* Update WMM TXOP register */
offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2)));

View File

@ -1474,7 +1474,7 @@ static void rt73usb_write_tx_desc(struct queue_entry *entry,
test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_OFDM,
(txdesc->rate_mode == RATE_MODE_OFDM));
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
@ -1499,10 +1499,12 @@ static void rt73usb_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
txdesc->u.plcp.length_low);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
txdesc->u.plcp.length_high);
rt2x00_desc_write(txd, 2, word);
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
@ -2247,7 +2249,7 @@ static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
if (queue_idx >= 4)
return 0;
queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
/* Update WMM TXOP register */
offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2)));

View File

@ -869,23 +869,35 @@ static void rtl8187_work(struct work_struct *work)
/* The RTL8187 returns the retry count through register 0xFFFA. In
* addition, it appears to be a cumulative retry count, not the
* value for the current TX packet. When multiple TX entries are
* queued, the retry count will be valid for the last one in the queue.
* The "error" should not matter for purposes of rate setting. */
* waiting in the queue, the retry count will be the total for all.
* The "error" may matter for purposes of rate setting, but there is
* no other choice with this hardware.
*/
struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
work.work);
struct ieee80211_tx_info *info;
struct ieee80211_hw *dev = priv->dev;
static u16 retry;
u16 tmp;
u16 avg_retry;
int length;
mutex_lock(&priv->conf_mutex);
tmp = rtl818x_ioread16(priv, (__le16 *)0xFFFA);
length = skb_queue_len(&priv->b_tx_status.queue);
if (unlikely(!length))
length = 1;
if (unlikely(tmp < retry))
tmp = retry;
avg_retry = (tmp - retry) / length;
while (skb_queue_len(&priv->b_tx_status.queue) > 0) {
struct sk_buff *old_skb;
old_skb = skb_dequeue(&priv->b_tx_status.queue);
info = IEEE80211_SKB_CB(old_skb);
info->status.rates[0].count = tmp - retry + 1;
info->status.rates[0].count = avg_retry + 1;
if (info->status.rates[0].count > RETRY_COUNT)
info->flags &= ~IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(dev, old_skb);
}
retry = tmp;
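A worked example of the averaging introduced above, using made-up counter values: if the previous cumulative reading was 12, the register now reads 21 and three frames are waiting for status, each frame is reported as (21 - 12) / 3 = 3 retries, i.e. rates[0].count = 4; the fresh cumulative value is then remembered for the next pass.
/* Illustrative values only, mirroring the logic above */
static u16 example_avg_retry(void)
{
	u16 prev_cumulative = 12;	/* 'retry' from the previous run */
	u16 now_cumulative = 21;	/* fresh read of register 0xFFFA */
	int queued = 3;			/* skb_queue_len() of the status queue */

	/* every dequeued frame then reports this value + 1 = 4 */
	return (now_cumulative - prev_cumulative) / queued;	/* = 3 */
}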
@ -931,8 +943,8 @@ static int rtl8187_start(struct ieee80211_hw *dev)
rtl818x_iowrite32(priv, &priv->map->TX_CONF,
RTL818X_TX_CONF_HW_SEQNUM |
RTL818X_TX_CONF_DISREQQSIZE |
(7 << 8 /* short retry limit */) |
(7 << 0 /* long retry limit */) |
(RETRY_COUNT << 8 /* short retry limit */) |
(RETRY_COUNT << 0 /* long retry limit */) |
(7 << 21 /* MAX TX DMA */));
rtl8187_init_urbs(dev);
rtl8187b_init_status_urb(dev);
@ -1376,6 +1388,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_RX_INCLUDES_FCS;
/* Initialize rate-control variables */
dev->max_rates = 1;
dev->max_rate_tries = RETRY_COUNT;
eeprom.data = dev;
eeprom.register_read = rtl8187_eeprom_register_read;

View File

@ -35,6 +35,8 @@
#define RFKILL_MASK_8187_89_97 0x2
#define RFKILL_MASK_8198 0x4
#define RETRY_COUNT 7
struct rtl8187_rx_info {
struct urb *urb;
struct ieee80211_hw *dev;

View File

@ -7,15 +7,18 @@ rtlwifi-objs := \
efuse.o \
ps.o \
rc.o \
regd.o \
usb.o
regd.o
rtl8192c_common-objs += \
ifeq ($(CONFIG_PCI),y)
ifneq ($(CONFIG_PCI),)
rtlwifi-objs += pci.o
endif
ifneq ($(CONFIG_USB),)
rtlwifi-objs += usb.o
endif
obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
obj-$(CONFIG_RTL8192CE) += rtl8192ce/
obj-$(CONFIG_RTL8192CU) += rtl8192cu/

View File

@ -54,7 +54,6 @@
/* This really should be 8, but not for our firmware */
#define MAX_SUPPORTED_RATES 32
#define COUNTRY_STRING_LEN 3
#define MAX_COUNTRY_TRIPLETS 32
/* Headers */
@ -98,7 +97,7 @@ struct country_triplet {
struct wl12xx_ie_country {
struct wl12xx_ie_header header;
u8 country_string[COUNTRY_STRING_LEN];
u8 country_string[IEEE80211_COUNTRY_STRING_LEN];
struct country_triplet triplets[MAX_COUNTRY_TRIPLETS];
} __packed;

View File

@ -1361,7 +1361,8 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
acx->ht_protection =
(u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
acx->rifs_mode = 0;
acx->gf_protection = 0;
acx->gf_protection =
!!(ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
acx->ht_tx_burst_limit = 0;
acx->dual_cts_protection = 0;

View File

@ -488,6 +488,9 @@ static void wl1271_boot_hw_version(struct wl1271 *wl)
fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
wl->hw_pg_ver = (s8)fuse;
if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3)
wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
}
/* uploads NVS and firmware */

View File

@ -59,6 +59,11 @@ struct wl1271_static_data {
#define PG_VER_MASK 0x3c
#define PG_VER_OFFSET 2
#define PG_MAJOR_VER_MASK 0x3
#define PG_MAJOR_VER_OFFSET 0x0
#define PG_MINOR_VER_MASK 0xc
#define PG_MINOR_VER_OFFSET 0x2
#define CMD_MBOX_ADDRESS 0x407B4
#define POLARITY_LOW BIT(1)

View File

@ -63,6 +63,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
cmd->status = 0;
WARN_ON(len % 4 != 0);
WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags));
wl1271_write(wl, wl->cmd_box_addr, buf, len, false);

View File

@ -99,7 +99,7 @@ static void wl1271_debugfs_update_stats(struct wl1271 *wl)
mutex_lock(&wl->mutex);
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;

View File

@ -168,5 +168,6 @@ void wl1271_unregister_hw(struct wl1271 *wl);
int wl1271_init_ieee80211(struct wl1271 *wl);
struct ieee80211_hw *wl1271_alloc_hw(void);
int wl1271_free_hw(struct wl1271 *wl);
irqreturn_t wl1271_irq(int irq, void *data);
#endif

View File

@ -304,7 +304,7 @@ static struct conf_drv_settings default_conf = {
.rx_block_num = 70,
.tx_min_block_num = 40,
.dynamic_memory = 0,
.min_req_tx_blocks = 104,
.min_req_tx_blocks = 100,
.min_req_rx_blocks = 22,
.tx_min = 27,
}
@ -374,7 +374,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@ -635,16 +635,44 @@ static void wl1271_fw_status(struct wl1271 *wl,
(s64)le32_to_cpu(status->fw_localtime);
}
#define WL1271_IRQ_MAX_LOOPS 10
static void wl1271_flush_deferred_work(struct wl1271 *wl)
{
struct sk_buff *skb;
static void wl1271_irq_work(struct work_struct *work)
/* Pass all received frames to the network stack */
while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
ieee80211_rx_ni(wl->hw, skb);
/* Return sent skbs to the network stack */
while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
ieee80211_tx_status(wl->hw, skb);
}
static void wl1271_netstack_work(struct work_struct *work)
{
struct wl1271 *wl =
container_of(work, struct wl1271, netstack_work);
do {
wl1271_flush_deferred_work(wl);
} while (skb_queue_len(&wl->deferred_rx_queue));
}
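For context, a sketch of the producer side these consumers pair with (the real changes are in the rx.c and tx.c hunks later in this diff): completed skbs are queued on the deferred lists and the netstack work is scheduled, instead of calling into mac80211 directly from the interrupt path.
/* Sketch of the producer side, mirroring the rx.c hunk below */
static void example_defer_rx_skb(struct wl1271 *wl, struct sk_buff *skb)
{
	skb_queue_tail(&wl->deferred_rx_queue, skb);
	ieee80211_queue_work(wl->hw, &wl->netstack_work);
}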
#define WL1271_IRQ_MAX_LOOPS 256
irqreturn_t wl1271_irq(int irq, void *cookie)
{
int ret;
u32 intr;
int loopcount = WL1271_IRQ_MAX_LOOPS;
struct wl1271 *wl = (struct wl1271 *)cookie;
bool done = false;
unsigned int defer_count;
unsigned long flags;
struct wl1271 *wl =
container_of(work, struct wl1271, irq_work);
/* TX might be handled here, avoid redundant work */
set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
cancel_work_sync(&wl->tx_work);
mutex_lock(&wl->mutex);
@ -653,26 +681,27 @@ static void wl1271_irq_work(struct work_struct *work)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
ret = wl1271_ps_elp_wakeup(wl, true);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
spin_lock_irqsave(&wl->wl_lock, flags);
while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) {
clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
loopcount--;
while (!done && loopcount--) {
/*
* In order to avoid a race with the hardirq, clear the flag
* before acknowledging the chip. Since the mutex is held,
* wl1271_ps_elp_wakeup cannot be called concurrently.
*/
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_clear_bit();
wl1271_fw_status(wl, wl->fw_status);
intr = le32_to_cpu(wl->fw_status->common.intr);
intr &= WL1271_INTR_MASK;
if (!intr) {
wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
spin_lock_irqsave(&wl->wl_lock, flags);
done = true;
continue;
}
intr &= WL1271_INTR_MASK;
if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
wl1271_error("watchdog interrupt received! "
"starting recovery.");
@ -682,25 +711,35 @@ static void wl1271_irq_work(struct work_struct *work)
goto out;
}
if (intr & WL1271_ACX_INTR_DATA) {
if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
wl1271_rx(wl, &wl->fw_status->common);
/* Check if any tx blocks were freed */
spin_lock_irqsave(&wl->wl_lock, flags);
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
wl->tx_queue_count) {
spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
* In order to avoid starvation of the TX path,
* call the work function directly.
*/
wl1271_tx_work_locked(wl);
} else {
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
/* check for tx results */
if (wl->fw_status->common.tx_results_counter !=
(wl->tx_results_count & 0xff))
wl1271_tx_complete(wl);
/* Check if any tx blocks were freed */
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
wl->tx_queue_count) {
/*
* In order to avoid starvation of the TX path,
* call the work function directly.
*/
wl1271_tx_work_locked(wl);
}
wl1271_rx(wl, &wl->fw_status->common);
/* Make sure the deferred queues don't get too long */
defer_count = skb_queue_len(&wl->deferred_tx_queue) +
skb_queue_len(&wl->deferred_rx_queue);
if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
wl1271_flush_deferred_work(wl);
}
if (intr & WL1271_ACX_INTR_EVENT_A) {
@ -719,21 +758,24 @@ static void wl1271_irq_work(struct work_struct *work)
if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
spin_lock_irqsave(&wl->wl_lock, flags);
}
if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
ieee80211_queue_work(wl->hw, &wl->irq_work);
else
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
wl1271_ps_elp_sleep(wl);
out:
spin_lock_irqsave(&wl->wl_lock, flags);
/* In case TX was not handled here, queue TX work */
clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
wl->tx_queue_count)
ieee80211_queue_work(wl->hw, &wl->tx_work);
spin_unlock_irqrestore(&wl->wl_lock, flags);
mutex_unlock(&wl->mutex);
return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(wl1271_irq);
static int wl1271_fetch_firmware(struct wl1271 *wl)
{
@ -974,7 +1016,6 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
irq_disable:
wl1271_disable_interrupts(wl);
mutex_unlock(&wl->mutex);
/* Unlocking the mutex in the middle of handling is
inherently unsafe. In this case we deem it safe to do,
@ -983,7 +1024,9 @@ irq_disable:
work function will not do anything.) Also, any other
possible concurrent operations will fail due to the
current state, hence the wl1271 struct should be safe. */
cancel_work_sync(&wl->irq_work);
wl1271_disable_interrupts(wl);
wl1271_flush_deferred_work(wl);
cancel_work_sync(&wl->netstack_work);
mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
@ -1010,14 +1053,15 @@ int __wl1271_plt_stop(struct wl1271 *wl)
goto out;
}
wl1271_disable_interrupts(wl);
wl1271_power_off(wl);
wl->state = WL1271_STATE_OFF;
wl->rx_counter = 0;
mutex_unlock(&wl->mutex);
cancel_work_sync(&wl->irq_work);
wl1271_disable_interrupts(wl);
wl1271_flush_deferred_work(wl);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->recovery_work);
mutex_lock(&wl->mutex);
out:
@ -1041,7 +1085,13 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
int q;
u8 hlid = 0;
q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
if (wl->bss_type == BSS_TYPE_AP_BSS)
hlid = wl1271_tx_get_hlid(skb);
spin_lock_irqsave(&wl->wl_lock, flags);
wl->tx_queue_count++;
/*
@ -1054,12 +1104,8 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
}
spin_unlock_irqrestore(&wl->wl_lock, flags);
/* queue the packet */
q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
if (wl->bss_type == BSS_TYPE_AP_BSS) {
hlid = wl1271_tx_get_hlid(skb);
wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
} else {
@ -1071,8 +1117,11 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* before that, the tx_work will not be initialized!
*/
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
!test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
ieee80211_queue_work(wl->hw, &wl->tx_work);
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
static struct notifier_block wl1271_dev_notifier = {
@ -1169,7 +1218,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
break;
irq_disable:
wl1271_disable_interrupts(wl);
mutex_unlock(&wl->mutex);
/* Unlocking the mutex in the middle of handling is
inherently unsafe. In this case we deem it safe to do,
@ -1178,7 +1226,9 @@ irq_disable:
work function will not do anything.) Also, any other
possible concurrent operations will fail due to the
current state, hence the wl1271 struct should be safe. */
cancel_work_sync(&wl->irq_work);
wl1271_disable_interrupts(wl);
wl1271_flush_deferred_work(wl);
cancel_work_sync(&wl->netstack_work);
mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
@ -1244,12 +1294,12 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
wl->state = WL1271_STATE_OFF;
wl1271_disable_interrupts(wl);
mutex_unlock(&wl->mutex);
wl1271_disable_interrupts(wl);
wl1271_flush_deferred_work(wl);
cancel_delayed_work_sync(&wl->scan_complete_work);
cancel_work_sync(&wl->irq_work);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->tx_work);
cancel_delayed_work_sync(&wl->pspoll_work);
cancel_delayed_work_sync(&wl->elp_work);
@ -1525,7 +1575,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@ -1681,7 +1731,7 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@ -1910,7 +1960,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto out_unlock;
}
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_unlock;
@ -2013,7 +2063,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
goto out;
}
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@ -2039,7 +2089,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@ -2067,7 +2117,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@ -2546,7 +2596,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@ -2601,7 +2651,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
conf_tid->apsd_conf[0] = 0;
conf_tid->apsd_conf[1] = 0;
} else {
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@ -2647,7 +2697,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@ -2736,7 +2786,7 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_free_sta;
@ -2779,7 +2829,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@ -2812,7 +2862,7 @@ int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@ -3176,7 +3226,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
if (wl->state == WL1271_STATE_OFF)
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@ -3376,9 +3426,12 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
for (j = 0; j < AP_MAX_LINKS; j++)
skb_queue_head_init(&wl->links[j].tx_queue[i]);
skb_queue_head_init(&wl->deferred_rx_queue);
skb_queue_head_init(&wl->deferred_tx_queue);
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
INIT_WORK(&wl->irq_work, wl1271_irq_work);
INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
@ -3404,6 +3457,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
wl->last_tx_hlid = 0;
wl->ap_ps_map = 0;
wl->ap_fw_ps_map = 0;
wl->quirks = 0;
memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)

View File

@ -69,7 +69,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
}
}
int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
int wl1271_ps_elp_wakeup(struct wl1271 *wl)
{
DECLARE_COMPLETION_ONSTACK(compl);
unsigned long flags;
@ -87,7 +87,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
* the completion variable in one entity.
*/
spin_lock_irqsave(&wl->wl_lock, flags);
if (work_pending(&wl->irq_work) || chip_awake)
if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
pending = true;
else
wl->elp_compl = &compl;
@ -149,7 +149,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
case STATION_ACTIVE_MODE:
default:
wl1271_debug(DEBUG_PSM, "leaving psm");
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
return ret;

View File

@ -30,7 +30,7 @@
int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
u32 rates, bool send);
void wl1271_ps_elp_sleep(struct wl1271 *wl);
int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
int wl1271_ps_elp_wakeup(struct wl1271 *wl);
void wl1271_elp_work(struct work_struct *work);
void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);

View File

@ -129,7 +129,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
skb_trim(skb, skb->len - desc->pad_len);
ieee80211_rx_ni(wl->hw, skb);
skb_queue_tail(&wl->deferred_rx_queue, skb);
ieee80211_queue_work(wl->hw, &wl->netstack_work);
return 0;
}
@ -198,7 +199,13 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
pkt_offset += pkt_length;
}
}
wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
/*
* Write the driver's packet counter to the FW. This is only required
* for older hardware revisions
*/
if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
}
void wl1271_set_default_filters(struct wl1271 *wl)

View File

@ -27,6 +27,7 @@
#include "cmd.h"
#include "scan.h"
#include "acx.h"
#include "ps.h"
void wl1271_scan_complete_work(struct work_struct *work)
{
@ -40,10 +41,11 @@ void wl1271_scan_complete_work(struct work_struct *work)
mutex_lock(&wl->mutex);
if (wl->scan.state == WL1271_SCAN_STATE_IDLE) {
mutex_unlock(&wl->mutex);
return;
}
if (wl->state == WL1271_STATE_OFF)
goto out;
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
wl->scan.state = WL1271_SCAN_STATE_IDLE;
kfree(wl->scan.scanned_ch);
@ -52,13 +54,19 @@ void wl1271_scan_complete_work(struct work_struct *work)
ieee80211_scan_completed(wl->hw, false);
/* restore hardware connection monitoring template */
if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
if (wl1271_ps_elp_wakeup(wl) == 0) {
wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
wl1271_ps_elp_sleep(wl);
}
}
if (wl->scan.failed) {
wl1271_info("Scan completed due to error.");
ieee80211_queue_work(wl->hw, &wl->recovery_work);
}
out:
mutex_unlock(&wl->mutex);
}

View File

@ -28,6 +28,7 @@
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/gpio.h>
#include <linux/wl12xx.h>
#include <linux/pm_runtime.h>
@ -60,7 +61,7 @@ static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
return &(wl_to_func(wl)->dev);
}
static irqreturn_t wl1271_irq(int irq, void *cookie)
static irqreturn_t wl1271_hardirq(int irq, void *cookie)
{
struct wl1271 *wl = cookie;
unsigned long flags;
@ -69,17 +70,14 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
/* complete the ELP completion */
spin_lock_irqsave(&wl->wl_lock, flags);
set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
if (wl->elp_compl) {
complete(wl->elp_compl);
wl->elp_compl = NULL;
}
if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
ieee80211_queue_work(wl->hw, &wl->irq_work);
set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
return IRQ_HANDLED;
return IRQ_WAKE_THREAD;
}
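The handler above is only the hard IRQ half: it wakes the threaded handler so that wl1271_irq() does the actual chip servicing in process context. A minimal sketch of how the two halves are registered (the same call appears in the probe hunk further down):
/* Sketch: IRQF_ONESHOT keeps the line masked until the thread finishes */
ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
			   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
			   DRIVER_NAME, wl);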
static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
@ -106,8 +104,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
int ret;
struct sdio_func *func = wl_to_func(wl);
sdio_claim_host(func);
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
@ -123,8 +119,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
}
sdio_release_host(func);
if (ret)
wl1271_error("sdio read failed (%d)", ret);
}
@ -135,8 +129,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
int ret;
struct sdio_func *func = wl_to_func(wl);
sdio_claim_host(func);
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
@ -152,8 +144,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
ret = sdio_memcpy_toio(func, addr, buf, len);
}
sdio_release_host(func);
if (ret)
wl1271_error("sdio write failed (%d)", ret);
}
@ -163,14 +153,18 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
struct sdio_func *func = wl_to_func(wl);
int ret;
/* Power up the card */
/* Make sure the card will not be powered off by runtime PM */
ret = pm_runtime_get_sync(&func->dev);
if (ret < 0)
goto out;
/* Runtime PM might be disabled, so power up the card manually */
ret = mmc_power_restore_host(func->card->host);
if (ret < 0)
goto out;
sdio_claim_host(func);
sdio_enable_func(func);
sdio_release_host(func);
out:
return ret;
@ -179,12 +173,17 @@ out:
static int wl1271_sdio_power_off(struct wl1271 *wl)
{
struct sdio_func *func = wl_to_func(wl);
int ret;
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
/* Power down the card */
/* Runtime PM might be disabled, so power off the card manually */
ret = mmc_power_save_host(func->card->host);
if (ret < 0)
return ret;
/* Let runtime PM know the card is powered off */
return pm_runtime_put_sync(&func->dev);
}
@ -241,14 +240,14 @@ static int __devinit wl1271_probe(struct sdio_func *func,
wl->irq = wlan_data->irq;
wl->ref_clock = wlan_data->board_ref_clock;
ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
DRIVER_NAME, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
goto out_free;
}
set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
disable_irq(wl->irq);
ret = wl1271_init_ieee80211(wl);
@ -271,7 +270,6 @@ static int __devinit wl1271_probe(struct sdio_func *func,
out_irq:
free_irq(wl->irq, wl);
out_free:
wl1271_free_hw(wl);

View File

@ -320,28 +320,23 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
spi_sync(wl_to_spi(wl), &m);
}
static irqreturn_t wl1271_irq(int irq, void *cookie)
static irqreturn_t wl1271_hardirq(int irq, void *cookie)
{
struct wl1271 *wl;
struct wl1271 *wl = cookie;
unsigned long flags;
wl1271_debug(DEBUG_IRQ, "IRQ");
wl = cookie;
/* complete the ELP completion */
spin_lock_irqsave(&wl->wl_lock, flags);
set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
if (wl->elp_compl) {
complete(wl->elp_compl);
wl->elp_compl = NULL;
}
if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
ieee80211_queue_work(wl->hw, &wl->irq_work);
set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
return IRQ_HANDLED;
return IRQ_WAKE_THREAD;
}
static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
@ -413,14 +408,14 @@ static int __devinit wl1271_probe(struct spi_device *spi)
goto out_free;
}
ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
DRIVER_NAME, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
goto out_free;
}
set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
disable_irq(wl->irq);
ret = wl1271_init_ieee80211(wl);

View File

@ -464,7 +464,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
while ((skb = wl1271_skb_dequeue(wl))) {
if (!woken_up) {
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_ack;
woken_up = true;
@ -506,8 +506,14 @@ out_ack:
sent_packets = true;
}
if (sent_packets) {
/* interrupt the firmware with the new packets */
wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
/*
* Interrupt the firmware with the new packets. This is only
* required for older hardware revisions
*/
if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
wl1271_write32(wl, WL1271_HOST_WR_ACCESS,
wl->tx_packets_count);
wl1271_handle_tx_low_watermark(wl);
}
@ -583,7 +589,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
result->rate_class_index, result->status);
/* return the packet to the stack */
ieee80211_tx_status(wl->hw, skb);
skb_queue_tail(&wl->deferred_tx_queue, skb);
ieee80211_queue_work(wl->hw, &wl->netstack_work);
wl1271_free_tx_id(wl, result->id);
}
@ -687,16 +694,30 @@ void wl1271_tx_reset(struct wl1271 *wl)
*/
wl1271_handle_tx_low_watermark(wl);
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
if (wl->tx_frames[i] != NULL) {
skb = wl->tx_frames[i];
wl1271_free_tx_id(wl, i);
wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
info = IEEE80211_SKB_CB(skb);
info->status.rates[0].idx = -1;
info->status.rates[0].count = 0;
ieee80211_tx_status(wl->hw, skb);
for (i = 0; i < ACX_TX_DESCRIPTORS; i++) {
if (wl->tx_frames[i] == NULL)
continue;
skb = wl->tx_frames[i];
wl1271_free_tx_id(wl, i);
wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
/* Remove private headers before passing the skb to mac80211 */
info = IEEE80211_SKB_CB(skb);
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
if (info->control.hw_key &&
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data,
hdrlen);
skb_pull(skb, WL1271_TKIP_IV_SPACE);
}
info->status.rates[0].idx = -1;
info->status.rates[0].count = 0;
ieee80211_tx_status(wl->hw, skb);
}
}
#define WL1271_TX_FLUSH_TIMEOUT 500000

View File

@ -130,10 +130,10 @@ extern u32 wl12xx_debug_level;
#define WL1271_FW_NAME "wl1271-fw-2.bin"
#define WL1271_AP_FW_NAME "wl1271-fw-ap.bin"
#define WL1271_FW_NAME "ti-connectivity/wl1271-fw-2.bin"
#define WL1271_AP_FW_NAME "ti-connectivity/wl1271-fw-ap.bin"
#define WL1271_NVS_NAME "wl1271-nvs.bin"
#define WL1271_NVS_NAME "ti-connectivity/wl1271-nvs.bin"
#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
@ -317,10 +317,10 @@ enum wl12xx_flags {
WL1271_FLAG_JOINED,
WL1271_FLAG_GPIO_POWER,
WL1271_FLAG_TX_QUEUE_STOPPED,
WL1271_FLAG_TX_PENDING,
WL1271_FLAG_IN_ELP,
WL1271_FLAG_PSM,
WL1271_FLAG_PSM_REQUESTED,
WL1271_FLAG_IRQ_PENDING,
WL1271_FLAG_IRQ_RUNNING,
WL1271_FLAG_IDLE,
WL1271_FLAG_IDLE_REQUESTED,
@ -404,6 +404,12 @@ struct wl1271 {
struct sk_buff_head tx_queue[NUM_TX_QUEUES];
int tx_queue_count;
/* Frames received, not handled yet by mac80211 */
struct sk_buff_head deferred_rx_queue;
/* Frames sent, not returned yet to mac80211 */
struct sk_buff_head deferred_tx_queue;
struct work_struct tx_work;
/* Pending TX frames */
@ -424,8 +430,8 @@ struct wl1271 {
/* Intermediate buffer, used for packet aggregation */
u8 *aggr_buf;
/* The target interrupt mask */
struct work_struct irq_work;
/* Network stack work */
struct work_struct netstack_work;
/* Hardware recovery work */
struct work_struct recovery_work;
@ -535,6 +541,9 @@ struct wl1271 {
/* AP-mode - a bitmap of links currently in PS mode in mac80211 */
unsigned long ap_ps_map;
/* Quirks of specific hardware revisions */
unsigned int quirks;
};
struct wl1271_station {
@ -553,6 +562,8 @@ int wl1271_plt_stop(struct wl1271 *wl);
#define WL1271_TX_QUEUE_LOW_WATERMARK 10
#define WL1271_TX_QUEUE_HIGH_WATERMARK 25
#define WL1271_DEFERRED_QUEUE_LIMIT 64
/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
on in case it has been shut down shortly before */
#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
@ -562,4 +573,9 @@ int wl1271_plt_stop(struct wl1271 *wl);
#define HW_BG_RATES_MASK 0xffff
#define HW_HT_RATES_OFFSET 16
/* Quirks */
/* Each RX/TX transaction requires an end-of-transaction transfer */
#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0)
#endif
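A compact sketch of how this quirk bit is meant to be used; both halves appear in other hunks of this diff (boot.c sets it for parts with a PG major version below 3, rx.c and tx.c test it before issuing the extra end-of-transaction write):
/* boot path: flag affected hardware revisions */
if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3)
	wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;

/* data path: only older revisions need the closing register write */
if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
	wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);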

View File

@ -55,7 +55,6 @@
/* This really should be 8, but not for our firmware */
#define MAX_SUPPORTED_RATES 32
#define COUNTRY_STRING_LEN 3
#define MAX_COUNTRY_TRIPLETS 32
/* Headers */
@ -99,7 +98,7 @@ struct country_triplet {
struct wl12xx_ie_country {
struct wl12xx_ie_header header;
u8 country_string[COUNTRY_STRING_LEN];
u8 country_string[IEEE80211_COUNTRY_STRING_LEN];
struct country_triplet triplets[MAX_COUNTRY_TRIPLETS];
} __packed;

View File

@ -1325,6 +1325,9 @@ enum {
/* Although the spec says 8 I'm seeing 6 in practice */
#define IEEE80211_COUNTRY_IE_MIN_LEN 6
/* The Country String field of the element shall be 3 octets in length */
#define IEEE80211_COUNTRY_STRING_LEN 3
/*
* For regulatory extension stuff see IEEE 802.11-2007
* Annex I (page 1141) and Annex J (page 1147). Also

View File

@ -415,6 +415,17 @@ struct hci_cp_io_capability_reply {
__u8 authentication;
} __packed;
#define HCI_OP_USER_CONFIRM_REPLY 0x042c
struct hci_cp_user_confirm_reply {
bdaddr_t bdaddr;
} __packed;
struct hci_rp_user_confirm_reply {
__u8 status;
bdaddr_t bdaddr;
} __packed;
#define HCI_OP_USER_CONFIRM_NEG_REPLY 0x042d
#define HCI_OP_IO_CAPABILITY_NEG_REPLY 0x0434
struct hci_cp_io_capability_neg_reply {
bdaddr_t bdaddr;
@ -936,6 +947,12 @@ struct hci_ev_io_capa_reply {
__u8 authentication;
} __packed;
#define HCI_EV_USER_CONFIRM_REQUEST 0x33
struct hci_ev_user_confirm_req {
bdaddr_t bdaddr;
__le32 passkey;
} __packed;
#define HCI_EV_SIMPLE_PAIR_COMPLETE 0x36
struct hci_ev_simple_pair_complete {
__u8 status;

View File

@ -248,6 +248,10 @@ struct hci_conn {
void *priv;
struct hci_conn *link;
void (*connect_cfm_cb) (struct hci_conn *conn, u8 status);
void (*security_cfm_cb) (struct hci_conn *conn, u8 status);
void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason);
};
extern struct hci_proto *hci_proto[];
@ -571,6 +575,9 @@ static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
hp = hci_proto[HCI_PROTO_SCO];
if (hp && hp->connect_cfm)
hp->connect_cfm(conn, status);
if (conn->connect_cfm_cb)
conn->connect_cfm_cb(conn, status);
}
static inline int hci_proto_disconn_ind(struct hci_conn *conn)
@ -600,6 +607,9 @@ static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
hp = hci_proto[HCI_PROTO_SCO];
if (hp && hp->disconn_cfm)
hp->disconn_cfm(conn, reason);
if (conn->disconn_cfm_cb)
conn->disconn_cfm_cb(conn, reason);
}
static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
@ -619,6 +629,9 @@ static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
hp = hci_proto[HCI_PROTO_SCO];
if (hp && hp->security_cfm)
hp->security_cfm(conn, status, encrypt);
if (conn->security_cfm_cb)
conn->security_cfm_cb(conn, status);
}
static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
@ -632,6 +645,9 @@ static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u
hp = hci_proto[HCI_PROTO_SCO];
if (hp && hp->security_cfm)
hp->security_cfm(conn, status, encrypt);
if (conn->security_cfm_cb)
conn->security_cfm_cb(conn, status);
}
int hci_register_proto(struct hci_proto *hproto);
@ -746,6 +762,11 @@ int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status);
int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr);
int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value);
int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status);
int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr,
u8 status);
int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status);
/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)
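The per-connection callbacks added above let a caller track a single link without registering a full hci_proto. A minimal wiring sketch; the callback bodies and the pairing context are assumptions:
	static void pairing_connect_cfm(struct hci_conn *conn, u8 status)
	{
		/* resolve the pending pairing request stored in conn->priv */
	}

	static void pairing_disconn_cfm(struct hci_conn *conn, u8 reason)
	{
		/* fail the pending request if the link drops mid-pairing */
	}

	/* when a pairing attempt is started on an existing connection */
	conn->priv = cmd;			/* illustrative per-request context */
	conn->connect_cfm_cb = pairing_connect_cfm;
	conn->security_cfm_cb = NULL;		/* set only the hooks that are needed */
	conn->disconn_cfm_cb = pairing_disconn_cfm;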

View File

@ -21,11 +21,13 @@
SOFTWARE IS DISCLAIMED.
*/
#define MGMT_INDEX_NONE 0xFFFF
struct mgmt_hdr {
__le16 opcode;
__le16 index;
__le16 len;
} __packed;
#define MGMT_HDR_SIZE 4
#define MGMT_OP_READ_VERSION 0x0001
struct mgmt_rp_read_version {
@ -40,11 +42,7 @@ struct mgmt_rp_read_index_list {
} __packed;
#define MGMT_OP_READ_INFO 0x0004
struct mgmt_cp_read_info {
__le16 index;
} __packed;
struct mgmt_rp_read_info {
__le16 index;
__u8 type;
__u8 powered;
__u8 connectable;
@ -60,7 +58,6 @@ struct mgmt_rp_read_info {
} __packed;
struct mgmt_mode {
__le16 index;
__u8 val;
} __packed;
@ -74,27 +71,23 @@ struct mgmt_mode {
#define MGMT_OP_ADD_UUID 0x0009
struct mgmt_cp_add_uuid {
__le16 index;
__u8 uuid[16];
__u8 svc_hint;
} __packed;
#define MGMT_OP_REMOVE_UUID 0x000A
struct mgmt_cp_remove_uuid {
__le16 index;
__u8 uuid[16];
} __packed;
#define MGMT_OP_SET_DEV_CLASS 0x000B
struct mgmt_cp_set_dev_class {
__le16 index;
__u8 major;
__u8 minor;
} __packed;
#define MGMT_OP_SET_SERVICE_CACHE 0x000C
struct mgmt_cp_set_service_cache {
__le16 index;
__u8 enable;
} __packed;
@ -107,7 +100,6 @@ struct mgmt_key_info {
#define MGMT_OP_LOAD_KEYS 0x000D
struct mgmt_cp_load_keys {
__le16 index;
__u8 debug_keys;
__le16 key_count;
struct mgmt_key_info keys[0];
@ -115,51 +107,66 @@ struct mgmt_cp_load_keys {
#define MGMT_OP_REMOVE_KEY 0x000E
struct mgmt_cp_remove_key {
__le16 index;
bdaddr_t bdaddr;
__u8 disconnect;
} __packed;
#define MGMT_OP_DISCONNECT 0x000F
struct mgmt_cp_disconnect {
__le16 index;
bdaddr_t bdaddr;
} __packed;
struct mgmt_rp_disconnect {
__le16 index;
bdaddr_t bdaddr;
} __packed;
#define MGMT_OP_GET_CONNECTIONS 0x0010
struct mgmt_cp_get_connections {
__le16 index;
} __packed;
struct mgmt_rp_get_connections {
__le16 index;
__le16 conn_count;
bdaddr_t conn[0];
} __packed;
#define MGMT_OP_PIN_CODE_REPLY 0x0011
struct mgmt_cp_pin_code_reply {
__le16 index;
bdaddr_t bdaddr;
__u8 pin_len;
__u8 pin_code[16];
} __packed;
struct mgmt_rp_pin_code_reply {
bdaddr_t bdaddr;
uint8_t status;
} __packed;
#define MGMT_OP_PIN_CODE_NEG_REPLY 0x0012
struct mgmt_cp_pin_code_neg_reply {
__le16 index;
bdaddr_t bdaddr;
} __packed;
#define MGMT_OP_SET_IO_CAPABILITY 0x0013
struct mgmt_cp_set_io_capability {
__le16 index;
__u8 io_capability;
} __packed;
#define MGMT_OP_PAIR_DEVICE 0x0014
struct mgmt_cp_pair_device {
bdaddr_t bdaddr;
__u8 io_cap;
} __packed;
struct mgmt_rp_pair_device {
bdaddr_t bdaddr;
__u8 status;
} __packed;
#define MGMT_OP_USER_CONFIRM_REPLY 0x0015
struct mgmt_cp_user_confirm_reply {
bdaddr_t bdaddr;
} __packed;
struct mgmt_rp_user_confirm_reply {
bdaddr_t bdaddr;
__u8 status;
} __packed;
#define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x0016
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@ -174,19 +181,12 @@ struct mgmt_ev_cmd_status {
#define MGMT_EV_CONTROLLER_ERROR 0x0003
struct mgmt_ev_controller_error {
__le16 index;
__u8 error_code;
} __packed;
#define MGMT_EV_INDEX_ADDED 0x0004
struct mgmt_ev_index_added {
__le16 index;
} __packed;
#define MGMT_EV_INDEX_REMOVED 0x0005
struct mgmt_ev_index_removed {
__le16 index;
} __packed;
#define MGMT_EV_POWERED 0x0006
@ -198,32 +198,39 @@ struct mgmt_ev_index_removed {
#define MGMT_EV_NEW_KEY 0x000A
struct mgmt_ev_new_key {
__le16 index;
struct mgmt_key_info key;
__u8 old_key_type;
} __packed;
#define MGMT_EV_CONNECTED 0x000B
struct mgmt_ev_connected {
__le16 index;
bdaddr_t bdaddr;
} __packed;
#define MGMT_EV_DISCONNECTED 0x000C
struct mgmt_ev_disconnected {
__le16 index;
bdaddr_t bdaddr;
} __packed;
#define MGMT_EV_CONNECT_FAILED 0x000D
struct mgmt_ev_connect_failed {
__le16 index;
bdaddr_t bdaddr;
__u8 status;
} __packed;
#define MGMT_EV_PIN_CODE_REQUEST 0x000E
struct mgmt_ev_pin_code_request {
__le16 index;
bdaddr_t bdaddr;
} __packed;
#define MGMT_EV_USER_CONFIRM_REQUEST 0x000F
struct mgmt_ev_user_confirm_request {
bdaddr_t bdaddr;
__le32 value;
} __packed;
#define MGMT_EV_AUTH_FAILED 0x0010
struct mgmt_ev_auth_failed {
bdaddr_t bdaddr;
__u8 status;
} __packed;

View File

@ -221,6 +221,13 @@ config LRU_CACHE
tristate
config AVERAGE
bool
bool "Averaging functions"
help
This option is provided for the case where no in-kernel-tree
modules require averaging functions, but a module built outside
the kernel tree does. Modules that use the library averaging
functions require Y here.
If unsure, say N.
endmenu
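For context, this option builds the small EWMA helpers declared in <linux/average.h>. A hedged usage sketch; the factor and weight values are illustrative and, as far as I recall, must be powers of two:
	#include <linux/average.h>

	struct ewma avg_signal;

	ewma_init(&avg_signal, 1024, 8);	/* precision factor, weight */
	ewma_add(&avg_signal, 42);		/* feed samples as they arrive */
	ewma_add(&avg_signal, 40);
	pr_debug("avg signal %lu\n", ewma_read(&avg_signal));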

View File

@ -550,10 +550,8 @@ static int __init bt_init(void)
goto error;
err = l2cap_init();
if (err < 0) {
hci_sock_cleanup();
if (err < 0)
goto sock_err;
}
err = sco_init();
if (err < 0) {

View File

@ -286,6 +286,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
conn->state = BT_OPEN;
conn->auth_type = HCI_AT_GENERAL_BONDING;
conn->io_capability = hdev->io_capability;
conn->remote_auth = 0xff;
conn->power_save = 1;
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@ -429,10 +430,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
if (type == LE_LINK) {
le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
if (le)
return ERR_PTR(-EBUSY);
le = hci_conn_add(hdev, LE_LINK, dst);
if (!le)
le = hci_conn_add(hdev, LE_LINK, dst);
if (!le)
return NULL;
return ERR_PTR(-ENOMEM);
if (le->state == BT_OPEN)
hci_le_connect(le);

View File

@ -796,6 +796,29 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
}
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (test_bit(HCI_MGMT, &hdev->flags))
mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr,
rp->status);
}
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
BT_DBG("%s status 0x%x", hdev->name, rp->status);
if (test_bit(HCI_MGMT, &hdev->flags))
mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr,
rp->status);
}
static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
BT_DBG("%s status 0x%x", hdev->name, status);
@ -1401,8 +1424,10 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
if (!ev->status) {
conn->link_mode |= HCI_LM_AUTH;
conn->sec_level = conn->pending_sec_level;
} else
} else {
mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
conn->sec_level = BT_SECURITY_LOW;
}
clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
@ -1728,6 +1753,14 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_cc_le_read_buffer_size(hdev, skb);
break;
case HCI_OP_USER_CONFIRM_REPLY:
hci_cc_user_confirm_reply(hdev, skb);
break;
case HCI_OP_USER_CONFIRM_NEG_REPLY:
hci_cc_user_confirm_neg_reply(hdev, skb);
break;
default:
BT_DBG("%s opcode 0x%x", hdev->name, opcode);
break;
@ -2362,6 +2395,21 @@ unlock:
hci_dev_unlock(hdev);
}
static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
struct hci_ev_user_confirm_req *ev = (void *) skb->data;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
if (test_bit(HCI_MGMT, &hdev->flags))
mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey);
hci_dev_unlock(hdev);
}
static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
@ -2372,9 +2420,20 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (conn)
hci_conn_put(conn);
if (!conn)
goto unlock;
/* To avoid duplicate auth_failed events to user space we check
* the HCI_CONN_AUTH_PEND flag which will be set if we
* initiated the authentication. A traditional auth_complete
* event is always produced as initiator and is also mapped to
* the mgmt_auth_failed event */
if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0)
mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
hci_conn_put(conn);
unlock:
hci_dev_unlock(hdev);
}
@ -2580,6 +2639,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_io_capa_reply_evt(hdev, skb);
break;
case HCI_EV_USER_CONFIRM_REQUEST:
hci_user_confirm_request_evt(hdev, skb);
break;
case HCI_EV_SIMPLE_PAIR_COMPLETE:
hci_simple_pair_complete_evt(hdev, skb);
break;

View File

@ -861,7 +861,7 @@ error:
return err;
}
void __exit hci_sock_cleanup(void)
void hci_sock_cleanup(void)
{
if (bt_sock_unregister(BTPROTO_HCI) < 0)
BT_ERR("HCI socket unregistration failed");

View File

@ -852,8 +852,6 @@ int l2cap_do_connect(struct sock *sk)
hci_dev_lock_bh(hdev);
err = -ENOMEM;
auth_type = l2cap_get_auth_type(sk);
if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
@ -863,17 +861,18 @@ int l2cap_do_connect(struct sock *sk)
hcon = hci_connect(hdev, ACL_LINK, dst,
l2cap_pi(sk)->sec_level, auth_type);
if (!hcon)
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
}
conn = l2cap_conn_add(hcon, 0);
if (!conn) {
hci_conn_put(hcon);
err = -ENOMEM;
goto done;
}
err = 0;
/* Update source addr of the socket */
bacpy(src, conn->src);
@ -892,6 +891,8 @@ int l2cap_do_connect(struct sock *sk)
l2cap_do_start(sk);
}
err = 0;
done:
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
@ -4033,8 +4034,6 @@ int __init l2cap_init(void)
BT_ERR("Failed to create L2CAP debug file");
}
BT_INFO("L2CAP socket layer initialized");
return 0;
error:

File diff suppressed because it is too large

View File

@ -190,20 +190,21 @@ static int sco_connect(struct sock *sk)
hci_dev_lock_bh(hdev);
err = -ENOMEM;
if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
else
type = SCO_LINK;
hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
if (!hcon)
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
}
conn = sco_conn_add(hcon, 0);
if (!conn) {
hci_conn_put(hcon);
err = -ENOMEM;
goto done;
}

View File

@ -21,7 +21,6 @@
#define WEP_IV_LEN 4
#define WEP_ICV_LEN 4
#define ALG_TKIP_KEY_LEN 32
#define ALG_CCMP_KEY_LEN 16
#define CCMP_HDR_LEN 8
#define CCMP_MIC_LEN 8

View File

@ -380,6 +380,9 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
trace_api_restart_hw(local);
wiphy_info(hw->wiphy,
"Hardware restart was requested\n");
/* use this reason, ieee80211_reconfig will unblock it */
ieee80211_stop_queues_by_reason(hw,
IEEE80211_QUEUE_STOP_REASON_SUSPEND);

View File

@ -415,10 +415,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
mi->sample_count--;
}
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
mi->sample_packets += info->status.ampdu_len;
minstrel_next_sample_idx(mi);
}
for (i = 0; !last; i++) {
last = (i == IEEE80211_TX_MAX_RATES - 1) ||
@ -519,9 +517,7 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
rate->count = mr->retry_count;
rate->flags = IEEE80211_TX_RC_MCS | group->flags;
if (txrc->short_preamble)
rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
if (txrc->rts || rtscts)
if (rtscts)
rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES;
}
@ -553,13 +549,14 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
sample_idx = sample_table[mg->column][mg->index];
mr = &mg->rates[sample_idx];
sample_idx += mi->sample_group * MCS_GROUP_RATES;
minstrel_next_sample_idx(mi);
/*
* When not using MRR, do not sample if the probability is already
* higher than 95% to avoid wasting airtime
*/
if (!mp->has_mrr && (mr->probability > MINSTREL_FRAC(95, 100)))
goto next;
return -1;
/*
* Make sure that lower rates get sampled only occasionally,
@ -568,17 +565,13 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
if (minstrel_get_duration(sample_idx) >
minstrel_get_duration(mi->max_tp_rate)) {
if (mr->sample_skipped < 20)
goto next;
return -1;
if (mi->sample_slow++ > 2)
goto next;
return -1;
}
return sample_idx;
next:
minstrel_next_sample_idx(mi);
return -1;
}
static void

View File

@ -24,9 +24,6 @@
/* Fixed point arithmetic shifting amount. */
#define RC_PID_ARITH_SHIFT 8
/* Fixed point arithmetic factor. */
#define RC_PID_ARITH_FACTOR (1 << RC_PID_ARITH_SHIFT)
/* Proportional PID component coefficient. */
#define RC_PID_COEFF_P 15
/* Integral PID component coefficient. */

View File

@ -258,10 +258,12 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
return true;
}
static bool __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
bool was_hw_scan)
{
struct ieee80211_local *local = hw_to_local(hw);
bool on_oper_chan;
bool enable_beacons = false;
lockdep_assert_held(&local->mtx);
@ -275,12 +277,12 @@ static bool __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
aborted = true;
if (WARN_ON(!local->scan_req))
return false;
return;
if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) {
int rc = drv_hw_scan(local, local->scan_sdata, local->hw_scan_req);
if (rc == 0)
return false;
return;
}
kfree(local->hw_scan_req);
@ -294,26 +296,11 @@ static bool __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
local->scanning = 0;
local->scan_channel = NULL;
return true;
}
static void __ieee80211_scan_completed_finish(struct ieee80211_hw *hw,
bool was_hw_scan)
{
struct ieee80211_local *local = hw_to_local(hw);
bool on_oper_chan;
bool enable_beacons = false;
mutex_lock(&local->mtx);
on_oper_chan = ieee80211_cfg_on_oper_channel(local);
WARN_ON(local->scanning & (SCAN_SW_SCANNING | SCAN_HW_SCANNING));
if (was_hw_scan || !on_oper_chan) {
if (WARN_ON(local->scan_channel))
local->scan_channel = NULL;
if (was_hw_scan || !on_oper_chan)
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
} else
else
/* Set power back to normal operating levels. */
ieee80211_hw_config(local, 0);
@ -331,7 +318,6 @@ static void __ieee80211_scan_completed_finish(struct ieee80211_hw *hw,
}
ieee80211_recalc_idle(local);
mutex_unlock(&local->mtx);
ieee80211_mlme_notify_scan_completed(local);
ieee80211_ibss_notify_scan_completed(local);
@ -686,12 +672,14 @@ void ieee80211_scan_work(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, scan_work.work);
struct ieee80211_sub_if_data *sdata = local->scan_sdata;
struct ieee80211_sub_if_data *sdata;
unsigned long next_delay = 0;
bool aborted, hw_scan, finish;
bool aborted, hw_scan;
mutex_lock(&local->mtx);
sdata = local->scan_sdata;
if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) {
aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning);
goto out_complete;
@ -755,17 +743,11 @@ void ieee80211_scan_work(struct work_struct *work)
} while (next_delay == 0);
ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay);
mutex_unlock(&local->mtx);
return;
goto out;
out_complete:
hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning);
finish = __ieee80211_scan_completed(&local->hw, aborted, hw_scan);
mutex_unlock(&local->mtx);
if (finish)
__ieee80211_scan_completed_finish(&local->hw, hw_scan);
return;
__ieee80211_scan_completed(&local->hw, aborted, hw_scan);
out:
mutex_unlock(&local->mtx);
}
@ -835,7 +817,6 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
void ieee80211_scan_cancel(struct ieee80211_local *local)
{
bool abortscan;
bool finish = false;
/*
* We are only canceling software scan, or deferred scan that was not
@ -855,14 +836,17 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
mutex_lock(&local->mtx);
abortscan = local->scan_req && !test_bit(SCAN_HW_SCANNING, &local->scanning);
if (abortscan)
finish = __ieee80211_scan_completed(&local->hw, true, false);
mutex_unlock(&local->mtx);
if (abortscan) {
/* The scan is canceled, but stop work from being pending */
cancel_delayed_work_sync(&local->scan_work);
/*
* The scan is canceled, but stop work from being pending.
*
* If the work is currently running, it must be blocked on
* the mutex, but we'll set scan_sdata = NULL and it'll
* simply exit once it acquires the mutex.
*/
cancel_delayed_work(&local->scan_work);
/* and clean up */
__ieee80211_scan_completed(&local->hw, true, false);
}
if (finish)
__ieee80211_scan_completed_finish(&local->hw, false);
mutex_unlock(&local->mtx);
}

View File

@ -30,7 +30,6 @@
#define IEEE80211_AUTH_MAX_TRIES 3
#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
#define IEEE80211_ASSOC_MAX_TRIES 3
#define IEEE80211_MAX_PROBE_TRIES 5
enum work_action {
WORK_ACT_MISMATCH,

View File

@ -63,6 +63,10 @@ static struct regulatory_request *last_request;
/* To trigger userspace events */
static struct platform_device *reg_pdev;
static struct device_type reg_device_type = {
.uevent = reg_device_uevent,
};
/*
* Central wireless core regulatory domains, we only need two,
* the current one and a world regulatory domain in case we have no
@ -362,16 +366,11 @@ static inline void reg_regdb_query(const char *alpha2) {}
/*
* This lets us keep regulatory code which is updated on a regulatory
* basis in userspace.
* basis in userspace. Country information is filled in by
* reg_device_uevent
*/
static int call_crda(const char *alpha2)
{
char country_env[9 + 2] = "COUNTRY=";
char *envp[] = {
country_env,
NULL
};
if (!is_world_regdom((char *) alpha2))
pr_info("Calling CRDA for country: %c%c\n",
alpha2[0], alpha2[1]);
@ -381,10 +380,7 @@ static int call_crda(const char *alpha2)
/* query internal regulatory database (if it exists) */
reg_regdb_query(alpha2);
country_env[8] = alpha2[0];
country_env[9] = alpha2[1];
return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, envp);
return kobject_uevent(&reg_pdev->dev.kobj, KOBJ_CHANGE);
}
/* Used by nl80211 before kmalloc'ing our regulatory domain */
@ -2087,6 +2083,25 @@ int set_regdom(const struct ieee80211_regdomain *rd)
return r;
}
#ifdef CONFIG_HOTPLUG
int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
if (last_request && !last_request->processed) {
if (add_uevent_var(env, "COUNTRY=%c%c",
last_request->alpha2[0],
last_request->alpha2[1]))
return -ENOMEM;
}
return 0;
}
#else
int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
return -ENODEV;
}
#endif /* CONFIG_HOTPLUG */
/* Caller must hold cfg80211_mutex */
void reg_device_remove(struct wiphy *wiphy)
{
@ -2118,6 +2133,8 @@ int __init regulatory_init(void)
if (IS_ERR(reg_pdev))
return PTR_ERR(reg_pdev);
reg_pdev->dev.type = &reg_device_type;
spin_lock_init(&reg_requests_lock);
spin_lock_init(&reg_pending_beacons_lock);

View File

@ -8,6 +8,7 @@ bool reg_is_valid_request(const char *alpha2);
int regulatory_hint_user(const char *alpha2);
int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env);
void reg_device_remove(struct wiphy *wiphy);
int __init regulatory_init(void);