dect / linux-2.6

dma-mapping: replace all DMA_64BIT_MASK macro with DMA_BIT_MASK(64)

Replace all uses of the DMA_64BIT_MASK macro with DMA_BIT_MASK(64).

Signed-off-by: Yang Hongyang <yanghy@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Authored by Yang Hongyang on 2009-04-06 19:01:13 -07:00; committed by Linus Torvalds
parent 8a59f5d252
commit 6a35528a83
79 changed files with 140 additions and 143 deletions
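
For reference, DMA_BIT_MASK(n) is the generic mask builder from <linux/dma-mapping.h>; DMA_BIT_MASK(64) evaluates to ~0ULL, the same value the removed DMA_64BIT_MASK constant carried, so the conversion is purely mechanical. Only the 64-bit constant is converted in this series; DMA_32BIT_MASK is left untouched. The sketch below shows the probe-time pattern that recurs in most of the drivers touched here; the helper name is made up for illustration and does not appear in the diff.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical probe helper illustrating the common pattern in this
 * commit: try a 64-bit streaming/coherent DMA mask first, then fall
 * back to 32 bits.  DMA_BIT_MASK(64) expands to ~0ULL, exactly the
 * value the old DMA_64BIT_MASK macro provided.
 */
static int example_setup_dma_masks(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (err)
		return err;

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err)
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);

	return err;
}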

View File

@ -307,7 +307,7 @@ static struct resource iop13xx_adma_2_resources[] = {
}
};
static u64 iop13xx_adma_dmamask = DMA_64BIT_MASK;
static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
static struct iop_adma_platform_data iop13xx_adma_0_data = {
.hw_id = 0,
.pool_size = PAGE_SIZE,
@ -331,7 +331,7 @@ static struct platform_device iop13xx_adma_0_channel = {
.resource = iop13xx_adma_0_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = (void *) &iop13xx_adma_0_data,
},
};
@ -343,7 +343,7 @@ static struct platform_device iop13xx_adma_1_channel = {
.resource = iop13xx_adma_1_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = (void *) &iop13xx_adma_1_data,
},
};
@ -355,7 +355,7 @@ static struct platform_device iop13xx_adma_2_channel = {
.resource = iop13xx_adma_2_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = (void *) &iop13xx_adma_2_data,
},
};

View File

@ -151,7 +151,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
}
};
u64 iop13xx_tpmi_mask = DMA_64BIT_MASK;
u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
static struct platform_device iop13xx_tpmi_0_device = {
.name = "iop-tpmi",
.id = 0,
@ -159,7 +159,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
.resource = iop13xx_tpmi_0_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
},
};
@ -170,7 +170,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
.resource = iop13xx_tpmi_1_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
},
};
@ -181,7 +181,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
.resource = iop13xx_tpmi_2_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
},
};
@ -192,7 +192,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
.resource = iop13xx_tpmi_3_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
},
};

View File

@ -559,7 +559,7 @@ static struct platform_device kirkwood_xor00_channel = {
.resource = kirkwood_xor00_resources,
.dev = {
.dma_mask = &kirkwood_xor_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = (void *)&kirkwood_xor00_data,
},
};
@ -585,7 +585,7 @@ static struct platform_device kirkwood_xor01_channel = {
.resource = kirkwood_xor01_resources,
.dev = {
.dma_mask = &kirkwood_xor_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = (void *)&kirkwood_xor01_data,
},
};
@ -657,7 +657,7 @@ static struct platform_device kirkwood_xor10_channel = {
.resource = kirkwood_xor10_resources,
.dev = {
.dma_mask = &kirkwood_xor_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = (void *)&kirkwood_xor10_data,
},
};
@ -683,7 +683,7 @@ static struct platform_device kirkwood_xor11_channel = {
.resource = kirkwood_xor11_resources,
.dev = {
.dma_mask = &kirkwood_xor_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = (void *)&kirkwood_xor11_data,
},
};

View File

@ -486,7 +486,7 @@ static struct platform_device orion5x_xor0_channel = {
.resource = orion5x_xor0_resources,
.dev = {
.dma_mask = &orion5x_xor_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = (void *)&orion5x_xor0_data,
},
};
@ -512,7 +512,7 @@ static struct platform_device orion5x_xor1_channel = {
.resource = orion5x_xor1_resources,
.dev = {
.dma_mask = &orion5x_xor_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = (void *)&orion5x_xor1_data,
},
};

View File

@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
.resource = iop3xx_dma_0_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = (void *) &iop3xx_dma_0_data,
},
};
@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
.resource = iop3xx_dma_1_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = (void *) &iop3xx_dma_1_data,
},
};
@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
.resource = iop3xx_aau_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
.platform_data = (void *) &iop3xx_aau_data,
},
};

View File

@ -349,7 +349,7 @@ static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
u64 sn_dma_get_required_mask(struct device *dev)
{
return DMA_64BIT_MASK;
return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);

View File

@ -644,7 +644,7 @@ static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
static int dma_fixed_dma_supported(struct device *dev, u64 mask)
{
return mask == DMA_64BIT_MASK;
return mask == DMA_BIT_MASK(64);
}
static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);

View File

@ -2405,8 +2405,8 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
int rc;
if (using_dac &&
!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
if (rc) {

View File

@ -3913,8 +3913,8 @@ static int pci_go_64(struct pci_dev *pdev)
{
int rc;
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
if (rc) {

View File

@ -584,8 +584,8 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);
if (have_64bit_bus &&
!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
if (rc) {

View File

@ -1297,8 +1297,8 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
host->iomap = iomap;
/* configure and activate the device */
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
if (rc) {

View File

@ -1372,8 +1372,8 @@ static bool DAC960_V2_EnableMemoryMailboxInterface(DAC960_Controller_T
dma_addr_t CommandMailboxDMA;
DAC960_V2_CommandStatus_T CommandStatus;
if (!pci_set_dma_mask(Controller->PCIDevice, DMA_64BIT_MASK))
Controller->BounceBufferLimit = DMA_64BIT_MASK;
if (!pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(64)))
Controller->BounceBufferLimit = DMA_BIT_MASK(64);
else if (!pci_set_dma_mask(Controller->PCIDevice, DMA_32BIT_MASK))
Controller->BounceBufferLimit = DMA_32BIT_MASK;
else

View File

@ -3637,7 +3637,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
hba[i]->pdev = pdev;
/* configure PCI DMA stuff */
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
dac = 1;
else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
dac = 0;

View File

@ -1586,9 +1586,9 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out;
#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
rc = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc) {
printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n",
pci_name(pdev));

View File

@ -829,7 +829,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev,
dev_printk(KERN_INFO, &dev->dev,
"Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n");
if (pci_set_dma_mask(dev, DMA_64BIT_MASK) &&
if (pci_set_dma_mask(dev, DMA_BIT_MASK(64)) &&
pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n");
return -ENOMEM;

View File

@ -98,13 +98,13 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
if (err)
goto err_request_regions;
err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err)
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (err)
goto err_set_dma_mask;
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err)
err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
if (err)

View File

@ -178,7 +178,7 @@ static int __init i7300_idle_ioat_selftest(u8 *ctl,
static struct device dummy_dma_dev = {
.init_name = "fallback device",
.coherent_dma_mask = DMA_64BIT_MASK,
.coherent_dma_mask = DMA_BIT_MASK(64),
.dma_mask = &dummy_dma_dev.coherent_dma_mask,
};

View File

@ -989,7 +989,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
}
if ((sizeof(dma_addr_t) > 4)) {
ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
if (ret < 0) {
printk(KERN_ERR PFX "64b DMA configuration failed\n");
goto bail2;

View File

@ -470,7 +470,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
goto bail_disable;
}
ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (ret) {
/*
* if the 64 bit setup fails, try 32 bit. Some systems
@ -496,7 +496,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
}
}
else {
ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (ret)
dev_info(&pdev->dev,
"Unable to set DMA consistent mask "

View File

@ -1016,7 +1016,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
@ -1025,7 +1025,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
goto err_free_res;
}
}
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
"consistent PCI DMA mask.\n");

View File

@ -478,12 +478,12 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
}
if ((sizeof(dma_addr_t) > 4)) {
ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
if (ret < 0) {
printk(KERN_ERR PFX "64b DMA mask configuration failed\n");
goto bail2;
}
ret = pci_set_consistent_dma_mask(pcidev, DMA_64BIT_MASK);
ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
if (ret) {
printk(KERN_ERR PFX "64b DMA consistent mask configuration failed\n");
goto bail2;

View File

@ -1534,8 +1534,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)
&& !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
ioc->name));

View File

@ -185,7 +185,7 @@ int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len)
int dma_64 = 0;
mutex_lock(&mem_lock);
if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_BIT_MASK(64))) {
dma_64 = 1;
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
mutex_unlock(&mem_lock);
@ -196,7 +196,7 @@ int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len)
addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL);
if ((sizeof(dma_addr_t) > 4) && dma_64)
if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
mutex_unlock(&mem_lock);

View File

@ -397,7 +397,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
}
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if (sizeof(dma_addr_t) > 4) {
if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
printk(KERN_INFO "%s: 64-bit DMA unavailable\n",
c->name);
else {

View File

@ -1929,8 +1929,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
/* Configure DMA attributes. */
if ((sizeof(dma_addr_t) > 4) &&
!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
pci_using_dac = 0;

View File

@ -1161,7 +1161,7 @@ static int __devinit ace_init(struct net_device *dev)
/*
* Configure DMA attributes.
*/
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
ap->pci_using_dac = 1;
} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
ap->pci_using_dac = 0;

View File

@ -7527,7 +7527,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if (CHIP_NUM(bp) == CHIP_NUM_5708)
persist_dma_mask = dma_mask = DMA_40BIT_MASK;
else
persist_dma_mask = dma_mask = DMA_64BIT_MASK;
persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
/* Configure DMA attributes. */
if (pci_set_dma_mask(pdev, dma_mask) == 0) {

View File

@ -10979,9 +10979,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
goto err_out_release;
}
if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
bp->flags |= USING_DAC_FLAG;
if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
" failed, aborting\n");
rc = -EIO;

View File

@ -5074,10 +5074,10 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
/* Configure DMA attributes. */
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
err = pci_set_consistent_dma_mask(pdev,
DMA_64BIT_MASK);
DMA_BIT_MASK(64));
if (err < 0) {
dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
"for consistent allocations\n");

View File

@ -1056,10 +1056,10 @@ static int __devinit init_one(struct pci_dev *pdev,
goto out_disable_pdev;
}
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
CH_ERR("%s: unable to obtain 64-bit DMA for "
"consistent allocations\n", pci_name(pdev));
err = -ENODEV;

View File

@ -3038,9 +3038,9 @@ static int __devinit init_one(struct pci_dev *pdev,
goto out_release_regions;
}
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
"coherent allocations\n");

View File

@ -962,8 +962,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (err)
return err;
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);

View File

@ -4763,9 +4763,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!err) {
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {

View File

@ -1154,9 +1154,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
return err;
pci_using_dac = 0;
err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!err) {
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (!err)
pci_using_dac = 1;
} else {

View File

@ -1226,10 +1226,10 @@ static int __devinit ioc3_probe(struct pci_dev *pdev,
int err, pci_using_dac;
/* Configure DMA attributes. */
err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!err) {
pci_using_dac = 1;
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err < 0) {
printk(KERN_ERR "%s: Unable to obtain 64 bit DMA "
"for consistent allocations\n", pci_name(pdev));

View File

@ -365,8 +365,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
!(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
!(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
pci_using_dac = 1;
} else {
if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||

View File

@ -4509,8 +4509,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
return err;
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);

View File

@ -1076,7 +1076,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
@ -1085,7 +1085,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_bar2;
}
}
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
"consistent PCI DMA mask.\n");

View File

@ -3792,7 +3792,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
dac_enabled = 1;
status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (status != 0) {
dac_enabled = 0;
dev_err(&pdev->dev,
@ -3804,7 +3804,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
goto abort_with_enabled;
}
(void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
(void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
&mgp->cmd_bus, GFP_KERNEL);
if (mgp->cmd == NULL)

View File

@ -1973,7 +1973,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
/* See if we can set the dma mask early on; failure is fatal. */
if (sizeof(dma_addr_t) == 8 &&
!pci_set_dma_mask(pci_dev, DMA_64BIT_MASK)) {
!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
using_dac = 1;
} else if (!pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
using_dac = 0;

View File

@ -3934,9 +3934,9 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
pci_set_master(pdev);
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
} else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
pci_using_dac = 0;
err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);

View File

@ -3726,9 +3726,9 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
}
pci_set_master(pdev);
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
set_bit(QL_DMA64, &qdev->flags);
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
} else {
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (!err)

View File

@ -2046,7 +2046,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->cp_cmd = PCIMulRW | RxChkSum;
if ((sizeof(dma_addr_t) > 4) &&
!pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac) {
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
tp->cp_cmd |= PCIDAC;
dev->features |= NETIF_F_HIGHDMA;
} else {

View File

@ -7775,11 +7775,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
return ret;
}
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
dma_flag = TRUE;
if (pci_set_consistent_dma_mask
(pdev, DMA_64BIT_MASK)) {
(pdev, DMA_BIT_MASK(64))) {
DBG_PRINT(ERR_DBG,
"Unable to obtain 64bit DMA for \
consistent allocations\n");

View File

@ -3912,9 +3912,9 @@ static int __devinit skge_probe(struct pci_dev *pdev,
pci_set_master(pdev);
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
using_dac = 1;
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
} else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
using_dac = 0;
err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);

View File

@ -4374,9 +4374,9 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
pci_set_master(pdev);
if (sizeof(dma_addr_t) > sizeof(u32) &&
!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))) {
using_dac = 1;
err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err < 0) {
dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
"for consistent allocations\n");

View File

@ -3042,7 +3042,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
*/
if (pdev->vendor == PCI_VENDOR_ID_SUN &&
pdev->device == PCI_DEVICE_ID_SUN_GEM &&
!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
} else {
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);

View File

@ -1941,8 +1941,8 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((err = pci_enable_device(pdev))) /* it trigers interrupt, dunno why. */
goto err_pci; /* it's not a problem though */
if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
!(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
!(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
pci_using_dac = 1;
} else {
if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||

View File

@ -31,6 +31,7 @@
#include <linux/vmalloc.h>
#include <linux/firmware.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
/* Compile Time Switches */
/* start */
@ -98,10 +99,6 @@
#define READ_REG(pp, reg) readl(pp->pBdxRegs + reg)
#define WRITE_REG(pp, reg, val) writel(val, pp->pBdxRegs + reg)
#ifndef DMA_64BIT_MASK
# define DMA_64BIT_MASK 0xffffffffffffffffULL
#endif
#ifndef DMA_32BIT_MASK
# define DMA_32BIT_MASK 0x00000000ffffffffULL
#endif

View File

@ -13232,10 +13232,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
dma_mask = DMA_64BIT_MASK;
dma_mask = DMA_BIT_MASK(64);
#endif
} else
persist_dma_mask = dma_mask = DMA_64BIT_MASK;
persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
/* Configure DMA attributes. */
if (dma_mask > DMA_32BIT_MASK) {

View File

@ -1180,7 +1180,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
#if 0
// dma_supported() is deeply broken on almost all architectures
// possible with some EHCI controllers
if (dma_supported (&udev->dev, DMA_64BIT_MASK))
if (dma_supported (&udev->dev, DMA_BIT_MASK(64)))
net->features |= NETIF_F_HIGHDMA;
#endif

View File

@ -772,7 +772,7 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
tmp = b43_read32(dev, SSB_TMSHIGH);
if (tmp & SSB_TMSHIGH_DMA64)
return DMA_64BIT_MASK;
return DMA_BIT_MASK(64);
mmio_base = b43_dmacontroller_base(0, 0);
b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
@ -788,7 +788,7 @@ static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
return B43_DMA_30BIT;
if (dmamask == DMA_32BIT_MASK)
return B43_DMA_32BIT;
if (dmamask == DMA_64BIT_MASK)
if (dmamask == DMA_BIT_MASK(64))
return B43_DMA_64BIT;
B43_WARN_ON(1);
return B43_DMA_30BIT;
@ -999,7 +999,7 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
err = ssb_dma_set_mask(dev->dev, mask);
if (!err)
break;
if (mask == DMA_64BIT_MASK) {
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_32BIT_MASK;
fallback = 1;
continue;

View File

@ -846,7 +846,7 @@ static u64 supported_dma_mask(struct b43legacy_wldev *dev)
tmp = b43legacy_read32(dev, SSB_TMSHIGH);
if (tmp & SSB_TMSHIGH_DMA64)
return DMA_64BIT_MASK;
return DMA_BIT_MASK(64);
mmio_base = b43legacy_dmacontroller_base(0, 0);
b43legacy_write32(dev,
mmio_base + B43legacy_DMA32_TXCTL,
@ -865,7 +865,7 @@ static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
return B43legacy_DMA_30BIT;
if (dmamask == DMA_32BIT_MASK)
return B43legacy_DMA_32BIT;
if (dmamask == DMA_64BIT_MASK)
if (dmamask == DMA_BIT_MASK(64))
return B43legacy_DMA_64BIT;
B43legacy_WARN_ON(1);
return B43legacy_DMA_30BIT;
@ -1042,7 +1042,7 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
err = ssb_dma_set_mask(dev->dev, mask);
if (!err)
break;
if (mask == DMA_64BIT_MASK) {
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_32BIT_MASK;
fallback = 1;
continue;

View File

@ -57,7 +57,7 @@
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

View File

@ -2016,8 +2016,8 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
pci_set_master(pdev);
pci_try_set_mwi(pdev);
if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)
|| pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
|| pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)
|| pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");

View File

@ -1402,8 +1402,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
}
if(dev->dac_support != 0) {
if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) &&
!pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) {
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64)) &&
!pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
if (!dev->in_reset)
printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
dev->name, dev->id);

View File

@ -195,7 +195,7 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
const u64 required_mask = dma_get_required_mask(dev);
if (required_mask > DMA_39BIT_MASK &&
dma_set_mask(dev, DMA_64BIT_MASK) == 0)
dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
ahd->flags |= AHD_64BIT_ADDRESSING;
else if (required_mask > DMA_32BIT_MASK &&
dma_set_mask(dev, DMA_39BIT_MASK) == 0)

View File

@ -790,8 +790,8 @@ static int __devinit asd_pci_probe(struct pci_dev *dev,
goto Err_remove;
err = -ENODEV;
if (!pci_set_dma_mask(dev, DMA_64BIT_MASK)
&& !pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK))
if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64))
&& !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64)))
;
else if (!pci_set_dma_mask(dev, DMA_32BIT_MASK)
&& !pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK))

View File

@ -393,7 +393,7 @@ static int arcmsr_probe(struct pci_dev *pdev,
acb = (struct AdapterControlBlock *)host->hostdata;
memset(acb, 0, sizeof (struct AdapterControlBlock));
error = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (error) {
error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (error) {

View File

@ -1014,7 +1014,7 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
* See if we should enable dma64 mode.
*/
if (sizeof(dma_addr_t) > 4 &&
pci_set_dma_mask(pDev, DMA_64BIT_MASK) == 0) {
pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
if (dma_get_required_mask(&pDev->dev) > DMA_32BIT_MASK)
dma64 = 1;
}

View File

@ -5030,7 +5030,7 @@ static int __devinit gdth_pci_probe_one(gdth_pci_str *pcistr,
}
} else {
shp->max_cmd_len = 16;
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum);
} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
printk(KERN_WARNING "GDT-PCI %d: "

View File

@ -958,7 +958,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
pci_set_master(pcidev);
/* Enable 64bit DMA if possible */
if (pci_set_dma_mask(pcidev, DMA_64BIT_MASK)) {
if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
if (pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) {
printk(KERN_ERR "hptiop: fail to set dma_mask\n");
goto disable_pci_device;

View File

@ -7048,7 +7048,7 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
* are guaranteed to be < 4G.
*/
if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
!pci_set_dma_mask(ha->pcidev, DMA_64BIT_MASK)) {
!pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(64))) {
(ha)->flags |= IPS_HA_ENH_SG;
} else {
if (pci_set_dma_mask(ha->pcidev, DMA_32BIT_MASK) != 0) {

View File

@ -2660,7 +2660,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
pci_save_state(pdev);
pci_try_set_mwi(pdev);
if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0)
if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
goto out_idr_remove;

View File

@ -4793,7 +4793,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set the Mode of addressing to 64 bit if we can */
if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) {
pci_set_dma_mask(pdev, DMA_64BIT_MASK);
pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
adapter->has_64bit_addr = 1;
} else {
pci_set_dma_mask(pdev, DMA_32BIT_MASK);

View File

@ -900,7 +900,7 @@ megaraid_init_mbox(adapter_t *adapter)
adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) {
if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: DMA mask for 64-bit failed\n"));

View File

@ -2497,7 +2497,7 @@ megasas_set_dma_mask(struct pci_dev *pdev)
* All our contollers are capable of performing 64-bit DMA
*/
if (IS_DMA64) {
if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
goto fail_set_dma_mask;

View File

@ -875,8 +875,8 @@ static int pci_go_64(struct pci_dev *pdev)
{
int rc;
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
if (rc) {

View File

@ -4275,7 +4275,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->devnum = devnum; /* specifies microcode load address */
#ifdef QLA_64BIT_PTR
if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK)) {
printk(KERN_WARNING "scsi(%li): Unable to set a "
"suitable DMA mask - aborting\n", ha->host_no);

View File

@ -1176,10 +1176,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
/* Assume a 32bit DMA mask. */
ha->flags.enable_64bit_addressing = 0;
if (!dma_set_mask(&ha->pdev->dev, DMA_64BIT_MASK)) {
if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
/* Any upper-dword bits set? */
if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
!pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
!pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
/* Ok, a 64bit DMA mask is applicable. */
ha->flags.enable_64bit_addressing = 1;
ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;

View File

@ -1369,8 +1369,8 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
int retval;
/* Update our PCI device dma_mask for full 64 bit mask */
if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK) == 0) {
if (pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
dev_dbg(&ha->pdev->dev,
"Failed to set 64 bit PCI consistent mask; "
"using 32 bit.\n");

View File

@ -1395,8 +1395,8 @@ static int stex_set_dma_mask(struct pci_dev * pdev)
{
int ret;
if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)
&& !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
return 0;
ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (!ret)

View File

@ -1094,7 +1094,7 @@ do { \
(data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len); \
} while (0)
#elif SYM_CONF_DMA_ADDRESSING_MODE == 2
#define DMA_DAC_MASK DMA_64BIT_MASK
#define DMA_DAC_MASK DMA_BIT_MASK(64)
int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s);
static inline void
sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len)

View File

@ -617,9 +617,9 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
pci_set_master(pdev);
#ifdef USE_64BIT_DMA
ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!ret) {
ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (ret < 0) {
printk(KERN_WARNING "%s: Unable to obtain 64 bit DMA "
"for consistent allocations\n",

View File

@ -849,8 +849,8 @@ static int __devinit probe(struct pci_dev *dev, const struct pci_device_id *id)
#if 1 /* @todo For now, disable 64-bit, because I do not understand the implications (DAC!) */
/* query for DMA transfer */
/* @see Documentation/PCI/PCI-DMA-mapping.txt */
if (!pci_set_dma_mask(dev, DMA_64BIT_MASK)) {
pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK);
if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64))) {
pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
/* use 64-bit DMA */
printk(KERN_DEBUG "Using a 64-bit DMA mask.\n");
} else

View File

@ -371,9 +371,9 @@ static int __devinit slic_entry_probe(struct pci_dev *pcidev,
printk(KERN_DEBUG "%s\n", slic_proc_version);
}
err = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
if (err) {
err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK);
err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
if (err)
goto err_out_disable_pci;
}

View File

@ -934,8 +934,8 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
pci_read_config_byte(pcidev, PCI_REVISION_ID, &revision_id);
if (!(err = pci_set_dma_mask(pcidev, DMA_64BIT_MASK))) {
DBG_ERROR("pci_set_dma_mask(DMA_64BIT_MASK) successful\n");
if (!(err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)))) {
DBG_ERROR("pci_set_dma_mask(DMA_BIT_MASK(64)) successful\n");
} else {
if ((err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK))) {
DBG_ERROR

View File

@ -622,7 +622,7 @@ static int ehci_run (struct usb_hcd *hcd)
ehci_writel(ehci, 0, &ehci->regs->segment);
#if 0
// this is deeply broken on almost all architectures
if (!dma_set_mask(hcd->self.controller, DMA_64BIT_MASK))
if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)))
ehci_info(ehci, "enabled 64bit DMA\n");
#endif
}

View File

@ -160,8 +160,8 @@ static int whci_probe(struct pci_dev *pci, const struct pci_device_id *id)
pci_enable_msi(pci);
pci_set_master(pci);
err = -ENXIO;
if (!pci_set_dma_mask(pci, DMA_64BIT_MASK))
pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK);
if (!pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64));
else if (!pci_set_dma_mask(pci, DMA_32BIT_MASK))
pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK);
else