Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/kernel/pci_iommu.c            |   2
-rw-r--r--  arch/arm/common/dmabounce.c              |   2
-rw-r--r--  arch/blackfin/kernel/dma-mapping.c       |   3
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c          |   2
-rw-r--r--  arch/ia64/hp/sim/simscsi.c               |   4
-rw-r--r--  arch/ia64/kernel/efi.c                   |   4
-rw-r--r--  arch/ia64/kernel/setup.c                 |  14
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c               |   2
-rw-r--r--  arch/m68k/kernel/dma.c                   |   2
-rw-r--r--  arch/mips/mm/dma-default.c               |  16
-rw-r--r--  arch/powerpc/kernel/dma_64.c             |   3
-rw-r--r--  arch/powerpc/kernel/ibmebus.c            |   3
-rw-r--r--  arch/powerpc/kernel/iommu.c              |   2
-rw-r--r--  arch/powerpc/platforms/ps3/system-bus.c  |   5
-rw-r--r--  arch/sparc/kernel/ioport.c               |  17
-rw-r--r--  arch/sparc/mm/io-unit.c                  |   2
-rw-r--r--  arch/sparc/mm/iommu.c                    |   8
-rw-r--r--  arch/sparc/mm/sun4c.c                    |   2
-rw-r--r--  arch/sparc64/kernel/iommu.c              |   7
-rw-r--r--  arch/sparc64/kernel/iommu_common.c       |  13
-rw-r--r--  arch/sparc64/kernel/ldc.c                |   2
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c          |   7
-rw-r--r--  arch/um/drivers/ubd_kern.c               |   2
-rw-r--r--  arch/x86/boot/compressed/head_32.S       |  15
-rw-r--r--  arch/x86/boot/compressed/misc_32.c       |   3
-rw-r--r--  arch/x86/boot/header.S                   |   7
-rw-r--r--  arch/x86/kernel/asm-offsets_32.c         |   7
-rw-r--r--  arch/x86/kernel/e820_32.c                |  18
-rw-r--r--  arch/x86/kernel/e820_64.c                |  22
-rw-r--r--  arch/x86/kernel/efi_32.c                 |   4
-rw-r--r--  arch/x86/kernel/head_32.S                |  44
-rw-r--r--  arch/x86/kernel/io_apic_64.c             |  59
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c         |  10
-rw-r--r--  arch/x86/kernel/pci-dma_64.c             |   5
-rw-r--r--  arch/x86/kernel/pci-gart_64.c            |   4
-rw-r--r--  arch/x86/kernel/pci-nommu_64.c           |   4
-rw-r--r--  arch/x86/kernel/setup_32.c               |   4
-rw-r--r--  arch/x86/kernel/setup_64.c               |   9
-rw-r--r--  arch/x86/mm/pageattr_64.c                |   6
-rw-r--r--  arch/x86_64/Kconfig                      |  32
40 files changed, 290 insertions, 87 deletions
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index e1c470752eb..ee07dceae1d 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -465,7 +465,7 @@ EXPORT_SYMBOL(pci_free_consistent);
Write dma_length of each leader with the combined lengths of
the mergable followers. */
-#define SG_ENT_VIRT_ADDRESS(SG) (page_address((SG)->page) + (SG)->offset)
+#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
static void
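
Most of the hunks in this diff are mechanical conversions from open-coded scatterlist access (sg->page, page_address(sg->page) + sg->offset, page_to_phys(sg->page) + sg->offset) to the sg_page()/sg_virt()/sg_phys() helpers that came in with chained scatterlists. A minimal sketch of what those helpers amount to for a plain, unchained entry; the example_* names below are illustrative and not part of this diff:

/* Sketch only: what the accessors used throughout this diff reduce to
 * for an unchained entry.  The real helpers in <linux/scatterlist.h>
 * also mask off the chain/termination bits that sg_page() keeps in the
 * low bits of the page pointer. */
#include <linux/types.h>	/* dma_addr_t */
#include <linux/mm.h>		/* page_address() */
#include <linux/scatterlist.h>	/* struct scatterlist, sg_page() */
#include <asm/io.h>		/* page_to_phys() */

static inline void *example_sg_virt(struct scatterlist *sg)
{
	return page_address(sg_page(sg)) + sg->offset;
}

static inline dma_addr_t example_sg_phys(struct scatterlist *sg)
{
	return page_to_phys(sg_page(sg)) + sg->offset;
}
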
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 44ab0dad403..9d371e47655 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -442,7 +442,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
BUG_ON(dir == DMA_NONE);
for (i = 0; i < nents; i++, sg++) {
- struct page *page = sg->page;
+ struct page *page = sg_page(sg);
unsigned int offset = sg->offset;
unsigned int length = sg->length;
void *ptr = page_address(page) + offset;
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index 94d7b119b71..a16cb03c529 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -160,8 +160,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
BUG_ON(direction == DMA_NONE);
for (i = 0; i < nents; i++, sg++) {
- sg->dma_address = (dma_addr_t)(page_address(sg->page) +
- sg->offset);
+ sg->dma_address = (dma_addr_t) sg_virt(sg);
invalidate_dcache_range(sg_dma_address(sg),
sg_dma_address(sg) +
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 3c95f4184b9..bc859a311ea 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -246,7 +246,7 @@ static int reserve_sba_gart = 1;
static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
-#define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset)
+#define sba_sg_address(sg) sg_virt((sg))
#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index a3a558a0675..6ef9b521993 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -131,7 +131,7 @@ simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset)
stat.fd = desc[sc->device->id];
scsi_for_each_sg(sc, sl, scsi_sg_count(sc), i) {
- req.addr = __pa(page_address(sl->page) + sl->offset);
+ req.addr = __pa(sg_virt(sl));
req.len = sl->length;
if (DBG)
printk("simscsi_sg_%s @ %lx (off %lx) use_sg=%d len=%d\n",
@@ -212,7 +212,7 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
if (!len)
break;
thislen = min(len, slp->length);
- memcpy(page_address(slp->page) + slp->offset, buf, thislen);
+ memcpy(sg_virt(slp), buf, thislen);
len -= thislen;
}
}
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 8e4894b205e..3f7ea13358e 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -1090,7 +1090,8 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
void
efi_initialize_iomem_resources(struct resource *code_resource,
- struct resource *data_resource)
+ struct resource *data_resource,
+ struct resource *bss_resource)
{
struct resource *res;
void *efi_map_start, *efi_map_end, *p;
@@ -1171,6 +1172,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
*/
insert_resource(res, code_resource);
insert_resource(res, data_resource);
+ insert_resource(res, bss_resource);
#ifdef CONFIG_KEXEC
insert_resource(res, &efi_memmap_res);
insert_resource(res, &boot_param_res);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index cbf67f1aa29..ae6c3c02e11 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -90,7 +90,12 @@ static struct resource code_resource = {
.name = "Kernel code",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
-extern char _text[], _end[], _etext[];
+
+static struct resource bss_resource = {
+ .name = "Kernel bss",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+extern char _text[], _end[], _etext[], _edata[], _bss[];
unsigned long ia64_max_cacheline_size;
@@ -200,8 +205,11 @@ static int __init register_memory(void)
code_resource.start = ia64_tpa(_text);
code_resource.end = ia64_tpa(_etext) - 1;
data_resource.start = ia64_tpa(_etext);
- data_resource.end = ia64_tpa(_end) - 1;
- efi_initialize_iomem_resources(&code_resource, &data_resource);
+ data_resource.end = ia64_tpa(_edata) - 1;
+ bss_resource.start = ia64_tpa(_bss);
+ bss_resource.end = ia64_tpa(_end) - 1;
+ efi_initialize_iomem_resources(&code_resource, &data_resource,
+ &bss_resource);
return 0;
}
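
The resource changes on ia64 here, and on x86 later in this diff, follow one pattern: split the old catch-all "Kernel data" range into separate data and bss resources, then pass the extra resource down so it is inserted under the matching "System RAM" entry. A hedged sketch of that registration step; the example_* names are illustrative, and .end is inclusive, which is why the hunks above subtract one:

#include <linux/ioport.h>

static struct resource example_bss_resource = {
	.name	= "Kernel bss",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

/* parent is the enclosing "System RAM" resource */
static int example_register_bss(struct resource *parent,
				unsigned long bss_start_pa,
				unsigned long bss_end_pa)
{
	example_bss_resource.start = bss_start_pa;
	example_bss_resource.end   = bss_end_pa - 1;	/* end is inclusive */
	return insert_resource(parent, &example_bss_resource);
}
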
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index ecd8a52b9b9..511db2fd7bf 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -16,7 +16,7 @@
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
-#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
+#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
/**
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 9d4e4b5b6bd..ef490e1ce60 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -121,7 +121,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
int i;
for (i = 0; i < nents; sg++, i++) {
- sg->dma_address = page_to_phys(sg->page) + sg->offset;
+ sg->dma_address = sg_phys(sg);
dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
}
return nents;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 98b5e5bac02..b0b034c4654 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -165,12 +165,11 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
for (i = 0; i < nents; i++, sg++) {
unsigned long addr;
- addr = (unsigned long) page_address(sg->page);
+ addr = (unsigned long) sg_virt(sg);
if (!plat_device_is_coherent(dev) && addr)
- __dma_sync(addr + sg->offset, sg->length, direction);
+ __dma_sync(addr, sg->length, direction);
sg->dma_address = plat_map_dma_mem(dev,
- (void *)(addr + sg->offset),
- sg->length);
+ (void *)addr, sg->length);
}
return nents;
@@ -223,10 +222,9 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
for (i = 0; i < nhwentries; i++, sg++) {
if (!plat_device_is_coherent(dev) &&
direction != DMA_TO_DEVICE) {
- addr = (unsigned long) page_address(sg->page);
+ addr = (unsigned long) sg_virt(sg);
if (addr)
- __dma_sync(addr + sg->offset, sg->length,
- direction);
+ __dma_sync(addr, sg->length, direction);
}
plat_unmap_dma_mem(sg->dma_address);
}
@@ -304,7 +302,7 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
if (cpu_is_noncoherent_r10000(dev))
- __dma_sync((unsigned long)page_address(sg->page),
+ __dma_sync((unsigned long)page_address(sg_page(sg)),
sg->length, direction);
plat_unmap_dma_mem(sg->dma_address);
}
@@ -322,7 +320,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
if (!plat_device_is_coherent(dev))
- __dma_sync((unsigned long)page_address(sg->page),
+ __dma_sync((unsigned long)page_address(sg_page(sg)),
sg->length, direction);
plat_unmap_dma_mem(sg->dma_address);
}
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 9001104b56b..14206e3f081 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -161,8 +161,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int i;
for_each_sg(sgl, sg, nents, i) {
- sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
- dma_direct_offset;
+ sg->dma_address = sg_phys(sg) | dma_direct_offset;
sg->dma_length = sg->length;
}
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 289d7e93591..72fd87156b2 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -102,8 +102,7 @@ static int ibmebus_map_sg(struct device *dev,
int i;
for_each_sg(sgl, sg, nents, i) {
- sg->dma_address = (dma_addr_t)page_address(sg->page)
- + sg->offset;
+ sg->dma_address = (dma_addr_t) sg_virt(sg);
sg->dma_length = sg->length;
}
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 306a6f75b6c..2d0c9ef555e 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -307,7 +307,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
continue;
}
/* Allocate iommu entries for that segment */
- vaddr = (unsigned long)page_address(s->page) + s->offset;
+ vaddr = (unsigned long) sg_virt(s);
npages = iommu_num_pages(vaddr, slen);
entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 07e64b48e7f..6405f4a3676 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -628,9 +628,8 @@ static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
int i;
for_each_sg(sgl, sg, nents, i) {
- int result = ps3_dma_map(dev->d_region,
- page_to_phys(sg->page) + sg->offset, sg->length,
- &sg->dma_address, 0);
+ int result = ps3_dma_map(dev->d_region, sg_phys(sg),
+ sg->length, &sg->dma_address, 0);
if (result) {
pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 9c3ed88853f..97aa50d1e4a 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -727,9 +727,8 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
BUG_ON(direction == PCI_DMA_NONE);
/* IIep is write-through, not flushing. */
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg->page) == NULL);
- sg->dvma_address =
- virt_to_phys(page_address(sg->page)) + sg->offset;
+ BUG_ON(page_address(sg_page(sg)) == NULL);
+ sg->dvma_address = virt_to_phys(sg_virt(sg));
sg->dvma_length = sg->length;
}
return nents;
@@ -748,9 +747,9 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg->page) == NULL);
+ BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
- (unsigned long) page_address(sg->page),
+ (unsigned long) page_address(sg_page(sg)),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
}
}
@@ -798,9 +797,9 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg->page) == NULL);
+ BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
- (unsigned long) page_address(sg->page),
+ (unsigned long) page_address(sg_page(sg)),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
}
}
@@ -814,9 +813,9 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg->page) == NULL);
+ BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
- (unsigned long) page_address(sg->page),
+ (unsigned long) page_address(sg_page(sg)),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
}
}
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index 375b4db6370..1666087c5b8 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -144,7 +144,7 @@ static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus
spin_lock_irqsave(&iounit->lock, flags);
while (sz != 0) {
--sz;
- sg->dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg->page) + sg->offset, sg->length);
+ sg->dvma_address = iounit_get_area(iounit, sg_virt(sg), sg->length);
sg->dvma_length = sg->length;
sg = sg_next(sg);
}
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 283656d9f6e..4b934270f05 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -238,7 +238,7 @@ static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sb
while (sz != 0) {
--sz;
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
- sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
+ sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
sg->dvma_length = (__u32) sg->length;
sg = sg_next(sg);
}
@@ -252,7 +252,7 @@ static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbu
while (sz != 0) {
--sz;
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
- sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
+ sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
sg->dvma_length = (__u32) sg->length;
sg = sg_next(sg);
}
@@ -273,7 +273,7 @@ static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbu
* XXX Is this a good assumption?
* XXX What if someone else unmaps it here and races us?
*/
- if ((page = (unsigned long) page_address(sg->page)) != 0) {
+ if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
for (i = 0; i < n; i++) {
if (page != oldpage) { /* Already flushed? */
flush_page_for_dma(page);
@@ -283,7 +283,7 @@ static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbu
}
}
- sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
+ sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
sg->dvma_length = (__u32) sg->length;
sg = sg_next(sg);
}
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index ee6708fc449..a2cc141291c 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -1228,7 +1228,7 @@ static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *
{
while (sz != 0) {
--sz;
- sg->dvma_address = (__u32)sun4c_lockarea(page_address(sg->page) + sg->offset, sg->length);
+ sg->dvma_address = (__u32)sun4c_lockarea(sg_virt(sg), sg->length);
sg->dvma_length = sg->length;
sg = sg_next(sg);
}
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index 29af777d7ac..070a4846c0c 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -472,8 +472,7 @@ static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
spin_unlock_irqrestore(&iommu->lock, flags);
}
-#define SG_ENT_PHYS_ADDRESS(SG) \
- (__pa(page_address((SG)->page)) + (SG)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
static void fill_sg(iopte_t *iopte, struct scatterlist *sg,
int nused, int nelems,
@@ -565,9 +564,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
/* Fast path single entry scatterlists. */
if (nelems == 1) {
sglist->dma_address =
- dma_4u_map_single(dev,
- (page_address(sglist->page) +
- sglist->offset),
+ dma_4u_map_single(dev, sg_virt(sglist),
sglist->length, direction);
if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
return 0;
diff --git a/arch/sparc64/kernel/iommu_common.c b/arch/sparc64/kernel/iommu_common.c
index d7ca900ec51..78e8277df65 100644
--- a/arch/sparc64/kernel/iommu_common.c
+++ b/arch/sparc64/kernel/iommu_common.c
@@ -73,7 +73,7 @@ static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg,
daddr = dma_sg->dma_address;
sglen = sg->length;
- sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
+ sgaddr = (unsigned long) sg_virt(sg);
while (dlen > 0) {
unsigned long paddr;
@@ -123,7 +123,7 @@ static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg,
sg = sg_next(sg);
if (--nents <= 0)
break;
- sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
+ sgaddr = (unsigned long) sg_virt(sg);
sglen = sg->length;
}
if (dlen < 0) {
@@ -191,7 +191,7 @@ void verify_sglist(struct scatterlist *sglist, int nents, iopte_t *iopte, int np
printk("sg(%d): page_addr(%p) off(%x) length(%x) "
"dma_address[%016x] dma_length[%016x]\n",
i,
- page_address(sg->page), sg->offset,
+ page_address(sg_page(sg)), sg->offset,
sg->length,
sg->dma_address, sg->dma_length);
}
@@ -207,15 +207,14 @@ unsigned long prepare_sg(struct scatterlist *sg, int nents)
unsigned long prev;
u32 dent_addr, dent_len;
- prev = (unsigned long) (page_address(sg->page) + sg->offset);
+ prev = (unsigned long) sg_virt(sg);
prev += (unsigned long) (dent_len = sg->length);
- dent_addr = (u32) ((unsigned long)(page_address(sg->page) + sg->offset)
- & (IO_PAGE_SIZE - 1UL));
+ dent_addr = (u32) ((unsigned long)(sg_virt(sg)) & (IO_PAGE_SIZE - 1UL));
while (--nents) {
unsigned long addr;
sg = sg_next(sg);
- addr = (unsigned long) (page_address(sg->page) + sg->offset);
+ addr = (unsigned long) sg_virt(sg);
if (! VCONTIG(prev, addr)) {
dma_sg->dma_address = dent_addr;
dma_sg->dma_length = dent_len;
diff --git a/arch/sparc64/kernel/ldc.c b/arch/sparc64/kernel/ldc.c
index 85a2be0b096..c8313cb60f0 100644
--- a/arch/sparc64/kernel/ldc.c
+++ b/arch/sparc64/kernel/ldc.c
@@ -2057,7 +2057,7 @@ static void fill_cookies(struct cookie_state *sp, unsigned long pa,
static int sg_count_one(struct scatterlist *sg)
{
- unsigned long base = page_to_pfn(sg->page) << PAGE_SHIFT;
+ unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
long len = sg->length;
if ((sg->offset | len) & (8UL - 1))
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index fe46ace3e59..8c4875bdb4a 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -365,8 +365,7 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
spin_unlock_irqrestore(&iommu->lock, flags);
}
-#define SG_ENT_PHYS_ADDRESS(SG) \
- (__pa(page_address((SG)->page)) + (SG)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
static long fill_sg(long entry, struct device *dev,
struct scatterlist *sg,
@@ -477,9 +476,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
/* Fast path single entry scatterlists. */
if (nelems == 1) {
sglist->dma_address =
- dma_4v_map_single(dev,
- (page_address(sglist->page) +
- sglist->offset),
+ dma_4v_map_single(dev, sg_virt(sglist),
sglist->length, direction);
if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
return 0;
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 25b248a0250..3a8cd3dfb51 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1115,7 +1115,7 @@ static void do_ubd_request(struct request_queue *q)
}
prepare_request(req, io_req,
(unsigned long long) req->sector << 9,
- sg->offset, sg->length, sg->page);
+ sg->offset, sg->length, sg_page(sg));
last_sectors = sg->length >> 9;
n = os_write_file(thread_fd, &io_req,
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index f35ea223752..a0ae2e7f6ce 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -27,13 +27,22 @@
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/boot.h>
+#include <asm/asm-offsets.h>
.section ".text.head","ax",@progbits
.globl startup_32
startup_32:
- cld
- cli
+ /* check to see if KEEP_SEGMENTS flag is meaningful */
+ cmpw $0x207, BP_version(%esi)
+ jb 1f
+
+ /* test KEEP_SEGMENTS flag to see if the bootloader is asking
+ * us to not reload segments */
+ testb $(1<<6), BP_loadflags(%esi)
+ jnz 2f
+
+1: cli
movl $(__BOOT_DS),%eax
movl %eax,%ds
movl %eax,%es
@@ -41,6 +50,8 @@ startup_32:
movl %eax,%gs
movl %eax,%ss
+2: cld
+
/* Calculate the delta between where we were compiled to run
* at and where we were actually loaded at. This can only be done
* with a short local call on x86. Nothing else will tell us what
diff --git a/arch/x86/boot/compressed/misc_32.c b/arch/x86/boot/compressed/misc_32.c
index 1dc1e19c0a9..b74d60d1b2f 100644
--- a/arch/x86/boot/compressed/misc_32.c
+++ b/arch/x86/boot/compressed/misc_32.c
@@ -247,6 +247,9 @@ static void putstr(const char *s)
int x,y,pos;
char c;
+ if (RM_SCREEN_INFO.orig_video_mode == 0 && lines == 0 && cols == 0)
+ return;
+
x = RM_SCREEN_INFO.orig_x;
y = RM_SCREEN_INFO.orig_y;
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index f3140e596d4..8353c81c41c 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -119,7 +119,7 @@ _start:
# Part 2 of the header, from the old setup.S
.ascii "HdrS" # header signature
- .word 0x0206 # header version number (>= 0x0105)
+ .word 0x0207 # header version number (>= 0x0105)
# or else old loadlin-1.5 will fail)
.globl realmode_swtch
realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
@@ -214,6 +214,11 @@ cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line,
#added with boot protocol
#version 2.06
+hardware_subarch: .long 0 # subarchitecture, added with 2.07
+ # default to 0 for normal x86 PC
+
+hardware_subarch_data: .quad 0
+
# End of setup header #####################################################
.section ".inittext", "ax"
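
Bumping the header version to 0x0207 is what lets the kernel-side code trust the two new fields and the KEEP_SEGMENTS loadflag (bit 6) that head_32.S tests later in this diff. A loader-side sketch of filling those fields, assuming the struct boot_params / setup_header layout from include/asm-x86/bootparam.h of this series; the example_* names are illustrative:

#include <asm/bootparam.h>	/* struct boot_params, struct setup_header */

#define EX_KEEP_SEGMENTS (1 << 6)	/* loadflags bit tested in head_32.S */

/* Subarch numbering matches the subarch_entries table added to
 * head_32.S below: 0 = bare PC, 1 = lguest, 2 = Xen. */
static void example_fill_207_fields(struct boot_params *bp, unsigned long subarch)
{
	if (bp->hdr.version < 0x0207)
		return;				/* kernel predates protocol 2.07 */

	bp->hdr.loadflags |= EX_KEEP_SEGMENTS;	/* segments already set up */
	bp->hdr.hardware_subarch = subarch;
	bp->hdr.hardware_subarch_data = 0;
}
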
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index f1b7cdda82b..f8764716b0c 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -15,6 +15,7 @@
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
+#include <asm/bootparam.h>
#include <asm/elf.h>
#include <xen/interface/xen.h>
@@ -146,4 +147,10 @@ void foo(void)
OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode);
OFFSET(LGUEST_PAGES_regs, lguest_pages, regs);
#endif
+
+ BLANK();
+ OFFSET(BP_scratch, boot_params, scratch);
+ OFFSET(BP_loadflags, boot_params, hdr.loadflags);
+ OFFSET(BP_hardware_subarch, boot_params, hdr.hardware_subarch);
+ OFFSET(BP_version, boot_params, hdr.version);
}
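
The BP_* constants above are how the new assembly in head_32.S can address fields of struct boot_params before any C code runs: asm-offsets_32.c is compiled to assembler, and a Kbuild sed script turns the emitted markers into asm-offsets.h defines. A sketch of the in-tree asm-offsets macros involved (not part of this diff):

#include <linux/stddef.h>	/* offsetof() */

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

/* So OFFSET(BP_version, boot_params, hdr.version) ends up as
 * "#define BP_version <offset>", letting head_32.S write
 * cmpw $0x207, BP_version(%esi). */
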
diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c
index 58fd54eb557..18f500d185a 100644
--- a/arch/x86/kernel/e820_32.c
+++ b/arch/x86/kernel/e820_32.c
@@ -51,6 +51,13 @@ struct resource code_resource = {
.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
+struct resource bss_resource = {
+ .name = "Kernel bss",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
static struct resource system_rom_resource = {
.name = "System ROM",
.start = 0xf0000,
@@ -254,7 +261,9 @@ static void __init probe_roms(void)
* and also for regions reported as reserved by the e820.
*/
static void __init
-legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
+legacy_init_iomem_resources(struct resource *code_resource,
+ struct resource *data_resource,
+ struct resource *bss_resource)
{
int i;
@@ -287,6 +296,7 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
*/
request_resource(res, code_resource);
request_resource(res, data_resource);
+ request_resource(res, bss_resource);
#ifdef CONFIG_KEXEC
if (crashk_res.start != crashk_res.end)
request_resource(res, &crashk_res);
@@ -307,9 +317,11 @@ static int __init request_standard_resources(void)
printk("Setting up standard PCI resources\n");
if (efi_enabled)
- efi_initialize_iomem_resources(&code_resource, &data_resource);
+ efi_initialize_iomem_resources(&code_resource,
+ &data_resource, &bss_resource);
else
- legacy_init_iomem_resources(&code_resource, &data_resource);
+ legacy_init_iomem_resources(&code_resource,
+ &data_resource, &bss_resource);
/* EFI systems may still have VGA */
request_resource(&iomem_resource, &video_ram_resource);
diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
index 57616865d8a..04698e0b056 100644
--- a/arch/x86/kernel/e820_64.c
+++ b/arch/x86/kernel/e820_64.c
@@ -47,7 +47,7 @@ unsigned long end_pfn_map;
*/
static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT;
-extern struct resource code_resource, data_resource;
+extern struct resource code_resource, data_resource, bss_resource;
/* Check for some hardcoded bad areas that early boot is not allowed to touch */
static inline int bad_addr(unsigned long *addrp, unsigned long size)
@@ -225,6 +225,7 @@ void __init e820_reserve_resources(void)
*/
request_resource(res, &code_resource);
request_resource(res, &data_resource);
+ request_resource(res, &bss_resource);
#ifdef CONFIG_KEXEC
if (crashk_res.start != crashk_res.end)
request_resource(res, &crashk_res);
@@ -729,3 +730,22 @@ __init void e820_setup_gap(void)
printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
pci_mem_start, gapstart, gapsize);
}
+
+int __init arch_get_ram_range(int slot, u64 *addr, u64 *size)
+{
+ int i;
+
+ if (slot < 0 || slot >= e820.nr_map)
+ return -1;
+ for (i = slot; i < e820.nr_map; i++) {
+ if (e820.map[i].type != E820_RAM)
+ continue;
+ break;
+ }
+ if (i == e820.nr_map || e820.map[i].addr > (max_pfn << PAGE_SHIFT))
+ return -1;
+ *addr = e820.map[i].addr;
+ *size = min_t(u64, e820.map[i].size + e820.map[i].addr,
+ max_pfn << PAGE_SHIFT) - *addr;
+ return i + 1;
+}
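
arch_get_ram_range() is a slot iterator: pass 0 on the first call, then whatever it returned last time; it hands back the next slot index, or -1 once the E820 RAM entries below max_pfn are exhausted. A usage sketch of that calling convention; the real consumer (such as the DMAR graphics workaround) is outside this diff:

#include <linux/types.h>
#include <linux/kernel.h>

int arch_get_ram_range(int slot, u64 *addr, u64 *size);	/* from e820_64.c above */

static void example_walk_ram(void)
{
	u64 addr, size;
	int slot = 0;

	while ((slot = arch_get_ram_range(slot, &addr, &size)) >= 0)
		printk(KERN_DEBUG "RAM: %#llx - %#llx\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size));
}
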
diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
index b42558c48e9..e2be78f4939 100644
--- a/arch/x86/kernel/efi_32.c
+++ b/arch/x86/kernel/efi_32.c
@@ -603,7 +603,8 @@ void __init efi_enter_virtual_mode(void)
void __init
efi_initialize_iomem_resources(struct resource *code_resource,
- struct resource *data_resource)
+ struct resource *data_resource,
+ struct resource *bss_resource)
{
struct resource *res;
efi_memory_desc_t *md;
@@ -675,6 +676,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
if (md->type == EFI_CONVENTIONAL_MEMORY) {
request_resource(res, code_resource);
request_resource(res, data_resource);
+ request_resource(res, bss_resource);
#ifdef CONFIG_KEXEC
request_resource(res, &crashk_res);
#endif
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 39677965e16..00b1c2c5645 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -79,22 +79,30 @@ INIT_MAP_BEYOND_END = BOOTBITMAP_SIZE + (PAGE_TABLE_SIZE + ALLOCATOR_SLOP)*PAGE_
*/
.section .text.head,"ax",@progbits
ENTRY(startup_32)
+ /* check to see if KEEP_SEGMENTS flag is meaningful */
+ cmpw $0x207, BP_version(%esi)
+ jb 1f
+
+ /* test KEEP_SEGMENTS flag to see if the bootloader is asking
+ us to not reload segments */
+ testb $(1<<6), BP_loadflags(%esi)
+ jnz 2f
/*
* Set segments to known values.
*/
- cld
- lgdt boot_gdt_descr - __PAGE_OFFSET
+1: lgdt boot_gdt_descr - __PAGE_OFFSET
movl $(__BOOT_DS),%eax
movl %eax,%ds
movl %eax,%es
movl %eax,%fs
movl %eax,%gs
+2:
/*
* Clear BSS first so that there are no surprises...
- * No need to cld as DF is already clear from cld above...
*/
+ cld
xorl %eax,%eax
movl $__bss_start - __PAGE_OFFSET,%edi
movl $__bss_stop - __PAGE_OFFSET,%ecx
@@ -128,6 +136,35 @@ ENTRY(startup_32)
movsl
1:
+#ifdef CONFIG_PARAVIRT
+ cmpw $0x207, (boot_params + BP_version - __PAGE_OFFSET)
+ jb default_entry
+
+ /* Paravirt-compatible boot parameters. Look to see what architecture
+ we're booting under. */
+ movl (boot_params + BP_hardware_subarch - __PAGE_OFFSET), %eax
+ cmpl $num_subarch_entries, %eax
+ jae bad_subarch
+
+ movl subarch_entries - __PAGE_OFFSET(,%eax,4), %eax
+ subl $__PAGE_OFFSET, %eax
+ jmp *%eax
+
+bad_subarch:
+WEAK(lguest_entry)
+WEAK(xen_entry)
+ /* Unknown implementation; there's really
+ nothing we can do at this point. */
+ ud2a
+.data
+subarch_entries:
+ .long default_entry /* normal x86/PC */
+ .long lguest_entry /* lguest hypervisor */
+ .long xen_entry /* Xen hypervisor */
+num_subarch_entries = (. - subarch_entries) / 4
+.previous
+#endif /* CONFIG_PARAVIRT */
+
/*
* Initialize page tables. This creates a PDE and a set of page
* tables, which are located immediately beyond _end. The variable
@@ -140,6 +177,7 @@ ENTRY(startup_32)
*/
page_pde_offset = (__PAGE_OFFSET >> 20);
+default_entry:
movl $(pg0 - __PAGE_OFFSET), %edi
movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
movl $0x007, %eax /* 0x007 = PRESENT+RW+USER */
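
In C terms, the CONFIG_PARAVIRT block above is a bounds-checked jump table indexed by boot_params.hdr.hardware_subarch, with out-of-range values ending at the ud2a trap. A purely illustrative analogue (the real dispatch runs in assembly, before paging is enabled):

typedef void (*example_entry_t)(void);

static void example_dispatch_subarch(unsigned long subarch,
				     example_entry_t default_entry,
				     example_entry_t lguest_entry,
				     example_entry_t xen_entry)
{
	example_entry_t table[] = { default_entry, lguest_entry, xen_entry };

	if (subarch >= sizeof(table) / sizeof(table[0]))
		__builtin_trap();	/* mirrors the ud2a at bad_subarch */

	table[subarch]();
}
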
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index b3c2d268d70..953328b55a3 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -31,6 +31,7 @@
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
+#include <linux/dmar.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
@@ -2031,8 +2032,64 @@ void arch_teardown_msi_irq(unsigned int irq)
destroy_irq(irq);
}
-#endif /* CONFIG_PCI_MSI */
+#ifdef CONFIG_DMAR
+#ifdef CONFIG_SMP
+static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+{
+ struct irq_cfg *cfg = irq_cfg + irq;
+ struct msi_msg msg;
+ unsigned int dest;
+ cpumask_t tmp;
+
+ cpus_and(tmp, mask, cpu_online_map);
+ if (cpus_empty(tmp))
+ return;
+
+ if (assign_irq_vector(irq, mask))
+ return;
+
+ cpus_and(tmp, cfg->domain, mask);
+ dest = cpu_mask_to_apicid(tmp);
+
+ dmar_msi_read(irq, &msg);
+
+ msg.data &= ~MSI_DATA_VECTOR_MASK;
+ msg.data |= MSI_DATA_VECTOR(cfg->vector);
+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+ dmar_msi_write(irq, &msg);
+ irq_desc[irq].affinity = mask;
+}
+#endif /* CONFIG_SMP */
+
+struct irq_chip dmar_msi_type = {
+ .name = "DMAR_MSI",
+ .unmask = dmar_msi_unmask,
+ .mask = dmar_msi_mask,
+ .ack = ack_apic_edge,
+#ifdef CONFIG_SMP
+ .set_affinity = dmar_msi_set_affinity,
+#endif
+ .retrigger = ioapic_retrigger_irq,
+};
+
+int arch_setup_dmar_msi(unsigned int irq)
+{
+ int ret;
+ struct msi_msg msg;
+
+ ret = msi_compose_msg(NULL, irq, &msg);
+ if (ret < 0)
+ return ret;
+ dmar_msi_write(irq, &msg);
+ set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
+ "edge");
+ return 0;
+}
+#endif
+#endif /* CONFIG_PCI_MSI */
/*
* Hypertransport interrupt support
*/
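
The dmar_msi_type chip and arch_setup_dmar_msi() above give the DMA-remapping driver a way to receive its fault-reporting interrupt as an ordinary MSI. A hedged sketch of the consumer side; the handler name and dev_id are assumed placeholders, and the real caller is outside this diff:

#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/dmar.h>

static irqreturn_t dmar_fault_handler(int irq, void *dev_id);	/* assumed */

static int example_enable_dmar_fault_irq(void *dev_id)
{
	int irq = create_irq();			/* dynamic vector allocator */

	if (irq < 0)
		return irq;
	if (arch_setup_dmar_msi(irq)) {		/* compose + write MSI message */
		destroy_irq(irq);
		return -EIO;
	}
	return request_irq(irq, dmar_fault_handler, 0, "dmar", dev_id);
}
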
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 5098f58063a..1a20fe31338 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -411,8 +411,10 @@ static int calgary_nontranslate_map_sg(struct device* dev,
int i;
for_each_sg(sg, s, nelems, i) {
- BUG_ON(!s->page);
- s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+ struct page *p = sg_page(s);
+
+ BUG_ON(!p);
+ s->dma_address = virt_to_bus(sg_virt(s));
s->dma_length = s->length;
}
return nelems;
@@ -432,9 +434,9 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
for_each_sg(sg, s, nelems, i) {
- BUG_ON(!s->page);
+ BUG_ON(!sg_page(s));
- vaddr = (unsigned long)page_address(s->page) + s->offset;
+ vaddr = (unsigned long) sg_virt(s);
npages = num_dma_pages(vaddr, s->length);
entry = iommu_range_alloc(tbl, npages);
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
index afaf9f12c03..393e2725a6e 100644
--- a/arch/x86/kernel/pci-dma_64.c
+++ b/arch/x86/kernel/pci-dma_64.c
@@ -7,6 +7,7 @@
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
+#include <linux/dmar.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/calgary.h>
@@ -305,6 +306,8 @@ void __init pci_iommu_alloc(void)
detect_calgary();
#endif
+ detect_intel_iommu();
+
#ifdef CONFIG_SWIOTLB
pci_swiotlb_init();
#endif
@@ -316,6 +319,8 @@ static int __init pci_iommu_init(void)
calgary_iommu_init();
#endif
+ intel_iommu_init();
+
#ifdef CONFIG_IOMMU
gart_iommu_init();
#endif
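
pci_iommu_alloc() and pci_iommu_init() now call the Intel IOMMU hooks unconditionally, so when CONFIG_DMAR is disabled the declarations are expected to fall back to inline stubs in the usual kernel style. A sketch of that pattern; the actual stubs live in a header outside this diff:

#include <linux/errno.h>

#ifdef CONFIG_DMAR
extern void detect_intel_iommu(void);
extern int  intel_iommu_init(void);
#else
static inline void detect_intel_iommu(void) { }
static inline int  intel_iommu_init(void) { return -ENODEV; }
#endif
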
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 5cdfab65e93..c56e9ee6496 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -302,7 +302,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
#endif
for_each_sg(sg, s, nents, i) {
- unsigned long addr = page_to_phys(s->page) + s->offset;
+ unsigned long addr = sg_phys(s);
if (nonforced_iommu(dev, addr, s->length)) {
addr = dma_map_area(dev, addr, s->length, dir);
if (addr == bad_dma_address) {
@@ -397,7 +397,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
start_sg = sgmap = sg;
ps = NULL; /* shut up gcc */
for_each_sg(sg, s, nents, i) {
- dma_addr_t addr = page_to_phys(s->page) + s->offset;
+ dma_addr_t addr = sg_phys(s);
s->dma_address = addr;
BUG_ON(s->length == 0);
diff --git a/arch/x86/kernel/pci-nommu_64.c b/arch/x86/kernel/pci-nommu_64.c
index e85d4360360..faf70bdca33 100644
--- a/arch/x86/kernel/pci-nommu_64.c
+++ b/arch/x86/kernel/pci-nommu_64.c
@@ -62,8 +62,8 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
int i;
for_each_sg(sg, s, nents, i) {
- BUG_ON(!s->page);
- s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+ BUG_ON(!sg_page(s));
+ s->dma_address = virt_to_bus(sg_virt(s));
if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
return 0;
s->dma_length = s->length;
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index ba2e165a8a0..cc0e91447b7 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -60,6 +60,7 @@
#include <asm/vmi.h>
#include <setup_arch.h>
#include <bios_ebda.h>
+#include <asm/cacheflush.h>
/* This value is set up by the early boot code to point to the value
immediately after the boot time page tables. It contains a *physical*
@@ -73,6 +74,7 @@ int disable_pse __devinitdata = 0;
*/
extern struct resource code_resource;
extern struct resource data_resource;
+extern struct resource bss_resource;
/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
@@ -600,6 +602,8 @@ void __init setup_arch(char **cmdline_p)
code_resource.end = virt_to_phys(_etext)-1;
data_resource.start = virt_to_phys(_etext);
data_resource.end = virt_to_phys(_edata)-1;
+ bss_resource.start = virt_to_phys(&__bss_start);
+ bss_resource.end = virt_to_phys(&__bss_stop)-1;
parse_early_param();
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 31322d42eaa..e7a9e36bd52 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -58,6 +58,7 @@
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/dmi.h>
+#include <asm/cacheflush.h>
/*
* Machine setup..
@@ -133,6 +134,12 @@ struct resource code_resource = {
.end = 0,
.flags = IORESOURCE_RAM,
};
+struct resource bss_resource = {
+ .name = "Kernel bss",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_RAM,
+};
#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
@@ -276,6 +283,8 @@ void __init setup_arch(char **cmdline_p)
code_resource.end = virt_to_phys(&_etext)-1;
data_resource.start = virt_to_phys(&_etext);
data_resource.end = virt_to_phys(&_edata)-1;
+ bss_resource.start = virt_to_phys(&__bss_start);
+ bss_resource.end = virt_to_phys(&__bss_stop)-1;
early_identify_cpu(&boot_cpu_data);
diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index c7b7dfe1d40..c40afbaaf93 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -61,10 +61,10 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
return base;
}
-static void cache_flush_page(void *adr)
+void clflush_cache_range(void *adr, int size)
{
int i;
- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+ for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
clflush(adr+i);
}
@@ -80,7 +80,7 @@ static void flush_kernel_map(void *arg)
asm volatile("wbinvd" ::: "memory");
else list_for_each_entry(pg, l, lru) {
void *adr = page_address(pg);
- cache_flush_page(adr);
+ clflush_cache_range(adr, PAGE_SIZE);
}
__flush_tlb_all();
}
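
Renaming cache_flush_page() to clflush_cache_range() and adding a size argument turns it into a general helper for flushing an arbitrary CPU-written buffer, for example translation tables that a non-cache-coherent IOMMU will read from memory. A hedged usage sketch; the structure and its flag are illustrative, and the prototype is assumed to be made visible through <asm/cacheflush.h>:

#include <asm/cacheflush.h>

struct example_iommu {
	int coherent;			/* does the unit snoop CPU caches? */
};

/* Flush CPU-written table bytes to memory before the hardware walks them. */
static void example_sync_table(struct example_iommu *iommu,
			       void *table, int size)
{
	if (!iommu->coherent)
		clflush_cache_range(table, size);
}
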
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index aab25f3ba3c..c2d24991bb2 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -750,6 +750,38 @@ config PCI_DOMAINS
depends on PCI
default y
+config DMAR
+ bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
+ depends on PCI_MSI && ACPI && EXPERIMENTAL
+ default y
+ help
+ DMA remapping (DMAR) device support enables independent address
+ translations for Direct Memory Access (DMA) from devices.
+ These DMA remapping devices are reported via ACPI tables
+ and include the PCI device scope covered by these DMA
+ remapping devices.
+
+config DMAR_GFX_WA
+ bool "Support for Graphics workaround"
+ depends on DMAR
+ default y
+ help
+ Current graphics drivers tend to use physical addresses
+ for DMA and avoid using DMA APIs. Setting this config
+ option permits the IOMMU driver to set up a unity map for
+ all the OS-visible memory. Hence the drivers can continue
+ to use physical addresses for DMA.
+
+config DMAR_FLOPPY_WA
+ bool
+ depends on DMAR
+ default y
+ help
+ Floppy disk drivers are known to bypass DMA API calls,
+ thereby failing to work when the IOMMU is enabled. This
+ workaround will set up a 1:1 mapping for the first
+ 16M to make the floppy (an ISA device) work.
+
source "drivers/pci/pcie/Kconfig"
source "drivers/pci/Kconfig"