author     Linus Torvalds <torvalds@linux-foundation.org>   2012-10-02 22:09:10 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-10-02 22:09:10 -0700
commit     56d92aa5cf7c96c70f81d0350c94faf46a9fb76d
tree       2fb5d5b891903cada4dff9c581c70d33340a3769 /lib
parent     33c2a174120b2c1baec9d1dac513f9d4b761b26a
parent     c341ca45ce56143804ef5a8f4db753e554e640b4
Merge tag 'stable/for-linus-3.7-x86-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen
Pull Xen update from Konrad Rzeszutek Wilk:
"Features:
- When hotplugging PCI devices in a PV guest we can allocate
Xen-SWIOTLB later.
- Cleanup Xen SWIOTLB.
- Support paged-out grants from HVM domains in the backends.
- Support wild cards in xen-pciback.hide=(BDF) arguments.
- Resynchronise grant table status codes with the upstream hypervisor.
- Boot PV guests with more than 128GB.
- Cleanup Xen MMU code/add comments.
- Obtain XENVERS using a preferred method.
- Lay out generic changes to support Xen ARM.
- Allow privcmd ioctl for HVM (used to do only PV).
- Do v2 of mmap_batch for privcmd ioctls.
- If the hypervisor saves the keyboard LED state, we will now inform
the kernel about it.
Fixes:
- More fixes to Xen PCI backend for various calls/FLR/etc.
- With more than 4GB in a 64-bit PV guest, disable the native SWIOTLB.
- Fix up smatch warnings.
- Fix up various return values in privcmd and mm."
* tag 'stable/for-linus-3.7-x86-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen: (48 commits)
xen/pciback: Restore the PCI config space after an FLR.
xen-pciback: properly clean up after calling pcistub_device_find()
xen/vga: add the xen EFI video mode support
xen/x86: retrieve keyboard shift status flags from hypervisor.
xen/gndev: Xen backend support for paged out grant targets V4.
xen-pciback: support wild cards in slot specifications
xen/swiotlb: Fix compile warnings when using plain integer instead of NULL pointer.
xen/swiotlb: Remove functions not needed anymore.
xen/pcifront: Use Xen-SWIOTLB when initting if required.
xen/swiotlb: For early initialization, return zero on success.
xen/swiotlb: Use the swiotlb_late_init_with_tbl to init Xen-SWIOTLB late when PV PCI is used.
xen/swiotlb: Move the error strings to its own function.
xen/swiotlb: Move the nr_tbl determination in its own function.
xen/arm: compile and run xenbus
xen: resynchronise grant table status codes with upstream
xen/privcmd: return -EFAULT on error
xen/privcmd: Fix mmap batch ioctl error status copy back.
xen/privcmd: add PRIVCMD_MMAPBATCH_V2 ioctl
xen/mm: return more precise error from xen_remap_domain_range()
xen/mmu: If the revector fails, don't attempt to revector anything else.
...
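As an aside on the xen-pciback wildcard support mentioned in the feature list and shortlog above: the pull already documents the xen-pciback.hide=(BDF) notation, so a wildcard entry on the kernel command line would plausibly look like the line below. This is an illustrative guess at the syntax, not text from this merge; the exact wildcard rules should be checked against the xen-pciback commit and its documentation.

    xen-pciback.hide=(0000:03:00.0)(0000:04:*.*)

Read as: hide one fully specified device, plus (assumed form) every slot and function on bus 04.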
Diffstat (limited to 'lib')
-rw-r--r--	lib/swiotlb.c | 33
1 file changed, 24 insertions, 9 deletions
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 45bc1f83a5ad..f114bf6a8e13 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -170,7 +170,7 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
  */
-void __init
+static void __init
 swiotlb_init_with_default_size(size_t default_size, int verbose)
 {
 	unsigned long bytes;
@@ -206,8 +206,9 @@ swiotlb_init(int verbose)
 int
 swiotlb_late_init_with_default_size(size_t default_size)
 {
-	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
+	unsigned long bytes, req_nslabs = io_tlb_nslabs;
 	unsigned int order;
+	int rc = 0;
 
 	if (!io_tlb_nslabs) {
 		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
@@ -229,16 +230,32 @@ swiotlb_late_init_with_default_size(size_t default_size)
 		order--;
 	}
 
-	if (!io_tlb_start)
-		goto cleanup1;
-
+	if (!io_tlb_start) {
+		io_tlb_nslabs = req_nslabs;
+		return -ENOMEM;
+	}
 	if (order != get_order(bytes)) {
 		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
 		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
 		io_tlb_nslabs = SLABS_PER_PAGE << order;
-		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 	}
+	rc = swiotlb_late_init_with_tbl(io_tlb_start, io_tlb_nslabs);
+	if (rc)
+		free_pages((unsigned long)io_tlb_start, order);
+	return rc;
+}
+
+int
+swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
+{
+	unsigned long i, bytes;
+
+	bytes = nslabs << IO_TLB_SHIFT;
+
+	io_tlb_nslabs = nslabs;
+	io_tlb_start = tlb;
 	io_tlb_end = io_tlb_start + bytes;
+
 	memset(io_tlb_start, 0, bytes);
 
 	/*
@@ -288,10 +305,8 @@ cleanup3:
 	io_tlb_list = NULL;
 cleanup2:
 	io_tlb_end = NULL;
-	free_pages((unsigned long)io_tlb_start, order);
 	io_tlb_start = NULL;
-cleanup1:
-	io_tlb_nslabs = req_nslabs;
+	io_tlb_nslabs = 0;
 	return -ENOMEM;
 }
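The change above splits out a second late-initialization entry point, swiotlb_late_init_with_tbl(), so that a caller can allocate the bounce buffer itself and only then hand it to the SWIOTLB core; this is what allows the Xen-SWIOTLB to be set up after boot when a PV PCI device is hotplugged, per the shortlog entries above. Below is a minimal sketch of such a caller. Everything other than swiotlb_late_init_with_tbl(), IO_TLB_SHIFT, get_order(), __get_free_pages() and free_pages() is an illustrative assumption, not code from this merge.

/* Hypothetical late SWIOTLB setup, modelled on a Xen-style caller. */
#include <linux/swiotlb.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int example_late_swiotlb_setup(unsigned long nslabs)
{
	unsigned long bytes = nslabs << IO_TLB_SHIFT;
	unsigned int order = get_order(bytes);
	char *tlb;
	int rc;

	/* The caller, not the SWIOTLB core, owns the allocation. */
	tlb = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
	if (!tlb)
		return -ENOMEM;

	/* Hand the buffer over; on success the core records io_tlb_start/io_tlb_nslabs. */
	rc = swiotlb_late_init_with_tbl(tlb, nslabs);
	if (rc)
		/* Mirrors the patch: on failure the pages are freed by the caller. */
		free_pages((unsigned long)tlb, order);
	return rc;
}

This mirrors the error-handling change in the hunks above: swiotlb_late_init_with_default_size() now frees its own pages when swiotlb_late_init_with_tbl() fails, so the cleanup1 label and its free_pages() call are dropped from the shared error path.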