author		David Dillow <dillow@google.com>	2017-02-08 10:25:40 +1100
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2017-02-08 10:25:40 +1100
commit		616c977504a0a2ccbbaa702febce30a525a83f16 (patch)
tree		1f41702b5758ff74a17ab1ccdb576eb98096356b /lib/scatterlist.c
parent		9528c450608fcef69687a02091c1d8dfc6102ab6 (diff)
scatterlist: don't overflow length field
When called with a region of contiguous pages totaling > 4 GB of memory, sg_alloc_table_from_pages() will overflow the length field, leading to a corrupt scatterlist. Fix this by tracking the number of pages we've merged and starting a new chunk when we would overflow.

Tested by building various page lists with contiguous 8 GB regions and observing that they are correctly split without overflowing the length field.

This isn't from normal read/write IO -- some applications want to access large amounts of userspace memory directly from hardware, and it is cleaner for them to manage one mapping than multiple 1 GB or 2 GB mappings -- assuming the hardware can even support multiple mappings. If they have room in their container to allocate and pin the memory, we'd like to allow it.

There's definitely potential for problems downstream, even without going through the filesystems and block layers -- we noticed this potential issue while tracking down a bug in the IOMMU code when an entry in the list was over 1 GB. We still see a benefit from building the large entries, though -- it allows superpages in the IOMMU mapping, which helps the IOTLB cache.

We currently use sg_alloc_table_from_pages() to build the scatterlist for dma_map_sg(), but we could do it ourselves if you'd rather add a length limit to the more general code.

Link: http://lkml.kernel.org/r/20170201212917.11278-1-dillow@google.com
Signed-off-by: David Dillow <dillow@google.com>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
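[Editor's illustration, not part of the patch: a minimal userspace C sketch of the arithmetic behind the fix, assuming 4 KiB pages (PAGE_SHIFT == 12). An 8 GiB contiguous run wraps the 32-bit length field, and the UINT_MAX >> PAGE_SHIFT cap splits it into three chunks.]

#include <stdio.h>
#include <limits.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

int main(void)
{
	unsigned long n_pages = 2UL << 20;	/* 2M contiguous pages = 8 GiB */

	/* struct scatterlist::length is an unsigned int, so the naive
	 * byte count wraps for any single chunk of 4 GiB or more: */
	unsigned int naive_len = (unsigned int)(n_pages << PAGE_SHIFT);

	/* the patch caps each chunk at UINT_MAX >> PAGE_SHIFT pages */
	unsigned long cap = UINT_MAX >> PAGE_SHIFT;	/* 1048575 pages */

	printf("naive length: %u bytes (wrapped to zero)\n", naive_len);
	printf("cap per chunk: %lu pages\n", cap);
	printf("chunks for 8 GiB: %lu\n", (n_pages + cap - 1) / cap);
	return 0;
}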
Diffstat (limited to 'lib/scatterlist.c')
-rw-r--r--	lib/scatterlist.c	22
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 004fc70fc56a..539dd344f1c5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -394,17 +394,26 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
 				unsigned long offset, unsigned long size,
 				gfp_t gfp_mask)
 {
+	unsigned int chunk_pages;
 	unsigned int chunks;
 	unsigned int i;
 	unsigned int cur_page;
 	int ret;
 	struct scatterlist *s;
 
+	BUILD_BUG_ON(!typecheck(typeof(s->length), unsigned int));
+
 	/* compute number of contiguous chunks */
 	chunks = 1;
-	for (i = 1; i < n_pages; ++i)
-		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
+	chunk_pages = 1;
+	for (i = 1; i < n_pages; ++i) {
+		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1 ||
+		    chunk_pages >= UINT_MAX >> PAGE_SHIFT) {
 			++chunks;
+			chunk_pages = 0;
+		}
+		++chunk_pages;
+	}
 
 	ret = sg_alloc_table(sgt, chunks, gfp_mask);
 	if (unlikely(ret))
@@ -417,10 +426,15 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
 		unsigned int j;
 
 		/* look for the end of the current chunk */
-		for (j = cur_page + 1; j < n_pages; ++j)
+		chunk_pages = 1;
+		for (j = cur_page + 1; j < n_pages; ++j) {
 			if (page_to_pfn(pages[j]) !=
-			    page_to_pfn(pages[j - 1]) + 1)
+			    page_to_pfn(pages[j - 1]) + 1 ||
+			    chunk_pages >= UINT_MAX >> PAGE_SHIFT) {
 				break;
+			}
+			++chunk_pages;
+		}
 
 		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
 		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
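[Editor's illustration, not part of the patch: a hypothetical caller along the lines the commit message describes -- build one table for a large pinned buffer with sg_alloc_table_from_pages() and hand it to dma_map_sg(). Function name and error handling are illustrative.]

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical caller sketch: map a large pinned buffer for DMA. */
static int map_pinned_buffer(struct device *dev, struct page **pages,
			     unsigned int n_pages, unsigned long size)
{
	struct sg_table sgt;
	int nents, ret;

	/* Merges physically contiguous pages into large entries; with
	 * this fix, each entry is capped so ->length cannot overflow. */
	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 0, size,
					GFP_KERNEL);
	if (ret)
		return ret;

	nents = dma_map_sg(dev, sgt.sgl, sgt.orig_nents, DMA_BIDIRECTIONAL);
	if (!nents) {
		sg_free_table(&sgt);
		return -ENOMEM;
	}

	/* ... program the hardware with the nents mapped entries ... */

	dma_unmap_sg(dev, sgt.sgl, sgt.orig_nents, DMA_BIDIRECTIONAL);
	sg_free_table(&sgt);
	return 0;
}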