From a05502e5cfa9abe17a16592be82c2f5692c91f35 Mon Sep 17 00:00:00 2001 From: Horace Chen Date: Fri, 29 Sep 2017 14:41:57 +0800 Subject: drm/amdgpu: Reserve shared memory on VRAM for SR-IOV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SR-IOV needs to reserve a piece of shared VRAM at an exact location to exchange data between the PF and VF. The start address and size of the shared memory are passed to the guest through the VBIOS structure VRAM_UsageByFirmware. VRAM_UsageByFirmware is a general VBIOS feature; it indicates that the VBIOS needs to reserve a piece of memory on the VRAM. Because the memory address is specified, reserve it early in amdgpu_ttm_init to make sure that it can monopolize the space. Signed-off-by: Horace Chen Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 15a28578d458..1f68a146e26c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1255,6 +1255,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* Change the size here instead of the init above so only lpfn is affected */ amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); + /* + *The reserved vram for firmware must be pinned to the specified + *place on the VRAM, so reserve it early. + */ + r = amdgpu_fw_reserve_vram_init(adev); + if (r) { + return r; + } + r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->stolen_vga_memory, -- cgit v1.2.3 From b82485fd384a56c27fae44e649552eca6334237a Mon Sep 17 00:00:00 2001 From: Andres Rodriguez Date: Fri, 15 Sep 2017 21:05:19 -0400 Subject: drm/amdgpu: add helper to convert a ttm bo to amdgpu_bo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Andres Rodriguez Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 8 +++----- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 5 +++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 9 +++++---- 3 files changed, 13 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 6982baeccd14..8b4ed8a98a18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -40,9 +40,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) { struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); - struct amdgpu_bo *bo; - - bo = container_of(tbo, struct amdgpu_bo, tbo); + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); amdgpu_bo_kunmap(bo); @@ -884,7 +882,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) return; - abo = container_of(bo, struct amdgpu_bo, tbo); + abo = ttm_to_amdgpu_bo(bo); amdgpu_vm_bo_invalidate(adev, abo, evict); amdgpu_bo_kunmap(abo); @@ -911,7 +909,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) return 0; - abo = container_of(bo, struct amdgpu_bo, tbo); + abo = ttm_to_amdgpu_bo(bo); /* Remember that this BO was accessed by the CPU */ abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 39b6bf6fb051..c26ef53604af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -94,6 +94,11 @@ struct amdgpu_bo { }; }; +static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo) +{ + return container_of(tbo, struct amdgpu_bo, tbo); +} + /** * amdgpu_mem_type_to_domain - return domain corresponding to mem_type * @mem_type: ttm memory type diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 1f68a146e26c..10952c3e5eb6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -44,6 +44,7 @@ #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/debugfs.h> #include "amdgpu.h" +#include "amdgpu_object.h" #include "amdgpu_trace.h" #include "bif/bif_4_1_d.h" @@ -209,7 +210,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, placement->num_busy_placement = 1; return; } - abo = container_of(bo, struct amdgpu_bo, tbo); + abo = ttm_to_amdgpu_bo(bo); switch (bo->mem.mem_type) { case TTM_PL_VRAM: if (adev->mman.buffer_funcs && @@ -257,7 +258,7 @@ gtt: static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) { - struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo); + struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); if (amdgpu_ttm_tt_get_usermm(bo->ttm)) return -EPERM; @@ -484,7 +485,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, int r; /* Can't move a pinned BO */ - abo = container_of(bo, struct amdgpu_bo, tbo); + abo = ttm_to_amdgpu_bo(bo); if (WARN_ON_ONCE(abo->pin_count > 0)) return -EINVAL; @@ -1142,7 +1143,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, unsigned long offset, void *buf, int len, int write) { - struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo); + struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); struct drm_mm_node *nodes = abo->tbo.mem.mm_node; uint32_t value = 0; -- cgit v1.2.3 From 177ae09b5d699a5ebd1cafcee78889db968abf54 Mon Sep 17 00:00:00 2001 From: Andres Rodriguez Date: Fri, 15 Sep 2017 20:44:06 -0400 Subject: drm/amdgpu: introduce AMDGPU_GEM_CREATE_EXPLICIT_SYNC v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce a flag to signal that access to a BO will be synchronized through an external mechanism. Currently all buffers shared between contexts are subject to implicit synchronization. However, this is only required for protocols that currently don't support an explicit synchronization mechanism (DRI2/3). This patch introduces the AMDGPU_GEM_CREATE_EXPLICIT_SYNC flag, so that users can specify when it is safe to disable implicit sync.
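For illustration, a minimal userspace sketch of how a client might set this flag through the existing GEM_CREATE uapi (the helper name, the GTT domain choice and the 4 KiB alignment are illustrative assumptions, not part of this patch; error handling is trimmed):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Hypothetical helper: create a BO whose fencing is managed entirely in
 * userspace (e.g. via syncobjs), so the kernel CS path can skip its
 * shared fences.
 */
static int create_explicit_sync_bo(int fd, uint64_t size, uint32_t *handle)
{
	union drm_amdgpu_gem_create args;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = size;
	args.in.alignment = 4096;
	args.in.domains = AMDGPU_GEM_DOMAIN_GTT;
	args.in.domain_flags = AMDGPU_GEM_CREATE_EXPLICIT_SYNC;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
		return -1;

	*handle = args.out.handle;
	return 0;
}

Note that a BO created this way still participates in the exclusive-fence wait; only the shared-fence wait is skipped, as the amdgpu_sync_resv() change below shows.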
v2: only disable implicit sync in amdgpu_cs_ioctl Reviewed-by: Christian König Signed-off-by: Andres Rodriguez Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 4 +++- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 8 ++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8 ++++---- include/uapi/drm/amdgpu_drm.h | 2 ++ 8 files changed, 29 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index ab83dfcabb41..38027a00f8ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -705,7 +705,8 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) list_for_each_entry(e, &p->validated, tv.head) { struct reservation_object *resv = e->robj->tbo.resv; - r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp); + r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp, + amdgpu_bo_explicit_sync(e->robj)); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index b0d45c8e6bb3..21e99366cab3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -212,7 +212,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, AMDGPU_GEM_CREATE_NO_CPU_ACCESS | AMDGPU_GEM_CREATE_CPU_GTT_USWC | AMDGPU_GEM_CREATE_VRAM_CLEARED | - AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)) + AMDGPU_GEM_CREATE_VM_ALWAYS_VALID | + AMDGPU_GEM_CREATE_EXPLICIT_SYNC)) + return -EINVAL; /* reject invalid gem domains */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index c26ef53604af..428aae048f4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -193,6 +193,14 @@ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo) } } +/** + * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced + */ +static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo) +{ + return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC; +} + int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index c586f44312f9..a4bf21f8f1c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -169,14 +169,14 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, * * @sync: sync object to add fences from reservation object to * @resv: reservation object with embedded fence - * @shared: true if we should only sync to the exclusive fence + * @explicit_sync: true if we should only sync to the exclusive fence * * Sync to the fence */ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct reservation_object *resv, - void *owner) + void *owner, bool explicit_sync) { struct reservation_object_list *flist; struct dma_fence *f; @@ -191,6 +191,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, f = reservation_object_get_excl(resv); r = amdgpu_sync_fence(adev, sync, f); + if (explicit_sync) + return r; + flist = reservation_object_get_list(resv); if (!flist || r) return r; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h index dc7687993317..70d7e3a279a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h @@ -45,7 +45,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync, int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, struct reservation_object *resv, - void *owner); + void *owner, + bool explicit_sync); struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct amdgpu_ring *ring); struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 10952c3e5eb6..a2282bacf960 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1489,7 +1489,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, job->vm_needs_flush = vm_needs_flush; if (resv) { r = amdgpu_sync_resv(adev, &job->sync, resv, - AMDGPU_FENCE_OWNER_UNDEFINED); + AMDGPU_FENCE_OWNER_UNDEFINED, + false); if (r) { DRM_ERROR("sync failed (%d).\n", r); goto error_free; @@ -1581,7 +1582,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, if (resv) { r = amdgpu_sync_resv(adev, &job->sync, resv, - AMDGPU_FENCE_OWNER_UNDEFINED); + AMDGPU_FENCE_OWNER_UNDEFINED, false); if (r) { DRM_ERROR("sync failed (%d).\n", r); goto error_free; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index eb4a01c14eee..c559d76ff695 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1035,7 +1035,7 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm, int r; amdgpu_sync_create(&sync); - amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner); + amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false); r = amdgpu_sync_wait(&sync, true); amdgpu_sync_free(&sync); @@ -1176,11 +1176,11 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, amdgpu_ring_pad_ib(ring, params.ib); amdgpu_sync_resv(adev, &job->sync, parent->base.bo->tbo.resv, - AMDGPU_FENCE_OWNER_VM); + AMDGPU_FENCE_OWNER_VM, false); if (shadow) amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv, - AMDGPU_FENCE_OWNER_VM); + AMDGPU_FENCE_OWNER_VM, false); WARN_ON(params.ib->length_dw > ndw); r = amdgpu_job_submit(job, ring, &vm->entity, @@ -1644,7 +1644,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, goto error_free; r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv, - owner); + owner, false); if (r) goto error_free; diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 4c6e8c482ee4..b62484af8ccb 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -91,6 +91,8 @@ extern "C" { #define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS (1 << 5) /* Flag that BO is always valid in this VM */ #define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6) +/* Flag that BO sharing will be explicitly synchronized */ +#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7) struct drm_amdgpu_gem_create_in { /** the requested memory size */ -- cgit v1.2.3 From 1eca5a530dc3ac0a2fadd21da1c9e6c729a4a2a1 Mon Sep 17 00:00:00 2001 From: Harish Kasiviswanathan Date: Tue, 3 Oct 2017 15:41:56 -0400 Subject: drm/amdgpu: Refactor amdgpu_move_blit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a more generic function, 
amdgpu_copy_ttm_mem_to_mem(), which supports arbitrary copy sizes, offsets, and two BOs (source and destination). This is useful for the KFD Cross Memory Attach feature, where data needs to be copied between BOs belonging to different processes. v2: Add struct amdgpu_copy_mem and change the amdgpu_copy_ttm_mem_to_mem() function parameters to use the struct v3: Minor function name change Signed-off-by: Harish Kasiviswanathan Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 169 +++++++++++++++++++++++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 12 +++ 2 files changed, 132 insertions(+), 49 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index a2282bacf960..382c0ae0561e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -290,97 +290,168 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, return addr; } -static int amdgpu_move_blit(struct ttm_buffer_object *bo, - bool evict, bool no_wait_gpu, - struct ttm_mem_reg *new_mem, - struct ttm_mem_reg *old_mem) +/** + * amdgpu_ttm_copy_mem_to_mem - Helper function for copy + * + * The function copies @size bytes from {src->mem + src->offset} to + * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a + * move and different for a BO to BO copy. + * + * @f: Returns the last fence if multiple jobs are submitted. + */ +int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, + struct amdgpu_copy_mem *src, + struct amdgpu_copy_mem *dst, + uint64_t size, + struct reservation_object *resv, + struct dma_fence **f) { - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; - - struct drm_mm_node *old_mm, *new_mm; - uint64_t old_start, old_size, new_start, new_size; - unsigned long num_pages; + struct drm_mm_node *src_mm, *dst_mm; + uint64_t src_node_start, dst_node_start, src_node_size, + dst_node_size, src_page_offset, dst_page_offset; struct dma_fence *fence = NULL; - int r; - - BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0); + int r = 0; + const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE * + AMDGPU_GPU_PAGE_SIZE); if (!ring->ready) { DRM_ERROR("Trying to move memory with ring turned off.\n"); return -EINVAL; } - old_mm = old_mem->mm_node; - old_size = old_mm->size; - old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem); + src_mm = src->mem->mm_node; + while (src->offset >= (src_mm->size << PAGE_SHIFT)) { + src->offset -= (src_mm->size << PAGE_SHIFT); + ++src_mm; + } + src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) + + src->offset; + src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset; + src_page_offset = src_node_start & (PAGE_SIZE - 1); - new_mm = new_mem->mm_node; - new_size = new_mm->size; - new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem); + dst_mm = dst->mem->mm_node; + while (dst->offset >= (dst_mm->size << PAGE_SHIFT)) { + dst->offset -= (dst_mm->size << PAGE_SHIFT); + ++dst_mm; + } + dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) + + dst->offset; + dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset; + dst_page_offset = dst_node_start & (PAGE_SIZE - 1); - num_pages = new_mem->num_pages; mutex_lock(&adev->mman.gtt_window_lock); - while (num_pages) { - unsigned long cur_pages = min(min(old_size, new_size), - (u64)AMDGPU_GTT_MAX_TRANSFER_SIZE); - uint64_t from = old_start, to = new_start; + + while 
(size) { + unsigned long cur_size; + uint64_t from = src_node_start, to = dst_node_start; struct dma_fence *next; - if (old_mem->mem_type == TTM_PL_TT && - !amdgpu_gtt_mgr_is_allocated(old_mem)) { - r = amdgpu_map_buffer(bo, old_mem, cur_pages, - old_start, 0, ring, &from); + /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst + * begins at an offset, then adjust the size accordingly + */ + cur_size = min3(min(src_node_size, dst_node_size), size, + GTT_MAX_BYTES); + if (cur_size + src_page_offset > GTT_MAX_BYTES || + cur_size + dst_page_offset > GTT_MAX_BYTES) + cur_size -= max(src_page_offset, dst_page_offset); + + /* Map only what needs to be accessed. Map src to window 0 and + * dst to window 1 + */ + if (src->mem->mem_type == TTM_PL_TT && + !amdgpu_gtt_mgr_is_allocated(src->mem)) { + r = amdgpu_map_buffer(src->bo, src->mem, + PFN_UP(cur_size + src_page_offset), + src_node_start, 0, ring, + &from); if (r) goto error; + /* Adjust the offset because amdgpu_map_buffer returns + * start of mapped page + */ + from += src_page_offset; } - if (new_mem->mem_type == TTM_PL_TT && - !amdgpu_gtt_mgr_is_allocated(new_mem)) { - r = amdgpu_map_buffer(bo, new_mem, cur_pages, - new_start, 1, ring, &to); + if (dst->mem->mem_type == TTM_PL_TT && + !amdgpu_gtt_mgr_is_allocated(dst->mem)) { + r = amdgpu_map_buffer(dst->bo, dst->mem, + PFN_UP(cur_size + dst_page_offset), + dst_node_start, 1, ring, + &to); if (r) goto error; + to += dst_page_offset; } - r = amdgpu_copy_buffer(ring, from, to, - cur_pages * PAGE_SIZE, - bo->resv, &next, false, true); + r = amdgpu_copy_buffer(ring, from, to, cur_size, + resv, &next, false, true); if (r) goto error; dma_fence_put(fence); fence = next; - num_pages -= cur_pages; - if (!num_pages) + size -= cur_size; + if (!size) break; - old_size -= cur_pages; - if (!old_size) { - old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem); - old_size = old_mm->size; + src_node_size -= cur_size; + if (!src_node_size) { + src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm, + src->mem); + src_node_size = (src_mm->size << PAGE_SHIFT); } else { - old_start += cur_pages * PAGE_SIZE; + src_node_start += cur_size; + src_page_offset = src_node_start & (PAGE_SIZE - 1); } - - new_size -= cur_pages; - if (!new_size) { - new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem); - new_size = new_mm->size; + dst_node_size -= cur_size; + if (!dst_node_size) { + dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm, + dst->mem); + dst_node_size = (dst_mm->size << PAGE_SHIFT); } else { - new_start += cur_pages * PAGE_SIZE; + dst_node_start += cur_size; + dst_page_offset = dst_node_start & (PAGE_SIZE - 1); } } +error: mutex_unlock(&adev->mman.gtt_window_lock); + if (f) + *f = dma_fence_get(fence); + dma_fence_put(fence); + return r; +} + + +static int amdgpu_move_blit(struct ttm_buffer_object *bo, + bool evict, bool no_wait_gpu, + struct ttm_mem_reg *new_mem, + struct ttm_mem_reg *old_mem) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); + struct amdgpu_copy_mem src, dst; + struct dma_fence *fence = NULL; + int r; + + src.bo = bo; + dst.bo = bo; + src.mem = old_mem; + dst.mem = new_mem; + src.offset = 0; + dst.offset = 0; + + r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, + new_mem->num_pages << PAGE_SHIFT, + bo->resv, &fence); + if (r) + goto error; r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); dma_fence_put(fence); return r; error: - mutex_unlock(&adev->mman.gtt_window_lock); - if (fence) dma_fence_wait(fence, false); dma_fence_put(fence); diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 7abae6867339..abd4084982a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -58,6 +58,12 @@ struct amdgpu_mman { struct amd_sched_entity entity; }; +struct amdgpu_copy_mem { + struct ttm_buffer_object *bo; + struct ttm_mem_reg *mem; + unsigned long offset; +}; + extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func; extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; @@ -72,6 +78,12 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, struct reservation_object *resv, struct dma_fence **fence, bool direct_submit, bool vm_needs_flush); +int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, + struct amdgpu_copy_mem *src, + struct amdgpu_copy_mem *dst, + uint64_t size, + struct reservation_object *resv, + struct dma_fence **f); int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint64_t src_data, struct reservation_object *resv, -- cgit v1.2.3 From e1d515052f9075eb1b791b21467d79db3529db83 Mon Sep 17 00:00:00 2001 From: Harish Kasiviswanathan Date: Fri, 6 Oct 2017 17:36:35 -0400 Subject: drm/amdgpu: Add amdgpu_find_mm_node() Replace some commonly repeated code with a function. v2: Use amdgpu_find_mm_node() in amdgpu_ttm_io_mem_pfn() Signed-off-by: Harish Kasiviswanathan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 49 ++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 382c0ae0561e..51eacefadea1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -291,7 +291,24 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, } /** - * amdgpu_ttm_copy_mem_to_mem - Helper function for copy + * amdgpu_find_mm_node - Helper function finds the drm_mm_node + * corresponding to @offset. It also modifies the offset to be + * within the drm_mm_node returned + */ +static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem, + unsigned long *offset) +{ + struct drm_mm_node *mm_node = mem->mm_node; + + while (*offset >= (mm_node->size << PAGE_SHIFT)) { + *offset -= (mm_node->size << PAGE_SHIFT); + ++mm_node; + } + return mm_node; +} + +/** + * amdgpu_copy_ttm_mem_to_mem - Helper function for copy * * The function copies @size bytes from {src->mem + src->offset} to * {dst->mem + dst->offset}. 
src->bo and dst->bo could be same BO for a @@ -320,21 +337,13 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, return -EINVAL; } - src_mm = src->mem->mm_node; - while (src->offset >= (src_mm->size << PAGE_SHIFT)) { - src->offset -= (src_mm->size << PAGE_SHIFT); - ++src_mm; - } + src_mm = amdgpu_find_mm_node(src->mem, &src->offset); src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) + src->offset; src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset; src_page_offset = src_node_start & (PAGE_SIZE - 1); - dst_mm = dst->mem->mm_node; - while (dst->offset >= (dst_mm->size << PAGE_SHIFT)) { - dst->offset -= (dst_mm->size << PAGE_SHIFT); - ++dst_mm; - } + dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset); dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) + dst->offset; dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset; @@ -654,13 +663,12 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, unsigned long page_offset) { - struct drm_mm_node *mm = bo->mem.mm_node; - uint64_t size = mm->size; - uint64_t offset = page_offset; + struct drm_mm_node *mm; + unsigned long offset = (page_offset << PAGE_SHIFT); - page_offset = do_div(offset, size); - mm += offset; - return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset; + mm = amdgpu_find_mm_node(&bo->mem, &offset); + return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + + (offset >> PAGE_SHIFT); } /* @@ -1216,7 +1224,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, { struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); - struct drm_mm_node *nodes = abo->tbo.mem.mm_node; + struct drm_mm_node *nodes; uint32_t value = 0; int ret = 0; uint64_t pos; @@ -1225,10 +1233,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, if (bo->mem.mem_type != TTM_PL_VRAM) return -EIO; - while (offset >= (nodes->size << PAGE_SHIFT)) { - offset -= nodes->size << PAGE_SHIFT; - ++nodes; - } + nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset); pos = (nodes->start << PAGE_SHIFT) + offset; while (len && pos < adev->mc.mc_vram_size) { -- cgit v1.2.3
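Taken together, the copy helper and the node lookup make BO-to-BO transfers straightforward. A hedged sketch of a caller, in the spirit of the KFD cross-memory-attach use case named above (the wrapper function itself is hypothetical; it assumes both BOs are already reserved and have valid placements, which real callers must guarantee):

#include "amdgpu.h"
#include "amdgpu_ttm.h"

/* Hypothetical wrapper (not from the patches above): copy the first
 * @size bytes of @src_bo into @dst_bo with the new helper and wait for
 * the blit to finish. Assumes the caller holds both reservations;
 * eviction and error handling are simplified.
 */
static int example_bo_to_bo_copy(struct amdgpu_device *adev,
				 struct amdgpu_bo *src_bo,
				 struct amdgpu_bo *dst_bo,
				 uint64_t size)
{
	struct amdgpu_copy_mem src = {
		.bo = &src_bo->tbo,
		.mem = &src_bo->tbo.mem,
		.offset = 0,
	};
	struct amdgpu_copy_mem dst = {
		.bo = &dst_bo->tbo,
		.mem = &dst_bo->tbo.mem,
		.offset = 0,
	};
	struct dma_fence *fence = NULL;
	int r;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, size,
				       dst_bo->tbo.resv, &fence);
	if (r)
		return r;

	/* The helper returns only the last fence of possibly many jobs. */
	if (fence) {
		r = dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}
	return r;
}

Unlike amdgpu_move_blit(), nothing is pipelined here: both BOs keep their placements, so the sketch simply waits on the returned fence before the caller touches the destination.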