| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-06-03 07:19:45 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-06-03 07:19:45 -0700 |
| commit | 1067b6c2bea7fd2cc9da290d865ab3f3b91c8130 (patch) | |
| tree | 5b20d5fbe9f9a2b3cfbb794018d74606f5031a88 /drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | |
| parent | a652883a244901742d6c9733a9eebdf72e3114ea (diff) | |
| parent | d8dcaa1dc50f5aecd38d34180cd99d6af8566c88 (diff) | |
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (41 commits)
drm/radeon/kms: make sure display hw is disabled when suspending
drm/vmwgfx: Allow userspace to change default layout. Bump minor.
drm/vmwgfx: Fix framebuffer modesetting
drm/vmwgfx: Fix vga save / restore with display topology.
vgaarb: use MIT license
vgaarb: convert pr_devel() to pr_debug()
drm: fix typos in Linux DRM Developer's Guide
drm/radeon/kms/pm: voltage fixes
drm/radeon/kms/pm: radeon_set_power_state fixes
drm/radeon/kms/pm: patch default power state with default clocks/voltages on r6xx+
drm/radeon/kms/pm: enable SetVoltage on r7xx/evergreen
drm/radeon/kms/pm: add support for SetVoltage cmd table (V2)
drm/radeon/kms/evergreen: add initial CS parser
drm/kms: disable/enable poll around switcheroo on/off
drm/nouveau: fixup confusion over which handle the DSM is hanging off.
drm/nouveau: attempt to get bios from ACPI v3
drm/nv50: cast IGP memory location to u64 before shifting
drm/ttm: Fix ttm_page_alloc.c
drm/ttm: Fix cached TTM page allocation.
drm/vmwgfx: Remove some leftover debug messages.
...
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_fence.c')
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 173 |
1 files changed, 173 insertions, 0 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
new file mode 100644
index 00000000000..61eacc1b5ca
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -0,0 +1,173 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "vmwgfx_drv.h"
+
+struct vmw_fence {
+	struct list_head head;
+	uint32_t sequence;
+	struct timespec submitted;
+};
+
+void vmw_fence_queue_init(struct vmw_fence_queue *queue)
+{
+	INIT_LIST_HEAD(&queue->head);
+	queue->lag = ns_to_timespec(0);
+	getrawmonotonic(&queue->lag_time);
+	spin_lock_init(&queue->lock);
+}
+
+void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
+{
+	struct vmw_fence *fence, *next;
+
+	spin_lock(&queue->lock);
+	list_for_each_entry_safe(fence, next, &queue->head, head) {
+		kfree(fence);
+	}
+	spin_unlock(&queue->lock);
+}
+
+int vmw_fence_push(struct vmw_fence_queue *queue,
+		   uint32_t sequence)
+{
+	struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+
+	if (unlikely(!fence))
+		return -ENOMEM;
+
+	fence->sequence = sequence;
+	getrawmonotonic(&fence->submitted);
+	spin_lock(&queue->lock);
+	list_add_tail(&fence->head, &queue->head);
+	spin_unlock(&queue->lock);
+
+	return 0;
+}
+
+int vmw_fence_pull(struct vmw_fence_queue *queue,
+		   uint32_t signaled_sequence)
+{
+	struct vmw_fence *fence, *next;
+	struct timespec now;
+	bool updated = false;
+
+	spin_lock(&queue->lock);
+	getrawmonotonic(&now);
+
+	if (list_empty(&queue->head)) {
+		queue->lag = ns_to_timespec(0);
+		queue->lag_time = now;
+		updated = true;
+		goto out_unlock;
+	}
+
+	list_for_each_entry_safe(fence, next, &queue->head, head) {
+		if (signaled_sequence - fence->sequence > (1 << 30))
+			continue;
+
+		queue->lag = timespec_sub(now, fence->submitted);
+		queue->lag_time = now;
+		updated = true;
+		list_del(&fence->head);
+		kfree(fence);
+	}
+
+out_unlock:
+	spin_unlock(&queue->lock);
+
+	return (updated) ? 0 : -EBUSY;
+}
+
+static struct timespec vmw_timespec_add(struct timespec t1,
+					struct timespec t2)
+{
+	t1.tv_sec += t2.tv_sec;
+	t1.tv_nsec += t2.tv_nsec;
+	if (t1.tv_nsec >= 1000000000L) {
+		t1.tv_sec += 1;
+		t1.tv_nsec -= 1000000000L;
+	}
+
+	return t1;
+}
+
+static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
+{
+	struct timespec now;
+
+	spin_lock(&queue->lock);
+	getrawmonotonic(&now);
+	queue->lag = vmw_timespec_add(queue->lag,
+				      timespec_sub(now, queue->lag_time));
+	queue->lag_time = now;
+	spin_unlock(&queue->lock);
+	return queue->lag;
+}
+
+
+static bool vmw_lag_lt(struct vmw_fence_queue *queue,
+		       uint32_t us)
+{
+	struct timespec lag, cond;
+
+	cond = ns_to_timespec((s64) us * 1000);
+	lag = vmw_fifo_lag(queue);
+	return (timespec_compare(&lag, &cond) < 1);
+}
+
+int vmw_wait_lag(struct vmw_private *dev_priv,
+		 struct vmw_fence_queue *queue, uint32_t us)
+{
+	struct vmw_fence *fence;
+	uint32_t sequence;
+	int ret;
+
+	while (!vmw_lag_lt(queue, us)) {
+		spin_lock(&queue->lock);
+		if (list_empty(&queue->head))
+			sequence = atomic_read(&dev_priv->fence_seq);
+		else {
+			fence = list_first_entry(&queue->head,
+						 struct vmw_fence, head);
+			sequence = fence->sequence;
+		}
+		spin_unlock(&queue->lock);
+
+		ret = vmw_wait_fence(dev_priv, false, sequence, true,
+				     3*HZ);
+
+		if (unlikely(ret != 0))
+			return ret;
+
+		(void) vmw_fence_pull(queue, sequence);
+	}
+	return 0;
+}
+
+
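For readers skimming the new file: vmw_fence_push() timestamps each submitted fence sequence, vmw_fence_pull() retires signaled sequences and updates the accumulated lag, and vmw_wait_lag() lets a submission path block until that lag drops below a microsecond threshold. A minimal usage sketch of that API follows; it is not part of this commit, and both the vmw_submit_commands() helper and the 100 ms threshold are hypothetical placeholders.

```c
/*
 * Illustrative sketch only: how a command submission path might use the
 * fence queue above to throttle itself. vmw_submit_commands() stands in
 * for the driver's real FIFO submission code.
 */
static int example_throttled_submit(struct vmw_private *dev_priv,
				    struct vmw_fence_queue *queue)
{
	uint32_t sequence;
	int ret;

	/*
	 * Block until the accumulated lag drops below ~100 ms (100000 us);
	 * vmw_wait_lag() waits on the oldest outstanding fence and pulls
	 * completed entries off the queue as it goes.
	 */
	ret = vmw_wait_lag(dev_priv, queue, 100000);
	if (unlikely(ret != 0))
		return ret;

	sequence = vmw_submit_commands(dev_priv);	/* hypothetical helper */

	/* Record the submission time so this sequence's lag can be measured. */
	return vmw_fence_push(queue, sequence);
}
```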
