path: root/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Huge page-table-entry support for IO memory.
 *
 * Copyright (C) 2007-2019 VMware, Inc. All rights reserved.
 */
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>

/**
 * struct vmw_thp_manager - Range manager implementing huge page alignment
 *
 * @manager: TTM resource manager.
 * @mm: The underlying range manager. Protected by @lock.
 * @lock: Manager lock.
 */
struct vmw_thp_manager {
	struct ttm_resource_manager manager;
	struct drm_mm mm;
	spinlock_t lock;
};

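/**
 * to_thp_manager - Convert a TTM resource manager to the THP wrapper
 * @man: The embedded struct ttm_resource_manager.
 *
 * Return: The containing struct vmw_thp_manager.
 */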
static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct vmw_thp_manager, manager);
}

static const struct ttm_resource_manager_func vmw_thp_func;

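/**
 * vmw_thp_insert_aligned - Attempt a huge-page-aligned range allocation
 * @bo: The buffer object the range is allocated for.
 * @mm: The underlying range allocator.
 * @node: The node to insert.
 * @align_pages: The requested huge-page alignment, in pages.
 * @place: Placement restrictions.
 * @mem: The resource being allocated.
 * @lpfn: Last page frame number of the allowed range.
 * @mode: The drm_mm insertion mode.
 *
 * Insert @node with huge-page alignment, but only if that alignment is
 * compatible with the buffer object's own page alignment.
 *
 * Return: 0 on success, -ENOSPC if the alignments are incompatible or no
 * suitable hole was found.
 */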
static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
				  struct drm_mm *mm, struct drm_mm_node *node,
				  unsigned long align_pages,
				  const struct ttm_place *place,
				  struct ttm_resource *mem,
				  unsigned long lpfn,
				  enum drm_mm_insert_mode mode)
{
	if (align_pages >= bo->page_alignment &&
	    (!bo->page_alignment || align_pages % bo->page_alignment == 0)) {
		return drm_mm_insert_node_in_range(mm, node,
						   mem->num_pages,
						   align_pages, 0,
						   place->fpfn, lpfn, mode);
	}

	return -ENOSPC;
}

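/**
 * vmw_thp_get_node - Allocate a huge-page-aligned range for a buffer object
 * @man: The resource manager.
 * @bo: The buffer object to allocate for.
 * @place: Placement restrictions.
 * @res: Assigned the newly allocated resource on success.
 *
 * Try PUD-sized huge-page alignment first (where the architecture supports
 * transparent huge PUD entries), then PMD-sized alignment, and finally fall
 * back to the buffer object's own page alignment.
 *
 * Return: 0 on success, negative error code on failure.
 */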
static int vmw_thp_get_node(struct ttm_resource_manager *man,
			    struct ttm_buffer_object *bo,
			    const struct ttm_place *place,
			    struct ttm_resource **res)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);
	struct drm_mm *mm = &rman->mm;
	struct ttm_range_mgr_node *node;
	unsigned long align_pages;
	unsigned long lpfn;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
	int ret;

	node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ttm_resource_init(bo, place, &node->base);

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	spin_lock(&rman->lock);
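	/* Prefer PUD-sized alignment where the arch supports huge PUD entries. */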
	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
		if (node->base.num_pages >= align_pages) {
			ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
						     align_pages, place,
						     &node->base, lpfn, mode);
			if (!ret)
				goto found_unlock;
		}
	}

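	/* Otherwise, try PMD-sized alignment. */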
	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
	if (node->base.num_pages >= align_pages) {
		ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
					     align_pages, place, &node->base,
					     lpfn, mode);
		if (!ret)
			goto found_unlock;
	}

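	/* Fall back to the buffer object's own page alignment. */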
	ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
					  node->base.num_pages,
					  bo->page_alignment, 0,
					  place->fpfn, lpfn, mode);
found_unlock:
	spin_unlock(&rman->lock);

	if (unlikely(ret)) {
		kfree(node);
	} else {
		node->base.start = node->mm_nodes[0].start;
		*res = &node->base;
	}

	return ret;
}

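/**
 * vmw_thp_put_node - Free a range allocated by vmw_thp_get_node()
 * @man: The resource manager.
 * @res: The resource to free.
 */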
static void vmw_thp_put_node(struct ttm_resource_manager *man,
			     struct ttm_resource *res)
{
	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
	struct vmw_thp_manager *rman = to_thp_manager(man);

	spin_lock(&rman->lock);
	drm_mm_remove_node(&node->mm_nodes[0]);
	spin_unlock(&rman->lock);

	kfree(node);
}

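/**
 * vmw_thp_init - Set up the huge-page-aware VRAM manager
 * @dev_priv: The device private structure.
 *
 * Allocate and initialize the manager, register it with TTM as the
 * TTM_PL_VRAM manager and mark it used.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */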
int vmw_thp_init(struct vmw_private *dev_priv)
{
	struct vmw_thp_manager *rman;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	ttm_resource_manager_init(&rman->manager,
				  dev_priv->vram_size >> PAGE_SHIFT);

	rman->manager.func = &vmw_thp_func;
	drm_mm_init(&rman->mm, 0, rman->manager.size);
	spin_lock_init(&rman->lock);

	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager);
	ttm_resource_manager_set_used(&rman->manager, true);
	return 0;
}

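/**
 * vmw_thp_fini - Tear down the huge-page-aware VRAM manager
 * @dev_priv: The device private structure.
 *
 * Evict all remaining buffers from VRAM, then release the range allocator
 * and the manager itself.
 */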
void vmw_thp_fini(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	struct vmw_thp_manager *rman = to_thp_manager(man);
	struct drm_mm *mm = &rman->mm;
	int ret;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man);
	if (ret)
		return;
	spin_lock(&rman->lock);
	drm_mm_clean(mm);
	drm_mm_takedown(mm);
	spin_unlock(&rman->lock);
	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL);
	kfree(rman);
}

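/**
 * vmw_thp_debug - Print the range allocator's state
 * @man: The resource manager.
 * @printer: The drm printer to emit to.
 */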
static void vmw_thp_debug(struct ttm_resource_manager *man,
			  struct drm_printer *printer)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);

	spin_lock(&rman->lock);
	drm_mm_print(&rman->mm, printer);
	spin_unlock(&rman->lock);
}

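/* TTM resource-manager callbacks implemented by this file. */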
static const struct ttm_resource_manager_func vmw_thp_func = {
	.alloc = vmw_thp_get_node,
	.free = vmw_thp_put_node,
	.debug = vmw_thp_debug
};