#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

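/* Firmware command wrappers: allocate and release a UAR (User Access Region)
 * index on the device.
 */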
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
	int err;

	MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
	err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
	if (!err)
		*uarn = MLX5_GET(alloc_uar_out, out, uar);
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_alloc_uar);

int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};

	MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
	MLX5_SET(dealloc_uar_in, in, uar, uarn);
	return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);

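/* When the device supports 4KB UARs (uar_4k) and the system page is larger,
 * several device UARs are packed into a single system page.
 */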
static int uars_per_sys_page(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, uar_4k))
		return MLX5_CAP_GEN(mdev, num_of_uars_per_page);

	return 1;
}

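/* Translate a UAR index into the pfn of the system page that backs it
 * within the device BAR.
 */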
static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
{
	u32 system_page_index;

	if (MLX5_CAP_GEN(mdev, uar_4k))
		system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
	else
		system_page_index = index;

	return (mdev->bar_addr >> PAGE_SHIFT) + system_page_index;
}

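/* kref release callback for a UARs page. Called with the owning list lock
 * held, so removing the page from its list is safe.
 */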
static void up_rel_func(struct kref *kref)
{
	struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);

	list_del(&up->list);
	iounmap(up->map);
	if (mlx5_cmd_free_uar(up->mdev, up->index))
		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
	bitmap_free(up->reg_bitmap);
	bitmap_free(up->fp_bitmap);
	kfree(up);
}

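/* Allocate one system page worth of UARs: reserve a UAR index from firmware,
 * map it (write-combining or not), and initialize the bitmaps that track
 * regular and fast-path bfreg slots. A failed WC mapping returns -EAGAIN so
 * the caller can retry without write combining.
 */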
static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
					       bool map_wc)
{
	struct mlx5_uars_page *up;
	int err = -ENOMEM;
	phys_addr_t pfn;
	int bfregs;
	int i;

	bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
	up = kzalloc(sizeof(*up), GFP_KERNEL);
	if (!up)
		return ERR_PTR(err);

	up->mdev = mdev;
	up->reg_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
	if (!up->reg_bitmap)
		goto error1;

	up->fp_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
	if (!up->fp_bitmap)
		goto error1;

	for (i = 0; i < bfregs; i++)
		if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
			set_bit(i, up->reg_bitmap);
		else
			set_bit(i, up->fp_bitmap);

	up->bfregs = bfregs;
	up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
	up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;

	err = mlx5_cmd_alloc_uar(mdev, &up->index);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
		goto error1;
	}

	pfn = uar2pfn(mdev, up->index);
	if (map_wc) {
		up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!up->map) {
			err = -EAGAIN;
			goto error2;
		}
	} else {
		up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!up->map) {
			err = -ENOMEM;
			goto error2;
		}
	}
	kref_init(&up->ref_count);
	mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
		      up->index, up->bfregs);
	return up;

error2:
	if (mlx5_cmd_free_uar(mdev, up->index))
		mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
error1:
	bitmap_free(up->fp_bitmap);
	bitmap_free(up->reg_bitmap);
	kfree(up);
	return ERR_PTR(err);
}

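/* Return the shared non write-combining UARs page, allocating it on first
 * use. Callers must release it with mlx5_put_uars_page().
 */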
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_uars_page *ret;

	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
	if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
		ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
				       struct mlx5_uars_page, list);
		kref_get(&ret->ref_count);
		goto out;
	}
	ret = alloc_uars_page(mdev, false);
	if (IS_ERR(ret))
		goto out;
	list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
out:
	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);

	return ret;
}
EXPORT_SYMBOL(mlx5_get_uars_page);

void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
{
	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
	kref_put(&up->ref_count, up_rel_func);
	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
}
EXPORT_SYMBOL(mlx5_put_uars_page);

static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
{
	/* Offset in bytes from the start of the mapped system page to the
	 * blue flame area of the bfreg identified by dbi.
	 */
	return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
	       (dbi % MLX5_BFREGS_PER_UAR) *
	       (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
}

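/* Take a free bfreg slot from a UAR page on the requested list (WC or
 * regular), allocating a new page if the list is empty. A page that runs out
 * of free slots is removed from the list until a slot is returned.
 */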
static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		       bool map_wc, bool fast_path)
{
	struct mlx5_bfreg_data *bfregs;
	struct mlx5_uars_page *up;
	struct list_head *head;
	unsigned long *bitmap;
	unsigned int *avail;
	struct mutex *lock;
	int dbi;

	bfregs = &mdev->priv.bfregs;
	if (map_wc) {
		head = &bfregs->wc_head.list;
		lock = &bfregs->wc_head.lock;
	} else {
		head = &bfregs->reg_head.list;
		lock = &bfregs->reg_head.lock;
	}
	mutex_lock(lock);
	if (list_empty(head)) {
		up = alloc_uars_page(mdev, map_wc);
		if (IS_ERR(up)) {
			mutex_unlock(lock);
			return PTR_ERR(up);
		}
		list_add(&up->list, head);
	} else {
		up = list_entry(head->next, struct mlx5_uars_page, list);
		kref_get(&up->ref_count);
	}
	if (fast_path) {
		bitmap = up->fp_bitmap;
		avail = &up->fp_avail;
	} else {
		bitmap = up->reg_bitmap;
		avail = &up->reg_avail;
	}
	dbi = find_first_bit(bitmap, up->bfregs);
	clear_bit(dbi, bitmap);
	(*avail)--;
	if (!(*avail))
		list_del(&up->list);

	bfreg->map = up->map + map_offset(mdev, dbi);
	bfreg->up = up;
	bfreg->wc = map_wc;
	bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
	mutex_unlock(lock);

	return 0;
}

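/* Prefer a write-combining mapping when requested; if the WC mapping is not
 * available (-EAGAIN), fall back to a regular mapping.
 */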
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path)
{
	int err;

	err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
	if (!err)
		return 0;

	if (err == -EAGAIN && map_wc)
		return alloc_bfreg(mdev, bfreg, false, fast_path);

	return err;
}
EXPORT_SYMBOL(mlx5_alloc_bfreg);

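/* Recover the bfreg's slot index within its system page from the mapped
 * address, reversing the computation done in map_offset().
 */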
static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
					   struct mlx5_uars_page *up,
					   struct mlx5_sq_bfreg *bfreg)
{
	unsigned int uar_idx;
	unsigned int bfreg_idx;
	unsigned int bf_reg_size;

	bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);

	uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
	bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;

	return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
}

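/* Return a bfreg slot to its UAR page's bitmap. If the page previously had
 * no free slots, put it back on the relevant list, then drop the page
 * reference.
 */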
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
{
	struct mlx5_bfreg_data *bfregs;
	struct mlx5_uars_page *up;
	struct mutex *lock;
	unsigned int dbi;
	bool fp;
	unsigned int *avail;
	unsigned long *bitmap;
	struct list_head *head;

	bfregs = &mdev->priv.bfregs;
	if (bfreg->wc) {
		head = &bfregs->wc_head.list;
		lock = &bfregs->wc_head.lock;
	} else {
		head = &bfregs->reg_head.list;
		lock = &bfregs->reg_head.lock;
	}
	up = bfreg->up;
	dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
	fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
	if (fp) {
		avail = &up->fp_avail;
		bitmap = up->fp_bitmap;
	} else {
		avail = &up->reg_avail;
		bitmap = up->reg_bitmap;
	}
	mutex_lock(lock);
	(*avail)++;
	set_bit(dbi, bitmap);
	if (*avail == 1)
		list_add_tail(&up->list, head);

	kref_put(&up->ref_count, up_rel_func);
	mutex_unlock(lock);
}
EXPORT_SYMBOL(mlx5_free_bfreg);