1
2
3
4
5
6
7
8#define pr_fmt(fmt) "habanalabs: " fmt
9
10#include <uapi/misc/habanalabs.h>
11#include "habanalabs.h"
12
13#include <linux/kernel.h>
14#include <linux/fs.h>
15#include <linux/uaccess.h>
16#include <linux/slab.h>
17
/*
 * Expected input-structure size, in bytes, for each coresight debug
 * operation. hl_debug_ioctl() uses this table to clamp the user-supplied
 * input size before debug_coresight() allocates and copies the input.
 * FUNNEL and TIMESTAMP take no input parameters, hence size 0.
 */
static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
	[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
	[HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
	[HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
	[HL_DEBUG_OP_FUNNEL] = 0,
	[HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
	[HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
	[HL_DEBUG_OP_TIMESTAMP] = 0

};
28
29static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
30{
31 struct hl_info_device_status dev_stat = {0};
32 u32 size = args->return_size;
33 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
34
35 if ((!size) || (!out))
36 return -EINVAL;
37
38 dev_stat.status = hl_device_status(hdev);
39
40 return copy_to_user(out, &dev_stat,
41 min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
42}
43
/*
 * hw_ip_info() - fill and return the H/W IP information of the device.
 * @hdev: habanalabs device structure.
 * @args: INFO ioctl arguments holding the user buffer and its size.
 *
 * Gathers static ASIC properties (SRAM/DRAM geometry, PLL values,
 * CPU-CP firmware info, etc.) into a struct hl_info_hw_ip_info and
 * copies it to user space, truncated to the user buffer size.
 *
 * Return: 0 on success, -EINVAL on bad arguments, -EFAULT on copy failure.
 */
static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_ip_info hw_ip = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 sram_kmd_size, dram_kmd_size;

	if ((!size) || (!out))
		return -EINVAL;

	/* Memory below the user base addresses is reserved for the driver */
	sram_kmd_size = (prop->sram_user_base_address -
				prop->sram_base_address);
	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);

	hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
	hw_ip.sram_base_address = prop->sram_user_base_address;
	/* With virtual DRAM, user space sees the device-MMU virtual range */
	hw_ip.dram_base_address =
		hdev->mmu_enable && prop->dram_supports_virtual_memory ?
		prop->dmmu.start_addr : prop->dram_user_base_address;
	hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
	hw_ip.sram_size = prop->sram_size - sram_kmd_size;

	/* With the MMU enabled, expose only a whole number of DRAM pages */
	if (hdev->mmu_enable)
		hw_ip.dram_size =
			DIV_ROUND_DOWN_ULL(prop->dram_size - dram_kmd_size,
						prop->dram_page_size) *
							prop->dram_page_size;
	else
		hw_ip.dram_size = prop->dram_size - dram_kmd_size;

	if (hw_ip.dram_size > PAGE_SIZE)
		hw_ip.dram_enabled = 1;
	hw_ip.dram_page_size = prop->dram_page_size;
	hw_ip.num_of_events = prop->num_of_events;

	memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
		min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));

	memcpy(hw_ip.card_name, prop->cpucp_info.card_name,
		min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));

	/* CPU-CP fields arrive little-endian from the firmware */
	hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version);
	hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location);

	hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
	hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
	hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
	hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;

	hw_ip.first_available_interrupt_id =
			prop->first_available_user_msix_interrupt;
	return copy_to_user(out, &hw_ip,
		min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
}
100
101static int hw_events_info(struct hl_device *hdev, bool aggregate,
102 struct hl_info_args *args)
103{
104 u32 size, max_size = args->return_size;
105 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
106 void *arr;
107
108 if ((!max_size) || (!out))
109 return -EINVAL;
110
111 arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
112
113 return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
114}
115
116static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
117{
118 struct hl_device *hdev = hpriv->hdev;
119 struct hl_info_dram_usage dram_usage = {0};
120 u32 max_size = args->return_size;
121 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
122 struct asic_fixed_properties *prop = &hdev->asic_prop;
123 u64 dram_kmd_size;
124
125 if ((!max_size) || (!out))
126 return -EINVAL;
127
128 dram_kmd_size = (prop->dram_user_base_address -
129 prop->dram_base_address);
130 dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
131 atomic64_read(&hdev->dram_used_mem);
132 if (hpriv->ctx)
133 dram_usage.ctx_dram_mem =
134 atomic64_read(&hpriv->ctx->dram_phys_mem);
135
136 return copy_to_user(out, &dram_usage,
137 min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
138}
139
140static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
141{
142 struct hl_info_hw_idle hw_idle = {0};
143 u32 max_size = args->return_size;
144 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
145
146 if ((!max_size) || (!out))
147 return -EINVAL;
148
149 hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
150 hw_idle.busy_engines_mask_ext,
151 HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
152 hw_idle.busy_engines_mask =
153 lower_32_bits(hw_idle.busy_engines_mask_ext[0]);
154
155 return copy_to_user(out, &hw_idle,
156 min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
157}
158
/*
 * debug_coresight() - execute a single coresight debug operation.
 * @hdev: habanalabs device structure.
 * @args: DEBUG ioctl arguments (op, enable, user input/output buffers).
 *
 * Copies the op-specific input structure from user space, invokes the
 * ASIC-specific coresight handler and copies any output back to user
 * space. All kernel buffers are freed on every exit path.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EFAULT on a
 * user copy failure, or the error returned by the ASIC handler.
 */
static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
{
	struct hl_debug_params *params;
	void *input = NULL, *output = NULL;
	int rc;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	params->reg_idx = args->reg_idx;
	params->enable = args->enable;
	params->op = args->op;

	/*
	 * The caller (hl_debug_ioctl) already clamped input_size to the
	 * op's expected structure size, so the copy below cannot overrun
	 * the allocation
	 */
	if (args->input_ptr && args->input_size) {
		input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
		if (!input) {
			rc = -ENOMEM;
			goto out;
		}

		if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
					args->input_size)) {
			rc = -EFAULT;
			dev_err(hdev->dev, "failed to copy input debug data\n");
			goto out;
		}

		params->input = input;
	}

	if (args->output_ptr && args->output_size) {
		output = kzalloc(args->output_size, GFP_KERNEL);
		if (!output) {
			rc = -ENOMEM;
			goto out;
		}

		params->output = output;
		params->output_size = args->output_size;
	}

	rc = hdev->asic_funcs->debug_coresight(hdev, params);
	if (rc) {
		dev_err(hdev->dev,
			"debug coresight operation failed %d\n", rc);
		goto out;
	}

	/* Return the output buffer, if one was requested, to user space */
	if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
					output, args->output_size)) {
		dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
		rc = -EFAULT;
		goto out;
	}


out:
	kfree(params);
	kfree(output);
	kfree(input);

	return rc;
}
223
224static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
225{
226 struct hl_info_device_utilization device_util = {0};
227 u32 max_size = args->return_size;
228 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
229 int rc;
230
231 if ((!max_size) || (!out))
232 return -EINVAL;
233
234 rc = hl_device_utilization(hdev, &device_util.utilization);
235 if (rc)
236 return -EINVAL;
237
238 return copy_to_user(out, &device_util,
239 min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
240}
241
242static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
243{
244 struct hl_info_clk_rate clk_rate = {0};
245 u32 max_size = args->return_size;
246 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
247 int rc;
248
249 if ((!max_size) || (!out))
250 return -EINVAL;
251
252 rc = hdev->asic_funcs->get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz,
253 &clk_rate.max_clk_rate_mhz);
254 if (rc)
255 return rc;
256
257 return copy_to_user(out, &clk_rate,
258 min((size_t) max_size, sizeof(clk_rate))) ? -EFAULT : 0;
259}
260
261static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
262{
263 struct hl_info_reset_count reset_count = {0};
264 u32 max_size = args->return_size;
265 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
266
267 if ((!max_size) || (!out))
268 return -EINVAL;
269
270 reset_count.hard_reset_cnt = hdev->hard_reset_cnt;
271 reset_count.soft_reset_cnt = hdev->soft_reset_cnt;
272
273 return copy_to_user(out, &reset_count,
274 min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
275}
276
277static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
278{
279 struct hl_info_time_sync time_sync = {0};
280 u32 max_size = args->return_size;
281 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
282
283 if ((!max_size) || (!out))
284 return -EINVAL;
285
286 time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
287 time_sync.host_time = ktime_get_raw_ns();
288
289 return copy_to_user(out, &time_sync,
290 min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
291}
292
293static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
294{
295 struct hl_device *hdev = hpriv->hdev;
296 struct hl_info_pci_counters pci_counters = {0};
297 u32 max_size = args->return_size;
298 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
299 int rc;
300
301 if ((!max_size) || (!out))
302 return -EINVAL;
303
304 rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
305 if (rc)
306 return rc;
307
308 return copy_to_user(out, &pci_counters,
309 min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0;
310}
311
312static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
313{
314 struct hl_device *hdev = hpriv->hdev;
315 struct hl_info_clk_throttle clk_throttle = {0};
316 u32 max_size = args->return_size;
317 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
318
319 if ((!max_size) || (!out))
320 return -EINVAL;
321
322 clk_throttle.clk_throttling_reason = hdev->clk_throttling_reason;
323
324 return copy_to_user(out, &clk_throttle,
325 min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
326}
327
328static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
329{
330 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
331 struct hl_info_cs_counters cs_counters = {0};
332 struct hl_device *hdev = hpriv->hdev;
333 struct hl_cs_counters_atomic *cntr;
334 u32 max_size = args->return_size;
335
336 cntr = &hdev->aggregated_cs_counters;
337
338 if ((!max_size) || (!out))
339 return -EINVAL;
340
341 cs_counters.total_out_of_mem_drop_cnt =
342 atomic64_read(&cntr->out_of_mem_drop_cnt);
343 cs_counters.total_parsing_drop_cnt =
344 atomic64_read(&cntr->parsing_drop_cnt);
345 cs_counters.total_queue_full_drop_cnt =
346 atomic64_read(&cntr->queue_full_drop_cnt);
347 cs_counters.total_device_in_reset_drop_cnt =
348 atomic64_read(&cntr->device_in_reset_drop_cnt);
349 cs_counters.total_max_cs_in_flight_drop_cnt =
350 atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
351 cs_counters.total_validation_drop_cnt =
352 atomic64_read(&cntr->validation_drop_cnt);
353
354 if (hpriv->ctx) {
355 cs_counters.ctx_out_of_mem_drop_cnt =
356 atomic64_read(
357 &hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
358 cs_counters.ctx_parsing_drop_cnt =
359 atomic64_read(
360 &hpriv->ctx->cs_counters.parsing_drop_cnt);
361 cs_counters.ctx_queue_full_drop_cnt =
362 atomic64_read(
363 &hpriv->ctx->cs_counters.queue_full_drop_cnt);
364 cs_counters.ctx_device_in_reset_drop_cnt =
365 atomic64_read(
366 &hpriv->ctx->cs_counters.device_in_reset_drop_cnt);
367 cs_counters.ctx_max_cs_in_flight_drop_cnt =
368 atomic64_read(
369 &hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
370 cs_counters.ctx_validation_drop_cnt =
371 atomic64_read(
372 &hpriv->ctx->cs_counters.validation_drop_cnt);
373 }
374
375 return copy_to_user(out, &cs_counters,
376 min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
377}
378
379static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
380{
381 struct hl_device *hdev = hpriv->hdev;
382 struct asic_fixed_properties *prop = &hdev->asic_prop;
383 struct hl_info_sync_manager sm_info = {0};
384 u32 max_size = args->return_size;
385 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
386
387 if ((!max_size) || (!out))
388 return -EINVAL;
389
390 if (args->dcore_id >= HL_MAX_DCORES)
391 return -EINVAL;
392
393 sm_info.first_available_sync_object =
394 prop->first_available_user_sob[args->dcore_id];
395 sm_info.first_available_monitor =
396 prop->first_available_user_mon[args->dcore_id];
397 sm_info.first_available_cq =
398 prop->first_available_cq[args->dcore_id];
399
400 return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
401 sizeof(sm_info))) ? -EFAULT : 0;
402}
403
404static int total_energy_consumption_info(struct hl_fpriv *hpriv,
405 struct hl_info_args *args)
406{
407 struct hl_device *hdev = hpriv->hdev;
408 struct hl_info_energy total_energy = {0};
409 u32 max_size = args->return_size;
410 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
411 int rc;
412
413 if ((!max_size) || (!out))
414 return -EINVAL;
415
416 rc = hl_fw_cpucp_total_energy_get(hdev,
417 &total_energy.total_energy_consumption);
418 if (rc)
419 return rc;
420
421 return copy_to_user(out, &total_energy,
422 min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
423}
424
425static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
426{
427 struct hl_device *hdev = hpriv->hdev;
428 struct hl_pll_frequency_info freq_info = { {0} };
429 u32 max_size = args->return_size;
430 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
431 int rc;
432
433 if ((!max_size) || (!out))
434 return -EINVAL;
435
436 rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output);
437 if (rc)
438 return rc;
439
440 return copy_to_user(out, &freq_info,
441 min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
442}
443
444static int power_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
445{
446 struct hl_device *hdev = hpriv->hdev;
447 u32 max_size = args->return_size;
448 struct hl_power_info power_info = {0};
449 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
450 int rc;
451
452 if ((!max_size) || (!out))
453 return -EINVAL;
454
455 rc = hl_fw_cpucp_power_get(hdev, &power_info.power);
456 if (rc)
457 return rc;
458
459 return copy_to_user(out, &power_info,
460 min((size_t) max_size, sizeof(power_info))) ? -EFAULT : 0;
461}
462
463static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
464{
465 struct hl_device *hdev = hpriv->hdev;
466 u32 max_size = args->return_size;
467 struct hl_open_stats_info open_stats_info = {0};
468 void __user *out = (void __user *) (uintptr_t) args->return_pointer;
469
470 if ((!max_size) || (!out))
471 return -EINVAL;
472
473 open_stats_info.last_open_period_ms = jiffies64_to_msecs(
474 hdev->last_open_session_duration_jif);
475 open_stats_info.open_counter = hdev->open_counter;
476
477 return copy_to_user(out, &open_stats_info,
478 min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
479}
480
/*
 * _hl_info_ioctl() - common handler for the INFO ioctl.
 * @hpriv: file private data of the caller.
 * @data: ioctl arguments (struct hl_info_args), already in kernel space.
 * @dev: device to log against (compute or control node).
 *
 * A few queries (H/W IP info, device status, reset count) are served
 * even when the device is not operational; all other queries require
 * an operational device and fail with -EBUSY otherwise.
 *
 * Return: 0 on success, -EBUSY if the device is not operational,
 * -ENOTTY on an unknown op, or the error of the specific query.
 */
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
				struct device *dev)
{
	enum hl_device_status status;
	struct hl_info_args *args = data;
	struct hl_device *hdev = hpriv->hdev;

	int rc;

	/* These queries are allowed regardless of the device state */
	switch (args->op) {
	case HL_INFO_HW_IP_INFO:
		return hw_ip_info(hdev, args);

	case HL_INFO_DEVICE_STATUS:
		return device_status_info(hdev, args);

	case HL_INFO_RESET_COUNT:
		return get_reset_count(hdev, args);

	default:
		break;
	}

	/* All remaining queries require an operational device */
	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(dev,
			"Device is %s. Can't execute INFO IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_INFO_HW_EVENTS:
		rc = hw_events_info(hdev, false, args);
		break;

	case HL_INFO_DRAM_USAGE:
		rc = dram_usage_info(hpriv, args);
		break;

	case HL_INFO_HW_IDLE:
		rc = hw_idle(hdev, args);
		break;

	case HL_INFO_DEVICE_UTILIZATION:
		rc = device_utilization(hdev, args);
		break;

	case HL_INFO_HW_EVENTS_AGGREGATE:
		rc = hw_events_info(hdev, true, args);
		break;

	case HL_INFO_CLK_RATE:
		rc = get_clk_rate(hdev, args);
		break;

	case HL_INFO_TIME_SYNC:
		return time_sync_info(hdev, args);

	case HL_INFO_CS_COUNTERS:
		return cs_counters_info(hpriv, args);

	case HL_INFO_PCI_COUNTERS:
		return pci_counters_info(hpriv, args);

	case HL_INFO_CLK_THROTTLE_REASON:
		return clk_throttle_info(hpriv, args);

	case HL_INFO_SYNC_MANAGER:
		return sync_manager_info(hpriv, args);

	case HL_INFO_TOTAL_ENERGY:
		return total_energy_consumption_info(hpriv, args);

	case HL_INFO_PLL_FREQUENCY:
		return pll_frequency_info(hpriv, args);

	case HL_INFO_POWER:
		return power_info(hpriv, args);

	case HL_INFO_OPEN_STATS:
		return open_stats_info(hpriv, args);

	default:
		dev_err(dev, "Invalid request %d\n", args->op);
		rc = -ENOTTY;
		break;
	}

	return rc;
}
575
/* INFO ioctl entry point for the compute device node */
static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
}
580
/* INFO ioctl entry point for the control device node */
static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
}
585
/*
 * hl_debug_ioctl() - handler for the DEBUG ioctl.
 * @hpriv: file private data of the caller.
 * @data: ioctl arguments (struct hl_debug_args), already in kernel space.
 *
 * Dispatches coresight configuration operations (which require the
 * device to be in debug mode) and the debug-mode set/unset operation.
 *
 * Return: 0 on success, -EBUSY if the device is not operational,
 * -EFAULT if a coresight op is requested outside debug mode, -ENOTTY
 * on an unknown op, or the error of the specific operation.
 */
static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_debug_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	enum hl_device_status status;

	int rc = 0;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute DEBUG IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_DEBUG_OP_ETR:
	case HL_DEBUG_OP_ETF:
	case HL_DEBUG_OP_STM:
	case HL_DEBUG_OP_FUNNEL:
	case HL_DEBUG_OP_BMON:
	case HL_DEBUG_OP_SPMU:
	case HL_DEBUG_OP_TIMESTAMP:
		/* Coresight ops are valid only after HL_DEBUG_OP_SET_MODE */
		if (!hdev->in_debug) {
			dev_err_ratelimited(hdev->dev,
				"Rejecting debug configuration request because device not in debug mode\n");
			return -EFAULT;
		}
		/* Clamp the input size to the op's expected structure size */
		args->input_size =
			min(args->input_size, hl_debug_struct_size[args->op]);
		rc = debug_coresight(hdev, args);
		break;
	case HL_DEBUG_OP_SET_MODE:
		rc = hl_device_set_debug_mode(hdev, (bool) args->enable);
		break;
	default:
		dev_err(hdev->dev, "Invalid request %d\n", args->op);
		rc = -ENOTTY;
		break;
	}

	return rc;
}
629
/* Build an ioctl descriptor table entry, indexed by the ioctl's NR */
#define HL_IOCTL_DEF(ioctl, _func) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}

/* Ioctl dispatch table for the compute device node */
static const struct hl_ioctl_desc hl_ioctls[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_wait_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
};

/* Ioctl dispatch table for the control device node (INFO only) */
static const struct hl_ioctl_desc hl_ioctls_control[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
};
645
/*
 * _hl_ioctl() - common dispatcher for all habanalabs ioctls.
 * @filep: file of the caller.
 * @cmd: ioctl command as issued by user space.
 * @arg: user-space pointer to the ioctl argument structure.
 * @ioctl: descriptor (expected cmd + handler) selected by the caller.
 * @dev: device to log against (compute or control node).
 *
 * Allocates a kernel buffer sized for the larger of the user's and the
 * kernel's view of the argument structure (small arguments use a stack
 * buffer), copies the argument in, calls the handler, and copies the
 * result back out.
 *
 * Return: 0 on success or a negative error code.
 */
static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
		const struct hl_ioctl_desc *ioctl, struct device *dev)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128] = {0};
	char *kdata = NULL;
	unsigned int usize, asize;
	hl_ioctl_t *func;
	u32 hl_size;
	int retcode;

	if (hdev->hard_reset_pending) {
		dev_crit_ratelimited(dev,
			"Device HARD reset pending! Please close FD\n");
		return -ENODEV;
	}


	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(dev, "no function\n");
		retcode = -ENOTTY;
		goto out_err;
	}

	/*
	 * The user's structure size (usize) may differ from the kernel's
	 * (hl_size) when user space and kernel are built against different
	 * uapi versions; allocate the larger of the two (asize)
	 */
	hl_size = _IOC_SIZE(ioctl->cmd);
	usize = asize = _IOC_SIZE(cmd);
	if (hl_size > asize)
		asize = hl_size;

	cmd = ioctl->cmd;

	if (cmd & (IOC_IN | IOC_OUT)) {
		/* Small arguments are served from the stack buffer */
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kzalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto out_err;
			}
		}
	}

	if (cmd & IOC_IN) {
		/* Copy only what user space provided; the rest stays zeroed */
		if (copy_from_user(kdata, (void __user *)arg, usize)) {
			retcode = -EFAULT;
			goto out_err;
		}
	} else if (cmd & IOC_OUT) {
		memset(kdata, 0, usize);
	}

	retcode = func(hpriv, kdata);

	if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
		retcode = -EFAULT;

out_err:
	if (retcode)
		dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
			task_pid_nr(current), cmd, nr);

	/* kfree(NULL) is a no-op, so only the heap path frees anything */
	if (kdata != stack_kdata)
		kfree(kdata);

	return retcode;
}
717
718long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
719{
720 struct hl_fpriv *hpriv = filep->private_data;
721 struct hl_device *hdev = hpriv->hdev;
722 const struct hl_ioctl_desc *ioctl = NULL;
723 unsigned int nr = _IOC_NR(cmd);
724
725 if (!hdev) {
726 pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
727 return -ENODEV;
728 }
729
730 if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
731 ioctl = &hl_ioctls[nr];
732 } else {
733 dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
734 task_pid_nr(current), nr);
735 return -ENOTTY;
736 }
737
738 return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
739}
740
741long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
742{
743 struct hl_fpriv *hpriv = filep->private_data;
744 struct hl_device *hdev = hpriv->hdev;
745 const struct hl_ioctl_desc *ioctl = NULL;
746 unsigned int nr = _IOC_NR(cmd);
747
748 if (!hdev) {
749 pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
750 return -ENODEV;
751 }
752
753 if (nr == _IOC_NR(HL_IOCTL_INFO)) {
754 ioctl = &hl_ioctls_control[nr];
755 } else {
756 dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
757 task_pid_nr(current), nr);
758 return -ENOTTY;
759 }
760
761 return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);
762}
763