/*
 * WHCI radio controller driver (whc-rc)
 *
 * Glue between the UWB stack and a Wireless Host Controller Interface
 * radio controller exposed as a UMC capability: maps the controller's
 * registers, sets up DMA-coherent buffers for command and event
 * transfer, and relays radio controller commands and events between
 * the hardware and the UWB core.
 *
 * Author: Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/uwb.h>
#include <linux/uwb/whci.h>
#include <linux/uwb/umc.h>

#include "uwb-internal.h"
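
/*
 * Driver state for a single WHCI radio controller.
 *
 * @area holds the start of the controller's register region (used for
 * request_mem_region() and ioremap()); @rc_base/@rc_len describe the
 * mapped registers.  @cmd_buf and @evt_buf are page-sized DMA-coherent
 * buffers used to hand commands to and read events from the hardware.
 * @cmd_wq is woken from the IRQ handler when the controller is ready
 * to accept another command; @event_work processes received events
 * outside interrupt context.  @irq_lock serializes read-modify-write
 * accesses to the URCCMD register.
 */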
struct whcrc {
	struct umc_dev *umc_dev;
	struct uwb_rc *uwb_rc;

	unsigned long area;
	void __iomem *rc_base;
	size_t rc_len;
	spinlock_t irq_lock;

	void *evt_buf, *cmd_buf;
	dma_addr_t evt_dma_buf, cmd_dma_buf;
	wait_queue_head_t cmd_wq;
	struct work_struct event_work;
};
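
/*
 * Execute a radio controller command.
 *
 * The command block is copied into the DMA-coherent command buffer and
 * the URCCMD register is programmed to start execution.  If the
 * controller reports itself halted, a reset is requested from the UWB
 * stack and the command fails with -EIO.  Commands larger than 4095
 * bytes are rejected so they always fit in the page-sized buffer.
 */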
static int whcrc_cmd(struct uwb_rc *uwb_rc,
		     const struct uwb_rccb *cmd, size_t cmd_size)
{
	int result = 0;
	struct whcrc *whcrc = uwb_rc->priv;
	struct device *dev = &whcrc->umc_dev->dev;
	u32 urccmd;

	if (cmd_size >= 4096)
		return -EINVAL;

	/*
	 * If the controller reports itself halted, ask the UWB stack to
	 * reset it and fail this command.
	 */
	if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) {
		dev_err(dev, "requesting reset of halted radio controller\n");
		uwb_rc_reset_all(uwb_rc);
		return -EIO;
	}

	/* Wait for any previous command to finish executing. */
	result = wait_event_timeout(whcrc->cmd_wq,
		!(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2);
	if (result == 0) {
		dev_err(dev, "device is not ready to execute commands\n");
		return -ETIMEDOUT;
	}

	memmove(whcrc->cmd_buf, cmd, cmd_size);
	le_writeq(whcrc->cmd_dma_buf, whcrc->rc_base + URCCMDADDR);

	spin_lock(&whcrc->irq_lock);
	urccmd = le_readl(whcrc->rc_base + URCCMD);
	urccmd &= ~(URCCMD_EARV | URCCMD_SIZE_MASK);
	le_writel(urccmd | URCCMD_ACTIVE | URCCMD_IWR | cmd_size,
		  whcrc->rc_base + URCCMD);
	spin_unlock(&whcrc->irq_lock);

	return 0;
}
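
/*
 * Reset the radio controller by asking the UMC layer to reset the
 * underlying controller.
 */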
static int whcrc_reset(struct uwb_rc *rc)
{
	struct whcrc *whcrc = rc->priv;

	return umc_controller_reset(whcrc->umc_dev);
}
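
/*
 * Hand the event buffer to the hardware: point URCEVTADDR at it and
 * set URCCMD_EARV so the controller can deliver further events.
 * Called at startup and after each batch of events has been processed.
 */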
static
void whcrc_enable_events(struct whcrc *whcrc)
{
	u32 urccmd;

	le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR);

	spin_lock(&whcrc->irq_lock);
	urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE;
	le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD);
	spin_unlock(&whcrc->irq_lock);
}
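
/*
 * Deferred event processing: read how much event data the hardware
 * placed in the event buffer, pass it to the UWB stack for decoding
 * and re-arm event reception.
 */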
static void whcrc_event_work(struct work_struct *work)
{
	struct whcrc *whcrc = container_of(work, struct whcrc, event_work);
	size_t size;
	u64 urcevtaddr;

	urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR);
	size = urcevtaddr & URCEVTADDR_OFFSET_MASK;

	uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size);
	whcrc_enable_events(whcrc);
}
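
/*
 * Interrupt handler (shared IRQ).
 *
 * Acknowledge the interrupt causes we own, then kick off event
 * processing (URCSTS_ER) and/or wake up a command waiter (URCSTS_RCI).
 * A host system error (URCSTS_HSE) is only logged; no recovery is
 * attempted here.
 */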
static
irqreturn_t whcrc_irq_cb(int irq, void *_whcrc)
{
	struct whcrc *whcrc = _whcrc;
	struct device *dev = &whcrc->umc_dev->dev;
	u32 urcsts;

	urcsts = le_readl(whcrc->rc_base + URCSTS);
	if (!(urcsts & URCSTS_INT_MASK))
		return IRQ_NONE;
	le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS);

	if (urcsts & URCSTS_HSE) {
		dev_err(dev, "host system error -- hardware halted\n");
		/* no recovery is attempted here */
		goto out;
	}
	if (urcsts & URCSTS_ER)
		schedule_work(&whcrc->event_work);
	if (urcsts & URCSTS_RCI)
		wake_up_all(&whcrc->cmd_wq);
out:
	return IRQ_HANDLED;
}
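
/*
 * Set up the UMC interface of the radio controller: claim and map the
 * register region, install the (shared) interrupt handler and allocate
 * the DMA-coherent command and event buffers.
 */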
static
int whcrc_setup_rc_umc(struct whcrc *whcrc)
{
	int result = 0;
	struct device *dev = &whcrc->umc_dev->dev;
	struct umc_dev *umc_dev = whcrc->umc_dev;

	whcrc->area = umc_dev->resource.start;
	whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1;
	result = -EBUSY;
	if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) == NULL) {
		dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n",
			whcrc->rc_len, whcrc->area, result);
		goto error_request_region;
	}

	whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len);
	if (whcrc->rc_base == NULL) {
		dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n",
			whcrc->rc_len, whcrc->area, result);
		goto error_ioremap_nocache;
	}

	result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED,
			     KBUILD_MODNAME, whcrc);
	if (result < 0) {
		dev_err(dev, "can't allocate IRQ %d: %d\n",
			umc_dev->irq, result);
		goto error_request_irq;
	}

	result = -ENOMEM;
	whcrc->cmd_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
					    &whcrc->cmd_dma_buf, GFP_KERNEL);
	if (whcrc->cmd_buf == NULL) {
		dev_err(dev, "Can't allocate cmd transfer buffer\n");
		goto error_cmd_buffer;
	}

	whcrc->evt_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE,
					    &whcrc->evt_dma_buf, GFP_KERNEL);
	if (whcrc->evt_buf == NULL) {
		dev_err(dev, "Can't allocate evt transfer buffer\n");
		goto error_evt_buffer;
	}
	return 0;

error_evt_buffer:
	dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
			  whcrc->cmd_dma_buf);
error_cmd_buffer:
	free_irq(umc_dev->irq, whcrc);
error_request_irq:
	iounmap(whcrc->rc_base);
error_ioremap_nocache:
	release_mem_region(whcrc->area, whcrc->rc_len);
error_request_region:
	return result;
}
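
/*
 * Undo whcrc_setup_rc_umc(): free the DMA buffers, release the IRQ and
 * unmap/release the register region.
 */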
static
void whcrc_release_rc_umc(struct whcrc *whcrc)
{
	struct umc_dev *umc_dev = whcrc->umc_dev;

	dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->evt_buf,
			  whcrc->evt_dma_buf);
	dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf,
			  whcrc->cmd_dma_buf);
	free_irq(umc_dev->irq, whcrc);
	iounmap(whcrc->rc_base);
	release_mem_region(whcrc->area, whcrc->rc_len);
}
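
/*
 * Start the radio controller: reset the hardware, take it out of the
 * halted state, arm event reception and finally enable interrupts.
 */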
static int whcrc_start_rc(struct uwb_rc *rc)
{
	struct whcrc *whcrc = rc->priv;
	struct device *dev = &whcrc->umc_dev->dev;

	/* Reset the hardware and wait for the reset bit to clear. */
	le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD);
	if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0,
			  5000, "hardware reset") < 0)
		return -EBUSY;

	/* Start the controller with interrupts still disabled. */
	le_writel(0, whcrc->rc_base + URCINTR);
	le_writel(URCCMD_RS, whcrc->rc_base + URCCMD);
	if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0,
			  5000, "radio controller start") < 0)
		return -ETIMEDOUT;
	whcrc_enable_events(whcrc);
	le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR);
	return 0;
}
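
/*
 * Stop the radio controller: disable interrupts, cancel any pending
 * event work and wait for the hardware to report itself halted.
 */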
static
void whcrc_stop_rc(struct uwb_rc *rc)
{
	struct whcrc *whcrc = rc->priv;
	struct umc_dev *umc_dev = whcrc->umc_dev;

	le_writel(0, whcrc->rc_base + URCINTR);
	cancel_work_sync(&whcrc->event_work);

	le_writel(0, whcrc->rc_base + URCCMD);
	whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS,
		      URCSTS_HALTED, URCSTS_HALTED, 100, "radio controller stop");
}
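
/* Initialize the software-only parts of the driver state. */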
static void whcrc_init(struct whcrc *whcrc)
{
	spin_lock_init(&whcrc->irq_lock);
	init_waitqueue_head(&whcrc->cmd_wq);
	INIT_WORK(&whcrc->event_work, whcrc_event_work);
}
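
/*
 * Probe a WHCI radio controller UMC capability: allocate the UWB RC
 * and driver instances, set up the UMC interface, wire up the RC
 * operations and register the radio controller with the UWB stack.
 */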
static
int whcrc_probe(struct umc_dev *umc_dev)
{
	int result;
	struct uwb_rc *uwb_rc;
	struct whcrc *whcrc;
	struct device *dev = &umc_dev->dev;

	result = -ENOMEM;
	uwb_rc = uwb_rc_alloc();
	if (uwb_rc == NULL) {
		dev_err(dev, "unable to allocate RC instance\n");
		goto error_rc_alloc;
	}
	whcrc = kzalloc(sizeof(*whcrc), GFP_KERNEL);
	if (whcrc == NULL) {
		dev_err(dev, "unable to allocate WHC-RC instance\n");
		goto error_alloc;
	}
	whcrc_init(whcrc);
	whcrc->umc_dev = umc_dev;

	result = whcrc_setup_rc_umc(whcrc);
	if (result < 0) {
		dev_err(dev, "Can't setup RC UMC interface: %d\n", result);
		goto error_setup_rc_umc;
	}
	whcrc->uwb_rc = uwb_rc;

	uwb_rc->owner = THIS_MODULE;
	uwb_rc->cmd = whcrc_cmd;
	uwb_rc->reset = whcrc_reset;
	uwb_rc->start = whcrc_start_rc;
	uwb_rc->stop = whcrc_stop_rc;

	result = uwb_rc_add(uwb_rc, dev, whcrc);
	if (result < 0)
		goto error_rc_add;
	umc_set_drvdata(umc_dev, whcrc);
	return 0;

error_rc_add:
	whcrc_release_rc_umc(whcrc);
error_setup_rc_umc:
	kfree(whcrc);
error_alloc:
	uwb_rc_put(uwb_rc);
error_rc_alloc:
	return result;
}
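
/*
 * Tear down a radio controller instance: detach it from the UWB stack
 * first so no new commands or events are issued, then release the
 * hardware resources and free the driver state.
 */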
static void whcrc_remove(struct umc_dev *umc_dev)
{
	struct whcrc *whcrc = umc_get_drvdata(umc_dev);
	struct uwb_rc *uwb_rc = whcrc->uwb_rc;

	umc_set_drvdata(umc_dev, NULL);
	uwb_rc_rm(uwb_rc);
	whcrc_release_rc_umc(whcrc);
	kfree(whcrc);
	uwb_rc_put(uwb_rc);
}
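
/* Forward UMC pre-/post-reset notifications to the UWB stack. */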
static int whcrc_pre_reset(struct umc_dev *umc)
{
	struct whcrc *whcrc = umc_get_drvdata(umc);
	struct uwb_rc *uwb_rc = whcrc->uwb_rc;

	uwb_rc_pre_reset(uwb_rc);
	return 0;
}

static int whcrc_post_reset(struct umc_dev *umc)
{
	struct whcrc *whcrc = umc_get_drvdata(umc);
	struct uwb_rc *uwb_rc = whcrc->uwb_rc;

	uwb_rc_post_reset(uwb_rc);
	return 0;
}
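
/* Match any PCI device in the WHCI wireless class. */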
static struct pci_device_id whcrc_id_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
	{ }
};
MODULE_DEVICE_TABLE(pci, whcrc_id_table);

static struct umc_driver whcrc_driver = {
	.name = "whc-rc",
	.cap_id = UMC_CAP_ID_WHCI_RC,
	.probe = whcrc_probe,
	.remove = whcrc_remove,
	.pre_reset = whcrc_pre_reset,
	.post_reset = whcrc_post_reset,
};

static int __init whcrc_driver_init(void)
{
	return umc_driver_register(&whcrc_driver);
}
module_init(whcrc_driver_init);

static void __exit whcrc_driver_exit(void)
{
	umc_driver_unregister(&whcrc_driver);
}
module_exit(whcrc_driver_exit);

MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Wireless Host Controller Radio Control Driver");
MODULE_LICENSE("GPL");