/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt service API
 */

#ifndef THUNDERBOLT_H_
#define THUNDERBOLT_H_

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>

enum tb_cfg_pkg_type {
	TB_CFG_PKG_READ = 1,
	TB_CFG_PKG_WRITE = 2,
	TB_CFG_PKG_ERROR = 3,
	TB_CFG_PKG_NOTIFY_ACK = 4,
	TB_CFG_PKG_EVENT = 5,
	TB_CFG_PKG_XDOMAIN_REQ = 6,
	TB_CFG_PKG_XDOMAIN_RESP = 7,
	TB_CFG_PKG_OVERRIDE = 8,
	TB_CFG_PKG_RESET = 9,
	TB_CFG_PKG_ICM_EVENT = 10,
	TB_CFG_PKG_ICM_CMD = 11,
	TB_CFG_PKG_ICM_RESP = 12,
	TB_CFG_PKG_PREPARE_TO_SLEEP = 13,
};

/**
 * enum tb_security_level - Thunderbolt security level
 * @TB_SECURITY_NONE: No security, legacy mode
 * @TB_SECURITY_USER: User approval required at minimum
 * @TB_SECURITY_SECURE: One time saved key required at minimum
 * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB)
 * @TB_SECURITY_USBONLY: Only tunnel USB controller of the connected
 *			 Thunderbolt dock (and Display Port). All PCIe
 *			 links downstream of the dock are removed.
 * @TB_SECURITY_NOPCIE: For USB4 systems this level is used when the
 *			 PCIe tunneling is disabled from the BIOS.
 */
enum tb_security_level {
	TB_SECURITY_NONE,
	TB_SECURITY_USER,
	TB_SECURITY_SECURE,
	TB_SECURITY_DPONLY,
	TB_SECURITY_USBONLY,
	TB_SECURITY_NOPCIE,
};

/**
 * struct tb - main thunderbolt bus structure
 * @dev: Domain device
 * @lock: Big lock. Must be held when accessing any struct
 *	  tb_switch / struct tb_port.
 * @nhi: Pointer to the NHI structure
 * @ctl: Control channel for this domain
 * @wq: Ordered workqueue for all domain specific work
 * @root_switch: Root switch of this domain
 * @cm_ops: Connection manager specific operations vector
 * @index: Linux assigned domain number
 * @security_level: Current security level
 * @nboot_acl: Number of boot ACLs the domain supports
 * @privdata: Private connection manager specific data
 */
struct tb {
	struct device dev;
	struct mutex lock;
	struct tb_nhi *nhi;
	struct tb_ctl *ctl;
	struct workqueue_struct *wq;
	struct tb_switch *root_switch;
	const struct tb_cm_ops *cm_ops;
	int index;
	enum tb_security_level security_level;
	size_t nboot_acl;
	unsigned long privdata[];
};

extern struct bus_type tb_bus_type;
extern struct device_type tb_service_type;
extern struct device_type tb_xdomain_type;

#define TB_LINKS_PER_PHY_PORT	2

static inline unsigned int tb_phy_port_from_link(unsigned int link)
{
	return (link - 1) / TB_LINKS_PER_PHY_PORT;
}
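
/*
 * Link numbering starts from 1, so for example links 1 and 2 map to
 * physical port 0, and links 3 and 4 map to physical port 1.
 */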

/**
 * struct tb_property_dir - XDomain property directory
 * @uuid: Directory UUID or %NULL if root directory
 * @properties: List of properties in this directory
 */
struct tb_property_dir {
	const uuid_t *uuid;
	struct list_head properties;
};

enum tb_property_type {
	TB_PROPERTY_TYPE_UNKNOWN = 0x00,
	TB_PROPERTY_TYPE_DIRECTORY = 0x44,
	TB_PROPERTY_TYPE_DATA = 0x64,
	TB_PROPERTY_TYPE_TEXT = 0x74,
	TB_PROPERTY_TYPE_VALUE = 0x76,
};

#define TB_PROPERTY_KEY_SIZE	8

/**
 * struct tb_property - XDomain property
 * @list: Used to link properties together in a directory
 * @key: Key for the property (always terminated)
 * @type: Type of the property
 * @length: Length of the property data in dwords
 * @value: Property value
 *
 * Users use @type to determine which field in @value is filled.
 */
struct tb_property {
	struct list_head list;
	char key[TB_PROPERTY_KEY_SIZE + 1];
	enum tb_property_type type;
	size_t length;
	union {
		struct tb_property_dir *dir;
		u8 *data;
		char *text;
		u32 immediate;
	} value;
};

struct tb_property_dir *tb_property_parse_dir(const u32 *block,
					      size_t block_len);
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
			       size_t block_len);
struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir);
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid);
void tb_property_free_dir(struct tb_property_dir *dir);
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
			      u32 value);
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
			 const void *buf, size_t buflen);
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
			 const char *text);
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
			struct tb_property_dir *dir);
void tb_property_remove(struct tb_property *tb_property);
struct tb_property *tb_property_find(struct tb_property_dir *dir,
				     const char *key,
				     enum tb_property_type type);
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
					 struct tb_property *prev);

#define tb_property_for_each(dir, property)			\
	for (property = tb_property_get_next(dir, NULL);	\
	     property;						\
	     property = tb_property_get_next(dir, property))

int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
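
/*
 * A minimal sketch of how a service driver might export its own property
 * directory to the remote host. The "example" key, the UUID and the
 * property names below are hypothetical, not part of this API:
 *
 *	static const uuid_t example_dir_uuid =
 *		UUID_INIT(0x12345678, 0x1234, 0x1234,
 *			  0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34);
 *	static struct tb_property_dir *example_dir;
 *
 *	static int example_register_properties(void)
 *	{
 *		int ret;
 *
 *		example_dir = tb_property_create_dir(&example_dir_uuid);
 *		if (!example_dir)
 *			return -ENOMEM;
 *
 *		tb_property_add_immediate(example_dir, "prtcid", 1);
 *		tb_property_add_immediate(example_dir, "prtcvers", 1);
 *		tb_property_add_text(example_dir, "device", "Example device");
 *
 *		ret = tb_register_property_dir("example", example_dir);
 *		if (ret)
 *			tb_property_free_dir(example_dir);
 *		return ret;
 *	}
 */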

/**
 * struct tb_xdomain - Cross-domain (XDomain) connection
 * @dev: XDomain device
 * @tb: Pointer to the domain
 * @remote_uuid: UUID of the remote host
 * @local_uuid: Cached local UUID
 * @route: Route string the other domain can be reached
 * @vendor: Vendor ID of the remote domain
 * @device: Device ID of the remote domain
 * @local_max_hopid: Maximum input HopID of this host
 * @remote_max_hopid: Maximum input HopID of the remote host
 * @lock: Lock to serialize access to the following fields of this
 *	  structure
 * @vendor_name: Name of the vendor (or %NULL if not known)
 * @device_name: Name of the device (or %NULL if not known)
 * @link_speed: Speed of the link in Gb/s
 * @link_width: Width of the link
 * @is_unplugged: The XDomain is unplugged
 * @needs_uuid: If the XDomain does not have @remote_uuid it will be
 *		queried first
 * @service_ids: Used to generate IDs for the services
 * @in_hopids: Input HopIDs for DMA tunneling
 * @out_hopids: Output HopIDs for DMA tunneling
 * @local_property_block: Local block of properties
 * @local_property_block_gen: Generation of @local_property_block
 * @local_property_block_len: Length of the @local_property_block in dwords
 * @remote_properties: Properties exported by the remote domain
 * @remote_property_block_gen: Generation of @remote_properties
 * @get_uuid_work: Work used to retrieve @remote_uuid
 * @uuid_retries: Number of times left @remote_uuid is requested before
 *		  giving up
 * @get_properties_work: Work used to get remote domain properties
 * @properties_retries: Number of times left to read properties
 * @properties_changed_work: Work used to notify the remote domain that
 *			     our properties have changed
 * @properties_changed_retries: Number of times left to send properties
 *				changed notification
 * @link: Root switch link the remote domain is connected (ICM only)
 * @depth: Depth in the chain the remote domain is connected (ICM only)
 *
 * This structure represents connection across two domains (hosts).
 * Each XDomain contains zero or more services which are exposed as
 * &struct tb_service objects.
 */
struct tb_xdomain {
	struct device dev;
	struct tb *tb;
	uuid_t *remote_uuid;
	const uuid_t *local_uuid;
	u64 route;
	u16 vendor;
	u16 device;
	unsigned int local_max_hopid;
	unsigned int remote_max_hopid;
	struct mutex lock;
	const char *vendor_name;
	const char *device_name;
	unsigned int link_speed;
	unsigned int link_width;
	bool is_unplugged;
	bool needs_uuid;
	struct ida service_ids;
	struct ida in_hopids;
	struct ida out_hopids;
	u32 *local_property_block;
	u32 local_property_block_gen;
	u32 local_property_block_len;
	struct tb_property_dir *remote_properties;
	u32 remote_property_block_gen;
	struct delayed_work get_uuid_work;
	int uuid_retries;
	struct delayed_work get_properties_work;
	int properties_retries;
	struct delayed_work properties_changed_work;
	int properties_changed_retries;
	u8 link;
	u8 depth;
};

int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd);
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd);
int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid);
void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid);
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
			    int transmit_ring, int receive_path,
			    int receive_ring);
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
			     int transmit_ring, int receive_path,
			     int receive_ring);

static inline int tb_xdomain_disable_all_paths(struct tb_xdomain *xd)
{
	return tb_xdomain_disable_paths(xd, -1, -1, -1, -1);
}
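
/*
 * Sketch of the usual DMA tunnel setup: allocate HopIDs for both
 * directions (a negative @hopid asks for the next available one),
 * negotiate them with the remote side over the service specific
 * protocol, then enable the paths. Which HopID pairs with which ring is
 * up to the protocol; the pairing and the ring variables here are
 * illustrative only:
 *
 *	int in_hopid, out_hopid, ret;
 *
 *	in_hopid = tb_xdomain_alloc_in_hopid(xd, -1);
 *	out_hopid = tb_xdomain_alloc_out_hopid(xd, -1);
 *	if (in_hopid < 0 || out_hopid < 0)
 *		goto err_release;
 *
 *	ret = tb_xdomain_enable_paths(xd, out_hopid, tx_ring->hop,
 *				      in_hopid, rx_ring->hop);
 */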

struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);

static inline struct tb_xdomain *
tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_uuid(tb, uuid);
	mutex_unlock(&tb->lock);

	return xd;
}

static inline struct tb_xdomain *
tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_route(tb, route);
	mutex_unlock(&tb->lock);

	return xd;
}

static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
{
	if (xd)
		get_device(&xd->dev);
	return xd;
}

static inline void tb_xdomain_put(struct tb_xdomain *xd)
{
	if (xd)
		put_device(&xd->dev);
}

static inline bool tb_is_xdomain(const struct device *dev)
{
	return dev->type == &tb_xdomain_type;
}

static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
{
	if (tb_is_xdomain(dev))
		return container_of(dev, struct tb_xdomain, dev);
	return NULL;
}

int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
			size_t size, enum tb_cfg_pkg_type type);
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
		       size_t request_size, enum tb_cfg_pkg_type request_type,
		       void *response, size_t response_size,
		       enum tb_cfg_pkg_type response_type,
		       unsigned int timeout_msec);
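
/*
 * Sketch of a request/response exchange with the remote host. The packet
 * layout (struct example_req/example_resp and the opcode field) is
 * service protocol specific and purely hypothetical here:
 *
 *	struct example_req req = { .opcode = 1 };
 *	struct example_resp resp = {};
 *	int ret;
 *
 *	ret = tb_xdomain_request(xd, &req, sizeof(req),
 *				 TB_CFG_PKG_XDOMAIN_REQ,
 *				 &resp, sizeof(resp),
 *				 TB_CFG_PKG_XDOMAIN_RESP, 1000);
 *	if (ret)
 *		return ret;
 */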

/**
 * struct tb_protocol_handler - Protocol specific handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message. Returning %1
 *	      here tells the XDomain core that the message was handled
 *	      by this handler and should not be forwarded to other
 *	      handlers.
 * @data: Data passed with the callback
 * @list: Handlers are linked using this
 *
 * Structure used to register protocol handlers for XDomain messages.
 */
struct tb_protocol_handler {
	const uuid_t *uuid;
	int (*callback)(const void *buf, size_t size, void *data);
	void *data;
	struct list_head list;
};

int tb_register_protocol_handler(struct tb_protocol_handler *handler);
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
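
/*
 * Sketch of registering a handler for a hypothetical protocol UUID
 * (example_proto_uuid and example_handle_msg are made up for
 * illustration). Returning 1 from the callback tells the core the
 * message was consumed:
 *
 *	static int example_handle_msg(const void *buf, size_t size, void *data)
 *	{
 *		return 1;
 *	}
 *
 *	static struct tb_protocol_handler example_handler = {
 *		.uuid = &example_proto_uuid,
 *		.callback = example_handle_msg,
 *	};
 *
 *	tb_register_protocol_handler(&example_handler);
 */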

/**
 * struct tb_service - Thunderbolt service
 * @dev: XDomain service device
 * @id: ID of the service (shown in sysfs)
 * @key: Protocol key from the properties directory
 * @prtcid: Protocol ID from the properties directory
 * @prtcvers: Protocol version from the properties directory
 * @prtcrevs: Protocol software revision from the properties directory
 * @prtcstns: Protocol settings mask from the properties directory
 * @debugfs_dir: Pointer to the service debugfs directory. Always created
 *		 when debugfs is enabled. Can be used by service drivers to
 *		 add their own entries under the service.
 *
 * Each domain exposes set of services it supports as collection of
 * properties. For each service there will be one corresponding
 * &struct tb_service.
 */
struct tb_service {
	struct device dev;
	int id;
	const char *key;
	u32 prtcid;
	u32 prtcvers;
	u32 prtcrevs;
	u32 prtcstns;
	struct dentry *debugfs_dir;
};

static inline struct tb_service *tb_service_get(struct tb_service *svc)
{
	if (svc)
		get_device(&svc->dev);
	return svc;
}

static inline void tb_service_put(struct tb_service *svc)
{
	if (svc)
		put_device(&svc->dev);
}

static inline bool tb_is_service(const struct device *dev)
{
	return dev->type == &tb_service_type;
}

static inline struct tb_service *tb_to_service(struct device *dev)
{
	if (tb_is_service(dev))
		return container_of(dev, struct tb_service, dev);
	return NULL;
}

/**
 * struct tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when the driver is probed
 * @remove: Called when the driver is removed
 * @shutdown: Called at shutdown time to stop the service
 * @id_table: Table of service identifiers the driver supports
 */
struct tb_service_driver {
	struct device_driver driver;
	int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
	void (*remove)(struct tb_service *svc);
	void (*shutdown)(struct tb_service *svc);
	const struct tb_service_id *id_table;
};

#define TB_SERVICE(key, id)				\
	.match_flags = TBSVC_MATCH_PROTOCOL_KEY |	\
		       TBSVC_MATCH_PROTOCOL_ID,		\
	.protocol_key = (key),				\
	.protocol_id = (id)

int tb_register_service_driver(struct tb_service_driver *drv);
void tb_unregister_service_driver(struct tb_service_driver *drv);

static inline void *tb_service_get_drvdata(const struct tb_service *svc)
{
	return dev_get_drvdata(&svc->dev);
}

static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
{
	dev_set_drvdata(&svc->dev, data);
}

static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
{
	return tb_to_xdomain(svc->dev.parent);
}
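
/*
 * Skeleton of a service driver binding to services that advertise the
 * hypothetical protocol key "example" with protocol ID 1 (the names,
 * example_probe and example_remove are illustrative):
 *
 *	static const struct tb_service_id example_ids[] = {
 *		{ TB_SERVICE("example", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, example_ids);
 *
 *	static struct tb_service_driver example_driver = {
 *		.driver.name = "example",
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.id_table = example_ids,
 *	};
 *	module_driver(example_driver, tb_register_service_driver,
 *		      tb_unregister_service_driver);
 */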

/**
 * struct tb_nhi - thunderbolt native host interface
 * @lock: Must be held during ring creation/destruction. Is acquired by
 *	  interrupt_work when dispatching interrupts to individual rings.
 * @pdev: Pointer to the PCI device
 * @ops: NHI specific optional ops
 * @iobase: MMIO space of the NHI
 * @tx_rings: All Tx rings available on this host controller
 * @rx_rings: All Rx rings available on this host controller
 * @msix_ida: Used to allocate MSI-X vectors for rings
 * @going_away: The host controller device is about to disappear so when
 *		this flag is set, avoid touching the hardware anymore.
 * @interrupt_work: Work scheduled to handle ring interrupt when no
 *		    MSI-X is used.
 * @hop_count: Number of rings (end point hops) supported by NHI.
 */
struct tb_nhi {
	spinlock_t lock;
	struct pci_dev *pdev;
	const struct tb_nhi_ops *ops;
	void __iomem *iobase;
	struct tb_ring **tx_rings;
	struct tb_ring **rx_rings;
	struct ida msix_ida;
	bool going_away;
	struct work_struct interrupt_work;
	u32 hop_count;
};

/**
 * struct tb_ring - thunderbolt TX or RX ring associated with a NHI
 * @lock: Lock serializing actions to this ring. Must be acquired after
 *	  nhi->lock.
 * @nhi: Pointer to the native host controller interface
 * @size: Size of the ring
 * @hop: Hop (DMA channel) associated with this ring
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of the descriptors
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure
 * @is_tx: Is the ring Tx or Rx
 * @running: Is the ring running
 * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise.
 * @vector: MSI-X vector number the ring uses (only set if @irq is > 0)
 * @flags: Ring specific flags
 * @e2e_tx_hop: Transmit HopID when E2E is enabled. Only applicable to
 *		RX ring. For TX ring this should be set to %0.
 * @sof_mask: Bit mask used to detect start of frame PDF
 * @eof_mask: Bit mask used to detect end of frame PDF
 * @start_poll: Called when ring interrupt is triggered (if set).
 *		Disables interrupt and calls the callback.
 * @poll_data: Data passed to @start_poll
 */
struct tb_ring {
	spinlock_t lock;
	struct tb_nhi *nhi;
	int size;
	int hop;
	int head;
	int tail;
	struct ring_desc *descriptors;
	dma_addr_t descriptors_dma;
	struct list_head queue;
	struct list_head in_flight;
	struct work_struct work;
	bool is_tx:1;
	bool running:1;
	int irq;
	u8 vector;
	unsigned int flags;
	int e2e_tx_hop;
	u16 sof_mask;
	u16 eof_mask;
	void (*start_poll)(void *data);
	void *poll_data;
};

/* The ring is not stopped on suspend */
#define RING_FLAG_NO_SUSPEND	BIT(0)
/* Configure the ring to be in frame mode */
#define RING_FLAG_FRAME		BIT(1)
/* Enable end-to-end flow control */
#define RING_FLAG_E2E		BIT(2)

struct ring_frame;
typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);

/**
 * enum ring_desc_flags - Flags for DMA ring descriptor
 * %RING_DESC_ISOCH: Enable isochronous DMA (Tx only)
 * %RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
 * %RING_DESC_COMPLETED: Descriptor completed (set by NHI)
 * %RING_DESC_POSTED: Always set this
 * %RING_DESC_BUFFER_OVERRUN: RX buffer overrun (Rx only)
 * %RING_DESC_INTERRUPT: Request an interrupt on completion
 */
enum ring_desc_flags {
	RING_DESC_ISOCH = 0x1,
	RING_DESC_CRC_ERROR = 0x1,
	RING_DESC_COMPLETED = 0x2,
	RING_DESC_POSTED = 0x4,
	RING_DESC_BUFFER_OVERRUN = 0x4,
	RING_DESC_INTERRUPT = 0x8,
};

/**
 * struct ring_frame - For use with ring_rx/ring_tx
 * @buffer_phy: DMA mapped address of the frame
 * @callback: Callback called when the frame is finished (optional)
 * @list: Frame is linked to a queue using this
 * @size: Size of the frame in bytes (%0 means %4096)
 * @flags: Flags for the frame (see &enum ring_desc_flags)
 * @eof: End of frame protocol defined field
 * @sof: Start of frame protocol defined field
 */
struct ring_frame {
	dma_addr_t buffer_phy;
	ring_cb callback;
	struct list_head list;
	u32 size:12;
	u32 flags:12;
	u32 eof:4;
	u32 sof:4;
};

/* Minimum size of a buffer passed to an RX ring */
#define TB_FRAME_SIZE		0x100

struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags);
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, int e2e_tx_hop,
				 u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data);
void tb_ring_start(struct tb_ring *ring);
void tb_ring_stop(struct tb_ring *ring);
void tb_ring_free(struct tb_ring *ring);

int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);

/**
 * tb_ring_rx() - enqueue a frame on an RX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer_phy has to be set. The buffer must contain at least
 * %TB_FRAME_SIZE bytes.
 *
 * @frame->callback will be invoked with @frame->size, @frame->flags,
 * @frame->eof, @frame->sof set once the frame has been received.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero otherwise.
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}

/**
 * tb_ring_tx() - enqueue a frame on a TX ring
 * @ring: Ring to enqueue the frame
 * @frame: Frame to enqueue
 *
 * @frame->buffer_phy, @frame->size, @frame->eof and @frame->sof have to
 * be set.
 *
 * @frame->callback will be invoked with @frame->flags set once the frame
 * has been sent.
 *
 * If tb_ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if tb_ring_stop() has been called. Zero otherwise.
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{
	WARN_ON(!ring->is_tx);
	return __tb_ring_enqueue(ring, frame);
}
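
/*
 * Sketch of sending one frame over a TX ring. The @nhi, the hop number,
 * the buffer mapping (see tb_ring_dma_device() below) and
 * example_tx_done are illustrative:
 *
 *	static void example_tx_done(struct tb_ring *ring,
 *				    struct ring_frame *frame, bool canceled)
 *	{
 *		dma_unmap_single(tb_ring_dma_device(ring), frame->buffer_phy,
 *				 TB_FRAME_SIZE, DMA_TO_DEVICE);
 *	}
 *
 *	ring = tb_ring_alloc_tx(nhi, -1, 16, RING_FLAG_FRAME);
 *	tb_ring_start(ring);
 *
 *	frame.callback = example_tx_done;
 *	frame.size = TB_FRAME_SIZE;
 *	frame.sof = 0;
 *	frame.eof = 0;
 *	ret = tb_ring_tx(ring, &frame);
 */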

/* Polling mode helpers, used together with the @start_poll callback */
struct ring_frame *tb_ring_poll(struct tb_ring *ring);
void tb_ring_poll_complete(struct tb_ring *ring);

/**
 * tb_ring_dma_device() - Return device used for DMA mapping
 * @ring: Ring whose DMA device is retrieved
 *
 * Use this function when you are mapping DMA for buffers that are
 * passed to the ring for sending/receiving.
 */
static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
{
	return &ring->nhi->pdev->dev;
}
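
/*
 * For example, an RX buffer could be mapped like this (buf and the error
 * handling are illustrative; needs linux/dma-mapping.h):
 *
 *	frame.buffer_phy = dma_map_single(tb_ring_dma_device(ring), buf,
 *					  TB_FRAME_SIZE, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(tb_ring_dma_device(ring), frame.buffer_phy))
 *		return -ENOMEM;
 */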

#endif /* THUNDERBOLT_H_ */