// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Microsoft Corporation.
 *
 * Author:
 *   Jake Oshins <jakeo@microsoft.com>
 *
 * This driver acts as a paravirtual front-end for PCI Express root buses.
 * When a PCI Express function (either an entire device or a function of
 * a multi-function device) is being passed through to the VM, this driver
 * exposes a new bus to the guest VM.  This is modeled as a root PCI bus
 * because no bridges are being exposed to the VM.
 *
 * Configuration space access is implemented through a pair of pages of
 * memory-mapped I/O (see the comment above _hv_pcifront_read_config()
 * below), and interrupts are delivered as message-signaled interrupts
 * that are mapped by the Hyper-V parent partition (see
 * hv_compose_msi_msg() and hv_irq_unmask()).
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irqdomain.h>
#include <asm/irqdomain.h>
#include <asm/apic.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
#include <asm/mshyperv.h>

/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 */

#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)

enum pci_protocol_version_t {
	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),
	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),
	PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3),
};
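
/*
 * Illustrative example (not from the original source): with the macros
 * above, PCI_PROTOCOL_VERSION_1_2 encodes as 0x00010002 -- major 1 in the
 * high word, minor 2 in the low word.
 */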

#define CPU_AFFINITY_ALL	-1ULL

/*
 * Supported protocol versions in the order of probing - highest go
 * first.
 */
static enum pci_protocol_version_t pci_protocol_versions[] = {
	PCI_PROTOCOL_VERSION_1_3,
	PCI_PROTOCOL_VERSION_1_2,
	PCI_PROTOCOL_VERSION_1_1,
};

#define PCI_CONFIG_MMIO_LENGTH	0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)

#define MAX_SUPPORTED_MSI_MESSAGES 0x400

#define STATUS_REVISION_MISMATCH 0xC0000059

/* space for 32bit serial number as string */
#define SLOT_NAME_SIZE 11

/*
 * Message Types
 */
enum pci_message_type {
	/*
	 * Version 1.1
	 */
	PCI_MESSAGE_BASE                = 0x42490000,
	PCI_BUS_RELATIONS               = PCI_MESSAGE_BASE + 0,
	PCI_QUERY_BUS_RELATIONS         = PCI_MESSAGE_BASE + 1,
	PCI_POWER_STATE_CHANGE          = PCI_MESSAGE_BASE + 4,
	PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
	PCI_QUERY_RESOURCE_RESOURCES    = PCI_MESSAGE_BASE + 6,
	PCI_BUS_D0ENTRY                 = PCI_MESSAGE_BASE + 7,
	PCI_BUS_D0EXIT                  = PCI_MESSAGE_BASE + 8,
	PCI_READ_BLOCK                  = PCI_MESSAGE_BASE + 9,
	PCI_WRITE_BLOCK                 = PCI_MESSAGE_BASE + 0xA,
	PCI_EJECT                       = PCI_MESSAGE_BASE + 0xB,
	PCI_QUERY_STOP                  = PCI_MESSAGE_BASE + 0xC,
	PCI_REENABLE                    = PCI_MESSAGE_BASE + 0xD,
	PCI_QUERY_STOP_FAILED           = PCI_MESSAGE_BASE + 0xE,
	PCI_EJECTION_COMPLETE           = PCI_MESSAGE_BASE + 0xF,
	PCI_RESOURCES_ASSIGNED          = PCI_MESSAGE_BASE + 0x10,
	PCI_RESOURCES_RELEASED          = PCI_MESSAGE_BASE + 0x11,
	PCI_INVALIDATE_BLOCK            = PCI_MESSAGE_BASE + 0x12,
	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
	PCI_RESOURCES_ASSIGNED2         = PCI_MESSAGE_BASE + 0x16,
	PCI_CREATE_INTERRUPT_MESSAGE2   = PCI_MESSAGE_BASE + 0x17,
	PCI_DELETE_INTERRUPT_MESSAGE2   = PCI_MESSAGE_BASE + 0x18,
	PCI_BUS_RELATIONS2              = PCI_MESSAGE_BASE + 0x19,
	PCI_MESSAGE_MAXIMUM
};

/*
 * Structures defining the virtual PCI Express protocol.
 */

union pci_version {
	struct {
		u16 minor_version;
		u16 major_version;
	} parts;
	u32 version;
} __packed;

/*
 * Function numbers are 8-bits wide on Express, as interpreted through ARI,
 * which is all this driver does.  This representation is the one used in
 * Windows, which is what is expected when sending this back and forth with
 * the Hyper-V parent partition.
 */
union win_slot_encoding {
	struct {
		u32	dev:5;
		u32	func:3;
		u32	reserved:24;
	} bits;
	u32 slot;
} __packed;

/*
 * Pretty much as defined in the PCI Specifications.
 */
struct pci_function_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
} __packed;

enum pci_device_description_flags {
	HV_PCI_DEVICE_FLAG_NONE		= 0x0,
	HV_PCI_DEVICE_FLAG_NUMA_AFFINITY	= 0x1,
};

struct pci_function_description2 {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;
	u16	reserved;
} __packed;

/**
 * struct hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt. For "MSI", as first defined
 *			in PCI 2.2, this can be between 1 and
 *			32. For "MSI-X," as first defined in PCI
 *			3.0, this must be 1, as each MSI-X table
 *			entry would have its own descriptor.
 * @reserved:		Empty space
 * @cpu_mask:		All the target virtual processors.
 */
struct hv_msi_desc {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u32	reserved;
	u64	cpu_mask;
} __packed;

/**
 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
 * @vector:		IDT entry
 * @delivery_mode:	As defined in Intel's Programmer's
 *			Reference Manual, Volume 3, Chapter 8.
 * @vector_count:	Number of contiguous entries in the
 *			Interrupt Descriptor Table that are
 *			occupied by this Message-Signaled
 *			Interrupt, as for hv_msi_desc above.
 * @processor_count:	number of entries used in processor_array.
 * @processor_array:	All the target virtual processors.
 */
struct hv_msi_desc2 {
	u8	vector;
	u8	delivery_mode;
	u16	vector_count;
	u16	processor_count;
	u16	processor_array[32];
} __packed;

/**
 * struct tran_int_desc
 * @reserved:		unused, padding
 * @vector_count:	same as in hv_msi_desc
 * @data:		This is the "data payload" value that is
 *			written by the device when it generates
 *			a message-signaled interrupt, either MSI
 *			or MSI-X.
 * @address:		This is the address to which the data
 *			payload is written on interrupt
 *			generation.
 */
struct tran_int_desc {
	u16	reserved;
	u16	vector_count;
	u32	data;
	u64	address;
} __packed;

/*
 * A generic message format for virtual PCI.
 * Specific message formats are defined later in the file.
 */
struct pci_message {
	u32 type;
} __packed;

struct pci_child_message {
	struct pci_message message_type;
	union win_slot_encoding wslot;
} __packed;

struct pci_incoming_message {
	struct vmpacket_descriptor hdr;
	struct pci_message message_type;
} __packed;

struct pci_response {
	struct vmpacket_descriptor hdr;
	s32 status;	/* negative values are failures */
} __packed;

struct pci_packet {
	void (*completion_func)(void *context, struct pci_response *resp,
				int resp_packet_size);
	void *compl_ctxt;

	struct pci_message message[];
};
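
/*
 * How pci_packet is used (see hv_pci_onchannelcallback() below): one of
 * these is prepended to every request sent to the host, and its address is
 * passed as the VMBus transaction ID.  When the completion packet comes
 * back, the completion callback and its context are recovered directly
 * from that ID.
 */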

/*
 * Specific message types supporting the PCI protocol.
 */

/*
 * Version negotiation message.  Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * protocol_version: The protocol version requested.
 */
struct pci_version_request {
	struct pci_message message_type;
	u32 protocol_version;
} __packed;

/*
 * Bus D0 Entry.  This is sent from the guest to the host when the virtual
 * bus (PCI Express port) is ready for action.  It tells the host the base
 * address of the MMIO window chosen for configuration space access.
 */
struct pci_bus_d0_entry {
	struct pci_message message_type;
	u32 reserved;
	u64 mmio_base;
} __packed;

struct pci_bus_relations {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description func[];
} __packed;

struct pci_bus_relations2 {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description2 func[];
} __packed;

struct pci_q_res_req_response {
	struct vmpacket_descriptor hdr;
	s32 status;	/* negative values are failures */
	u32 probed_bar[PCI_STD_NUM_BARS];
} __packed;

struct pci_set_power {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 power_state;
	u32 reserved;
} __packed;

struct pci_set_power_response {
	struct vmpacket_descriptor hdr;
	s32 status;
	union win_slot_encoding wslot;
	u32 resultant_state;
	u32 reserved;
} __packed;

struct pci_resources_assigned {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptors;
	u32 reserved[4];
} __packed;

struct pci_resources_assigned2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptor_count;
	u8 reserved[70];
} __packed;

struct pci_create_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc int_desc;
} __packed;

struct pci_create_int_response {
	struct pci_response response;
	u32 reserved;
	struct tran_int_desc int_desc;
} __packed;

struct pci_create_interrupt2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc2 int_desc;
} __packed;

struct pci_delete_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct tran_int_desc int_desc;
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and bytes_requested.
 */
struct pci_read_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 bytes_requested;
} __packed;

struct pci_read_block_response {
	struct vmpacket_descriptor hdr;
	u32 status;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and byte_count.
 */
struct pci_write_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 byte_count;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

struct pci_dev_inval_block {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
	u64 block_mask;
} __packed;

struct pci_dev_incoming {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
} __packed;

struct pci_eject_response {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 status;
} __packed;

/* VMBus ring buffer size for the channel: four pages */
static int pci_ring_size = (4 * PAGE_SIZE);

/*
 * Driver specific state.
 */

enum hv_pcibus_state {
	hv_pcibus_init = 0,
	hv_pcibus_probed,
	hv_pcibus_installed,
	hv_pcibus_removing,
	hv_pcibus_maximum
};

struct hv_pcibus_device {
	struct pci_sysdata sysdata;
	/* Protocol version negotiated with the host */
	enum pci_protocol_version_t protocol_version;
	enum hv_pcibus_state state;
	struct hv_device *hdev;
	resource_size_t low_mmio_space;
	resource_size_t high_mmio_space;
	struct resource *mem_config;
	struct resource *low_mmio_res;
	struct resource *high_mmio_res;
	struct completion *survey_event;
	struct pci_bus *pci_bus;
	spinlock_t config_lock;		/* Avoid two threads writing index page */
	spinlock_t device_list_lock;	/* Protect lists below */
	void __iomem *cfg_addr;

	struct list_head resources_for_children;

	struct list_head children;
	struct list_head dr_list;

	struct msi_domain_info msi_info;
	struct irq_domain *irq_domain;

	spinlock_t retarget_msi_interrupt_lock;

	struct workqueue_struct *wq;

	/* Highest slot of child device with resources allocated */
	int wslot_res_allocated;

	/* hypercall arg, must not cross page boundary */
	struct hv_retarget_device_interrupt retarget_msi_interrupt_params;

	/*
	 * Don't put anything here: retarget_msi_interrupt_params must be last
	 */
};

/*
 * Tracks "Device Relations" messages from the host, which must be both
 * processed in order and deferred so that they don't run in the context
 * of the incoming packet callback.
 */
struct hv_dr_work {
	struct work_struct wrk;
	struct hv_pcibus_device *bus;
};

struct hv_pcidev_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;
};

struct hv_dr_state {
	struct list_head list_entry;
	u32 device_count;
	struct hv_pcidev_description func[];
};

enum hv_pcichild_state {
	hv_pcichild_init = 0,
	hv_pcichild_requirements,
	hv_pcichild_resourced,
	hv_pcichild_ejecting,
	hv_pcichild_maximum
};

struct hv_pci_dev {
	/* List protected by pci_rescan_remove_lock */
	struct list_head list_entry;
	refcount_t refs;
	enum hv_pcichild_state state;
	struct pci_slot *pci_slot;
	struct hv_pcidev_description desc;
	bool reported_missing;
	struct hv_pcibus_device *hbus;
	struct work_struct wrk;

	void (*block_invalidate)(void *context, u64 block_mask);
	void *invalidate_context;

	/*
	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
	 * read it back, for each of the BAR offsets within config space.
	 */
	u32 probed_bar[PCI_STD_NUM_BARS];
};

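/*
 * Completion context paired with hv_pci_generic_compl() below: it carries
 * the status from a host response back to the code that sent the request
 * and is waiting on host_event.
 */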
struct hv_pci_compl {
	struct completion host_event;
	s32 completion_status;
};

static void hv_pci_onchannelcallback(void *context);

/**
 * hv_pci_generic_compl() - Invoked for a completion packet
 * @context:		Set up by the sender of the packet.
 * @resp:		The response packet
 * @resp_packet_size:	Size in bytes of the packet
 *
 * This function is used to trigger an event and report status
 * for any message for which the completion packet contains a
 * status and nothing else.
 */
static void hv_pci_generic_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	if (resp_packet_size >= offsetofend(struct pci_response, status))
		comp_pkt->completion_status = resp->status;
	else
		comp_pkt->completion_status = -1;

	complete(&comp_pkt->host_event);
}

static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
					     u32 wslot);

static void get_pcichild(struct hv_pci_dev *hpdev)
{
	refcount_inc(&hpdev->refs);
}

static void put_pcichild(struct hv_pci_dev *hpdev)
{
	if (refcount_dec_and_test(&hpdev->refs))
		kfree(hpdev);
}

/*
 * There is no good way to get notified from vmbus_onoffer_rescind(),
 * so let's use polling here, since this is not a hot path.
 */
static int wait_for_response(struct hv_device *hdev,
			     struct completion *comp)
{
	while (true) {
		if (hdev->channel->rescind) {
			dev_warn_once(&hdev->device, "The device is gone.\n");
			return -ENODEV;
		}

		if (wait_for_completion_timeout(comp, HZ / 10))
			break;
	}

	return 0;
}

/**
 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
 * @devfn:	The Linux representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Windows representation
 */
static u32 devfn_to_wslot(int devfn)
{
	union win_slot_encoding wslot;

	wslot.slot = 0;
	wslot.bits.dev = PCI_SLOT(devfn);
	wslot.bits.func = PCI_FUNC(devfn);

	return wslot.slot;
}
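
/*
 * Worked example (illustrative): Linux devfn 0x0a is device 1, function 2.
 * With dev in bits 0-4 and func in bits 5-7 of win_slot_encoding, the
 * resulting Windows slot value is 1 | (2 << 5) = 0x41; wslot_to_devfn()
 * below performs the inverse mapping.
 */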

/**
 * wslot_to_devfn() - Convert from Windows PCI slot to Linux
 * @wslot:	The Windows representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Linux representation
 */
static int wslot_to_devfn(u32 wslot)
{
	union win_slot_encoding slot_no;

	slot_no.slot = wslot;
	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
}

/*
 * PCI Configuration Space for these root PCI buses is implemented as a pair
 * of pages in memory-mapped I/O space.  Writing to the first page chooses
 * which function is being read or written.  Once the first page has been
 * written to, the following page maps in the entire configuration space of
 * the function.
 */

/**
 * _hv_pcifront_read_config() - Internal PCI config read
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	Pointer to the buffer receiving the data
 */
static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
				     int size, u32 *val)
{
	unsigned long flags;
	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;

	/*
	 * If the attempt is to read the IDs or the ROM BAR, simulate that.
	 */
	if (where + size <= PCI_COMMAND) {
		memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
	} else if (where >= PCI_CLASS_REVISION && where + size <=
		   PCI_CACHE_LINE_SIZE) {
		memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
		       PCI_CLASS_REVISION, size);
	} else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
		   PCI_ROM_ADDRESS) {
		memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
		       PCI_SUBSYSTEM_VENDOR_ID, size);
	} else if (where >= PCI_ROM_ADDRESS && where + size <=
		   PCI_CAPABILITY_LIST) {
		/* ROM BARs are unimplemented */
		*val = 0;
	} else if (where >= PCI_INTERRUPT_LINE && where + size <=
		   PCI_INTERRUPT_PIN) {
		/*
		 * Interrupt Line and Interrupt PIN are hard-wired to zero
		 * because this front-end only supports message-signaled
		 * interrupts.
		 */
		*val = 0;
	} else if (where + size <= CFG_PAGE_SIZE) {
		spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
		/* Choose the function to be read. (See comment above) */
		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
		/* Make sure the function was chosen before we start reading. */
		mb();
		/* Read from that function's config space. */
		switch (size) {
		case 1:
			*val = readb(addr);
			break;
		case 2:
			*val = readw(addr);
			break;
		default:
			*val = readl(addr);
			break;
		}
		/*
		 * Make sure the read was done before we release the spinlock
		 * allowing consecutive reads/writes.
		 */
		mb();
		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
	} else {
		dev_err(&hpdev->hbus->hdev->device,
			"Attempt to read beyond a function's config space.\n");
	}
}

static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
{
	u16 ret;
	unsigned long flags;
	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET +
			     PCI_VENDOR_ID;

	spin_lock_irqsave(&hpdev->hbus->config_lock, flags);

	/* Choose the function to be read. (See comment above) */
	writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
	/* Make sure the function was chosen before we start reading. */
	mb();
	/* Read from that function's config space. */
	ret = readw(addr);
	/*
	 * mb() is not required here, because the
	 * spin_unlock_irqrestore() is a barrier.
	 */

	spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);

	return ret;
}

/**
 * _hv_pcifront_write_config() - Internal PCI config write
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	The data being transferred
 */
static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
				      int size, u32 val)
{
	unsigned long flags;
	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;

	if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
	    where + size <= PCI_CAPABILITY_LIST) {
		/* SSIDs and ROM BARs are read-only */
	} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
		spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
		/* Choose the function to be written. (See comment above) */
		writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
		/* Make sure the function was chosen before we start writing. */
		wmb();
		/* Write to that function's config space. */
		switch (size) {
		case 1:
			writeb(val, addr);
			break;
		case 2:
			writew(val, addr);
			break;
		default:
			writel(val, addr);
			break;
		}
		/*
		 * Make sure the write was done before we release the spinlock
		 * allowing consecutive reads/writes.
		 */
		mb();
		spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
	} else {
		dev_err(&hpdev->hbus->hdev->device,
			"Attempt to write beyond a function's config space.\n");
	}
}

/**
 * hv_pcifront_read_config() - Read configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be read
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 *val)
{
	struct hv_pcibus_device *hbus =
		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_read_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/**
 * hv_pcifront_write_config() - Write configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be written to device
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 val)
{
	struct hv_pcibus_device *hbus =
		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_write_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/* PCIe operations */
static struct pci_ops hv_pcifront_ops = {
	.read  = hv_pcifront_read_config,
	.write = hv_pcifront_write_config,
};

/*
 * Paravirtual backchannel
 *
 * Hyper-V SR-IOV provides a backchannel mechanism in software for
 * communication between a VF driver and a PF driver.  These
 * "configuration blocks" are similar in concept to PCI configuration space,
 * but instead of doing reads and writes in 32-bit chunks through a very slow
 * path, packets of up to 128 bytes can be sent or received asynchronously.
 *
 * Nearly every SR-IOV device contains just such a communications channel in
 * hardware, so using this one in software is usually optional.  Using the
 * software channel, however, allows driver implementers to leverage software
 * tools that fuzz the communications channel looking for vulnerabilities.
 *
 * The usage model for these packets puts the responsibility for reading or
 * writing on the VF driver.  The VF driver sends a read or a write packet,
 * indicating which "block" is being referred to by number.
 *
 * If the PF driver wishes to initiate communication, it can "invalidate" one
 * or more of the first 64 blocks.  This invalidation is delivered via a
 * callback supplied to this driver by the VF driver.
 *
 * No protocol is implied, except that supplied by the PF and VF drivers.
 */

struct hv_read_config_compl {
	struct hv_pci_compl comp_pkt;
	void *buf;
	unsigned int len;
	unsigned int bytes_returned;
};

/**
 * hv_pci_read_config_compl() - Invoked when a response packet
 * for a read config block operation arrives.
 * @context:		Identifies the read config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
				     int resp_packet_size)
{
	struct hv_read_config_compl *comp = context;
	struct pci_read_block_response *read_resp =
		(struct pci_read_block_response *)resp;
	unsigned int data_len, hdr_len;

	hdr_len = offsetof(struct pci_read_block_response, bytes);
	if (resp_packet_size < hdr_len) {
		comp->comp_pkt.completion_status = -1;
		goto out;
	}

	data_len = resp_packet_size - hdr_len;
	if (data_len > 0 && read_resp->status == 0) {
		comp->bytes_returned = min(comp->len, data_len);
		memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
	} else {
		comp->bytes_returned = 0;
	}

	comp->comp_pkt.completion_status = read_resp->status;
out:
	complete(&comp->comp_pkt.host_event);
}

/**
 * hv_read_config_block() - Sends a read config block request to
 * the back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer into which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which has been requested.
 * @bytes_returned:	Size which came back from the back-end driver.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_read_config_block(struct pci_dev *pdev, void *buf,
				unsigned int len, unsigned int block_id,
				unsigned int *bytes_returned)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_read_block)];
	} pkt;
	struct hv_read_config_compl comp_pkt;
	struct pci_read_block *read_blk;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.comp_pkt.host_event);
	comp_pkt.buf = buf;
	comp_pkt.len = len;

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_read_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	read_blk = (struct pci_read_block *)&pkt.pkt.message;
	read_blk->message_type.type = PCI_READ_BLOCK;
	read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	read_blk->block_id = block_id;
	read_blk->bytes_requested = len;

	ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
			       sizeof(*read_blk), (unsigned long)&pkt.pkt,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.comp_pkt.completion_status != 0 ||
	    comp_pkt.bytes_returned == 0) {
		dev_err(&hbus->hdev->device,
			"Read Config Block failed: 0x%x, bytes_returned=%d\n",
			comp_pkt.comp_pkt.completion_status,
			comp_pkt.bytes_returned);
		return -EIO;
	}

	*bytes_returned = comp_pkt.bytes_returned;
	return 0;
}

/**
 * hv_pci_write_config_compl() - Invoked when a response packet for a write
 * config block operation arrives.
 * @context:		Identifies the write config operation
 * @resp:		The response packet itself
 * @resp_packet_size:	Size in bytes of the response packet
 */
static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
				      int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}

/**
 * hv_write_config_block() - Sends a write config block request to the
 * back-end driver running in the Hyper-V parent partition.
 * @pdev:		The PCI driver's representation for this device.
 * @buf:		Buffer from which the config block will be copied.
 * @len:		Size in bytes of buf.
 * @block_id:		Identifies the config block which is being written.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_write_config_block(struct pci_dev *pdev, void *buf,
				 unsigned int len, unsigned int block_id)
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct {
		struct pci_packet pkt;
		char buf[sizeof(struct pci_write_block)];
		u32 reserved;
	} pkt;
	struct hv_pci_compl comp_pkt;
	struct pci_write_block *write_blk;
	u32 pkt_size;
	int ret;

	if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
		return -EINVAL;

	init_completion(&comp_pkt.host_event);

	memset(&pkt, 0, sizeof(pkt));
	pkt.pkt.completion_func = hv_pci_write_config_compl;
	pkt.pkt.compl_ctxt = &comp_pkt;
	write_blk = (struct pci_write_block *)&pkt.pkt.message;
	write_blk->message_type.type = PCI_WRITE_BLOCK;
	write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
	write_blk->block_id = block_id;
	write_blk->byte_count = len;
	memcpy(write_blk->bytes, buf, len);
	pkt_size = offsetof(struct pci_write_block, bytes) + len;
	/*
	 * This quirk is required on some hosts shipped around 2018, because
	 * these hosts don't check the pkt_size correctly (new hosts have been
	 * fixed since early 2019). The quirk is also safe on very old hosts
	 * and new hosts, because, on them, what really matters is the length
	 * specified in write_blk->byte_count.
	 */
	pkt_size += sizeof(pkt.reserved);

	ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
			       (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
	if (ret)
		return ret;

	if (comp_pkt.completion_status != 0) {
		dev_err(&hbus->hdev->device,
			"Write Config Block failed: 0x%x\n",
			comp_pkt.completion_status);
		return -EIO;
	}

	return 0;
}

/**
 * hv_register_block_invalidate() - Invoked when a config block invalidation
 * arrives from the back-end driver.
 * @pdev:		The PCI driver's representation for this device.
 * @context:		Identifies the device.
 * @block_invalidate:	Identifies the callback to be invoked when a config
 *			block invalidation arrives.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
					void (*block_invalidate)(void *context,
								 u64 block_mask))
{
	struct hv_pcibus_device *hbus =
		container_of(pdev->bus->sysdata, struct hv_pcibus_device,
			     sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev)
		return -ENODEV;

	hpdev->block_invalidate = block_invalidate;
	hpdev->invalidate_context = context;

	put_pcichild(hpdev);
	return 0;
}
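
/*
 * Minimal usage sketch of the backchannel above (illustrative only;
 * my_invalidate_cb, ctx and blk_id are hypothetical names, and error
 * handling is omitted):
 *
 *	unsigned int bytes;
 *	u8 cfg[HV_CONFIG_BLOCK_SIZE_MAX];
 *
 *	hv_register_block_invalidate(pdev, ctx, my_invalidate_cb);
 *	hv_read_config_block(pdev, cfg, sizeof(cfg), blk_id, &bytes);
 *	hv_write_config_block(pdev, cfg, bytes, blk_id);
 */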

/* Interrupt management hooks */
static void hv_int_desc_free(struct hv_pci_dev *hpdev,
			     struct tran_int_desc *int_desc)
{
	struct pci_delete_interrupt *int_pkt;
	struct {
		struct pci_packet pkt;
		u8 buffer[sizeof(struct pci_delete_interrupt)];
	} ctxt;

	memset(&ctxt, 0, sizeof(ctxt));
	int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
	int_pkt->message_type.type =
		PCI_DELETE_INTERRUPT_MESSAGE;
	int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
	int_pkt->int_desc = *int_desc;
	vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
			 (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0);
	kfree(int_desc);
}

/**
 * hv_msi_free() - Free the MSI.
 * @domain:	The interrupt domain pointer
 * @info:	Extra MSI-related context
 * @irq:	Identifies the IRQ.
 *
 * The Hyper-V parent partition and hypervisor are tracking the
 * messages that are in use, keeping the interrupt redirection
 * table up to date.  This callback sends a message that frees
 * the IRT entry and related tracking state.
 */
static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int irq)
{
	struct hv_pcibus_device *hbus;
	struct hv_pci_dev *hpdev;
	struct pci_dev *pdev;
	struct tran_int_desc *int_desc;
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
	struct msi_desc *msi = irq_data_get_msi_desc(irq_data);

	pdev = msi_desc_to_pci_dev(msi);
	hbus = info->data;
	int_desc = irq_data_get_irq_chip_data(irq_data);
	if (!int_desc)
		return;

	irq_data->chip_data = NULL;
	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev) {
		kfree(int_desc);
		return;
	}

	hv_int_desc_free(hpdev, int_desc);
	put_pcichild(hpdev);
}

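/*
 * hv_set_affinity() below simply delegates to the parent x86 vector domain;
 * the Hyper-V-specific retargeting of the interrupt happens afterwards in
 * hv_irq_unmask().
 */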
static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
			   bool force)
{
	struct irq_data *parent = data->parent_data;

	return parent->chip->irq_set_affinity(parent, dest, force);
}

static void hv_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
}

/**
 * hv_irq_unmask() - "Unmask" the IRQ by setting its current
 * affinity.
 * @data:	Describes the IRQ
 *
 * Build new a destination for the MSI and make a hypercall to
 * update the Interrupt Redirection Table. "Device Logical ID"
 * is built out of this PCI bus's instance GUID and the function
 * number of the device.
 */
static void hv_irq_unmask(struct irq_data *data)
{
	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
	struct irq_cfg *cfg = irqd_cfg(data);
	struct hv_retarget_device_interrupt *params;
	struct hv_pcibus_device *hbus;
	struct cpumask *dest;
	cpumask_var_t tmp;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	unsigned long flags;
	u32 var_size = 0;
	int cpu, nr_bank;
	u64 res;

	dest = irq_data_get_effective_affinity_mask(data);
	pdev = msi_desc_to_pci_dev(msi_desc);
	pbus = pdev->bus;
	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);

	spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);

	params = &hbus->retarget_msi_interrupt_params;
	memset(params, 0, sizeof(*params));
	params->partition_id = HV_PARTITION_ID_SELF;
	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
	hv_set_msi_entry_from_desc(&params->int_entry.msi_entry, msi_desc);
	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
			   (hbus->hdev->dev_instance.b[4] << 16) |
			   (hbus->hdev->dev_instance.b[7] << 8) |
			   (hbus->hdev->dev_instance.b[6] & 0xf8) |
			   PCI_FUNC(pdev->devfn);
	params->int_target.vector = cfg->vector;

	if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
		/*
		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
		 * with >64 VP support: some of the structures in the input
		 * for the hypercall are variable size.
		 */
		params->int_target.flags |=
			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;

		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
			res = 1;
			goto exit_unlock;
		}

		cpumask_and(tmp, dest, cpu_online_mask);
		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
		free_cpumask_var(tmp);

		if (nr_bank <= 0) {
			res = 1;
			goto exit_unlock;
		}

		/*
		 * var-sized hypercall, var-size starts after vp_mask (thus
		 * vp_set.format does not count, but vp_set.valid_bank_mask
		 * does).
		 */
		var_size = 1 + nr_bank;
	} else {
		for_each_cpu_and(cpu, dest, cpu_online_mask) {
			params->int_target.vp_mask |=
				(1ULL << hv_cpu_number_to_vp_number(cpu));
		}
	}

	/* 17 is the bit offset of the variable header size in the hypercall */
	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
			      params, NULL);

exit_unlock:
	spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);

	/*
	 * During hibernation, when a CPU is offlined, the kernel tries
	 * to move the interrupt to the remaining CPUs that haven't
	 * been offlined yet. In this case, the above hv_do_hypercall()
	 * always fails since the vmbus channel has been closed:
	 * refer to cpu_disable_common() -> fixup_irqs() ->
	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
	 *
	 * Suppress the error message for hibernation because the failure
	 * during hibernation does not matter (at this time all the devices
	 * have been frozen). Note: the correct affinity info is still updated
	 * into the irqdata data structure in migrate_one_irq() ->
	 * irq_do_set_affinity() -> hv_set_affinity(), so when the VM resumes,
	 * the interrupt can be restored with the correct affinity.
	 */
	if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
		dev_err(&hbus->hdev->device,
			"%s() failed: %#llx", __func__, res);

	pci_msi_unmask_irq(data);
}

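/*
 * Completion context for PCI_CREATE_INTERRUPT_MESSAGE(2): in addition to the
 * status, it carries back the transacted interrupt descriptor (the
 * address/data pair the device should use to raise the interrupt).
 */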
struct compose_comp_ctxt {
	struct hv_pci_compl comp_pkt;
	struct tran_int_desc int_desc;
};

static void hv_pci_compose_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct compose_comp_ctxt *comp_pkt = context;
	struct pci_create_int_response *int_resp =
		(struct pci_create_int_response *)resp;

	comp_pkt->comp_pkt.completion_status = resp->status;
	comp_pkt->int_desc = int_resp->int_desc;
	complete(&comp_pkt->comp_pkt.host_event);
}

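/*
 * The wire format of the interrupt-creation request depends on the
 * negotiated protocol version: v1 (below) targets vCPUs with a 64-bit
 * cpu_mask, while v2 sends an explicit list of virtual-processor numbers.
 * Both are filled in with a dummy vCPU here and retargeted later in
 * hv_irq_unmask().
 */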
static u32 hv_compose_msi_req_v1(
	struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
	u32 slot, u8 vector)
{
	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
	int_pkt->wslot.slot = slot;
	int_pkt->int_desc.vector = vector;
	int_pkt->int_desc.vector_count = 1;
	int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;

	/*
	 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
	 * hv_irq_unmask().
	 */
	int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;

	return sizeof(*int_pkt);
}

static u32 hv_compose_msi_req_v2(
	struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
	u32 slot, u8 vector)
{
	int cpu;

	int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
	int_pkt->wslot.slot = slot;
	int_pkt->int_desc.vector = vector;
	int_pkt->int_desc.vector_count = 1;
	int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;

	/*
	 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
	 * by subsequent retarget in hv_irq_unmask().
	 */
	cpu = cpumask_first_and(affinity, cpu_online_mask);
	int_pkt->int_desc.processor_array[0] =
		hv_cpu_number_to_vp_number(cpu);
	int_pkt->int_desc.processor_count = 1;

	return sizeof(*int_pkt);
}

/**
 * hv_compose_msi_msg() - Compute an MSI message
 * @data:	The IRQ context for this interrupt.
 * @msg:	Buffer into which the message is formatted.
 *
 * This function unpacks the IRQ looking for target CPU set, IDT
 * vector and mode and sends a message to the parent partition
 * asking for a mapping for that tuple in this partition.  The
 * response supplies a data value and address to which that data
 * should be written to trigger that interrupt.
 */
static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_cfg *cfg = irqd_cfg(data);
	struct hv_pcibus_device *hbus;
	struct vmbus_channel *channel;
	struct hv_pci_dev *hpdev;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	struct cpumask *dest;
	struct compose_comp_ctxt comp;
	struct tran_int_desc *int_desc;
	struct {
		struct pci_packet pci_pkt;
		union {
			struct pci_create_interrupt v1;
			struct pci_create_interrupt2 v2;
		} int_pkts;
	} __packed ctxt;

	u32 size;
	int ret;

	pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
	dest = irq_data_get_effective_affinity_mask(data);
	pbus = pdev->bus;
	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
	channel = hbus->hdev->channel;
	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
	if (!hpdev)
		goto return_null_message;

	/* Free any previous message that might have already been composed. */
	if (data->chip_data) {
		int_desc = data->chip_data;
		data->chip_data = NULL;
		hv_int_desc_free(hpdev, int_desc);
	}

	int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
	if (!int_desc)
		goto drop_reference;

	memset(&ctxt, 0, sizeof(ctxt));
	init_completion(&comp.comp_pkt.host_event);
	ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
	ctxt.pci_pkt.compl_ctxt = &comp;

	switch (hbus->protocol_version) {
	case PCI_PROTOCOL_VERSION_1_1:
		size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
					     dest,
					     hpdev->desc.win_slot.slot,
					     cfg->vector);
		break;

	case PCI_PROTOCOL_VERSION_1_2:
	case PCI_PROTOCOL_VERSION_1_3:
		size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
					     dest,
					     hpdev->desc.win_slot.slot,
					     cfg->vector);
		break;

	default:
		/* As we only negotiate protocol versions known to this driver,
		 * this path should never hit. However, it is not a hot path,
		 * so we print a message to aid future updates.
		 */
		dev_err(&hbus->hdev->device,
			"Unexpected vPCI protocol, update driver.");
		goto free_int_desc;
	}

	ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
			       size, (unsigned long)&ctxt.pci_pkt,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret) {
		dev_err(&hbus->hdev->device,
			"Sending request for interrupt failed: 0x%x",
			comp.comp_pkt.completion_status);
		goto free_int_desc;
	}

	/*
	 * Prevents hv_pci_onchannelcallback() from running concurrently
	 * in the tasklet.
	 */
	tasklet_disable_in_atomic(&channel->callback_event);

	/*
	 * Since this function is called with IRQ locks held, can't
	 * do normal wait for completion; instead poll.
	 */
	while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
		unsigned long flags;

		/* 0xFFFF means an invalid PCI VENDOR ID. */
		if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
			dev_err_once(&hbus->hdev->device,
				     "the device has gone\n");
			goto enable_tasklet;
		}

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer.  Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section.  See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock_irqsave(&channel->sched_lock, flags);
		if (unlikely(channel->onchannel_callback == NULL)) {
			spin_unlock_irqrestore(&channel->sched_lock, flags);
			goto enable_tasklet;
		}
		hv_pci_onchannelcallback(hbus);
		spin_unlock_irqrestore(&channel->sched_lock, flags);

		if (hpdev->state == hv_pcichild_ejecting) {
			dev_err_once(&hbus->hdev->device,
				     "the device is being ejected\n");
			goto enable_tasklet;
		}

		udelay(100);
	}

	tasklet_enable(&channel->callback_event);

	if (comp.comp_pkt.completion_status < 0) {
		dev_err(&hbus->hdev->device,
			"Request for interrupt failed: 0x%x",
			comp.comp_pkt.completion_status);
		goto free_int_desc;
	}

	/*
	 * Record the assignment so that this can be unwound later. Using
	 * irq_set_chip_data() here would be appropriate, but the lock it takes
	 * is already held.
	 */
	*int_desc = comp.int_desc;
	data->chip_data = int_desc;

	/* Pass parameters to the underlying PCI subsystem. */
	msg->address_hi = comp.int_desc.address >> 32;
	msg->address_lo = comp.int_desc.address & 0xffffffff;
	msg->data = comp.int_desc.data;

	put_pcichild(hpdev);
	return;

enable_tasklet:
	tasklet_enable(&channel->callback_event);
free_int_desc:
	kfree(int_desc);
drop_reference:
	put_pcichild(hpdev);
return_null_message:
	msg->address_hi = 0;
	msg->address_lo = 0;
	msg->data = 0;
}

/* HW Interrupt Chip Descriptor */
static struct irq_chip hv_msi_irq_chip = {
	.name			= "Hyper-V PCIe MSI",
	.irq_compose_msi_msg	= hv_compose_msi_msg,
	.irq_set_affinity	= hv_set_affinity,
	.irq_ack		= irq_chip_ack_parent,
	.irq_mask		= hv_irq_mask,
	.irq_unmask		= hv_irq_unmask,
};

static struct msi_domain_ops hv_msi_ops = {
	.msi_prepare	= pci_msi_prepare,
	.msi_free	= hv_msi_free,
};

/**
 * hv_pcie_init_irq_domain() - Initialize IRQ domain
 * @hbus:	The root PCI bus
 *
 * This function creates an IRQ domain which will be used for
 * interrupts from devices that have been passed through.  These
 * devices only support MSI and MSI-X, not line-based interrupts
 * or simulations of line-based interrupts through PCIe's
 * fabric-layer messages.  Because interrupts are remapped, we
 * can support multi-message MSI here.
 *
 * Return: '0' on success and error value on failure
 */
static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
{
	hbus->msi_info.chip = &hv_msi_irq_chip;
	hbus->msi_info.ops = &hv_msi_ops;
	hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
		MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
		MSI_FLAG_PCI_MSIX);
	hbus->msi_info.handler = handle_edge_irq;
	hbus->msi_info.handler_name = "edge";
	hbus->msi_info.data = hbus;
	hbus->irq_domain = pci_msi_create_irq_domain(hbus->sysdata.fwnode,
						     &hbus->msi_info,
						     x86_vector_domain);
	if (!hbus->irq_domain) {
		dev_err(&hbus->hdev->device,
			"Failed to build an MSI IRQ domain\n");
		return -ENODEV;
	}

	return 0;
}

/**
 * get_bar_size() - Get the address size of a BAR
 * @bar_val:	Value that a BAR returned after -1 was written
 *		to it.
 *
 * This function returns the size of the BAR, rounded up to 1
 * page.  It has to be rounded up because the hypervisor's page
 * table entry that maps the BAR into the VM can't specify an
 * offset within a page.  The invariant is that the hypervisor
 * must place any BARs of smaller than page length at the
 * beginning of a page.
 *
 * Return:	Size in bytes of the consumed MMIO space.
 */
static u64 get_bar_size(u64 bar_val)
{
	return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
			PAGE_SIZE);
}
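
/*
 * Worked example (illustrative): for a 32-bit BAR that reads back 0xFFFFC000
 * after all-ones was written, the callers below first set the upper 32 bits
 * (bar_val |= 0xffffffff00000000ULL), so 1 + ~bar_val = 0x4000, i.e. a
 * 16 KiB region, which round_up() leaves unchanged on 4 KiB-page systems.
 */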

/**
 * survey_child_resources() - Total all MMIO requirements
 * @hbus:	Root PCI bus, as understood by this driver
 */
static void survey_child_resources(struct hv_pcibus_device *hbus)
{
	struct hv_pci_dev *hpdev;
	resource_size_t bar_size = 0;
	unsigned long flags;
	struct completion *event;
	u64 bar_val;
	int i;

	/* If nobody is waiting on the answer, don't compute it. */
	event = xchg(&hbus->survey_event, NULL);
	if (!event)
		return;

	/* If the answer has already been computed, go with it. */
	if (hbus->low_mmio_space || hbus->high_mmio_space) {
		complete(event);
		return;
	}

	spin_lock_irqsave(&hbus->device_list_lock, flags);

	/*
	 * Due to an interesting quirk of the PCI spec, all memory regions
	 * for a child device are a power of 2 in size and aligned in memory,
	 * so it's sufficient to just add them up without tracking alignment.
	 */
	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
			if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
				dev_err(&hbus->hdev->device,
					"There's an I/O BAR in this list!\n");

			if (hpdev->probed_bar[i] != 0) {
				/*
				 * A probed BAR has all the upper bits set that
				 * can be changed.
				 */
				bar_val = hpdev->probed_bar[i];
				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
					bar_val |=
					((u64)hpdev->probed_bar[++i] << 32);
				else
					bar_val |= 0xffffffff00000000ULL;

				bar_size = get_bar_size(bar_val);

				if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
					hbus->high_mmio_space += bar_size;
				else
					hbus->low_mmio_space += bar_size;
			}
		}
	}

	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
	complete(event);
}

/**
 * prepopulate_bars() - Fill in BARs with defaults
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * The core PCI driver code seems much, much happier if the BARs
 * for a device have values upon first scan. So fill them in.
 * The algorithm below works down from large sizes to small,
 * attempting to pack the assignments optimally. The assumption,
 * enforced by other parts of the code, is that the beginning of
 * the memory-mapped I/O space will be aligned on the largest
 * BAR size.
 */
static void prepopulate_bars(struct hv_pcibus_device *hbus)
{
	resource_size_t high_size = 0;
	resource_size_t low_size = 0;
	resource_size_t high_base = 0;
	resource_size_t low_base = 0;
	resource_size_t bar_size;
	struct hv_pci_dev *hpdev;
	unsigned long flags;
	u64 bar_val;
	u32 command;
	bool high;
	int i;

	if (hbus->low_mmio_space) {
		low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
		low_base = hbus->low_mmio_res->start;
	}

	if (hbus->high_mmio_space) {
		high_size = 1ULL <<
			(63 - __builtin_clzll(hbus->high_mmio_space));
		high_base = hbus->high_mmio_res->start;
	}

	spin_lock_irqsave(&hbus->device_list_lock, flags);

	/*
	 * Clear the memory enable bit, in case it's already set. This occurs
	 * in the suspend path of hibernation, where the device is suspended,
	 * resumed and suspended again: see hibernation_snapshot() and
	 * hibernation_platform_enter().
	 *
	 * If the memory enable bit is already set, Hyper-V silently ignores
	 * the below BAR updates, and the related PCI device driver can not
	 * work, because reading from the device register(s) always returns
	 * 0xFFFFFFFF.
	 */
	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
		command &= ~PCI_COMMAND_MEMORY;
		_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
	}

	/* Pick addresses for the BARs. */
	do {
		list_for_each_entry(hpdev, &hbus->children, list_entry) {
			for (i = 0; i < PCI_STD_NUM_BARS; i++) {
				bar_val = hpdev->probed_bar[i];
				if (bar_val == 0)
					continue;
				high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
				if (high) {
					bar_val |=
						((u64)hpdev->probed_bar[i + 1]
						 << 32);
				} else {
					bar_val |= 0xffffffffULL << 32;
				}
				bar_size = get_bar_size(bar_val);
				if (high) {
					if (high_size != bar_size) {
						i++;
						continue;
					}
					_hv_pcifront_write_config(hpdev,
						PCI_BASE_ADDRESS_0 + (4 * i),
						4,
						(u32)(high_base & 0xffffff00));
					i++;
					_hv_pcifront_write_config(hpdev,
						PCI_BASE_ADDRESS_0 + (4 * i),
						4, (u32)(high_base >> 32));
					high_base += bar_size;
				} else {
					if (low_size != bar_size)
						continue;
					_hv_pcifront_write_config(hpdev,
						PCI_BASE_ADDRESS_0 + (4 * i),
						4,
						(u32)(low_base & 0xffffff00));
					low_base += bar_size;
				}
			}
			if (high_size <= 1 && low_size <= 1) {
				/* Set the memory enable bit. */
				_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2,
							 &command);
				command |= PCI_COMMAND_MEMORY;
				_hv_pcifront_write_config(hpdev, PCI_COMMAND, 2,
							  command);
				break;
			}
		}

		high_size >>= 1;
		low_size >>= 1;
	} while (high_size || low_size);

	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
}

/*
 * Assign entries in sysfs pci slot directory.
 *
 * Note that this function does not need to lock the children list
 * because it is called from pci_devices_present_work(), which
 * is serialized with hv_eject_device_work() because they are on the
 * same ordered workqueue. Therefore hbus->children list will not change
 * even when pci_create_slot() sleeps.
 */
static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
{
	struct hv_pci_dev *hpdev;
	char name[SLOT_NAME_SIZE];
	int slot_nr;

	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		if (hpdev->pci_slot)
			continue;

		slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
		snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
		hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
						  name, NULL);
		if (IS_ERR(hpdev->pci_slot)) {
			pr_warn("pci_create slot %s failed\n", name);
			hpdev->pci_slot = NULL;
		}
	}
}

/*
 * Remove entries in sysfs pci slot directory.
 */
static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
{
	struct hv_pci_dev *hpdev;

	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		if (!hpdev->pci_slot)
			continue;
		pci_destroy_slot(hpdev->pci_slot);
		hpdev->pci_slot = NULL;
	}
}

/*
 * Set NUMA node for the devices on the bus
 */
static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
{
	struct pci_dev *dev;
	struct pci_bus *bus = hbus->pci_bus;
	struct hv_pci_dev *hv_dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
		if (!hv_dev)
			continue;

		if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY)
			set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node);

		put_pcichild(hv_dev);
	}
}

/**
 * create_root_hv_pci_bus() - Expose a new root PCI bus
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * Return: 0 on success, -errno on failure
 */
static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
{
	/* Register the device */
	hbus->pci_bus = pci_create_root_bus(&hbus->hdev->device,
					    0, /* bus number is always zero */
					    &hv_pcifront_ops,
					    &hbus->sysdata,
					    &hbus->resources_for_children);
	if (!hbus->pci_bus)
		return -ENODEV;

	pci_lock_rescan_remove();
	pci_scan_child_bus(hbus->pci_bus);
	hv_pci_assign_numa_node(hbus);
	pci_bus_assign_resources(hbus->pci_bus);
	hv_pci_assign_slots(hbus);
	pci_bus_add_devices(hbus->pci_bus);
	pci_unlock_rescan_remove();
	hbus->state = hv_pcibus_installed;
	return 0;
}

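/*
 * Completion context used while querying a child device's resource
 * requirements; q_resource_requirements() below copies the probed BAR
 * values into the hv_pci_dev it points at.
 */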
struct q_res_req_compl {
	struct completion host_event;
	struct hv_pci_dev *hpdev;
};

/**
 * q_resource_requirements() - Query Resource Requirements
 * @context:		The completion context.
 * @resp:		The response that came from the host.
 * @resp_packet_size:	The size in bytes of resp.
 *
 * This function is invoked on completion of a Query Resource
 * Requirements packet.
 */
static void q_resource_requirements(void *context, struct pci_response *resp,
				    int resp_packet_size)
{
	struct q_res_req_compl *completion = context;
	struct pci_q_res_req_response *q_res_req =
		(struct pci_q_res_req_response *)resp;
	int i;

	if (resp->status < 0) {
		dev_err(&completion->hpdev->hbus->hdev->device,
			"query resource requirements failed: %x\n",
			resp->status);
	} else {
		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
			completion->hpdev->probed_bar[i] =
				q_res_req->probed_bar[i];
		}
	}

	complete(&completion->host_event);
}

/**
 * new_pcichild_device() - Create a new child device
 * @hbus:	The bus to which this device will belong.
 * @desc:	The information supplied so far from the host
 *		about the device.
 *
 * This function creates the tracking structure for a new child
 * device and kicks off the process of figuring out what it is.
 *
 * Return: Pointer to the new tracking struct
 */
static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
					      struct hv_pcidev_description *desc)
{
	struct hv_pci_dev *hpdev;
	struct pci_child_message *res_req;
	struct q_res_req_compl comp_pkt;
	struct {
		struct pci_packet init_packet;
		u8 buffer[sizeof(struct pci_child_message)];
	} pkt;
	unsigned long flags;
	int ret;

	hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
	if (!hpdev)
		return NULL;

	hpdev->hbus = hbus;

	memset(&pkt, 0, sizeof(pkt));
	init_completion(&comp_pkt.host_event);
	comp_pkt.hpdev = hpdev;
	pkt.init_packet.compl_ctxt = &comp_pkt;
	pkt.init_packet.completion_func = q_resource_requirements;
	res_req = (struct pci_child_message *)&pkt.init_packet.message;
	res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
	res_req->wslot.slot = desc->win_slot.slot;

	ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
			       sizeof(struct pci_child_message),
			       (unsigned long)&pkt.init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		goto error;

	if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
		goto error;

	hpdev->desc = *desc;
	refcount_set(&hpdev->refs, 1);
	get_pcichild(hpdev);
	spin_lock_irqsave(&hbus->device_list_lock, flags);

	list_add_tail(&hpdev->list_entry, &hbus->children);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);
	return hpdev;

error:
	kfree(hpdev);
	return NULL;
}

/**
 * get_pcichild_wslot() - Find device from slot
 * @hbus:	Root PCI bus, as understood by this driver
 * @wslot:	Location on the bus
 *
 * This function looks up a PCI device and returns the internal
 * representation of it.  It acquires a reference on it, so that
 * the device won't be deleted while somebody is using it.  The
 * caller is responsible for calling put_pcichild() to release
 * this reference.
 *
 * Return:	Internal representation of a PCI device
 */
static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
					     u32 wslot)
{
	unsigned long flags;
	struct hv_pci_dev *iter, *hpdev = NULL;

	spin_lock_irqsave(&hbus->device_list_lock, flags);
	list_for_each_entry(iter, &hbus->children, list_entry) {
		if (iter->desc.win_slot.slot == wslot) {
			hpdev = iter;
			get_pcichild(hpdev);
			break;
		}
	}
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	return hpdev;
}

/**
 * pci_devices_present_work() - Handle new list of child devices
 * @work:	Work struct embedded in struct hv_dr_work
 *
 * "Bus Relations" is the Windows term for "children of this
 * bus."  The terminology is preserved here for people trying to
 * debug the interaction between Hyper-V and Linux.  This
 * function is called when the parent partition reports a list
 * of functions that should be observed under this PCI Express
 * port (bus).
 *
 * This function updates the list, and must tolerate being
 * called multiple times with the same information.  The typical
 * number of child devices is one, with very atypical cases
 * involving three or four, so the algorithms used here can be
 * simple and inefficient.
 *
 * It must also treat the omission of a previously observed device as
 * notification that the device no longer exists.
 *
 * Note that this function is serialized with hv_eject_device_work(),
 * because both are pushed to the ordered workqueue hbus->wq.
 */
static void pci_devices_present_work(struct work_struct *work)
{
	u32 child_no;
	bool found;
	struct hv_pcidev_description *new_desc;
	struct hv_pci_dev *hpdev;
	struct hv_pcibus_device *hbus;
	struct list_head removed;
	struct hv_dr_work *dr_wrk;
	struct hv_dr_state *dr = NULL;
	unsigned long flags;

	dr_wrk = container_of(work, struct hv_dr_work, wrk);
	hbus = dr_wrk->bus;
	kfree(dr_wrk);

	INIT_LIST_HEAD(&removed);

	/* Pull this off the queue and process it if it was the last one. */
	spin_lock_irqsave(&hbus->device_list_lock, flags);
	while (!list_empty(&hbus->dr_list)) {
		dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
				      list_entry);
		list_del(&dr->list_entry);

		/* Throw this away if the list still has stuff in it. */
		if (!list_empty(&hbus->dr_list)) {
			kfree(dr);
			continue;
		}
	}
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	if (!dr)
		return;

	/* First, mark all existing children as reported missing. */
	spin_lock_irqsave(&hbus->device_list_lock, flags);
	list_for_each_entry(hpdev, &hbus->children, list_entry) {
		hpdev->reported_missing = true;
	}
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	/* Next, add back any reported devices. */
	for (child_no = 0; child_no < dr->device_count; child_no++) {
		found = false;
		new_desc = &dr->func[child_no];

		spin_lock_irqsave(&hbus->device_list_lock, flags);
		list_for_each_entry(hpdev, &hbus->children, list_entry) {
			if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
			    (hpdev->desc.v_id == new_desc->v_id) &&
			    (hpdev->desc.d_id == new_desc->d_id) &&
			    (hpdev->desc.ser == new_desc->ser)) {
				hpdev->reported_missing = false;
				found = true;
			}
		}
		spin_unlock_irqrestore(&hbus->device_list_lock, flags);

		if (!found) {
			hpdev = new_pcichild_device(hbus, new_desc);
			if (!hpdev)
				dev_err(&hbus->hdev->device,
					"couldn't record a child device.\n");
		}
	}

	/* Move missing children to a list on the stack. */
	spin_lock_irqsave(&hbus->device_list_lock, flags);
	do {
		found = false;
		list_for_each_entry(hpdev, &hbus->children, list_entry) {
			if (hpdev->reported_missing) {
				found = true;
				put_pcichild(hpdev);
				list_move_tail(&hpdev->list_entry, &removed);
				break;
			}
		}
	} while (found);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	/* Delete everything that should no longer exist. */
	while (!list_empty(&removed)) {
		hpdev = list_first_entry(&removed, struct hv_pci_dev,
					 list_entry);
		list_del(&hpdev->list_entry);

		if (hpdev->pci_slot)
			pci_destroy_slot(hpdev->pci_slot);

		put_pcichild(hpdev);
	}

	switch (hbus->state) {
	case hv_pcibus_installed:
		/*
		 * Tell the core to rescan bus
		 * because there may have been changes.
		 */
		pci_lock_rescan_remove();
		pci_scan_child_bus(hbus->pci_bus);
		hv_pci_assign_numa_node(hbus);
		hv_pci_assign_slots(hbus);
		pci_unlock_rescan_remove();
		break;

	case hv_pcibus_init:
	case hv_pcibus_probed:
		survey_child_resources(hbus);
		break;

	default:
		break;
	}

	kfree(dr);
}

/**
 * hv_pci_start_relations_work() - Queue work to start device discovery
 * @hbus:	Root PCI bus, as understood by this driver
 * @dr:		The list of children returned from host
 *
 * Return:  0 on success, -errno on failure
 */
static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
				       struct hv_dr_state *dr)
{
	struct hv_dr_work *dr_wrk;
	unsigned long flags;
	bool pending_dr;

	if (hbus->state == hv_pcibus_removing) {
		dev_info(&hbus->hdev->device,
			 "PCI VMBus BUS_RELATIONS: ignored\n");
		return -ENOENT;
	}

	dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
	if (!dr_wrk)
		return -ENOMEM;

	INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
	dr_wrk->bus = hbus;

	spin_lock_irqsave(&hbus->device_list_lock, flags);
	/*
	 * If pending_dr is true, we have already queued a work,
	 * which will see the new dr. Otherwise, we need to
	 * queue a new work.
	 */
	pending_dr = !list_empty(&hbus->dr_list);
	list_add_tail(&dr->list_entry, &hbus->dr_list);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	if (pending_dr)
		kfree(dr_wrk);
	else
		queue_work(hbus->wq, &dr_wrk->wrk);

	return 0;
}

/**
 * hv_pci_devices_present() - Handle list of new children
 * @hbus:	Root PCI bus, as understood by this driver
 * @relations:	Packet from host listing children
 *
 * Process a new list of devices on the bus. The list of devices is
 * discovered by VSP and sent to us via VSP message PCI_BUS_RELATIONS,
 * whenever a new list of devices for this bus appears.
 */
static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
				   struct pci_bus_relations *relations)
{
	struct hv_dr_state *dr;
	int i;

	dr = kzalloc(struct_size(dr, func, relations->device_count),
		     GFP_NOWAIT);
	if (!dr)
		return;

	dr->device_count = relations->device_count;
	for (i = 0; i < dr->device_count; i++) {
		dr->func[i].v_id = relations->func[i].v_id;
		dr->func[i].d_id = relations->func[i].d_id;
		dr->func[i].rev = relations->func[i].rev;
		dr->func[i].prog_intf = relations->func[i].prog_intf;
		dr->func[i].subclass = relations->func[i].subclass;
		dr->func[i].base_class = relations->func[i].base_class;
		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
		dr->func[i].win_slot = relations->func[i].win_slot;
		dr->func[i].ser = relations->func[i].ser;
	}

	if (hv_pci_start_relations_work(hbus, dr))
		kfree(dr);
}

/**
 * hv_pci_devices_present2() - Handle list of new children
 * @hbus:	Root PCI bus, as understood by this driver
 * @relations:	Packet from host listing children
 *
 * This function is the v2 version of hv_pci_devices_present().
 */
static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
				    struct pci_bus_relations2 *relations)
{
	struct hv_dr_state *dr;
	int i;

	dr = kzalloc(struct_size(dr, func, relations->device_count),
		     GFP_NOWAIT);
	if (!dr)
		return;

	dr->device_count = relations->device_count;
	for (i = 0; i < dr->device_count; i++) {
		dr->func[i].v_id = relations->func[i].v_id;
		dr->func[i].d_id = relations->func[i].d_id;
		dr->func[i].rev = relations->func[i].rev;
		dr->func[i].prog_intf = relations->func[i].prog_intf;
		dr->func[i].subclass = relations->func[i].subclass;
		dr->func[i].base_class = relations->func[i].base_class;
		dr->func[i].subsystem_id = relations->func[i].subsystem_id;
		dr->func[i].win_slot = relations->func[i].win_slot;
		dr->func[i].ser = relations->func[i].ser;
		dr->func[i].flags = relations->func[i].flags;
		dr->func[i].virtual_numa_node =
			relations->func[i].virtual_numa_node;
	}

	if (hv_pci_start_relations_work(hbus, dr))
		kfree(dr);
}

/**
 * hv_eject_device_work() - Asynchronously handles ejection
 * @work:	Work struct embedded in internal device struct
 *
 * This function handles ejecting a device.  Windows will
 * attempt to gracefully eject a device, waiting 60 seconds to
 * hear back from the guest OS that this completed successfully.
 * If this timer expires, the device will be forcibly removed.
 */
static void hv_eject_device_work(struct work_struct *work)
{
	struct pci_eject_response *ejct_pkt;
	struct hv_pcibus_device *hbus;
	struct hv_pci_dev *hpdev;
	struct pci_dev *pdev;
	unsigned long flags;
	int wslot;
	struct {
		struct pci_packet pkt;
		u8 buffer[sizeof(struct pci_eject_response)];
	} ctxt;

	hpdev = container_of(work, struct hv_pci_dev, wrk);
	hbus = hpdev->hbus;

	WARN_ON(hpdev->state != hv_pcichild_ejecting);

	/*
	 * Ejection can come before or after the PCI bus has been set up, so
	 * attempt to find it and tear down the bus state, if it exists.  This
	 * must be done without constructs like pci_domain_nr(hbus->pci_bus)
	 * because hbus->pci_bus may not exist yet.
	 */
	wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
	pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
	if (pdev) {
		pci_lock_rescan_remove();
		pci_stop_and_remove_bus_device(pdev);
		pci_dev_put(pdev);
		pci_unlock_rescan_remove();
	}

	spin_lock_irqsave(&hbus->device_list_lock, flags);
	list_del(&hpdev->list_entry);
	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

	if (hpdev->pci_slot)
		pci_destroy_slot(hpdev->pci_slot);

	memset(&ctxt, 0, sizeof(ctxt));
	ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
	ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
	ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
	vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
			 sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
			 VM_PKT_DATA_INBAND, 0);

	/* For the get_pcichild() in hv_pci_eject_device() */
	put_pcichild(hpdev);
	/* For the two refs got in new_pcichild_device() */
	put_pcichild(hpdev);
	put_pcichild(hpdev);
	/* hpdev has been freed. Do not use it any more. */
}

/**
 * hv_pci_eject_device() - Handles device ejection
 * @hpdev:	Internal device tracking struct
 *
 * This function is invoked when an ejection packet arrives.  It
 * just schedules work so that we don't re-enter the packet
 * delivery code handling the ejection.
 */
static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct hv_device *hdev = hbus->hdev;

	if (hbus->state == hv_pcibus_removing) {
		dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
		return;
	}

	hpdev->state = hv_pcichild_ejecting;
	get_pcichild(hpdev);
	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
	queue_work(hbus->wq, &hpdev->wrk);
}

/**
 * hv_pci_onchannelcallback() - Handles incoming packets
 * @context:	Internal bus tracking struct
 *
 * This function is invoked whenever the host sends a packet to
 * this channel (which is private to this root PCI bus).
 */
static void hv_pci_onchannelcallback(void *context)
{
	const int packet_size = 0x100;
	int ret;
	struct hv_pcibus_device *hbus = context;
	u32 bytes_recvd;
	u64 req_id;
	struct vmpacket_descriptor *desc;
	unsigned char *buffer;
	int bufferlen = packet_size;
	struct pci_packet *comp_packet;
	struct pci_response *response;
	struct pci_incoming_message *new_message;
	struct pci_bus_relations *bus_rel;
	struct pci_bus_relations2 *bus_rel2;
	struct pci_dev_inval_block *inval;
	struct pci_dev_incoming *dev_message;
	struct hv_pci_dev *hpdev;

	buffer = kmalloc(bufferlen, GFP_ATOMIC);
	if (!buffer)
		return;

	while (1) {
		ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer,
					   bufferlen, &bytes_recvd, &req_id);

		if (ret == -ENOBUFS) {
			kfree(buffer);
			/* Handle large packet */
			bufferlen = bytes_recvd;
			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
			if (!buffer)
				return;
			continue;
		}

		/* Zero length indicates there are no more packets. */
		if (ret || !bytes_recvd)
			break;

		/*
		 * All incoming packets must be at least as large as a
		 * response.
		 */
		if (bytes_recvd <= sizeof(struct pci_response))
			continue;
		desc = (struct vmpacket_descriptor *)buffer;

		switch (desc->type) {
		case VM_PKT_COMP:

			/*
			 * The host is trusted, and thus it's safe to interpret
			 * this transaction ID as a pointer.
			 */
			comp_packet = (struct pci_packet *)req_id;
			response = (struct pci_response *)buffer;
			comp_packet->completion_func(comp_packet->compl_ctxt,
						     response,
						     bytes_recvd);
			break;

		case VM_PKT_DATA_INBAND:

			new_message = (struct pci_incoming_message *)buffer;
			switch (new_message->message_type.type) {
			case PCI_BUS_RELATIONS:

				bus_rel = (struct pci_bus_relations *)buffer;
				if (bytes_recvd <
					struct_size(bus_rel, func,
						    bus_rel->device_count)) {
					dev_err(&hbus->hdev->device,
						"bus relations too small\n");
					break;
				}

				hv_pci_devices_present(hbus, bus_rel);
				break;

			case PCI_BUS_RELATIONS2:

				bus_rel2 = (struct pci_bus_relations2 *)buffer;
				if (bytes_recvd <
					struct_size(bus_rel2, func,
						    bus_rel2->device_count)) {
					dev_err(&hbus->hdev->device,
						"bus relations v2 too small\n");
					break;
				}

				hv_pci_devices_present2(hbus, bus_rel2);
				break;

			case PCI_EJECT:

				dev_message = (struct pci_dev_incoming *)buffer;
				hpdev = get_pcichild_wslot(hbus,
						      dev_message->wslot.slot);
				if (hpdev) {
					hv_pci_eject_device(hpdev);
					put_pcichild(hpdev);
				}
				break;

			case PCI_INVALIDATE_BLOCK:

				inval = (struct pci_dev_inval_block *)buffer;
				hpdev = get_pcichild_wslot(hbus,
							   inval->wslot.slot);
				if (hpdev) {
					if (hpdev->block_invalidate) {
						hpdev->block_invalidate(
						    hpdev->invalidate_context,
						    inval->block_mask);
					}
					put_pcichild(hpdev);
				}
				break;

			default:
				dev_warn(&hbus->hdev->device,
					 "Unimplemented protocol message %x\n",
					 new_message->message_type.type);
				break;
			}
			break;

		default:
			dev_err(&hbus->hdev->device,
				"unhandled packet type %d, tid %llx len %d\n",
				desc->type, req_id, bytes_recvd);
			break;
		}
	}

	kfree(buffer);
}

/**
 * hv_pci_protocol_negotiation() - Set up protocol
 * @hdev:		VMBus's tracking struct for this root PCI bus.
 * @version:		Array of supported channel protocol versions in
 *			the order of probing - highest go first.
 * @num_version:	Number of elements in the version array.
 *
 * This driver is intended to support running on Windows 10
 * (server) and later versions. It will not run on earlier
 * versions, as they assume that many of the operations which
 * Linux needs accomplished with a spinlock held were done via
 * asynchronous messaging via VMBus.  Windows 10 increases the
 * concurrency of these operations, thus allowing this driver to
 * operate at all.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_protocol_negotiation(struct hv_device *hdev,
				       enum pci_protocol_version_t version[],
				       int num_version)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_version_request *version_req;
	struct hv_pci_compl comp_pkt;
	struct pci_packet *pkt;
	int ret;
	int i;

	/*
	 * Initiate the handshake with the host and negotiate
	 * a version that the host can support. We start with the
	 * highest version number and go down if the host cannot
	 * support it.
	 */
	pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	init_completion(&comp_pkt.host_event);
	pkt->completion_func = hv_pci_generic_compl;
	pkt->compl_ctxt = &comp_pkt;
	version_req = (struct pci_version_request *)&pkt->message;
	version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;

	for (i = 0; i < num_version; i++) {
		version_req->protocol_version = version[i];
		ret = vmbus_sendpacket(hdev->channel, version_req,
				sizeof(struct pci_version_request),
				(unsigned long)pkt, VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
		if (!ret)
			ret = wait_for_response(hdev, &comp_pkt.host_event);

		if (ret) {
			dev_err(&hdev->device,
				"PCI Pass-through VSP failed to request version: %d",
				ret);
			goto exit;
		}

		if (comp_pkt.completion_status >= 0) {
			hbus->protocol_version = version[i];
			dev_info(&hdev->device,
				 "PCI VMBus probing: Using version %#x\n",
				 hbus->protocol_version);
			goto exit;
		}

		if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
			dev_err(&hdev->device,
				"PCI Pass-through VSP failed version request: %#x",
				comp_pkt.completion_status);
			ret = -EPROTO;
			goto exit;
		}

		reinit_completion(&comp_pkt.host_event);
	}

	dev_err(&hdev->device,
		"PCI pass-through VSP failed to find supported version");
	ret = -EPROTO;

exit:
	kfree(pkt);
	return ret;
}

/**
 * hv_pci_free_bridge_windows() - Release memory regions for the
 * bus
 * @hbus:	Root PCI bus, as understood by this driver
 */
static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
{
	/*
	 * Set the resources back to the way they looked when they
	 * were allocated by marking IORESOURCE_BUSY again.
	 */
	if (hbus->low_mmio_space && hbus->low_mmio_res) {
		hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
		vmbus_free_mmio(hbus->low_mmio_res->start,
				resource_size(hbus->low_mmio_res));
	}

	if (hbus->high_mmio_space && hbus->high_mmio_res) {
		hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
		vmbus_free_mmio(hbus->high_mmio_res->start,
				resource_size(hbus->high_mmio_res));
	}
}
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
2645{
2646 resource_size_t align;
2647 int ret;
2648
2649 if (hbus->low_mmio_space) {
		align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
		ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
					  (u64)(u32)0xffffffff,
					  hbus->low_mmio_space,
					  align, false);
		if (ret) {
			dev_err(&hbus->hdev->device,
				"Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
				hbus->low_mmio_space);
			return ret;
		}

		/* Modify this resource to become a bridge window. */
		hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
		hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
		pci_add_resource(&hbus->resources_for_children,
				 hbus->low_mmio_res);
	}

	if (hbus->high_mmio_space) {
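		/* The high window lives above 4GB; -1 means no upper bound. */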
		align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
		ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
					  0x100000000, -1,
					  hbus->high_mmio_space, align,
					  false);
		if (ret) {
			dev_err(&hbus->hdev->device,
				"Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
				hbus->high_mmio_space);
			goto release_low_mmio;
		}

		/* Modify this resource to become a bridge window. */
		hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
		hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
		pci_add_resource(&hbus->resources_for_children,
				 hbus->high_mmio_res);
	}

	return 0;

release_low_mmio:
	if (hbus->low_mmio_res) {
		vmbus_free_mmio(hbus->low_mmio_res->start,
				resource_size(hbus->low_mmio_res));
	}

	return ret;
}

/**
 * hv_allocate_config_window() - Find MMIO space for PCI config access
 * @hbus:	Root PCI bus, as understood by this driver
 *
 * Claim a region of memory-mapped I/O space through which configuration
 * space for the child devices will be accessed.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
{
	int ret;

	/*
	 * Set up a region of MMIO space to use for accessing configuration
	 * space.
	 */
	ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
				  PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
	if (ret)
		return ret;

	/*
	 * vmbus_allocate_mmio() is used both for claims that must not be
	 * overlapped (like this one) and for ranges that child devices
	 * are allowed to overlap. Mark this claim busy so that nothing
	 * else can be placed on top of it.
	 */
	hbus->mem_config->flags |= IORESOURCE_BUSY;

	return 0;
}

static void hv_free_config_window(struct hv_pcibus_device *hbus)
{
	vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
}

static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);

/**
 * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
 * @hdev:	VMBus's tracking of this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_enter_d0(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_bus_d0_entry *d0_entry;
	struct hv_pci_compl comp_pkt;
	struct pci_packet *pkt;
	int ret;

	/*
	 * Tell the host that the bus is ready to use, and moved into the
	 * powered-on state. This includes telling the host which region
	 * of memory-mapped I/O space has been chosen for configuration
	 * space access.
	 */
	pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	init_completion(&comp_pkt.host_event);
	pkt->completion_func = hv_pci_generic_compl;
	pkt->compl_ctxt = &comp_pkt;
	d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
	d0_entry->message_type.type = PCI_BUS_D0ENTRY;
	d0_entry->mmio_base = hbus->mem_config->start;

	ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
			       (unsigned long)pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (!ret)
		ret = wait_for_response(hdev, &comp_pkt.host_event);

	if (ret)
		goto exit;

	if (comp_pkt.completion_status < 0) {
		dev_err(&hdev->device,
			"PCI Pass-through VSP failed D0 Entry with status %x\n",
			comp_pkt.completion_status);
		ret = -EPROTO;
		goto exit;
	}

	ret = 0;

exit:
	kfree(pkt);
	return ret;
}

/**
 * hv_pci_query_relations() - Ask host to send list of child devices
 * @hdev:	VMBus's tracking of this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_query_relations(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_message message;
	struct completion comp;
	int ret;

	/* Ask the host to send along the list of child devices. */
	init_completion(&comp);
	if (cmpxchg(&hbus->survey_event, NULL, &comp))
		return -ENOTEMPTY;

	memset(&message, 0, sizeof(message));
	message.type = PCI_QUERY_BUS_RELATIONS;

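	/*
	 * No completion context is passed here; the survey completion is
	 * signalled when the bus-relations message arrives on the channel.
	 */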
	ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
			       0, VM_PKT_DATA_INBAND, 0);
	if (!ret)
		ret = wait_for_response(hdev, &comp);

	return ret;
}

/**
 * hv_send_resources_allocated() - Report local resource choices
 * @hdev:	VMBus's tracking of this root PCI bus
 *
 * For each child device, tell the host that the guest has finished
 * assigning resources, using PCI_RESOURCES_ASSIGNED for protocol 1.1
 * and PCI_RESOURCES_ASSIGNED2 for 1.2 and later, and wait for the host
 * to acknowledge each message.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_allocated(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_resources_assigned *res_assigned;
	struct pci_resources_assigned2 *res_assigned2;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev;
	struct pci_packet *pkt;
	size_t size_res;
	int wslot;
	int ret;

	size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
			? sizeof(*res_assigned) : sizeof(*res_assigned2);

	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ret = 0;

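	/* Walk every possible Windows slot number, reporting each child found. */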
	for (wslot = 0; wslot < 256; wslot++) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(pkt, 0, sizeof(*pkt) + size_res);
		init_completion(&comp_pkt.host_event);
		pkt->completion_func = hv_pci_generic_compl;
		pkt->compl_ctxt = &comp_pkt;

		if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
			res_assigned =
				(struct pci_resources_assigned *)&pkt->message;
			res_assigned->message_type.type =
				PCI_RESOURCES_ASSIGNED;
			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
		} else {
			res_assigned2 =
				(struct pci_resources_assigned2 *)&pkt->message;
			res_assigned2->message_type.type =
				PCI_RESOURCES_ASSIGNED2;
			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
		}
		put_pcichild(hpdev);

		ret = vmbus_sendpacket(hdev->channel, &pkt->message,
				       size_res, (unsigned long)pkt,
				       VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
		if (!ret)
			ret = wait_for_response(hdev, &comp_pkt.host_event);
		if (ret)
			break;

		if (comp_pkt.completion_status < 0) {
			ret = -EPROTO;
			dev_err(&hdev->device,
				"resource allocated returned 0x%x\n",
				comp_pkt.completion_status);
			break;
		}

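		/*
		 * Remember the highest slot reported so far, so that a
		 * later release pass knows where to start.
		 */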
		hbus->wslot_res_allocated = wslot;
	}

	kfree(pkt);
	return ret;
}

/**
 * hv_send_resources_released() - Report local resources released
 * @hdev:	VMBus's tracking of this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_released(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_child_message pkt;
	struct hv_pci_dev *hpdev;
	int wslot;
	int ret;

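	/* Release in reverse order, starting from the last slot reported. */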
	for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(&pkt, 0, sizeof(pkt));
		pkt.message_type.type = PCI_RESOURCES_RELEASED;
		pkt.wslot.slot = hpdev->desc.win_slot.slot;

		put_pcichild(hpdev);

		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
				       VM_PKT_DATA_INBAND, 0);
		if (ret)
			return ret;

		hbus->wslot_res_allocated = wslot - 1;
	}

	hbus->wslot_res_allocated = -1;

	return 0;
}

#define HVPCI_DOM_MAP_SIZE (64 * 1024)
static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);

/*
 * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define
 * 0 as invalid for the pass-through devices handled by this driver.
 */
#define HVPCI_DOM_INVALID 0

/**
 * hv_get_dom_num() - Get a valid PCI domain number
 * @dom:	Requested domain number
 *
 * Check if the requested PCI domain number is in use, and if so return
 * another, unused number.
 *
 * Return: domain number on success, HVPCI_DOM_INVALID on failure
 */
static u16 hv_get_dom_num(u16 dom)
{
	unsigned int i;

	if (test_and_set_bit(dom, hvpci_dom_map) == 0)
		return dom;

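	/* The requested domain is taken; grab the first free one instead. */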
	for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
		if (test_and_set_bit(i, hvpci_dom_map) == 0)
			return i;
	}

	return HVPCI_DOM_INVALID;
}

/**
 * hv_put_dom_num() - Mark the PCI domain number as free
 * @dom:	Domain number to be freed
 */
static void hv_put_dom_num(u16 dom)
{
	clear_bit(dom, hvpci_dom_map);
}

/**
 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
 * @hdev:	VMBus's tracking of this root PCI bus
 * @dev_id:	Identifies the device itself
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_probe(struct hv_device *hdev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct hv_pcibus_device *hbus;
	u16 dom_req, dom;
	char *name;
	bool enter_d0_retry = true;
	int ret;

	/*
	 * hv_pcibus_device contains the hypercall arguments for retargeting
	 * interrupts. Those arguments must not cross a page boundary, so
	 * the whole structure must fit in a single page.
	 */
	BUILD_BUG_ON(sizeof(*hbus) > HV_HYP_PAGE_SIZE);

	/*
	 * kzalloc() of a power-of-two size returns naturally aligned
	 * memory, so allocating a full, zeroed page here guarantees that
	 * hbus is page-aligned and that the hypercall arguments embedded
	 * in it cannot straddle a page boundary.
	 */
	hbus = kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
	if (!hbus)
		return -ENOMEM;
	hbus->state = hv_pcibus_init;
	hbus->wslot_res_allocated = -1;

	/*
	 * The PCI bus "domain" is what is called "segment" in ACPI and
	 * other specs. Pull it from the instance ID, to get something
	 * usually unique. In rare cases of collision, an unused number
	 * is chosen instead.
	 */
	dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
	dom = hv_get_dom_num(dom_req);

	if (dom == HVPCI_DOM_INVALID) {
		dev_err(&hdev->device,
			"Unable to use dom# 0x%hx or other numbers\n", dom_req);
		ret = -EINVAL;
		goto free_bus;
	}

	if (dom != dom_req)
		dev_info(&hdev->device,
			 "PCI dom# 0x%hx has collision, using 0x%hx\n",
			 dom_req, dom);

	hbus->sysdata.domain = dom;

	hbus->hdev = hdev;
	INIT_LIST_HEAD(&hbus->children);
	INIT_LIST_HEAD(&hbus->dr_list);
	INIT_LIST_HEAD(&hbus->resources_for_children);
	spin_lock_init(&hbus->config_lock);
	spin_lock_init(&hbus->device_list_lock);
	spin_lock_init(&hbus->retarget_msi_interrupt_lock);
	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
					   hbus->sysdata.domain);
	if (!hbus->wq) {
		ret = -ENOMEM;
		goto free_dom;
	}

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		goto destroy_wq;

	hv_set_drvdata(hdev, hbus);

	ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
					  ARRAY_SIZE(pci_protocol_versions));
	if (ret)
		goto close;

	ret = hv_allocate_config_window(hbus);
	if (ret)
		goto close;

	hbus->cfg_addr = ioremap(hbus->mem_config->start,
				 PCI_CONFIG_MMIO_LENGTH);
	if (!hbus->cfg_addr) {
		dev_err(&hdev->device,
			"Unable to map a virtual address for config space\n");
		ret = -ENOMEM;
		goto free_config;
	}

	name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
	if (!name) {
		ret = -ENOMEM;
		goto unmap;
	}

	hbus->sysdata.fwnode = irq_domain_alloc_named_fwnode(name);
	kfree(name);
	if (!hbus->sysdata.fwnode) {
		ret = -ENOMEM;
		goto unmap;
	}

	ret = hv_pcie_init_irq_domain(hbus);
	if (ret)
		goto free_fwnode;

retry:
	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto free_irq_domain;

	ret = hv_pci_enter_d0(hdev);
	/*
	 * In certain cases (e.g. kdump) the PCI device was not cleanly
	 * shut down and its resources are still held on the host side,
	 * so D0 entry can fail with a protocol error. In that case ask
	 * the host to release the resources and retry D0 entry once.
	 */
	if (ret == -EPROTO && enter_d0_retry) {
		enter_d0_retry = false;

		dev_err(&hdev->device, "Retrying D0 Entry\n");

		/*
		 * hv_pci_bus_exit() calls hv_send_resources_released()
		 * to free up the resources of the child devices. In the
		 * kdump kernel, set wslot_res_allocated to 255 so that
		 * the release pass scans all possible child slots and
		 * releases whatever the previous kernel had allocated.
		 */
		hbus->wslot_res_allocated = 255;
		ret = hv_pci_bus_exit(hdev, true);

		if (ret == 0)
			goto retry;

		dev_err(&hdev->device,
			"Retrying D0 failed with ret %d\n", ret);
	}
	if (ret)
		goto free_irq_domain;

	ret = hv_pci_allocate_bridge_windows(hbus);
	if (ret)
		goto exit_d0;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto free_windows;

	prepopulate_bars(hbus);

	hbus->state = hv_pcibus_probed;

	ret = create_root_hv_pci_bus(hbus);
	if (ret)
		goto free_windows;

	return 0;

free_windows:
	hv_pci_free_bridge_windows(hbus);
exit_d0:
	(void) hv_pci_bus_exit(hdev, true);
free_irq_domain:
	irq_domain_remove(hbus->irq_domain);
free_fwnode:
	irq_domain_free_fwnode(hbus->sysdata.fwnode);
unmap:
	iounmap(hbus->cfg_addr);
free_config:
	hv_free_config_window(hbus);
close:
	vmbus_close(hdev->channel);
destroy_wq:
	destroy_workqueue(hbus->wq);
free_dom:
	hv_put_dom_num(hbus->sysdata.domain);
free_bus:
	kfree(hbus);
	return ret;
}

static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct {
		struct pci_packet teardown_packet;
		u8 buffer[sizeof(struct pci_message)];
	} pkt;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev, *tmp;
	unsigned long flags;
	int ret;

	/*
	 * After the host sends the RESCIND_CHANNEL message, it doesn't
	 * access the per-channel ringbuffer any longer.
	 */
	if (hdev->channel->rescind)
		return 0;

	if (!keep_devs) {
		/* Delete any children which might still exist. */
		spin_lock_irqsave(&hbus->device_list_lock, flags);
		list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry) {
			list_del(&hpdev->list_entry);
			if (hpdev->pci_slot)
				pci_destroy_slot(hpdev->pci_slot);

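			/* Drop the two references taken when the child was created. */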
			put_pcichild(hpdev);
			put_pcichild(hpdev);
		}
		spin_unlock_irqrestore(&hbus->device_list_lock, flags);
	}

	ret = hv_send_resources_released(hdev);
	if (ret) {
		dev_err(&hdev->device,
			"Couldn't send resources released packet(s)\n");
		return ret;
	}

	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
	init_completion(&comp_pkt.host_event);
	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
	pkt.teardown_packet.compl_ctxt = &comp_pkt;
	pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;

	ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
			       sizeof(struct pci_message),
			       (unsigned long)&pkt.teardown_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

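	/* Give the host ten seconds to acknowledge the D0 exit message. */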
	if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0)
		return -ETIMEDOUT;

	return 0;
}

/**
 * hv_pci_remove() - Remove routine for this VMBus channel
 * @hdev:	VMBus's tracking of this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_remove(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus;
	int ret;

	hbus = hv_get_drvdata(hdev);
	if (hbus->state == hv_pcibus_installed) {
		tasklet_disable(&hdev->channel->callback_event);
		hbus->state = hv_pcibus_removing;
		tasklet_enable(&hdev->channel->callback_event);
		destroy_workqueue(hbus->wq);
		hbus->wq = NULL;

		/*
		 * At this point, no work is running or can be scheduled
		 * on hbus->wq, so it is safe to remove the bus from PCI's
		 * point of view.
		 */
		pci_lock_rescan_remove();
		pci_stop_root_bus(hbus->pci_bus);
		hv_pci_remove_slots(hbus);
		pci_remove_root_bus(hbus->pci_bus);
		pci_unlock_rescan_remove();
	}

	ret = hv_pci_bus_exit(hdev, false);

	vmbus_close(hdev->channel);

	iounmap(hbus->cfg_addr);
	hv_free_config_window(hbus);
	pci_free_resource_list(&hbus->resources_for_children);
	hv_pci_free_bridge_windows(hbus);
	irq_domain_remove(hbus->irq_domain);
	irq_domain_free_fwnode(hbus->sysdata.fwnode);

	hv_put_dom_num(hbus->sysdata.domain);

	kfree(hbus);
	return ret;
}

static int hv_pci_suspend(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum hv_pcibus_state old_state;
	int ret;

	/*
	 * hv_pci_suspend() must make sure there are no pending work items
	 * before calling vmbus_close(), since it runs in a process context
	 * while the channel callback runs in a tasklet context and can
	 * still be scheduling new work items onto hbus->wq concurrently.
	 *
	 * To eliminate the race, disable the channel callback tasklet, set
	 * hbus->state to hv_pcibus_removing, and re-enable the tasklet.
	 * From that point on no new work item can be scheduled, so the
	 * workqueue can be flushed and the VMBus channel safely closed.
	 */
	tasklet_disable(&hdev->channel->callback_event);

	/* Change the hbus state to prevent new work items. */
	old_state = hbus->state;
	if (hbus->state == hv_pcibus_installed)
		hbus->state = hv_pcibus_removing;

	tasklet_enable(&hdev->channel->callback_event);

	if (old_state != hv_pcibus_installed)
		return -EINVAL;

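	/* No new work can be scheduled now; drain what is already queued. */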
	flush_workqueue(hbus->wq);

	ret = hv_pci_bus_exit(hdev, true);
	if (ret)
		return ret;

	vmbus_close(hdev->channel);

	return 0;
}

static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
	struct msi_desc *entry;
	struct irq_data *irq_data;

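	/* Re-compose the MSI message for every MSI/MSI-X entry of the device. */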
	for_each_pci_msi_entry(entry, pdev) {
		irq_data = irq_get_irq_data(entry->irq);
		if (WARN_ON_ONCE(!irq_data))
			return -EINVAL;

		hv_compose_msi_msg(irq_data, &entry->msg);
	}

	return 0;
}

/*
 * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
 * writes the MSI/MSI-X registers via MMIO, but Hyper-V doesn't trap and
 * emulate those accesses, so hv_compose_msi_msg() must be used to ask
 * Hyper-V to re-create the interrupt remapping entries.
 */
static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
{
	pci_walk_bus(hbus->pci_bus, hv_pci_restore_msi_msg, NULL);
}

static int hv_pci_resume(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum pci_protocol_version_t version[1];
	int ret;

	hbus->state = hv_pcibus_init;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		return ret;

	/* Only use the version that was in use before hibernation. */
	version[0] = hbus->protocol_version;
	ret = hv_pci_protocol_negotiation(hdev, version, 1);
	if (ret)
		goto out;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto out;

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto out;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto out;

	prepopulate_bars(hbus);

	hv_pci_restore_msi_state(hbus);

	hbus->state = hv_pcibus_installed;
	return 0;
out:
	vmbus_close(hdev->channel);
	return ret;
}

static const struct hv_vmbus_device_id hv_pci_id_table[] = {
	/* PCI Pass-through Class GUID */
	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
	{ HV_PCIE_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);

static struct hv_driver hv_pci_drv = {
	.name		= "hv_pci",
	.id_table	= hv_pci_id_table,
	.probe		= hv_pci_probe,
	.remove		= hv_pci_remove,
	.suspend	= hv_pci_suspend,
	.resume		= hv_pci_resume,
};

static void __exit exit_hv_pci_drv(void)
{
	vmbus_driver_unregister(&hv_pci_drv);

	hvpci_block_ops.read_block = NULL;
	hvpci_block_ops.write_block = NULL;
	hvpci_block_ops.reg_blk_invalidate = NULL;
}

static int __init init_hv_pci_drv(void)
{
	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	/* Set the invalid domain number's bit, so it will not be used. */
	set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);

	/* Initialize the PCI config-block read/write interface. */
	hvpci_block_ops.read_block = hv_read_config_block;
	hvpci_block_ops.write_block = hv_write_config_block;
	hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;

	return vmbus_driver_register(&hv_pci_drv);
}

module_init(init_hv_pci_drv);
module_exit(exit_hv_pci_drv);

MODULE_DESCRIPTION("Hyper-V PCI");
MODULE_LICENSE("GPL v2");