// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hyper-V transport for vsock
 *
 * Hyper-V Sockets supplies a byte-stream based communication mechanism
 * between the host and the VM. This driver implements the necessary
 * support in the VM by introducing the new vsock transport.
 *
 * Copyright (c) 2017, Microsoft Corporation.
 */
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <net/sock.h>
#include <net/af_vsock.h>
#include <asm/hyperv-tlfs.h>

/* Older (VMBUS version 'VERSION_WIN10' or before) Windows hosts have some
 * stricter requirements on the hv_sock ring buffer size of six 4K pages.
 * hyperv-tlfs defines HV_HYP_PAGE_SIZE as 4K. Newer hosts don't have this
 * limitation; but, keep the defaults the same for compatibility.
 */
#define RINGBUFFER_HVS_RCV_SIZE (HV_HYP_PAGE_SIZE * 6)
#define RINGBUFFER_HVS_SND_SIZE (HV_HYP_PAGE_SIZE * 6)
#define RINGBUFFER_HVS_MAX_SIZE (HV_HYP_PAGE_SIZE * 64)

/* The MTU is 16KB per the host side's design */
#define HVS_MTU_SIZE (1024 * 16)

/* How long to wait for graceful shutdown of a connection */
#define HVS_CLOSE_TIMEOUT (8 * HZ)

struct vmpipe_proto_header {
	u32 pkt_type;
	u32 data_size;
};

/* For recv, we use the VMBus in-place packet iterator APIs to directly copy
 * data from the ringbuffer into the userspace buffer.
 */
struct hvs_recv_buf {
	/* The header before the payload data */
	struct vmpipe_proto_header hdr;

	/* The payload */
	u8 data[HVS_MTU_SIZE];
};

/* We can send up to HVS_MTU_SIZE bytes of payload to the host, but let's use
 * a smaller size, i.e. HV_HYP_PAGE_SIZE, to minimize the dynamically-allocated
 * buffer, because tests show there is no significant performance difference.
 *
 * Note: the buffer can be eliminated in the future when we add new VMBus
 * ringbuffer APIs that allow us to directly copy data from userspace buffers
 * to the VMBus ringbuffer.
 */
#define HVS_SEND_BUF_SIZE \
		(HV_HYP_PAGE_SIZE - sizeof(struct vmpipe_proto_header))

struct hvs_send_buf {
	/* The header before the payload data */
	struct vmpipe_proto_header hdr;

	/* The payload */
	u8 data[HVS_SEND_BUF_SIZE];
};

#define HVS_HEADER_LEN	(sizeof(struct vmpacket_descriptor) + \
			 sizeof(struct vmpipe_proto_header))

/* HVS_PKT_LEN: the total ringbuffer space taken by a packet carrying
 * "payload_len" bytes of payload: the header, the 8-byte-aligned payload
 * and the u64 trailer.
 */
#define VMBUS_PKT_TRAILER_SIZE	(sizeof(u64))

#define HVS_PKT_LEN(payload_len)	(HVS_HEADER_LEN + \
					 ALIGN((payload_len), 8) + \
					 VMBUS_PKT_TRAILER_SIZE)
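
/* Worked example (illustrative; assumes the usual 16-byte
 * struct vmpacket_descriptor): HVS_HEADER_LEN = 16 + 8 = 24, so
 * HVS_PKT_LEN(10) = 24 + ALIGN(10, 8) + 8 = 24 + 16 + 8 = 48 bytes.
 */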

union hvs_service_id {
	guid_t srv_id;

	struct {
		unsigned int svm_port;
		unsigned char b[sizeof(guid_t) - sizeof(unsigned int)];
	};
};
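
/* Illustrative mapping (see the big comment below): the vsock port overlays
 * the first 32-bit field of the template GUID, e.g. port 808 (0x328) yields
 * the service ID 00000328-facb-11e6-bd58-64006a7986d3.
 */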

/* Per-socket state (accessed via vsk->trans) */
struct hvsock {
	struct vsock_sock *vsk;

	guid_t vm_srv_id;
	guid_t host_srv_id;

	struct vmbus_channel *chan;
	struct vmpacket_descriptor *recv_desc;

	/* The length of the payload not delivered to userland yet */
	u32 recv_data_len;
	/* The offset of the payload */
	u32 recv_data_off;

	/* Have we sent the zero-length packet (FIN)? */
	bool fin_sent;
};

/* In the VM, we support Hyper-V Sockets with AF_VSOCK, and the endpoint is
 * <cid, port> (see struct sockaddr_vm). Note: cid is not really used here:
 * when we write apps to connect to the host, we can only use VMADDR_CID_ANY
 * or VMADDR_CID_HOST (both are equivalent) as the remote cid, and when we
 * write apps to bind() & listen() in the VM, we can only use VMADDR_CID_ANY
 * as the local cid.
 *
 * On the host, Hyper-V Sockets are supported by Winsock AF_HYPERV:
 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service
 * and the endpoint is <VmID, ServiceId> with the below sockaddr:
 *
 * struct SOCKADDR_HV
 * {
 *    ADDRESS_FAMILY Family;
 *    USHORT Reserved;
 *    GUID VmId;
 *    GUID ServiceId;
 * };
 * Note: VmID is not used by the Linux VM and actually it isn't transmitted
 * via VMBus, because here it's obvious the host and the VM can easily
 * identify each other. Though the VmID is useful on the host, especially in
 * the case of Windows containers, the Linux VM doesn't need it at all.
 *
 * To make use of the AF_VSOCK infrastructure in the Linux VM, we have to
 * limit the available GUID space of SOCKADDR_HV so that we can create a
 * mapping between the AF_VSOCK port and the SOCKADDR_HV Service GUID. The
 * rule of writing Hyper-V Sockets apps on the host and in the Linux VM is:
 *
 ****************************************************************************
 * The only valid Service GUIDs, from the perspectives of both the host and *
 * Linux VM, that can be connected by the other end, must conform to this   *
 * format: <port>-facb-11e6-bd58-64006a7986d3.                              *
 ****************************************************************************
 *
 * When we write apps on the host to connect(), the GUID ServiceID is used.
 * When we write apps in the Linux VM to connect(), we only need to specify
 * the port and the driver will form the GUID and use that to request the
 * host.
 */
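
/* Illustrative guest-side usage (userspace, not part of this driver):
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm sa = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = VMADDR_CID_HOST,
 *		.svm_port = 808,	// example port
 *	};
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * The host-side app must have registered the matching service GUID
 * 00000328-facb-11e6-bd58-64006a7986d3 (808 == 0x328), per the rule above.
 */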

/* 00000000-facb-11e6-bd58-64006a7986d3 */
static const guid_t srv_id_template =
	GUID_INIT(0x00000000, 0xfacb, 0x11e6, 0xbd, 0x58,
		  0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3);

static bool hvs_check_transport(struct vsock_sock *vsk);

static bool is_valid_srv_id(const guid_t *id)
{
	return !memcmp(&id->b[4], &srv_id_template.b[4], sizeof(guid_t) - 4);
}

static unsigned int get_port_by_srv_id(const guid_t *svr_id)
{
	return *((unsigned int *)svr_id);
}

static void hvs_addr_init(struct sockaddr_vm *addr, const guid_t *svr_id)
{
	unsigned int port = get_port_by_srv_id(svr_id);

	vsock_addr_init(addr, VMADDR_CID_ANY, port);
}

static void hvs_set_channel_pending_send_size(struct vmbus_channel *chan)
{
	set_channel_pending_send_size(chan,
				      HVS_PKT_LEN(HVS_SEND_BUF_SIZE));

	virt_mb();
}

static bool hvs_channel_readable(struct vmbus_channel *chan)
{
	u32 readable = hv_get_bytes_to_read(&chan->inbound);

	/* 0-size payload means FIN */
	return readable >= HVS_PKT_LEN(0);
}

static int hvs_channel_readable_payload(struct vmbus_channel *chan)
{
	u32 readable = hv_get_bytes_to_read(&chan->inbound);

	if (readable > HVS_PKT_LEN(0)) {
		/* At least we have 1 byte to read. We don't need to return
		 * the exact readable bytes: see vsock_stream_recvmsg() ->
		 * vsock_stream_has_data().
		 */
		return 1;
	}

	if (readable == HVS_PKT_LEN(0)) {
		/* 0-size payload means FIN */
		return 0;
	}

	/* No payload or FIN */
	return -1;
}

static size_t hvs_channel_writable_bytes(struct vmbus_channel *chan)
{
	u32 writeable = hv_get_bytes_to_write(&chan->outbound);
	size_t ret;

	/* The ringbuffer mustn't be 100% full, and we should reserve a
	 * zero-length-payload packet for the FIN: see hv_ringbuffer_write()
	 * and hvs_shutdown().
	 */
	if (writeable <= HVS_PKT_LEN(1) + HVS_PKT_LEN(0))
		return 0;

	ret = writeable - HVS_PKT_LEN(1) - HVS_PKT_LEN(0);

	return round_down(ret, 8);
}
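
/* Worked example (illustrative; assumes a 16-byte packet descriptor):
 * HVS_PKT_LEN(1) = 24 + 8 + 8 = 40 and HVS_PKT_LEN(0) = 24 + 0 + 8 = 32,
 * so at least 72 bytes of the outbound ring are always held in reserve.
 */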

static int hvs_send_data(struct vmbus_channel *chan,
			 struct hvs_send_buf *send_buf, size_t to_write)
{
	send_buf->hdr.pkt_type = 1;
	send_buf->hdr.data_size = to_write;
	return vmbus_sendpacket(chan, &send_buf->hdr,
				sizeof(send_buf->hdr) + to_write,
				0, VM_PKT_DATA_INBAND, 0);
}

static void hvs_channel_cb(void *ctx)
{
	struct sock *sk = (struct sock *)ctx;
	struct vsock_sock *vsk = vsock_sk(sk);
	struct hvsock *hvs = vsk->trans;
	struct vmbus_channel *chan = hvs->chan;

	if (hvs_channel_readable(chan))
		sk->sk_data_ready(sk);

	if (hv_get_bytes_to_write(&chan->outbound) > 0)
		sk->sk_write_space(sk);
}

static void hvs_do_close_lock_held(struct vsock_sock *vsk,
				   bool cancel_timeout)
{
	struct sock *sk = sk_vsock(vsk);

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	if (vsock_stream_has_data(vsk) <= 0)
		sk->sk_state = TCP_CLOSING;
	sk->sk_state_change(sk);
	if (vsk->close_work_scheduled &&
	    (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
		vsk->close_work_scheduled = false;
		vsock_remove_sock(vsk);

		/* Release the reference taken while scheduling the timeout */
		sock_put(sk);
	}
}

static void hvs_close_connection(struct vmbus_channel *chan)
{
	struct sock *sk = get_per_channel_state(chan);

	lock_sock(sk);
	hvs_do_close_lock_held(vsock_sk(sk), true);
	release_sock(sk);

	/* Release the refcnt for the channel that's opened in
	 * hvs_open_connection().
	 */
	sock_put(sk);
}

static void hvs_open_connection(struct vmbus_channel *chan)
{
	guid_t *if_instance, *if_type;
	unsigned char conn_from_host;

	struct sockaddr_vm addr;
	struct sock *sk, *new = NULL;
	struct vsock_sock *vnew = NULL;
	struct hvsock *hvs = NULL;
	struct hvsock *hvs_new = NULL;
	int rcvbuf;
	int ret;
	int sndbuf;

	if_type = &chan->offermsg.offer.if_type;
	if_instance = &chan->offermsg.offer.if_instance;
	conn_from_host = chan->offermsg.offer.u.pipe.user_def[0];
	if (!is_valid_srv_id(if_type))
		return;

	hvs_addr_init(&addr, conn_from_host ? if_type : if_instance);
	sk = vsock_find_bound_socket(&addr);
	if (!sk)
		return;

	lock_sock(sk);
	if ((conn_from_host && sk->sk_state != TCP_LISTEN) ||
	    (!conn_from_host && sk->sk_state != TCP_SYN_SENT))
		goto out;

	if (conn_from_host) {
		if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog)
			goto out;

		new = vsock_create_connected(sk);
		if (!new)
			goto out;

		new->sk_state = TCP_SYN_SENT;
		vnew = vsock_sk(new);

		hvs_addr_init(&vnew->local_addr, if_type);

		/* Remote peer is always the host */
		vsock_addr_init(&vnew->remote_addr,
				VMADDR_CID_HOST, VMADDR_PORT_ANY);
		vnew->remote_addr.svm_port = get_port_by_srv_id(if_instance);
		ret = vsock_assign_transport(vnew, vsock_sk(sk));
		/* vsock_assign_transport() may choose a different transport
		 * for the new socket; hv_sock can only proceed if the socket
		 * was actually assigned to this transport.
		 */
		if (ret || !hvs_check_transport(vnew)) {
			sock_put(new);
			goto out;
		}
		hvs_new = vnew->trans;
		hvs_new->chan = chan;
	} else {
		hvs = vsock_sk(sk)->trans;
		hvs->chan = chan;
	}

	set_channel_read_mode(chan, HV_CALL_DIRECT);

	/* Use the socket buffer sizes as hints for the VMBUS ring size. For
	 * server side sockets, 'sk' is the parent socket and thus, this will
	 * allow the child sockets to inherit the size from the parent. Keep
	 * the mins to the default value and align to page size as per VMBUS
	 * requirements.
	 * For the max, the socket core library will limit the socket buffer
	 * size that can be set by the user, but, since currently, the hv_sock
	 * VMBUS ring buffer is physically contiguous allocation, restrict it
	 * further.
	 * Older versions of hv_sock host side code cannot handle bigger VMBUS
	 * ring buffer size. Use the version number to limit the change to
	 * newer versions.
	 */
	if (vmbus_proto_version < VERSION_WIN10_V5) {
		sndbuf = RINGBUFFER_HVS_SND_SIZE;
		rcvbuf = RINGBUFFER_HVS_RCV_SIZE;
	} else {
		sndbuf = max_t(int, sk->sk_sndbuf, RINGBUFFER_HVS_SND_SIZE);
		sndbuf = min_t(int, sndbuf, RINGBUFFER_HVS_MAX_SIZE);
		sndbuf = ALIGN(sndbuf, HV_HYP_PAGE_SIZE);
		rcvbuf = max_t(int, sk->sk_rcvbuf, RINGBUFFER_HVS_RCV_SIZE);
		rcvbuf = min_t(int, rcvbuf, RINGBUFFER_HVS_MAX_SIZE);
		rcvbuf = ALIGN(rcvbuf, HV_HYP_PAGE_SIZE);
	}
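
	/* Worked example (illustrative): with a common default sk_sndbuf of
	 * 212992 bytes, sndbuf stays below the 256KB RINGBUFFER_HVS_MAX_SIZE
	 * cap and is already 4K-aligned, so a 212992-byte outbound ring is
	 * requested from vmbus_open() below.
	 */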

	ret = vmbus_open(chan, sndbuf, rcvbuf, NULL, 0, hvs_channel_cb,
			 conn_from_host ? new : sk);
	if (ret != 0) {
		if (conn_from_host) {
			hvs_new->chan = NULL;
			sock_put(new);
		} else {
			hvs->chan = NULL;
		}
		goto out;
	}

	set_per_channel_state(chan, conn_from_host ? new : sk);

	/* This reference will be dropped by hvs_close_connection(). */
	sock_hold(conn_from_host ? new : sk);
	vmbus_set_chn_rescind_callback(chan, hvs_close_connection);

	/* Set the pending send size to max packet size to always get
	 * notifications from the host when there is enough writable space.
	 * The host is optimized to send notifications only when the writable
	 * space equals or exceeds the pending send size.
	 */
	hvs_set_channel_pending_send_size(chan);

	if (conn_from_host) {
		new->sk_state = TCP_ESTABLISHED;
		sk_acceptq_added(sk);

		hvs_new->vm_srv_id = *if_type;
		hvs_new->host_srv_id = *if_instance;

		vsock_insert_connected(vnew);

		vsock_enqueue_accept(sk, new);
	} else {
		sk->sk_state = TCP_ESTABLISHED;
		sk->sk_socket->state = SS_CONNECTED;

		vsock_insert_connected(vsock_sk(sk));
	}

	sk->sk_state_change(sk);

out:
	/* Release refcnt obtained when we called vsock_find_bound_socket() */
	sock_put(sk);

	release_sock(sk);
}

static u32 hvs_get_local_cid(void)
{
	return VMADDR_CID_ANY;
}

static int hvs_sock_init(struct vsock_sock *vsk, struct vsock_sock *psk)
{
	struct hvsock *hvs;
	struct sock *sk = sk_vsock(vsk);

	hvs = kzalloc(sizeof(*hvs), GFP_KERNEL);
	if (!hvs)
		return -ENOMEM;

	vsk->trans = hvs;
	hvs->vsk = vsk;
	sk->sk_sndbuf = RINGBUFFER_HVS_SND_SIZE;
	sk->sk_rcvbuf = RINGBUFFER_HVS_RCV_SIZE;
	return 0;
}

static int hvs_connect(struct vsock_sock *vsk)
{
	union hvs_service_id vm, host;
	struct hvsock *h = vsk->trans;

	vm.srv_id = srv_id_template;
	vm.svm_port = vsk->local_addr.svm_port;
	h->vm_srv_id = vm.srv_id;

	host.srv_id = srv_id_template;
	host.svm_port = vsk->remote_addr.svm_port;
	h->host_srv_id = host.srv_id;

	return vmbus_send_tl_connect_request(&h->vm_srv_id, &h->host_srv_id);
}
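
/* Illustrative: for a guest connect() to host port 808, hvs_connect() forms
 * vm_srv_id = <local_port in hex>-facb-11e6-bd58-64006a7986d3 and
 * host_srv_id = 00000328-facb-11e6-bd58-64006a7986d3, and asks the host to
 * offer a new VMBus channel for this connection.
 */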

static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
{
	struct vmpipe_proto_header hdr;

	if (hvs->fin_sent || !hvs->chan)
		return;

	/* It can't fail: see hvs_channel_writable_bytes(). */
	(void)hvs_send_data(hvs->chan, (struct hvs_send_buf *)&hdr, 0);
	hvs->fin_sent = true;
}

static int hvs_shutdown(struct vsock_sock *vsk, int mode)
{
	if (!(mode & SEND_SHUTDOWN))
		return 0;

	hvs_shutdown_lock_held(vsk->trans, mode);
	return 0;
}

static void hvs_close_timeout(struct work_struct *work)
{
	struct vsock_sock *vsk =
		container_of(work, struct vsock_sock, close_work.work);
	struct sock *sk = sk_vsock(vsk);

	sock_hold(sk);
	lock_sock(sk);
	if (!sock_flag(sk, SOCK_DONE))
		hvs_do_close_lock_held(vsk, false);

	vsk->close_work_scheduled = false;
	release_sock(sk);
	sock_put(sk);
}

/* Returns true, if it is safe to remove socket; false otherwise */
static bool hvs_close_lock_held(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);

	if (!(sk->sk_state == TCP_ESTABLISHED ||
	      sk->sk_state == TCP_CLOSING))
		return true;

	if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
		hvs_shutdown_lock_held(vsk->trans, SHUTDOWN_MASK);

	if (sock_flag(sk, SOCK_DONE))
		return true;

	/* This reference will be dropped by the delayed close routine */
	sock_hold(sk);
	INIT_DELAYED_WORK(&vsk->close_work, hvs_close_timeout);
	vsk->close_work_scheduled = true;
	schedule_delayed_work(&vsk->close_work, HVS_CLOSE_TIMEOUT);
	return false;
}

static void hvs_release(struct vsock_sock *vsk)
{
	bool remove_sock;

	remove_sock = hvs_close_lock_held(vsk);
	if (remove_sock)
		vsock_remove_sock(vsk);
}

static void hvs_destruct(struct vsock_sock *vsk)
{
	struct hvsock *hvs = vsk->trans;
	struct vmbus_channel *chan = hvs->chan;

	if (chan)
		vmbus_hvsock_device_unregister(chan);

	kfree(hvs);
}

static int hvs_dgram_bind(struct vsock_sock *vsk, struct sockaddr_vm *addr)
{
	return -EOPNOTSUPP;
}

static int hvs_dgram_dequeue(struct vsock_sock *vsk, struct msghdr *msg,
			     size_t len, int flags)
{
	return -EOPNOTSUPP;
}

static int hvs_dgram_enqueue(struct vsock_sock *vsk,
			     struct sockaddr_vm *remote, struct msghdr *msg,
			     size_t dgram_len)
{
	return -EOPNOTSUPP;
}

static bool hvs_dgram_allow(u32 cid, u32 port)
{
	return false;
}

static int hvs_update_recv_data(struct hvsock *hvs)
{
	struct hvs_recv_buf *recv_buf;
	u32 payload_len;

	recv_buf = (struct hvs_recv_buf *)(hvs->recv_desc + 1);
	payload_len = recv_buf->hdr.data_size;

	if (payload_len > HVS_MTU_SIZE)
		return -EIO;

	if (payload_len == 0)
		hvs->vsk->peer_shutdown |= SEND_SHUTDOWN;

	hvs->recv_data_len = payload_len;
	hvs->recv_data_off = 0;

	return 0;
}

static ssize_t hvs_stream_dequeue(struct vsock_sock *vsk, struct msghdr *msg,
				  size_t len, int flags)
{
	struct hvsock *hvs = vsk->trans;
	bool need_refill = !hvs->recv_desc;
	struct hvs_recv_buf *recv_buf;
	u32 to_read;
	int ret;

	if (flags & MSG_PEEK)
		return -EOPNOTSUPP;

	if (need_refill) {
		hvs->recv_desc = hv_pkt_iter_first_raw(hvs->chan);
		/* The ring may be empty; hvs_update_recv_data() would
		 * dereference a NULL recv_desc.
		 */
		if (!hvs->recv_desc)
			return -ENOBUFS;
		ret = hvs_update_recv_data(hvs);
		if (ret)
			return ret;
	}

	recv_buf = (struct hvs_recv_buf *)(hvs->recv_desc + 1);
	to_read = min_t(u32, len, hvs->recv_data_len);
	ret = memcpy_to_msg(msg, recv_buf->data + hvs->recv_data_off, to_read);
	if (ret != 0)
		return ret;

	hvs->recv_data_len -= to_read;
	if (hvs->recv_data_len == 0) {
		hvs->recv_desc = hv_pkt_iter_next_raw(hvs->chan, hvs->recv_desc);
		if (hvs->recv_desc) {
			ret = hvs_update_recv_data(hvs);
			if (ret)
				return ret;
		}
	} else {
		hvs->recv_data_off += to_read;
	}

	return to_read;
}

static ssize_t hvs_stream_enqueue(struct vsock_sock *vsk, struct msghdr *msg,
				  size_t len)
{
	struct hvsock *hvs = vsk->trans;
	struct vmbus_channel *chan = hvs->chan;
	struct hvs_send_buf *send_buf;
	ssize_t to_write, max_writable;
	ssize_t ret = 0;
	ssize_t bytes_written = 0;

	BUILD_BUG_ON(sizeof(*send_buf) != HV_HYP_PAGE_SIZE);

	send_buf = kmalloc(sizeof(*send_buf), GFP_KERNEL);
	if (!send_buf)
		return -ENOMEM;

	/* Reader(s) could be draining data from the channel as we write.
	 * Maximize bandwidth, by iterating until the channel is found to be
	 * full.
	 */
	while (len) {
		max_writable = hvs_channel_writable_bytes(chan);
		if (!max_writable)
			break;
		to_write = min_t(ssize_t, len, max_writable);
		to_write = min_t(ssize_t, to_write, HVS_SEND_BUF_SIZE);

		/* memcpy_from_msg is safe in a loop because it advances the
		 * offsets within the message iterator.
		 */
		ret = memcpy_from_msg(send_buf->data, msg, to_write);
		if (ret < 0)
			goto out;

		ret = hvs_send_data(hvs->chan, send_buf, to_write);
		if (ret < 0)
			goto out;

		bytes_written += to_write;
		len -= to_write;
	}
out:
	/* If any data has been sent, return that */
	if (bytes_written)
		ret = bytes_written;
	kfree(send_buf);
	return ret;
}

static s64 hvs_stream_has_data(struct vsock_sock *vsk)
{
	struct hvsock *hvs = vsk->trans;
	s64 ret;

	if (hvs->recv_data_len > 0)
		return 1;

	switch (hvs_channel_readable_payload(hvs->chan)) {
	case 1:
		ret = 1;
		break;
	case 0:
		vsk->peer_shutdown |= SEND_SHUTDOWN;
		ret = 0;
		break;
	default: /* -1 */
		ret = 0;
		break;
	}

	return ret;
}

static s64 hvs_stream_has_space(struct vsock_sock *vsk)
{
	struct hvsock *hvs = vsk->trans;

	return hvs_channel_writable_bytes(hvs->chan);
}

static u64 hvs_stream_rcvhiwat(struct vsock_sock *vsk)
{
	return HVS_MTU_SIZE + 1;
}

static bool hvs_stream_is_active(struct vsock_sock *vsk)
{
	struct hvsock *hvs = vsk->trans;

	return hvs->chan != NULL;
}

static bool hvs_stream_allow(u32 cid, u32 port)
{
	if (cid == VMADDR_CID_HOST)
		return true;

	return false;
}

static
int hvs_notify_poll_in(struct vsock_sock *vsk, size_t target, bool *readable)
{
	struct hvsock *hvs = vsk->trans;

	*readable = hvs_channel_readable(hvs->chan);
	return 0;
}

static
int hvs_notify_poll_out(struct vsock_sock *vsk, size_t target, bool *writable)
{
	*writable = hvs_stream_has_space(vsk) > 0;

	return 0;
}

static
int hvs_notify_recv_init(struct vsock_sock *vsk, size_t target,
			 struct vsock_transport_recv_notify_data *d)
{
	return 0;
}

static
int hvs_notify_recv_pre_block(struct vsock_sock *vsk, size_t target,
			      struct vsock_transport_recv_notify_data *d)
{
	return 0;
}

static
int hvs_notify_recv_pre_dequeue(struct vsock_sock *vsk, size_t target,
				struct vsock_transport_recv_notify_data *d)
{
	return 0;
}

static
int hvs_notify_recv_post_dequeue(struct vsock_sock *vsk, size_t target,
				 ssize_t copied, bool data_read,
				 struct vsock_transport_recv_notify_data *d)
{
	return 0;
}

static
int hvs_notify_send_init(struct vsock_sock *vsk,
			 struct vsock_transport_send_notify_data *d)
{
	return 0;
}

static
int hvs_notify_send_pre_block(struct vsock_sock *vsk,
			      struct vsock_transport_send_notify_data *d)
{
	return 0;
}

static
int hvs_notify_send_pre_enqueue(struct vsock_sock *vsk,
				struct vsock_transport_send_notify_data *d)
{
	return 0;
}

static
int hvs_notify_send_post_enqueue(struct vsock_sock *vsk, ssize_t written,
				 struct vsock_transport_send_notify_data *d)
{
	return 0;
}

static struct vsock_transport hvs_transport = {
	.module                   = THIS_MODULE,

	.get_local_cid            = hvs_get_local_cid,

	.init                     = hvs_sock_init,
	.destruct                 = hvs_destruct,
	.release                  = hvs_release,
	.connect                  = hvs_connect,
	.shutdown                 = hvs_shutdown,

	.dgram_bind               = hvs_dgram_bind,
	.dgram_dequeue            = hvs_dgram_dequeue,
	.dgram_enqueue            = hvs_dgram_enqueue,
	.dgram_allow              = hvs_dgram_allow,

	.stream_dequeue           = hvs_stream_dequeue,
	.stream_enqueue           = hvs_stream_enqueue,
	.stream_has_data          = hvs_stream_has_data,
	.stream_has_space         = hvs_stream_has_space,
	.stream_rcvhiwat          = hvs_stream_rcvhiwat,
	.stream_is_active         = hvs_stream_is_active,
	.stream_allow             = hvs_stream_allow,

	.notify_poll_in           = hvs_notify_poll_in,
	.notify_poll_out          = hvs_notify_poll_out,
	.notify_recv_init         = hvs_notify_recv_init,
	.notify_recv_pre_block    = hvs_notify_recv_pre_block,
	.notify_recv_pre_dequeue  = hvs_notify_recv_pre_dequeue,
	.notify_recv_post_dequeue = hvs_notify_recv_post_dequeue,
	.notify_send_init         = hvs_notify_send_init,
	.notify_send_pre_block    = hvs_notify_send_pre_block,
	.notify_send_pre_enqueue  = hvs_notify_send_pre_enqueue,
	.notify_send_post_enqueue = hvs_notify_send_post_enqueue,
};

static bool hvs_check_transport(struct vsock_sock *vsk)
{
	return vsk->transport == &hvs_transport;
}

static int hvs_probe(struct hv_device *hdev,
		     const struct hv_vmbus_device_id *dev_id)
{
	struct vmbus_channel *chan = hdev->channel;

	hvs_open_connection(chan);

	/* Always return success to suppress the unnecessary error message
	 * in vmbus_probe(): on error the host will rescind the device in
	 * 30 seconds and we can do cleanup at that time in
	 * vmbus_onoffer_rescind().
	 */
	return 0;
}

static int hvs_remove(struct hv_device *hdev)
{
	struct vmbus_channel *chan = hdev->channel;

	vmbus_close(chan);

	return 0;
}

/* hv_sock connections can not persist across hibernation, and all the hv_sock
 * channels are forced to be rescinded before hibernation: see
 * vmbus_bus_suspend(). Here the dummy hvs_suspend() and hvs_resume()
 * are only needed because hibernation requires that the vmbus device's
 * suspend and resume callbacks are not NULL: see vmbus_suspend() and
 * vmbus_resume().
 */
static int hvs_suspend(struct hv_device *hv_dev)
{
	/* Dummy */
	return 0;
}

static int hvs_resume(struct hv_device *dev)
{
	/* Dummy */
	return 0;
}

/* This isn't really used. See vmbus_match() and vmbus_probe() */
static const struct hv_vmbus_device_id id_table[] = {
	{},
};

static struct hv_driver hvs_drv = {
	.name		= "hv_sock",
	.hvsock		= true,
	.id_table	= id_table,
	.probe		= hvs_probe,
	.remove		= hvs_remove,
	.suspend	= hvs_suspend,
	.resume		= hvs_resume,
};

static int __init hvs_init(void)
{
	int ret;

	if (vmbus_proto_version < VERSION_WIN10)
		return -ENODEV;

	ret = vmbus_driver_register(&hvs_drv);
	if (ret != 0)
		return ret;

	ret = vsock_core_register(&hvs_transport, VSOCK_TRANSPORT_F_G2H);
	if (ret) {
		vmbus_driver_unregister(&hvs_drv);
		return ret;
	}

	return 0;
}

static void __exit hvs_exit(void)
{
	vsock_core_unregister(&hvs_transport);
	vmbus_driver_unregister(&hvs_drv);
}

module_init(hvs_init);
module_exit(hvs_exit);

MODULE_DESCRIPTION("Hyper-V Sockets");
MODULE_VERSION("1.0.0");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_VSOCK);