1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include <linux/slab.h>
34#include <linux/types.h>
35#include <linux/vmalloc.h>
36#include <asm/xen/hypervisor.h>
37#include <xen/interface/xen.h>
38#include <xen/interface/event_channel.h>
39#include <xen/events.h>
40#include <xen/grant_table.h>
41#include <xen/xenbus.h>
42
43const char *xenbus_strstate(enum xenbus_state state)
44{
45 static const char *const name[] = {
46 [ XenbusStateUnknown ] = "Unknown",
47 [ XenbusStateInitialising ] = "Initialising",
48 [ XenbusStateInitWait ] = "InitWait",
49 [ XenbusStateInitialised ] = "Initialised",
50 [ XenbusStateConnected ] = "Connected",
51 [ XenbusStateClosing ] = "Closing",
52 [ XenbusStateClosed ] = "Closed",
53 [XenbusStateReconfiguring] = "Reconfiguring",
54 [XenbusStateReconfigured] = "Reconfigured",
55 };
56 return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
57}
58EXPORT_SYMBOL_GPL(xenbus_strstate);
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74int xenbus_watch_path(struct xenbus_device *dev, const char *path,
75 struct xenbus_watch *watch,
76 void (*callback)(struct xenbus_watch *,
77 const char **, unsigned int))
78{
79 int err;
80
81 watch->node = path;
82 watch->callback = callback;
83
84 err = register_xenbus_watch(watch);
85
86 if (err) {
87 watch->node = NULL;
88 watch->callback = NULL;
89 xenbus_dev_fatal(dev, err, "adding watch on %s", path);
90 }
91
92 return err;
93}
94EXPORT_SYMBOL_GPL(xenbus_watch_path);
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112int xenbus_watch_pathfmt(struct xenbus_device *dev,
113 struct xenbus_watch *watch,
114 void (*callback)(struct xenbus_watch *,
115 const char **, unsigned int),
116 const char *pathfmt, ...)
117{
118 int err;
119 va_list ap;
120 char *path;
121
122 va_start(ap, pathfmt);
123 path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
124 va_end(ap);
125
126 if (!path) {
127 xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
128 return -ENOMEM;
129 }
130 err = xenbus_watch_path(dev, path, watch, callback);
131
132 if (err)
133 kfree(path);
134 return err;
135}
136EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
137
138static void xenbus_switch_fatal(struct xenbus_device *, int, int,
139 const char *, ...);
140
141static int
142__xenbus_switch_state(struct xenbus_device *dev,
143 enum xenbus_state state, int depth)
144{
145
146
147
148
149
150
151
152
153
154
155
156
157
158 struct xenbus_transaction xbt;
159 int current_state;
160 int err, abort;
161
162 if (state == dev->state)
163 return 0;
164
165again:
166 abort = 1;
167
168 err = xenbus_transaction_start(&xbt);
169 if (err) {
170 xenbus_switch_fatal(dev, depth, err, "starting transaction");
171 return 0;
172 }
173
174 err = xenbus_scanf(xbt, dev->nodename, "state", "%d", ¤t_state);
175 if (err != 1)
176 goto abort;
177
178 err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
179 if (err) {
180 xenbus_switch_fatal(dev, depth, err, "writing new state");
181 goto abort;
182 }
183
184 abort = 0;
185abort:
186 err = xenbus_transaction_end(xbt, abort);
187 if (err) {
188 if (err == -EAGAIN && !abort)
189 goto again;
190 xenbus_switch_fatal(dev, depth, err, "ending transaction");
191 } else
192 dev->state = state;
193
194 return 0;
195}
196
197
198
199
200
201
202
203
204
205
/**
 * xenbus_switch_state - advertise a driver state change in the store
 * @dev: xenbus device whose "state" node is updated
 * @state: new state to record
 *
 * Thin wrapper around __xenbus_switch_state() with recursion depth 0.
 * Errors are reported via xenbus_switch_fatal() (which also moves the
 * device towards XenbusStateClosing) rather than returned.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);
212
/**
 * xenbus_frontend_closed - note that the frontend has finished closing
 * @dev: xenbus device that has closed
 *
 * Switches the device to XenbusStateClosed and completes dev->down so
 * that waiters on the device shutdown are released.  Always returns 0.
 */
int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
220
221
222
223
224
/*
 * Build the xenstore path ("error/<nodename>") under which error
 * messages for @dev are recorded.  Returns a kmalloc'd string the
 * caller must kfree, or NULL on allocation failure.
 */
static char *error_path(struct xenbus_device *dev)
{
	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}
229
230
/*
 * Common bottom half of xenbus_dev_error() and xenbus_dev_fatal():
 * format the message as "<-err> <text>", log it against the device,
 * and mirror it into the store under error/<nodename>/error.
 * Best-effort: allocation or store-write failures are logged via
 * dev_err() and otherwise ignored.
 */
static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	int ret;
	unsigned int len;
	char *printf_buffer = NULL;
	char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		goto fail;

	/* Message format: negated errno, a space, then the caller's text. */
	len = sprintf(printf_buffer, "%i ", -err);
	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);

	/* The formatted message must fit; BUG if it would be truncated. */
	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = error_path(dev);

	if (path_buffer == NULL) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
		 dev->nodename, printf_buffer);
		goto fail;
	}

	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
		 dev->nodename, printf_buffer);
		goto fail;
	}

	/* Success also falls through here: free both buffers. */
fail:
	kfree(printf_buffer);
	kfree(path_buffer);
}
269
270
271
272
273
274
275
276
277
278
279
/**
 * xenbus_dev_error - place an error message into the store
 * @dev: xenbus device
 * @err: error value (negative errno by convention of the callers here)
 * @fmt: printf-style error message, plus arguments
 *
 * Logs the error against the device and records it in the store via
 * xenbus_va_dev_error().
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);
289
290
291
292
293
294
295
296
297
298
299
300
/**
 * xenbus_dev_fatal - report a fatal error and start closing the device
 * @dev: xenbus device
 * @err: error value
 * @fmt: printf-style error message, plus arguments
 *
 * Equivalent to xenbus_dev_error() followed by switching the device to
 * XenbusStateClosing.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
312
313
314
315
316
/*
 * Like xenbus_dev_fatal(), but switches state via
 * __xenbus_switch_state() with depth 1, and only when called at depth
 * 0 — so a failure that occurs while already switching to Closing does
 * not recurse into another state switch.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}
329
330
331
332
333
334
335
336
337
338
339int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
340{
341 int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
342 if (err < 0)
343 xenbus_dev_fatal(dev, err, "granting access to ring page");
344 return err;
345}
346EXPORT_SYMBOL_GPL(xenbus_grant_ring);
347
348
349
350
351
352
353
354
355int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
356{
357 struct evtchn_alloc_unbound alloc_unbound;
358 int err;
359
360 alloc_unbound.dom = DOMID_SELF;
361 alloc_unbound.remote_dom = dev->otherend_id;
362
363 err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
364 &alloc_unbound);
365 if (err)
366 xenbus_dev_fatal(dev, err, "allocating event channel");
367 else
368 *port = alloc_unbound.port;
369
370 return err;
371}
372EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
373
374
375
376
377
378
379
380int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
381{
382 struct evtchn_bind_interdomain bind_interdomain;
383 int err;
384
385 bind_interdomain.remote_dom = dev->otherend_id;
386 bind_interdomain.remote_port = remote_port;
387
388 err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
389 &bind_interdomain);
390 if (err)
391 xenbus_dev_fatal(dev, err,
392 "binding to event channel %d from domain %d",
393 remote_port, dev->otherend_id);
394 else
395 *port = bind_interdomain.local_port;
396
397 return err;
398}
399EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
400
401
402
403
404
405int xenbus_free_evtchn(struct xenbus_device *dev, int port)
406{
407 struct evtchn_close close;
408 int err;
409
410 close.port = port;
411
412 err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
413 if (err)
414 xenbus_dev_error(dev, err, "freeing event channel %d", port);
415
416 return err;
417}
418EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
/**
 * xenbus_map_ring_valloc - allocate virtual space and map a granted page
 * @dev: xenbus device; the grant comes from dev->otherend_id
 * @gnt_ref: grant reference provided by the other end
 * @vaddr: out parameter; set to the mapped address on success
 *
 * Allocates one page of virtual address space and maps the granted
 * page into it.  Returns 0 with *vaddr set on success; on failure
 * returns the grant status, reports it via xenbus_dev_fatal(), and
 * leaves *vaddr == NULL.  Unmap with xenbus_unmap_ring_vfree().
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map,
		.ref = gnt_ref,
		.dom = dev->otherend_id,
	};
	struct vm_struct *area;

	*vaddr = NULL;

	area = xen_alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	op.host_addr = (unsigned long)area->addr;

	/* Only op.status may report failure; the hypercall itself must not. */
	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xen_free_vm_area(area);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	/*
	 * Stash the grant handle in the vm_struct's phys_addr field so
	 * xenbus_unmap_ring_vfree() can recover it later.
	 */
	area->phys_addr = (unsigned long)op.handle;

	*vaddr = area->addr;
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
487 grant_handle_t *handle, void *vaddr)
488{
489 struct gnttab_map_grant_ref op = {
490 .host_addr = (unsigned long)vaddr,
491 .flags = GNTMAP_host_map,
492 .ref = gnt_ref,
493 .dom = dev->otherend_id,
494 };
495
496 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
497 BUG();
498
499 if (op.status != GNTST_okay) {
500 xenbus_dev_fatal(dev, op.status,
501 "mapping in shared page %d from domain %d",
502 gnt_ref, dev->otherend_id);
503 } else
504 *handle = op.handle;
505
506 return op.status;
507}
508EXPORT_SYMBOL_GPL(xenbus_map_ring);
509
510
511
512
513
514
515
516
517
518
519
520
521
522
/**
 * xenbus_unmap_ring_vfree - unmap a page mapped by xenbus_map_ring_valloc()
 * @dev: xenbus device (used for error reporting)
 * @vaddr: address returned by xenbus_map_ring_valloc()
 *
 * Unmaps the grant and frees the virtual address area.  Returns the
 * grant status, or GNTST_bad_virt_addr if no vm area matches @vaddr;
 * errors are also reported via xenbus_dev_error().
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	struct vm_struct *area;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};

	/*
	 * Walk the vmlist to find the vm_struct for @vaddr: the grant
	 * handle that xenbus_map_ring_valloc() stashed in
	 * area->phys_addr is needed before the area can be freed.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area != NULL; area = area->next) {
		if (area->addr == vaddr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!area) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	/* Recover the grant handle stashed at map time. */
	op.handle = (grant_handle_t)area->phys_addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	/* Only free the vm area once the grant is successfully unmapped. */
	if (op.status == GNTST_okay)
		xen_free_vm_area(area);
	else
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 (int16_t)area->phys_addr, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
564
565
566
567
568
569
570
571
572
573
574
575
576int xenbus_unmap_ring(struct xenbus_device *dev,
577 grant_handle_t handle, void *vaddr)
578{
579 struct gnttab_unmap_grant_ref op = {
580 .host_addr = (unsigned long)vaddr,
581 .handle = handle,
582 };
583
584 if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
585 BUG();
586
587 if (op.status != GNTST_okay)
588 xenbus_dev_error(dev, op.status,
589 "unmapping page at handle %d error %d",
590 handle, op.status);
591
592 return op.status;
593}
594EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
595
596
597
598
599
600
601
602
603
604enum xenbus_state xenbus_read_driver_state(const char *path)
605{
606 enum xenbus_state result;
607 int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
608 if (err)
609 result = XenbusStateUnknown;
610
611 return result;
612}
613EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
614