// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"
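
/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen asynchronously at any time. The table
 * below defines, for each PM state, the set of states the controller is
 * allowed to move to next; mhi_tryset_pm_state() rejects any transition
 * that is not listed here.
 */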
static struct mhi_pm_transitions const dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
	},
};

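/* Must be called with the pm_lock held for writing */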
enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}

void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	if (state == MHI_STATE_RESET) {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
	} else {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_MHISTATE_MASK,
				    MHICTRL_MHISTATE_SHIFT, state);
	}
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

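/* Handle device ready state transition */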
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
	int ret, i;

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				 MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0,
				 interval_us);
	if (ret) {
		dev_err(dev, "Device failed to clear MHI Reset\n");
		return ret;
	}

	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				 MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT, 1,
				 interval_us);
	if (ret) {
		dev_err(dev, "Device failed to enter MHI Ready\n");
		return ret;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload and HW event rings */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Make the context write pointer update visible first */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}

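/* Handle device M0 state transition: ring any pending channel, command and event doorbells */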
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}
	mhi_cntrl->M0++;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and the CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

		if (mhi_chan->db_cfg.reset_req) {
			write_lock_irq(&mhi_chan->lock);
			mhi_chan->db_cfg.db_mode = true;
			write_unlock_irq(&mhi_chan->lock);
		}

		read_lock_irq(&mhi_chan->lock);

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

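/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1, the host can move the device to M2 in order to keep it
 * in a low power state.
 */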
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);

		mhi_cntrl->M2++;
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

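/* MHI M3 completion handler */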
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}

	mhi_cntrl->M3++;
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

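/* Handle device Mission Mode transition */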
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		ee = mhi_get_exec_env(mhi_cntrl);

	if (!MHI_IN_MISSION_MODE(ee)) {
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);
		return -EIO;
	}
	mhi_cntrl->ee = ee;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	wake_up_all(&mhi_cntrl->state_event);

	device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
			      mhi_destroy_device);
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Make the context write pointer update visible first */
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches
	 * its Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

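/* Handle shutdown transitions */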
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Processing disable transition with PM state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state));

	mutex_lock(&mhi_cntrl->pm_mutex);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					 MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0,
					 25000);
		if (ret)
			dev_err(dev, "Device failed to clear MHI Reset\n");

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Move to disable state */
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_DISABLE))
		dev_err(dev, "Error moving from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_DISABLE));

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

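/* Handle system error transitions */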
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state, prev_state;
	enum dev_st_transition next_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));

	/* We must notify MHI control driver so it can clean up first */
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
		dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
		goto exit_sys_error_transition;
	}

	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	/* Wake up threads waiting for state transitions */
	wake_up_all(&mhi_cntrl->state_event);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    MHICTRL_RESET_SHIFT,
							    &in_reset) ||
					 !in_reset, timeout);
		if (!ret || in_reset) {
			dev_err(dev, "Device failed to exit MHI Reset state\n");
			goto exit_sys_error_transition;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Transition to next state */
	if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (cur_state != MHI_PM_POR) {
			dev_err(dev, "Error moving to state %s from %s\n",
				to_mhi_pm_state_str(MHI_PM_POR),
				to_mhi_pm_state_str(cur_state));
			goto exit_sys_error_transition;
		}
		next_state = DEV_ST_TRANSITION_PBL;
	} else {
		next_state = DEV_ST_TRANSITION_READY;
	}

	mhi_queue_state_transition(mhi_cntrl, next_state);

exit_sys_error_transition:
	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

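/* Queue a new work item and schedule the state transition worker */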
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);

	return 0;
}

/* SYS_ERR worker */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* skip if controller supports RDDM */
	if (mhi_cntrl->rddm_image) {
		dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
		return;
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}
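/* Device State Transition worker */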
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling state transition: %s\n",
			TO_DEV_STATE_TRANS_STR(itr->state));

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_fw_load_handler(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);

			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			if (mhi_cntrl->fbc_download)
				mhi_download_amss_image(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_FP:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_FP;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SYS_ERR:
			mhi_pm_sys_error_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_DISABLE:
			mhi_pm_disable_transition(mhi_cntrl);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}

int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Return busy if there are any pending resources */
	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts))
		return -EBUSY;

	/* Take MHI out of M2 state */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Could not enter M0/M1 state");
		return -EIO;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);

	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		return -EBUSY;
	}

	dev_dbg(dev, "Allowing M3 transition\n");
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M3 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	dev_dbg(dev, "Waiting for M3 completion\n");

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Notify clients about entering LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
		mutex_unlock(&itr->mutex);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);

int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state cur_state;
	int ret;

	dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3)
		return -EINVAL;

	/* Notify clients about exiting LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
		mutex_unlock(&itr->mutex);
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
	if (cur_state != MHI_PM_M3_EXIT) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_info(dev,
			 "Error setting to PM state: %s from: %s\n",
			 to_mhi_pm_state_str(MHI_PM_M3_EXIT),
			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M0 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M2 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);

int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}
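/* Assert device wake and ring the wake doorbell if required */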
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}
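/* De-assert device wake and clear the wake doorbell once the last vote drops */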
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}

int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	enum mhi_state state;
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 val;
	int ret;

	dev_info(dev, "Requested to power ON\n");

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto error_setup_irq;

	/* Setup BHI offset & INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
	if (ret) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto error_bhi_offset;
	}

	mhi_cntrl->bhi = mhi_cntrl->regs + val;

	/* Setup BHIE offset */
	if (mhi_cntrl->fbc_download) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
		if (ret) {
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_err(dev, "Error reading BHIE offset\n");
			goto error_bhi_offset;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + val;
	}

	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in valid exec env */
	if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
		dev_err(dev, "Not a valid EE for power on\n");
		ret = -EIO;
		goto error_bhi_offset;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	if (state == MHI_STATE_SYS_ERR) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
		ret = wait_event_timeout(mhi_cntrl->state_event,
				MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
					mhi_read_reg_field(mhi_cntrl,
							   mhi_cntrl->regs,
							   MHICTRL,
							   MHICTRL_RESET_MASK,
							   MHICTRL_RESET_SHIFT,
							   &val) ||
					!val,
				msecs_to_jiffies(mhi_cntrl->timeout_ms));
		if (!ret) {
			ret = -EIO;
			dev_info(dev, "Failed to reset MHI due to syserr state\n");
			goto error_bhi_offset;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_bhi_offset:
	mhi_deinit_free_irq(mhi_cntrl);

error_setup_irq:
	mhi_cntrl->pm_state = MHI_PM_DISABLE;
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	enum mhi_pm_state cur_state, transition_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_cntrl->pm_state;
	if (cur_state == MHI_PM_DISABLE) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return;
	}

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
			   MHI_PM_LD_ERR_FATAL_DETECT;

	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state != transition_state) {
		dev_err(dev, "Failed to move to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		/* Force link down or error fatal detected state */
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
	}

	/* Mark device inactive to avoid any further host processing */
	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	wake_up_all(&mhi_cntrl->state_event);

	write_unlock_irq(&mhi_cntrl->pm_lock);
	mutex_unlock(&mhi_cntrl->pm_mutex);

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);

	/* Wait for shutdown to complete */
	flush_work(&mhi_cntrl->st_worker);

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_power_down);

int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);

	if (ret)
		return ret;

	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
	if (ret)
		mhi_power_down(mhi_cntrl, false);

	return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);

int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

void mhi_device_get(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);