1
2
3
4#include "ice.h"
5#include "ice_lib.h"
6
7#define E810_OUT_PROP_DELAY_NS 1
8
9
10
11
12
13
14static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
15{
16 struct ice_vsi *vsi;
17 u32 val;
18 u16 i;
19
20 vsi = ice_get_main_vsi(pf);
21 if (!vsi)
22 return;
23
24
25 ice_for_each_rxq(vsi, i) {
26 if (!vsi->tx_rings[i])
27 continue;
28 vsi->tx_rings[i]->ptp_tx = on;
29 }
30
31
32 val = rd32(&pf->hw, PFINT_OICR_ENA);
33 if (on)
34 val |= PFINT_OICR_TSYN_TX_M;
35 else
36 val &= ~PFINT_OICR_TSYN_TX_M;
37 wr32(&pf->hw, PFINT_OICR_ENA, val);
38}
39
40
41
42
43
44
45static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
46{
47 struct ice_vsi *vsi;
48 u16 i;
49
50 vsi = ice_get_main_vsi(pf);
51 if (!vsi)
52 return;
53
54
55 ice_for_each_rxq(vsi, i) {
56 if (!vsi->rx_rings[i])
57 continue;
58 vsi->rx_rings[i]->ptp_rx = on;
59 }
60}
61
62
63
64
65
66
67
68
69
70static void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
71{
72 ice_set_tx_tstamp(pf, ena);
73 ice_set_rx_tstamp(pf, ena);
74
75 if (ena) {
76 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
77 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
78 } else {
79 pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
80 pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
81 }
82}
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
/**
 * ice_get_ptp_clock_index - Get the PTP clock index
 * @pf: the PF pointer
 *
 * Determine the clock index of the PTP clock associated with this device. If
 * this is the PF controlling the clock, just use the local access to the
 * clock device pointer. Otherwise, read from the driver shared parameters to
 * determine the clock index value.
 *
 * Returns: the index of the PTP clock associated with this device, or -1 if
 * there is no associated clock.
 */
int ice_get_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	u32 value;
	int err;

	/* Use the ptp_clock structure if we're the main PF */
	if (pf->ptp.clock)
		return ptp_clock_index(pf->ptp.clock);

	/* Select the shared parameter slot for the associated source timer */
	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	err = ice_aq_get_driver_param(hw, param_idx, &value, NULL);
	if (err) {
		dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
		return -1;
	}

	/* The PTP clock index is an integer, and will be between 0 and
	 * INT_MAX. The highest bit of the driver shared parameter is used to
	 * indicate whether or not the currently stored clock index is valid.
	 */
	if (!(value & PTP_SHARED_CLK_IDX_VALID))
		return -1;

	return value & ~PTP_SHARED_CLK_IDX_VALID;
}
133
134
135
136
137
138
139
140
141
142
143
/**
 * ice_set_ptp_clock_index - Set the PTP clock index
 * @pf: the PF pointer
 *
 * Store the PTP clock index for other functions (on the same physical
 * device) to read via the driver shared parameters. The validity bit is set
 * so that readers can distinguish a stored index from an empty slot.
 */
static void ice_set_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	u32 value;
	int err;

	if (!pf->ptp.clock)
		return;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	value = (u32)ptp_clock_index(pf->ptp.clock);
	/* ptp_clock_index() returns an int; a negative value becomes a u32
	 * larger than INT_MAX here, and would collide with the validity bit
	 * encoding, so refuse to store it.
	 */
	if (value > INT_MAX) {
		dev_err(dev, "PTP Clock index is too large to store\n");
		return;
	}
	value |= PTP_SHARED_CLK_IDX_VALID;

	err = ice_aq_set_driver_param(hw, param_idx, value, NULL);
	if (err) {
		dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
	}
}
175
176
177
178
179
180
181
182
183
/**
 * ice_clear_ptp_clock_index - Clear the PTP clock index
 * @pf: the PF pointer
 *
 * Clear the PTP clock index from the driver shared parameters, so other
 * functions no longer report a (stale) clock association.
 */
static void ice_clear_ptp_clock_index(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_aqc_driver_params param_idx;
	struct ice_hw *hw = &pf->hw;
	u8 tmr_idx;
	int err;

	/* Do not clear the index if we don't own the timer */
	if (!hw->func_caps.ts_func_info.src_tmr_owned)
		return;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
	if (!tmr_idx)
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
	else
		param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;

	/* Writing 0 clears the validity bit along with the index */
	err = ice_aq_set_driver_param(hw, param_idx, 0, NULL);
	if (err) {
		dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n",
			err, ice_aq_str(hw->adminq.sq_last_status));
	}
}
208
209
210
211
212
213
214
/**
 * ice_ptp_read_src_clk_reg - Read the source clock register
 * @pf: Board private structure
 * @sts: Optional parameter for holding a pair of system timestamps from
 *       the system clock. Will be ignored if NULL is given.
 *
 * Read the 64-bit source clock time as two 32-bit registers. The read
 * sequence is order-sensitive: lo is read first (bracketed by the optional
 * system timestamp capture), then hi, then lo again to detect a wrap of the
 * low register between the two reads.
 */
static u64
ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
{
	struct ice_hw *hw = &pf->hw;
	u32 hi, lo, lo2;
	u8 tmr_idx;

	tmr_idx = ice_get_ptp_src_clock_index(hw);

	/* Read the system timestamp pre PHC read */
	ptp_read_system_prets(sts);

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	/* Read the system timestamp post PHC read */
	ptp_read_system_postts(sts);

	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));

	if (lo2 < lo) {
		/* if TIME_L rolled over between the first and second reads,
		 * then hi may not match the original lo; re-read both so the
		 * combined value is coherent.
		 */
		ptp_read_system_prets(sts);
		lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
		ptp_read_system_postts(sts);
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}
246
247
248
249
250
251
252
253
254
255
256
257
/**
 * ice_ptp_update_cached_phctime - Update the cached PHC time values
 * @pf: Board specific private structure
 *
 * This function updates the system time values which are cached in the PF
 * structure and the Rx rings of the PF VSI. The cached value is used by the
 * Rx/Tx timestamp extension logic to expand 32/40-bit hardware timestamps
 * to a full 64-bit nanosecond value.
 */
static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
{
	u64 systime;
	int i;

	/* Read the current PHC time */
	systime = ice_ptp_read_src_clk_reg(pf, NULL);

	/* Update the cached PHC time stored in the PF structure */
	WRITE_ONCE(pf->ptp.cached_phc_time, systime);

	/* Propagate the new value to every Rx ring of the PF VSIs */
	ice_for_each_vsi(pf, i) {
		struct ice_vsi *vsi = pf->vsi[i];
		int j;

		if (!vsi)
			continue;

		if (vsi->type != ICE_VSI_PF)
			continue;

		ice_for_each_rxq(vsi, j) {
			if (!vsi->rx_rings[j])
				continue;
			WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
		}
	}
}
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
326{
327 u32 delta, phc_time_lo;
328 u64 ns;
329
330
331 phc_time_lo = (u32)cached_phc_time;
332
333
334
335
336 delta = (in_tstamp - phc_time_lo);
337
338
339
340
341
342
343 if (delta > (U32_MAX / 2)) {
344
345 delta = (phc_time_lo - in_tstamp);
346 ns = cached_phc_time - delta;
347 } else {
348 ns = cached_phc_time + delta;
349 }
350
351 return ns;
352}
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
381{
382 const u64 mask = GENMASK_ULL(31, 0);
383
384 return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
385 (in_tstamp >> 8) & mask);
386}
387
388
389
390
391
392
393
394
395
396
397
398
399static void
400ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
401 struct ptp_system_timestamp *sts)
402{
403 u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);
404
405 *ts = ns_to_timespec64(time_ns);
406}
407
408
409
410
411
412
413
414
415static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
416{
417 u64 ns = timespec64_to_ns(ts);
418 struct ice_hw *hw = &pf->hw;
419
420 return ice_ptp_init_time(hw, ns);
421}
422
423
424
425
426
427
428
429
430
431static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
432{
433 struct ice_hw *hw = &pf->hw;
434
435 return ice_ptp_adj_clock(hw, adj);
436}
437
438
439
440
441
442
443
444
445
/**
 * ice_ptp_adjfine - Adjust clock increment rate
 * @info: the driver's PTP info structure
 * @scaled_ppm: Parts per million with 16-bit fractional field
 *
 * Adjust the frequency of the clock by the indicated scaled ppm from the
 * base frequency, by scaling the nominal increment value.
 */
static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	u64 freq, divisor = 1000000ULL;
	struct ice_hw *hw = &pf->hw;
	s64 incval, diff;
	int neg_adj = 0;
	int err;

	incval = ICE_PTP_NOMINAL_INCVAL_E810;

	if (scaled_ppm < 0) {
		neg_adj = 1;
		scaled_ppm = -scaled_ppm;
	}

	while ((u64)scaled_ppm > div_u64(U64_MAX, incval)) {
		/* handle overflow by scaling down the scaled_ppm and
		 * the divisor, losing some precision
		 */
		scaled_ppm >>= 2;
		divisor >>= 2;
	}

	/* scaled_ppm carries 16 fractional bits; shift them out after the
	 * multiply, then divide by the (possibly scaled) ppm divisor
	 */
	freq = (incval * (u64)scaled_ppm) >> 16;
	diff = div_u64(freq, divisor);

	if (neg_adj)
		incval -= diff;
	else
		incval += diff;

	err = ice_ptp_write_incval_locked(hw, incval);
	if (err) {
		dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
			err);
		return -EIO;
	}

	return 0;
}
487
488
489
490
491
492
493
/**
 * ice_ptp_extts_work - Workqueue task function
 * @work: external timestamp work structure
 *
 * Service external timestamp events: for each channel flagged in
 * ext_ts_irq, read the captured event time from hardware and deliver a
 * PTP_CLOCK_EXTTS event to the PTP clock subsystem.
 */
static void ice_ptp_extts_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, extts_work);
	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
	struct ptp_clock_event event;
	struct ice_hw *hw = &pf->hw;
	u8 chan, tmr_idx;
	u32 hi, lo;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	/* Event time is captured by one of the two matched registers
	 *      GLTSYN_EVNT_L: 32 LSB of sampled time event
	 *      GLTSYN_EVNT_H: 32 MSB of sampled time event
	 * Event is defined in GLTSYN_EVNT_0 register
	 */
	for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
		/* Check if channel is enabled */
		if (pf->ptp.ext_ts_irq & (1 << chan)) {
			lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
			hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
			event.timestamp = (((u64)hi) << 32) | lo;
			event.type = PTP_CLOCK_EXTTS;
			event.index = chan;

			/* Fire event and clear the pending bit */
			ptp_clock_event(pf->ptp.clock, &event);
			pf->ptp.ext_ts_irq &= ~(1 << chan);
		}
	}
}
524
525
526
527
528
529
530
531
532
533static int
534ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
535 unsigned int extts_flags)
536{
537 u32 func, aux_reg, gpio_reg, irq_reg;
538 struct ice_hw *hw = &pf->hw;
539 u8 tmr_idx;
540
541 if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
542 return -EINVAL;
543
544 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
545
546 irq_reg = rd32(hw, PFINT_OICR_ENA);
547
548 if (ena) {
549
550 irq_reg |= PFINT_OICR_TSYN_EVNT_M;
551 aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
552
553#define GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE BIT(0)
554#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1)
555
556
557 if (extts_flags & PTP_FALLING_EDGE)
558 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
559 if (extts_flags & PTP_RISING_EDGE)
560 aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
561
562
563
564
565
566 func = 1 + chan + (tmr_idx * 3);
567 gpio_reg = ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) &
568 GLGEN_GPIO_CTL_PIN_FUNC_M);
569 pf->ptp.ext_ts_chan |= (1 << chan);
570 } else {
571
572 aux_reg = 0;
573 gpio_reg = 0;
574 pf->ptp.ext_ts_chan &= ~(1 << chan);
575 if (!pf->ptp.ext_ts_chan)
576 irq_reg &= ~PFINT_OICR_TSYN_EVNT_M;
577 }
578
579 wr32(hw, PFINT_OICR_ENA, irq_reg);
580 wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
581 wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
582
583 return 0;
584}
585
586
587
588
589
590
591
592
593
594
595
/**
 * ice_ptp_cfg_clkout - Configure clock to generate periodic wave
 * @pf: Board private structure
 * @chan: GPIO channel for output
 * @config: desired periodic clk configuration. NULL will disable channel
 * @store: If set to true the values will be stored
 *
 * Configure the internal clock generator modules to generate the clock wave
 * of specified period.
 */
static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
			      struct ice_perout_channel *config, bool store)
{
	u64 current_time, period, start_time, phase;
	struct ice_hw *hw = &pf->hw;
	u32 func, val, gpio_pin;
	u8 tmr_idx;

	tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	/* 0. Reset mode & out_en in AUX_OUT */
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);

	/* If we're disabling the output, clear out CLKO and TGT and keep
	 * output level low
	 */
	if (!config || !config->ena) {
		wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0);
		wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0);

		val = GLGEN_GPIO_CTL_PIN_DIR_M;
		gpio_pin = pf->ptp.perout_channels[chan].gpio_pin;
		wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

		/* Store the value if requested */
		if (store)
			memset(&pf->ptp.perout_channels[chan], 0,
			       sizeof(struct ice_perout_channel));

		return 0;
	}
	period = config->period;
	start_time = config->start_time;
	div64_u64_rem(start_time, period, &phase);
	gpio_pin = config->gpio_pin;

	/* 1. Write clkout with half of required period value */
	if (period & 0x1) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n");
		goto err;
	}

	period >>= 1;

	/* For proper operation, the GLTSYN_CLKO must be larger than clock
	 * tick
	 */
#define MIN_PULSE 3
	if (period <= MIN_PULSE || period > U32_MAX) {
		dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33",
			MIN_PULSE * 2);
		goto err;
	}

	wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period));

	/* Allow time for programming before start_time is hit */
	current_time = ice_ptp_read_src_clk_reg(pf, NULL);

	/* if start time is in the past start the timer at the nearest second
	 * maintaining phase
	 */
	if (start_time < current_time)
		start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
				       NSEC_PER_SEC) * NSEC_PER_SEC + phase;

	/* E810 hardware asserts the output early by the propagation delay */
	start_time -= E810_OUT_PROP_DELAY_NS;

	/* 2. Write TARGET time */
	wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
	wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time));

	/* 3. Write AUX_OUT register */
	val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M;
	wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val);

	/* 4. write GPIO CTL reg */
	func = 8 + chan + (tmr_idx * 4);
	val = GLGEN_GPIO_CTL_PIN_DIR_M |
	      ((func << GLGEN_GPIO_CTL_PIN_FUNC_S) & GLGEN_GPIO_CTL_PIN_FUNC_M);
	wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);

	/* Store the value if requested */
	if (store) {
		memcpy(&pf->ptp.perout_channels[chan], config,
		       sizeof(struct ice_perout_channel));
		pf->ptp.perout_channels[chan].start_time = phase;
	}

	return 0;
err:
	dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n");
	return -EFAULT;
}
690
691
692
693
694
695
696
/**
 * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
 * @info: the driver's PTP info structure
 * @rq: The requested feature to change
 * @on: Enable/disable flag
 *
 * Dispatch a periodic-output or external-timestamp request from the PTP
 * core to the appropriate E810 configuration helper.
 */
static int
ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
			 struct ptp_clock_request *rq, int on)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_perout_channel clk_cfg = {0};
	unsigned int chan;
	u32 gpio_pin;
	int err;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		chan = rq->perout.index;
		/* the dedicated PPS channel uses a fixed pin */
		if (chan == PPS_CLK_GEN_CHAN)
			clk_cfg.gpio_pin = PPS_PIN_INDEX;
		else
			clk_cfg.gpio_pin = chan;

		clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
				   rq->perout.period.nsec);
		clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
				       rq->perout.start.nsec);
		clk_cfg.ena = !!on;

		err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
		break;
	case PTP_CLK_REQ_EXTTS:
		chan = rq->extts.index;
		gpio_pin = chan;

		err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
					rq->extts.flags);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}
736
737
738
739
740
741
742
743
744
745
746
747static int
748ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
749 struct ptp_system_timestamp *sts)
750{
751 struct ice_pf *pf = ptp_info_to_pf(info);
752 struct ice_hw *hw = &pf->hw;
753
754 if (!ice_ptp_lock(hw)) {
755 dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
756 return -EBUSY;
757 }
758
759 ice_ptp_read_time(pf, ts, sts);
760 ice_ptp_unlock(hw);
761
762 return 0;
763}
764
765
766
767
768
769
770
771
772
773static int
774ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
775{
776 struct ice_pf *pf = ptp_info_to_pf(info);
777 struct timespec64 ts64 = *ts;
778 struct ice_hw *hw = &pf->hw;
779 int err;
780
781 if (!ice_ptp_lock(hw)) {
782 err = -EBUSY;
783 goto exit;
784 }
785
786 err = ice_ptp_write_init(pf, &ts64);
787 ice_ptp_unlock(hw);
788
789 if (!err)
790 ice_ptp_update_cached_phctime(pf);
791
792exit:
793 if (err) {
794 dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
795 return err;
796 }
797
798 return 0;
799}
800
801
802
803
804
805
806static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
807{
808 struct timespec64 now, then;
809
810 then = ns_to_timespec64(delta);
811 ice_ptp_gettimex64(info, &now, NULL);
812 now = timespec64_add(now, then);
813
814 return ice_ptp_settime64(info, (const struct timespec64 *)&now);
815}
816
817
818
819
820
821
/**
 * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
 * @info: the driver's PTP info structure
 * @delta: Offset in nanoseconds to adjust the time by
 */
static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct ice_pf *pf = ptp_info_to_pf(info);
	struct ice_hw *hw = &pf->hw;
	struct device *dev;
	int err;

	dev = ice_pf_to_dev(pf);

	/* Hardware only supports atomic adjustments using signed 32-bit
	 * integers. For any adjustment outside this range, perform
	 * a non-atomic get->adjust->set flow.
	 */
	if (delta > S32_MAX || delta < S32_MIN) {
		dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
		return ice_ptp_adjtime_nonatomic(info, delta);
	}

	if (!ice_ptp_lock(hw)) {
		dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
		return -EBUSY;
	}

	err = ice_ptp_write_adj(pf, delta);

	ice_ptp_unlock(hw);

	if (err) {
		dev_err(dev, "PTP failed to adjust time, err %d\n", err);
		return err;
	}

	/* Keep the cached PHC value consistent with the new clock time */
	ice_ptp_update_cached_phctime(pf);

	return 0;
}
858
859
860
861
862
863
864
865
866int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
867{
868 struct hwtstamp_config *config;
869
870 if (!test_bit(ICE_FLAG_PTP, pf->flags))
871 return -EIO;
872
873 config = &pf->ptp.tstamp_config;
874
875 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
876 -EFAULT : 0;
877}
878
879
880
881
882
883
/**
 * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
 * @pf: Board private structure
 * @config: hwtstamp settings requested or saved
 *
 * Returns 0 on success, -EINVAL on unsupported flags, -ERANGE for an
 * unknown tx_type or rx_filter. Note that all supported Rx filters are
 * upgraded to HWTSTAMP_FILTER_ALL, since the hardware timestamps every
 * packet when enabled.
 */
static int
ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
	/* Reserved for future extensions */
	if (config->flags)
		return -EINVAL;

	switch (config->tx_type) {
	case HWTSTAMP_TX_OFF:
		ice_set_tx_tstamp(pf, false);
		break;
	case HWTSTAMP_TX_ON:
		ice_set_tx_tstamp(pf, true);
		break;
	default:
		return -ERANGE;
	}

	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		ice_set_rx_tstamp(pf, false);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_ALL:
		/* hardware timestamps all packets; report the actual mode */
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		ice_set_rx_tstamp(pf, true);
		break;
	default:
		return -ERANGE;
	}

	return 0;
}
929
930
931
932
933
934
935
936
937int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
938{
939 struct hwtstamp_config config;
940 int err;
941
942 if (!test_bit(ICE_FLAG_PTP, pf->flags))
943 return -EAGAIN;
944
945 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
946 return -EFAULT;
947
948 err = ice_ptp_set_timestamp_mode(pf, &config);
949 if (err)
950 return err;
951
952
953 pf->ptp.tstamp_config = config;
954
955 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
956 -EFAULT : 0;
957}
958
959
960
961
962
963
964
965
966
967
/**
 * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
 * @rx_ring: Ring to get the VSI info
 * @rx_desc: Receive descriptor
 * @skb: Particular skb to send timestamp with
 *
 * The driver receives a notification in the receive descriptor with
 * timestamp. The 32-bit timestamp is extended to 64 bits using the ring's
 * cached PHC time, then attached to the skb.
 */
void
ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
		    union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
	u32 ts_high;
	u64 ts_ns;

	/* Populate timesync data into skb */
	if (rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID) {
		struct skb_shared_hwtstamps *hwtstamps;

		/* Use ice_ptp_extend_32b_ts directly, using the ring-specific
		 * cached PHC value, rather than accessing the PF. This also
		 * allows us to simply pass the upper 32bits of nanoseconds
		 * directly. Calling ice_ptp_extend_40b_ts is unnecessary as
		 * it would just discard these bits itself.
		 */
		ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
		ts_ns = ice_ptp_extend_32b_ts(rx_ring->cached_phctime, ts_high);

		hwtstamps = skb_hwtstamps(skb);
		memset(hwtstamps, 0, sizeof(*hwtstamps));
		hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
	}
}
993
994
995
996
997
998static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
999{
1000 info->n_per_out = E810_N_PER_OUT;
1001 info->n_ext_ts = E810_N_EXT_TS;
1002}
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014static void
1015ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
1016{
1017 info->enable = ice_ptp_gpio_enable_e810;
1018
1019 ice_ptp_setup_pins_e810(info);
1020}
1021
1022
1023
1024
1025
/**
 * ice_ptp_set_caps - Set PTP capabilities
 * @pf: Board private structure
 *
 * Fill in the ptp_clock_info structure: name, adjust/get/set operations,
 * and the device-specific ancillary functions.
 */
static void ice_ptp_set_caps(struct ice_pf *pf)
{
	struct ptp_clock_info *info = &pf->ptp.info;
	struct device *dev = ice_pf_to_dev(pf);

	snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
		 dev_driver_string(dev), dev_name(dev));
	info->owner = THIS_MODULE;
	info->max_adj = 999999999;
	info->adjtime = ice_ptp_adjtime;
	info->adjfine = ice_ptp_adjfine;
	info->gettimex64 = ice_ptp_gettimex64;
	info->settime64 = ice_ptp_settime64;

	ice_ptp_set_funcs_e810(pf, info);
}
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
/**
 * ice_ptp_create_clock - Create PTP clock device for userspace
 * @pf: Board private structure
 *
 * This function creates a new PTP clock device. It only creates one if we
 * don't already have one. Will return error if it can't create one, but
 * success if we already have a device. Should be used by ice_ptp_init to
 * create clock initially, and prevent global resets from creating new clock
 * devices.
 */
static long ice_ptp_create_clock(struct ice_pf *pf)
{
	struct ptp_clock_info *info;
	struct ptp_clock *clock;
	struct device *dev;

	/* No need to create a clock device if we already have one */
	if (pf->ptp.clock)
		return 0;

	ice_ptp_set_caps(pf);

	info = &pf->ptp.info;
	dev = ice_pf_to_dev(pf);

	/* Allocate memory for kernel pins interface */
	if (info->n_pins) {
		info->pin_config = devm_kcalloc(dev, info->n_pins,
						sizeof(*info->pin_config),
						GFP_KERNEL);
		if (!info->pin_config) {
			info->n_pins = 0;
			return -ENOMEM;
		}
	}

	/* Attempt to register the clock before enabling the hardware. */
	clock = ptp_clock_register(info, dev);
	if (IS_ERR(clock))
		return PTR_ERR(clock);

	pf->ptp.clock = clock;

	return 0;
}
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
/**
 * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port
 * @work: pointer to the kthread_work struct
 *
 * Process timestamps captured by the PHY associated with this port. For
 * each pending slot: read the raw timestamp, clear the PHY slot, detach the
 * skb from the tracker under the lock, extend the timestamp to 64 bits and
 * report it to the stack. If any slots are still pending at the end, the
 * work is re-queued.
 */
static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
{
	struct ice_ptp_port *ptp_port;
	struct ice_ptp_tx *tx;
	struct ice_pf *pf;
	struct ice_hw *hw;
	u8 idx;

	tx = container_of(work, struct ice_ptp_tx, work);
	if (!tx->init)
		return;

	ptp_port = container_of(tx, struct ice_ptp_port, tx);
	pf = ptp_port_to_pf(ptp_port);
	hw = &pf->hw;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct skb_shared_hwtstamps shhwtstamps = {};
		u8 phy_idx = idx + tx->quad_offset;
		u64 raw_tstamp, tstamp;
		struct sk_buff *skb;
		int err;

		err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
					  &raw_tstamp);
		if (err)
			continue;

		/* Check if the timestamp is valid. If it isn't, skip this for
		 * now. The timestamp may still arrive; re-queue below.
		 */
		if (!(raw_tstamp & ICE_PTP_TS_VALID))
			continue;

		/* The timestamp is valid, so we'll go ahead and clear this
		 * index and then send the timestamp up to the stack.
		 */
		ice_clear_phy_tstamp(hw, tx->quad, phy_idx);

		/* Detach the skb under the tracker lock so that a concurrent
		 * ice_ptp_request_ts cannot reuse the slot while we still
		 * hold its skb.
		 */
		spin_lock(&tx->lock);
		clear_bit(idx, tx->in_use);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		spin_unlock(&tx->lock);

		/* it's (unlikely but) possible that the skb was flushed during
		 * a cleanup while we were reading the PHY; nothing to report.
		 */
		if (!skb)
			continue;

		/* Extend the timestamp using cached PHC time */
		tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);

		skb_tstamp_tx(skb, &shhwtstamps);
		dev_kfree_skb_any(skb);
	}

	/* Check if there are outstanding Tx timestamps that have not yet been
	 * read; if so, re-queue this work to poll for them.
	 */
	spin_lock(&tx->lock);
	if (!bitmap_empty(tx->in_use, tx->len))
		kthread_queue_work(pf->ptp.kworker, &tx->work);
	spin_unlock(&tx->lock);
}
1190
1191
1192
1193
1194
1195
/**
 * ice_ptp_request_ts - Request an available Tx timestamp index
 * @tx: the PTP Tx timestamp tracker to request from
 * @skb: the SKB to associate with this timestamp request
 *
 * Claim a free tracker slot for @skb and mark the skb as in progress.
 *
 * Returns: the PHY timestamp index (slot index plus the quad offset), or
 * -1 if the tracker is uninitialized or no slot is free.
 */
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	u8 idx;

	/* Check if this tracker is initialized */
	if (!tx->init)
		return -1;

	spin_lock(&tx->lock);
	/* Find and set the first available index */
	idx = find_first_zero_bit(tx->in_use, tx->len);
	if (idx < tx->len) {
		/* We got a valid index that no other thread could have set. Store
		 * a reference to the skb and the start time to allow discarding old
		 * requests. A reference is taken on the skb so it survives until
		 * the timestamp is reported.
		 */
		set_bit(idx, tx->in_use);
		tx->tstamps[idx].start = jiffies;
		tx->tstamps[idx].skb = skb_get(skb);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	}

	spin_unlock(&tx->lock);

	/* return the appropriate PHY timestamp register index, -1 if no
	 * indexes were available.
	 */
	if (idx >= tx->len)
		return -1;
	else
		return idx + tx->quad_offset;
}
1228
1229
1230
1231
1232
1233
1234
1235
1236void ice_ptp_process_ts(struct ice_pf *pf)
1237{
1238 if (pf->ptp.port.tx.init)
1239 kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work);
1240}
1241
1242
1243
1244
1245
1246
1247
1248
/**
 * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
 * @tx: Tx tracking structure to initialize
 *
 * Assumes that the length has already been initialized. Do not call
 * directly, use the ice_ptp_init_tx_* instead.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (in which case the
 * tracker is left unallocated).
 */
static int
ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
{
	tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
	if (!tx->tstamps)
		return -ENOMEM;

	tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
	if (!tx->in_use) {
		/* unwind the first allocation on partial failure */
		kfree(tx->tstamps);
		tx->tstamps = NULL;
		return -ENOMEM;
	}

	spin_lock_init(&tx->lock);
	kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);

	tx->init = 1;

	return 0;
}
1270
1271
1272
1273
1274
1275
/**
 * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
 * @pf: Board private structure
 * @tx: the tracker to flush
 *
 * Clear every PHY timestamp slot covered by this tracker and drop any skbs
 * still awaiting a timestamp.
 */
static void
ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	u8 idx;

	for (idx = 0; idx < tx->len; idx++) {
		u8 phy_idx = idx + tx->quad_offset;

		/* Clear any potential residual timestamp in the PHY block,
		 * unless a reset is in progress (registers inaccessible)
		 */
		if (!pf->hw.reset_ongoing)
			ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);

		if (tx->tstamps[idx].skb) {
			dev_kfree_skb_any(tx->tstamps[idx].skb);
			tx->tstamps[idx].skb = NULL;
		}
	}
}
1294
1295
1296
1297
1298
1299
1300
1301
1302static void
1303ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
1304{
1305 tx->init = 0;
1306
1307 kthread_cancel_work_sync(&tx->work);
1308
1309 ice_ptp_flush_tx_tracker(pf, tx);
1310
1311 kfree(tx->tstamps);
1312 tx->tstamps = NULL;
1313
1314 kfree(tx->in_use);
1315 tx->in_use = NULL;
1316
1317 tx->len = 0;
1318}
1319
1320
1321
1322
1323
1324
1325
1326
1327
/**
 * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
 * @pf: Board private structure
 * @tx: the Tx tracking structure to initialize
 *
 * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
 * port has its own block of timestamps, independent of the other ports.
 */
static int
ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
{
	tx->quad = pf->hw.port_info->lport;
	tx->quad_offset = 0;
	tx->len = INDEX_PER_QUAD;

	return ice_ptp_alloc_tx_tracker(tx);
}
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
/**
 * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
 * @tx: PTP Tx tracker to clean up
 *
 * Loop through the Tx timestamp requests and see if any of them have been
 * waiting for a long time. Discard any SKBs that have been waiting for more
 * than 2 seconds, to free up resources in the tracker.
 */
static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
{
	u8 idx;

	if (!tx->init)
		return;

	for_each_set_bit(idx, tx->in_use, tx->len) {
		struct sk_buff *skb;

		/* Check if this SKB has been waiting for too long */
		if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
			continue;

		/* Detach under the lock so the timestamp work cannot report
		 * the skb after it has been freed here
		 */
		spin_lock(&tx->lock);
		skb = tx->tstamps[idx].skb;
		tx->tstamps[idx].skb = NULL;
		clear_bit(idx, tx->in_use);
		spin_unlock(&tx->lock);

		/* Free the SKB after we've cleared the bit */
		dev_kfree_skb_any(skb);
	}
}
1372
/**
 * ice_ptp_periodic_work - Delayed work that keeps PTP state fresh
 * @work: pointer to the kthread_work embedded in struct ice_ptp
 *
 * Periodically refresh the cached PHC time and discard stale Tx timestamp
 * requests, then re-arm itself. Runs every 500 ms while PTP is enabled.
 */
static void ice_ptp_periodic_work(struct kthread_work *work)
{
	struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
	struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);

	if (!test_bit(ICE_FLAG_PTP, pf->flags))
		return;

	ice_ptp_update_cached_phctime(pf);

	ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);

	/* Run twice a second */
	kthread_queue_delayed_work(ptp->kworker, &ptp->work,
				   msecs_to_jiffies(500));
}
1389
1390
1391
1392
1393
1394
1395
1396
1397
/**
 * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
 * @pf: Board private structure
 *
 * Setup and initialize a PTP clock device that represents the device
 * hardware clock. Save the clock index for other functions connected to
 * the same hardware resource.
 */
static int ice_ptp_init_owner(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	struct timespec64 ts;
	u8 src_idx;
	int err;

	wr32(hw, GLTSYN_SYNC_DLAY, 0);

	/* Clear some HW residue and enable source clock */
	src_idx = hw->func_caps.ts_func_info.tmr_index_owned;

	/* Enable source clocks */
	wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);

	/* Initialize the PHY before acquiring the semaphore */
	err = ice_ptp_init_phy_e810(hw);
	if (err)
		goto err_exit;

	/* Clear event status indications for auxiliary pins */
	(void)rd32(hw, GLTSYN_STAT(src_idx));

	/* Acquire the global hardware lock */
	if (!ice_ptp_lock(hw)) {
		err = -EBUSY;
		goto err_exit;
	}

	/* Write the increment time value to PHY and LAN */
	err = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	ts = ktime_to_timespec64(ktime_get_real());
	/* Write the initial Time value to PHY and LAN */
	err = ice_ptp_write_init(pf, &ts);
	if (err) {
		ice_ptp_unlock(hw);
		goto err_exit;
	}

	/* Release the global hardware lock */
	ice_ptp_unlock(hw);

	/* Ensure we have a clock device */
	err = ice_ptp_create_clock(pf);
	if (err)
		goto err_clk;

	/* Store the PTP clock index for other PFs */
	ice_set_ptp_clock_index(pf);

	return 0;

err_clk:
	pf->ptp.clock = NULL;
err_exit:
	dev_err(dev, "PTP failed to register clock, err %d\n", err);

	return err;
}
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472void ice_ptp_init(struct ice_pf *pf)
1473{
1474 struct device *dev = ice_pf_to_dev(pf);
1475 struct kthread_worker *kworker;
1476 struct ice_hw *hw = &pf->hw;
1477 int err;
1478
1479
1480 if (!ice_is_e810(hw))
1481 return;
1482
1483
1484 if (hw->func_caps.ts_func_info.src_tmr_owned) {
1485 err = ice_ptp_init_owner(pf);
1486 if (err)
1487 return;
1488 }
1489
1490
1491 ice_ptp_cfg_timestamp(pf, false);
1492
1493
1494 ice_ptp_init_tx_e810(pf, &pf->ptp.port.tx);
1495
1496
1497 kthread_init_delayed_work(&pf->ptp.work, ice_ptp_periodic_work);
1498 kthread_init_work(&pf->ptp.extts_work, ice_ptp_extts_work);
1499
1500
1501
1502
1503 kworker = kthread_create_worker(0, "ice-ptp-%s", dev_name(dev));
1504 if (IS_ERR(kworker)) {
1505 err = PTR_ERR(kworker);
1506 goto err_kworker;
1507 }
1508 pf->ptp.kworker = kworker;
1509
1510 set_bit(ICE_FLAG_PTP, pf->flags);
1511
1512
1513 kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
1514
1515 dev_info(dev, "PTP init successful\n");
1516 return;
1517
1518err_kworker:
1519
1520 if (pf->ptp.clock) {
1521 ptp_clock_unregister(pf->ptp.clock);
1522 pf->ptp.clock = NULL;
1523 }
1524 dev_err(dev, "PTP failed %d\n", err);
1525}
1526
1527
1528
1529
1530
1531
1532
1533
/**
 * ice_ptp_release - Disable the driver/HW support and unregister the clock
 * @pf: Board private structure
 *
 * This function handles the cleanup work required from the initialization by
 * clearing out the important information and unregistering the clock.
 */
void ice_ptp_release(struct ice_pf *pf)
{
	/* Disable timestamping for both Tx and Rx */
	ice_ptp_cfg_timestamp(pf, false);

	ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);

	clear_bit(ICE_FLAG_PTP, pf->flags);

	kthread_cancel_delayed_work_sync(&pf->ptp.work);

	if (pf->ptp.kworker) {
		kthread_destroy_worker(pf->ptp.kworker);
		pf->ptp.kworker = NULL;
	}

	if (!pf->ptp.clock)
		return;

	/* Clear the shared clock index so other PFs stop reporting it */
	ice_clear_ptp_clock_index(pf);
	ptp_clock_unregister(pf->ptp.clock);
	pf->ptp.clock = NULL;

	dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}
1559