1
2
3
4#include <linux/if_ether.h>
5#include <linux/delay.h>
6#include <linux/pci.h>
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>
9
10#include "e1000_mac.h"
11
12#include "igb.h"
13
14static s32 igb_set_default_fc(struct e1000_hw *hw);
15static void igb_set_fc_watermarks(struct e1000_hw *hw);
16
17
18
19
20
21
22
23
24
25s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
26{
27 struct e1000_bus_info *bus = &hw->bus;
28 s32 ret_val;
29 u32 reg;
30 u16 pcie_link_status;
31
32 bus->type = e1000_bus_type_pci_express;
33
34 ret_val = igb_read_pcie_cap_reg(hw,
35 PCI_EXP_LNKSTA,
36 &pcie_link_status);
37 if (ret_val) {
38 bus->width = e1000_bus_width_unknown;
39 bus->speed = e1000_bus_speed_unknown;
40 } else {
41 switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
42 case PCI_EXP_LNKSTA_CLS_2_5GB:
43 bus->speed = e1000_bus_speed_2500;
44 break;
45 case PCI_EXP_LNKSTA_CLS_5_0GB:
46 bus->speed = e1000_bus_speed_5000;
47 break;
48 default:
49 bus->speed = e1000_bus_speed_unknown;
50 break;
51 }
52
53 bus->width = (enum e1000_bus_width)((pcie_link_status &
54 PCI_EXP_LNKSTA_NLW) >>
55 PCI_EXP_LNKSTA_NLW_SHIFT);
56 }
57
58 reg = rd32(E1000_STATUS);
59 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
60
61 return 0;
62}
63
64
65
66
67
68
69
70
71void igb_clear_vfta(struct e1000_hw *hw)
72{
73 u32 offset;
74
75 for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;)
76 hw->mac.ops.write_vfta(hw, offset, 0);
77}
78
79
80
81
82
83
84
85
86
87
/**
 *  igb_write_vfta - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes @value at @offset in the VLAN filter table array, flushes the
 *  write, and then mirrors it into the driver's shadow copy so software
 *  never has to read the table back from hardware.
 **/
void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	struct igb_adapter *adapter = hw->back;

	array_wr32(E1000_VFTA, offset, value);
	wrfl();

	/* keep the software shadow in sync with what was just written */
	adapter->shadow_vfta[offset] = value;
}
97
98
99
100
101
102
103
104
105
106
107void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
108{
109 u32 i;
110 u8 mac_addr[ETH_ALEN] = {0};
111
112
113 hw_dbg("Programming MAC Address into RAR[0]\n");
114
115 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
116
117
118 hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
119 for (i = 1; i < rar_count; i++)
120 hw->mac.ops.rar_set(hw, mac_addr, i);
121}
122
123
124
125
126
127
128
129
130
131
/**
 *  igb_find_vlvf_slot - find the VLAN id or the first empty slot
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vlvf_bypass: skip VLVF if no match is found
 *
 *  Returns the index of a matching VLVF entry, the index of the first
 *  empty entry when no match exists (unless @vlvf_bypass is set), or
 *  -E1000_ERR_NO_SPACE when the table is full.  Slot 0 is reserved for
 *  VLAN 0.
 **/
static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
{
	s32 regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case: VLAN 0 always lives in slot 0 */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, so
	 * pre-load the error value; otherwise 0 means "none found yet"
	 * (slot 0 can never be a valid empty slot, see above)
	 */
	first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0;

	/* Search from the top of the table down to (but not including)
	 * slot 0, remembering the first empty slot encountered.  A match
	 * on the VLAN id wins immediately.
	 */
	for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) {
		bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK;
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	/* no match: hand back the empty slot, or no-space if none found */
	return first_empty_slot ? : -E1000_ERR_NO_SPACE;
}
162
163
164
165
166
167
168
169
170
171
172
173
/**
 *  igb_vfta_set - enable or disable vlan in VLAN filter table
 *  @hw: pointer to the HW structure
 *  @vlan: VLAN id to add or remove
 *  @vind: VMDq output index that maps queue to VLAN id
 *  @vlan_on: if true add filter, if false remove
 *  @vlvf_bypass: skip VLVF if no match is found
 *
 *  Sets or clears a bit in the VLAN filter table array based on VLAN id
 *  and, when VFs are in use, maintains the corresponding VLVF pool entry.
 *  Returns 0 on success, -E1000_ERR_PARAM on bad arguments, or a negative
 *  error from igb_find_vlvf_slot().
 **/
s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
		 bool vlan_on, bool vlvf_bypass)
{
	struct igb_adapter *adapter = hw->back;
	u32 regidx, vfta_delta, vfta, bits;
	s32 vlvf_index;

	/* VLAN ids are 12 bits; only 8 pools are supported */
	if ((vlan > 4095) || (vind > 7))
		return -E1000_ERR_PARAM;

	/* The VFTA is a 128-entry bitmap: the upper 7 bits of the VLAN id
	 * select the 32-bit register, the lower 5 bits select the bit.
	 * Work against the software shadow rather than reading hardware.
	 */
	regidx = vlan / 32;
	vfta_delta = BIT(vlan % 32);
	vfta = adapter->shadow_vfta[regidx];

	/* vfta_delta is nonzero only if the bit actually needs to change:
	 * adding and the bit is clear, or removing and the bit is set.
	 * XOR then produces the new VFTA word in either direction.
	 */
	vfta_delta &= vlan_on ? ~vfta : vfta;
	vfta ^= vfta_delta;

	/* The VLVF pool table only matters when VFs are allocated */
	if (!adapter->vfs_allocated_count)
		goto vfta_update;

	vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass);
	if (vlvf_index < 0) {
		/* with bypass we tolerate a missing VLVF slot and still
		 * update the VFTA; otherwise propagate the error
		 */
		if (vlvf_bypass)
			goto vfta_update;
		return vlvf_index;
	}

	bits = rd32(E1000_VLVF(vlvf_index));

	/* set the pool bit for this VF */
	bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
	if (vlan_on)
		goto vlvf_update;

	/* removing: undo the set above to clear this pool's bit */
	bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);

	if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
		/* No pools reference this VLAN any more.  Commit the VFTA
		 * change first so frames stop matching, then release the
		 * VLVF entry entirely.
		 */
		if (vfta_delta)
			hw->mac.ops.write_vfta(hw, regidx, vfta);

		/* disable the VLVF entry */
		wr32(E1000_VLVF(vlvf_index), 0);

		return 0;
	}

	/* Other pools still use this VLAN, so the VFTA bit must stay set
	 * regardless of what this caller asked for: suppress the VFTA
	 * write by zeroing the delta.
	 */
	vfta_delta = 0;

vlvf_update:
	/* record pool membership, VLAN id, and mark the entry valid */
	wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE);

vfta_update:
	/* commit the VFTA bitmap change, if any */
	if (vfta_delta)
		hw->mac.ops.write_vfta(hw, regidx, vfta);

	return 0;
}
275
276
277
278
279
280
281
282
283
284
285
286
/**
 *  igb_check_alt_mac_addr - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the NVM for an alternate MAC address.  An alternate MAC address
 *  can be set up by pre-boot software, and if found (and valid) it is
 *  programmed into RAR[0], replacing the permanent address.  Returns 0 on
 *  success or when no alternate address exists; returns an NVM error code
 *  if a read fails.
 **/
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/* Alternate MAC address is handled by the option ROM for 82580
	 * and newer. SW support not required.
	 */
	if (hw->mac.type >= e1000_82580)
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				   &nvm_alt_mac_addr_offset);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	/* 0xFFFF (erased) or 0x0000 means no alternate address present */
	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no Alternate MAC Address */
		goto out;

	/* each LAN function has its own alternate address slot in NVM */
	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	if (hw->bus.func == E1000_FUNC_2)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;

	if (hw->bus.func == E1000_FUNC_3)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
	/* read the address two bytes (one NVM word) at a time */
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* a multicast bit in a unicast address is invalid - ignore it */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/* We have a valid alternate MAC address, and we want to treat it
	 * the same as the normal permanent MAC address stored by the HW
	 * into the RAR. Do this by mapping this address into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}
346
347
348
349
350
351
352
353
354
355
/**
 *  igb_rar_set - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address (6 bytes)
 *  @index: receive address array register to program
 *
 *  Sets the receive address array register at @index to the address passed
 *  in by @addr.  The Address Valid bit is set only for a non-zero address,
 *  so writing all-zeros effectively disables the entry.
 **/
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Some bridges will combine consecutive 32-bit writes into
	 * a single burst write, which will malfunction on some parts.
	 * The flushes avoid this: RAL must land before RAH (which
	 * carries the valid bit) so the hardware never matches on a
	 * half-written address.
	 */
	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
382
383
384
385
386
387
388
389
390
391
392
/**
 *  igb_mta_set - Set multicast filter table address
 *  @hw: pointer to the HW structure
 *  @hash_value: determines the MTA register and bit to set
 *
 *  Read-modify-writes the multicast table array register selected by the
 *  upper bits of @hash_value, setting the bit selected by the lower 5 bits.
 **/
void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg, mta;

	/* The MTA is a register array of 32-bit registers. It is
	 * treated like an array of (32*mta_reg_count) bits.  We want to
	 * set bit hash_value.  The register is determined by the
	 * upper bits of the hash value and the bit within that
	 * register is determined by the lower 5 bits of the value.
	 */
	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
	hash_bit = hash_value & 0x1F;

	mta = array_rd32(E1000_MTA, hash_reg);

	mta |= BIT(hash_bit);

	array_wr32(E1000_MTA, hash_reg, mta);
	wrfl();
}
416
417
418
419
420
421
422
423
424
425
426static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
427{
428 u32 hash_value, hash_mask;
429 u8 bit_shift = 0;
430
431
432 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
433
434
435
436
437 while (hash_mask >> bit_shift != 0xFF)
438 bit_shift++;
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465 switch (hw->mac.mc_filter_type) {
466 default:
467 case 0:
468 break;
469 case 1:
470 bit_shift += 1;
471 break;
472 case 2:
473 bit_shift += 2;
474 break;
475 case 3:
476 bit_shift += 4;
477 break;
478 }
479
480 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
481 (((u16) mc_addr[5]) << bit_shift)));
482
483 return hash_value;
484}
485
486
487
488
489
490
491
492
493static void igb_i21x_hw_doublecheck(struct e1000_hw *hw)
494{
495 bool is_failed;
496 int i;
497
498 do {
499 is_failed = false;
500 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) {
501 if (array_rd32(E1000_MTA, i) != hw->mac.mta_shadow[i]) {
502 is_failed = true;
503 array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
504 wrfl();
505 break;
506 }
507 }
508 } while (is_failed);
509}
510
511
512
513
514
515
516
517
518
519
520void igb_update_mc_addr_list(struct e1000_hw *hw,
521 u8 *mc_addr_list, u32 mc_addr_count)
522{
523 u32 hash_value, hash_bit, hash_reg;
524 int i;
525
526
527 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
528
529
530 for (i = 0; (u32) i < mc_addr_count; i++) {
531 hash_value = igb_hash_mc_addr(hw, mc_addr_list);
532
533 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
534 hash_bit = hash_value & 0x1F;
535
536 hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
537 mc_addr_list += (ETH_ALEN);
538 }
539
540
541 for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
542 array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
543 wrfl();
544 if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211)
545 igb_i21x_hw_doublecheck(hw);
546}
547
548
549
550
551
552
553
/**
 *  igb_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware statistics counters.  The counters are
 *  read-to-clear, so each rd32() below zeroes the corresponding counter;
 *  the read values are intentionally discarded.
 **/
void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	rd32(E1000_CRCERRS);
	rd32(E1000_SYMERRS);
	rd32(E1000_MPC);
	rd32(E1000_SCC);
	rd32(E1000_ECOL);
	rd32(E1000_MCC);
	rd32(E1000_LATECOL);
	rd32(E1000_COLC);
	rd32(E1000_DC);
	rd32(E1000_SEC);
	rd32(E1000_RLEC);
	rd32(E1000_XONRXC);
	rd32(E1000_XONTXC);
	rd32(E1000_XOFFRXC);
	rd32(E1000_XOFFTXC);
	rd32(E1000_FCRUC);
	rd32(E1000_GPRC);
	rd32(E1000_BPRC);
	rd32(E1000_MPRC);
	rd32(E1000_GPTC);
	rd32(E1000_GORCL);
	rd32(E1000_GORCH);
	rd32(E1000_GOTCL);
	rd32(E1000_GOTCH);
	rd32(E1000_RNBC);
	rd32(E1000_RUC);
	rd32(E1000_RFC);
	rd32(E1000_ROC);
	rd32(E1000_RJC);
	rd32(E1000_TORL);
	rd32(E1000_TORH);
	rd32(E1000_TOTL);
	rd32(E1000_TOTH);
	rd32(E1000_TPR);
	rd32(E1000_TPT);
	rd32(E1000_MPTC);
	rd32(E1000_BPTC);
}
594
595
596
597
598
599
600
601
602
/**
 *  igb_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to see if the link status of the hardware has changed.  If a
 *  change in link status has been detected, then we read the PHY registers
 *  to get the current speed/duplex if link exists, and run the follow-up
 *  MAC configuration (collision distance, flow control).
 **/
s32 igb_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igb_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	igb_check_downshift(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	igb_config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igb_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	return ret_val;
}
663
664
665
666
667
668
669
670
671
672
673
/**
 *  igb_setup_link - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 **/
s32 igb_setup_link(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* In the case of the phy reset being blocked, we already have a
	 * link.  We do not need to set it up again.
	 */
	if (igb_check_reset_block(hw))
		goto out;

	/* If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = igb_set_default_fc(hw);
		if (ret_val)
			goto out;
	}

	/* We want to save off the original Flow Control configuration just
	 * in case we get disconnected and then reconnected into a different
	 * hub or switch with different Flow Control capabilities.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);

	wr32(E1000_FCTTV, hw->fc.pause_time);

	igb_set_fc_watermarks(hw);

out:

	return ret_val;
}
724
725
726
727
728
729
730
731
732
733void igb_config_collision_dist(struct e1000_hw *hw)
734{
735 u32 tctl;
736
737 tctl = rd32(E1000_TCTL);
738
739 tctl &= ~E1000_TCTL_COLD;
740 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
741
742 wr32(E1000_TCTL, tctl);
743 wrfl();
744}
745
746
747
748
749
750
751
752
753
754static void igb_set_fc_watermarks(struct e1000_hw *hw)
755{
756 u32 fcrtl = 0, fcrth = 0;
757
758
759
760
761
762
763
764 if (hw->fc.current_mode & e1000_fc_tx_pause) {
765
766
767
768
769 fcrtl = hw->fc.low_water;
770 if (hw->fc.send_xon)
771 fcrtl |= E1000_FCRTL_XONE;
772
773 fcrth = hw->fc.high_water;
774 }
775 wr32(E1000_FCRTL, fcrtl);
776 wr32(E1000_FCRTH, fcrth);
777}
778
779
780
781
782
783
784
785
786static s32 igb_set_default_fc(struct e1000_hw *hw)
787{
788 s32 ret_val = 0;
789 u16 lan_offset;
790 u16 nvm_data;
791
792
793
794
795
796
797
798
799
800 if (hw->mac.type == e1000_i350)
801 lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
802 else
803 lan_offset = 0;
804
805 ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset,
806 1, &nvm_data);
807 if (ret_val) {
808 hw_dbg("NVM Read Error\n");
809 goto out;
810 }
811
812 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
813 hw->fc.requested_mode = e1000_fc_none;
814 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
815 hw->fc.requested_mode = e1000_fc_tx_pause;
816 else
817 hw->fc.requested_mode = e1000_fc_full;
818
819out:
820 return ret_val;
821}
822
823
824
825
826
827
828
829
830
831
832
833s32 igb_force_mac_fc(struct e1000_hw *hw)
834{
835 u32 ctrl;
836 s32 ret_val = 0;
837
838 ctrl = rd32(E1000_CTRL);
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857 hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
858
859 switch (hw->fc.current_mode) {
860 case e1000_fc_none:
861 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
862 break;
863 case e1000_fc_rx_pause:
864 ctrl &= (~E1000_CTRL_TFCE);
865 ctrl |= E1000_CTRL_RFCE;
866 break;
867 case e1000_fc_tx_pause:
868 ctrl &= (~E1000_CTRL_RFCE);
869 ctrl |= E1000_CTRL_TFCE;
870 break;
871 case e1000_fc_full:
872 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
873 break;
874 default:
875 hw_dbg("Flow control param set incorrectly\n");
876 ret_val = -E1000_ERR_CONFIG;
877 goto out;
878 }
879
880 wr32(E1000_CTRL, ctrl);
881
882out:
883 return ret_val;
884}
885
886
887
888
889
890
891
892
893
894
895
/**
 *  igb_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link up to ensure that the
 *  speed and duplex were not forced.  If the link needed to be forced,
 *  then flow control needs to be forced also.  If auto-negotiation is
 *  enabled and did not fail, then we configure flow control based on our
 *  link partner's advertisement and ours (IEEE 802.3 Annex 28B pause
 *  resolution), for both copper (MII registers) and internal serdes (PCS
 *  registers).
 **/
s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = igb_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = igb_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
			goto out;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* Now we need to check if the user selected RX ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise RX
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF  the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = RX PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}
		/* Per the IEEE spec, at this point flow control should be
		 * disabled.  However, we want to consider that we could
		 * be connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner.  So if we advertised no flow control, that is
		 * what we will resolve to.  If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only.  We can do
		 * this safely for two reasons:  If the link partner really
		 * didn't want flow control enabled, and we enable Rx, no
		 * harm done since we won't be receiving any PAUSE frames
		 * anyway.  If the intent on the link partner was to have
		 * flow control enabled, then by us enabling RX only, we
		 * can at least receive pause frames and process them.
		 * This is a good idea because in most cases, since we are
		 * predominantly a server NIC, more times than not we will
		 * be asked to delay transmission of packets than asking
		 * our link partner to pause transmission of frames.
		 */
		else if ((hw->fc.requested_mode == e1000_fc_none) ||
			 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
			 (hw->fc.strict_ieee)) {
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}
	/* Check for the case where we have SerDes media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes)
		&& mac->autoneg) {
		/* Read the PCS_LSTS and check to see if AutoNeg
		 * has completed.
		 */
		pcs_status_reg = rd32(E1000_PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			hw_dbg("PCS Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (PCS_ANADV) and the Auto_Negotiation Base
		 * Page Ability Register (PCS_LPAB) to determine how
		 * flow control was negotiated.
		 */
		pcs_adv_reg = rd32(E1000_PCS_ANADV);
		pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (PCS_ANADV) and two bits in the Auto Negotiation Base
		 * Page Ability Register (PCS_LPAB) determine flow control
		 * for both the PHY and the link partner.  The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE:  DC = Don't Care
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			  (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			  (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			  (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		wr32(E1000_PCS_LCTL, pcs_ctrl_reg);

		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

out:
	return ret_val;
}
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1227 u16 *duplex)
1228{
1229 u32 status;
1230
1231 status = rd32(E1000_STATUS);
1232 if (status & E1000_STATUS_SPEED_1000) {
1233 *speed = SPEED_1000;
1234 hw_dbg("1000 Mbs, ");
1235 } else if (status & E1000_STATUS_SPEED_100) {
1236 *speed = SPEED_100;
1237 hw_dbg("100 Mbs, ");
1238 } else {
1239 *speed = SPEED_10;
1240 hw_dbg("10 Mbs, ");
1241 }
1242
1243 if (status & E1000_STATUS_FD) {
1244 *duplex = FULL_DUPLEX;
1245 hw_dbg("Full Duplex\n");
1246 } else {
1247 *duplex = HALF_DUPLEX;
1248 hw_dbg("Half Duplex\n");
1249 }
1250
1251 return 0;
1252}
1253
1254
1255
1256
1257
1258
1259
/**
 *  igb_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM in two phases:
 *  first wait for the hardware SMBI bit to clear, then claim software
 *  ownership by setting SWESMBI and reading it back to confirm the set
 *  took.  Returns 0 on success or -E1000_ERR_NVM on timeout.
 **/
s32 igb_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 ret_val = 0;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore: wait for firmware/hardware to drop SMBI */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access device - SMBI bit is set.\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched (read-back confirms) */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores acquired in phase one */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}
1306
1307
1308
1309
1310
1311
1312
1313void igb_put_hw_semaphore(struct e1000_hw *hw)
1314{
1315 u32 swsm;
1316
1317 swsm = rd32(E1000_SWSM);
1318
1319 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1320
1321 wr32(E1000_SWSM, swsm);
1322}
1323
1324
1325
1326
1327
1328
1329
1330s32 igb_get_auto_rd_done(struct e1000_hw *hw)
1331{
1332 s32 i = 0;
1333 s32 ret_val = 0;
1334
1335
1336 while (i < AUTO_READ_DONE_TIMEOUT) {
1337 if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
1338 break;
1339 usleep_range(1000, 2000);
1340 i++;
1341 }
1342
1343 if (i == AUTO_READ_DONE_TIMEOUT) {
1344 hw_dbg("Auto read by HW from NVM has not completed.\n");
1345 ret_val = -E1000_ERR_RESET;
1346 goto out;
1347 }
1348
1349out:
1350 return ret_val;
1351}
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
1362{
1363 s32 ret_val;
1364
1365 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1366 if (ret_val) {
1367 hw_dbg("NVM Read Error\n");
1368 goto out;
1369 }
1370
1371 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1372 switch (hw->phy.media_type) {
1373 case e1000_media_type_internal_serdes:
1374 *data = ID_LED_DEFAULT_82575_SERDES;
1375 break;
1376 case e1000_media_type_copper:
1377 default:
1378 *data = ID_LED_DEFAULT;
1379 break;
1380 }
1381 }
1382out:
1383 return ret_val;
1384}
1385
1386
1387
1388
1389
1390
/**
 *  igb_id_led_init - Initialize LED identification settings
 *  @hw: pointer to the HW structure
 *
 *  Reads the LED default word from NVM and precomputes two LEDCTL values
 *  (mode1 and mode2) from the current LEDCTL register.  Each 4-bit
 *  nibble of the NVM word configures one of four LEDs; for each LED the
 *  corresponding byte of ledctl_mode1/mode2 is forced to ON or OFF as
 *  encoded, or left at its default.  Returns 0 on success or the NVM
 *  read error.
 **/
s32 igb_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	/* i210 and i211 devices have different LED mechanism */
	if ((hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211))
		ret_val = igb_valid_led_default_i210(hw, &data);
	else
		ret_val = igb_valid_led_default(hw, &data);

	if (ret_val)
		goto out;

	mac->ledctl_default = rd32(E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	/* one 4-bit code per LED; one 8-bit LEDCTL field per LED */
	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		/* mode1: LED state when the "1" behavior is selected */
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		/* mode2: LED state when the "2" behavior is selected */
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

out:
	return ret_val;
}
1456
1457
1458
1459
1460
1461
1462
1463
/**
 *  igb_cleanup_led - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Restores the LED control register to the value saved at init time
 *  (by igb_id_led_init), remapping the LEDs to their default state.
 *  Always returns 0.
 **/
s32 igb_cleanup_led(struct e1000_hw *hw)
{
	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
	return 0;
}
1469
1470
1471
1472
1473
1474
1475
/**
 *  igb_blink_led - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Blinks the LED.  For fiber media a fixed blink pattern on LED0 is
 *  written; otherwise every LED that is currently set to ON (taking the
 *  default invert bit into account) is switched to a blinking ON mode.
 *  Always returns 0.
 **/
s32 igb_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that's "on" (LED_ON)
		 * (or "off" if inverted) in ledctl_mode2.  The blink
		 * logic in hardware only works when mode is set to "on"
		 * so it must be changed accordingly when the mode is
		 * "off" and inverted.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	wr32(E1000_LEDCTL, ledctl_blink);

	return 0;
}
1514
1515
1516
1517
1518
1519
1520
1521s32 igb_led_off(struct e1000_hw *hw)
1522{
1523 switch (hw->phy.media_type) {
1524 case e1000_media_type_copper:
1525 wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
1526 break;
1527 default:
1528 break;
1529 }
1530
1531 return 0;
1532}
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545s32 igb_disable_pcie_master(struct e1000_hw *hw)
1546{
1547 u32 ctrl;
1548 s32 timeout = MASTER_DISABLE_TIMEOUT;
1549 s32 ret_val = 0;
1550
1551 if (hw->bus.type != e1000_bus_type_pci_express)
1552 goto out;
1553
1554 ctrl = rd32(E1000_CTRL);
1555 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1556 wr32(E1000_CTRL, ctrl);
1557
1558 while (timeout) {
1559 if (!(rd32(E1000_STATUS) &
1560 E1000_STATUS_GIO_MASTER_ENABLE))
1561 break;
1562 udelay(100);
1563 timeout--;
1564 }
1565
1566 if (!timeout) {
1567 hw_dbg("Master requests are pending.\n");
1568 ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
1569 goto out;
1570 }
1571
1572out:
1573 return ret_val;
1574}
1575
1576
1577
1578
1579
1580
1581
1582
1583s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1584{
1585 s32 ret_val = 0;
1586
1587
1588 if (hw->mac.type >= e1000_82580)
1589 goto out;
1590
1591 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1592 hw_dbg("Invalid MDI setting detected\n");
1593 hw->phy.mdix = 1;
1594 ret_val = -E1000_ERR_CONFIG;
1595 goto out;
1596 }
1597
1598out:
1599 return ret_val;
1600}
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
1614 u32 offset, u8 data)
1615{
1616 u32 i, regvalue = 0;
1617 s32 ret_val = 0;
1618
1619
1620 regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
1621 wr32(reg, regvalue);
1622
1623
1624 for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
1625 udelay(5);
1626 regvalue = rd32(reg);
1627 if (regvalue & E1000_GEN_CTL_READY)
1628 break;
1629 }
1630 if (!(regvalue & E1000_GEN_CTL_READY)) {
1631 hw_dbg("Reg %08x did not indicate ready\n", reg);
1632 ret_val = -E1000_ERR_PHY;
1633 goto out;
1634 }
1635
1636out:
1637 return ret_val;
1638}
1639
1640
1641
1642
1643
1644
1645
1646
1647bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
1648{
1649 u32 manc;
1650 u32 fwsm, factps;
1651 bool ret_val = false;
1652
1653 if (!hw->mac.asf_firmware_present)
1654 goto out;
1655
1656 manc = rd32(E1000_MANC);
1657
1658 if (!(manc & E1000_MANC_RCV_TCO_EN))
1659 goto out;
1660
1661 if (hw->mac.arc_subsystem_valid) {
1662 fwsm = rd32(E1000_FWSM);
1663 factps = rd32(E1000_FACTPS);
1664
1665 if (!(factps & E1000_FACTPS_MNGCG) &&
1666 ((fwsm & E1000_FWSM_MODE_MASK) ==
1667 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
1668 ret_val = true;
1669 goto out;
1670 }
1671 } else {
1672 if ((manc & E1000_MANC_SMBUS_EN) &&
1673 !(manc & E1000_MANC_ASF_EN)) {
1674 ret_val = true;
1675 goto out;
1676 }
1677 }
1678
1679out:
1680 return ret_val;
1681}
1682