/* octeon_main.h -- Host Driver: common definitions included by all
 * LiquidIO host driver source files.
 */
#ifndef _OCTEON_MAIN_H_
#define _OCTEON_MAIN_H_

#include <linux/sched/signal.h>

/* Cast to long long so 64-bit counters can be printed with %llu on both
 * 32-bit and 64-bit kernels.
 */
#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
#elif BITS_PER_LONG == 64
#define CVM_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif
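
/* Usage sketch: CVM_CAST64() keeps format strings portable when logging
 * device counters (the statistics field below is illustrative):
 *
 *	dev_info(&oct->pci_dev->dev, "instr posted: %llu\n",
 *		 CVM_CAST64(iq->stats.instr_posted));
 */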

#define DRV_NAME "LiquidIO"

/** Driver-private state attached to each Octeon device. */
struct octeon_device_priv {
	/** Tasklet for processing this device's output (Rx) queues. */
	struct tasklet_struct droq_tasklet;
	unsigned long napi_mask;
	struct octeon_device *dev;
};

/** Bookkeeping for a transmit buffer, used to free it once the device
 * reports completion.
 */
struct octnet_buf_free_info {
	/** Pointer to the network device's private structure. */
	struct lio *lio;

	/** Pointer to the transmitted sk_buff. */
	struct sk_buff *skb;

	/** Pointer to the gather list, if scatter-gather was used. */
	struct octnic_gather *g;

	/** DMA-mapped address of the skb data or of the gather list. */
	u64 dptr;

	/** Pointer to the soft command, if this buffer carried one. */
	struct octeon_soft_command *sc;
};
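
/* Usage sketch: on the transmit path the driver keeps this bookkeeping in
 * the skb's control buffer so it can be recovered at completion time
 * (a minimal illustration, not the full transmit logic):
 *
 *	struct octnet_buf_free_info *finfo;
 *
 *	finfo = (struct octnet_buf_free_info *)skb->cb;
 *	finfo->lio = lio;
 *	finfo->skb = skb;
 */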

int octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl);
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);

void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
				  struct octeon_droq *droq);

/** Swap consecutive 8-byte blocks of data to big-endian byte order,
 * in place.
 */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
	while (blocks) {
		cpu_to_be64s(data);
		blocks--;
		data++;
	}
}
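
/* Usage sketch: response data arrives from the device as a stream of
 * 8-byte words that may need swapping before the host parses it
 * (hypothetical buffer and length, for illustration only):
 *
 *	u64 resp[4];
 *
 *	memcpy(resp, pkt_data, sizeof(resp));
 *	octeon_swap_8B_data(resp, ARRAY_SIZE(resp));
 */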

/**
 * octeon_unmap_pci_barx - unmap a PCI BAR
 * @oct: Pointer to Octeon device
 * @baridx: BAR index
 */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
	dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
		baridx);

	if (oct->mmio[baridx].done)
		iounmap(oct->mmio[baridx].hw_addr);

	if (oct->mmio[baridx].start)
		pci_release_region(oct->pci_dev, baridx * 2);
}

/**
 * octeon_map_pci_barx - map a PCI BAR for register access
 * @oct: Pointer to Octeon device
 * @baridx: BAR index
 * @max_map_len: Maximum length to map; 0 maps the entire BAR
 *
 * Return: 0 on success, 1 on failure.
 */
static inline int octeon_map_pci_barx(struct octeon_device *oct,
				      int baridx, int max_map_len)
{
	u32 mapped_len = 0;

	if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
		dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
			baridx);
		return 1;
	}

	oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
	oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);

	mapped_len = oct->mmio[baridx].len;
	if (!mapped_len)
		goto err_release_region;

	if (max_map_len && (mapped_len > max_map_len))
		mapped_len = max_map_len;

	oct->mmio[baridx].hw_addr =
		ioremap(oct->mmio[baridx].start, mapped_len);
	oct->mmio[baridx].mapped_len = mapped_len;

	dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
		baridx, oct->mmio[baridx].start, mapped_len,
		oct->mmio[baridx].len);

	if (!oct->mmio[baridx].hw_addr) {
		dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
			baridx);
		goto err_release_region;
	}
	oct->mmio[baridx].done = 1;

	return 0;

err_release_region:
	pci_release_region(oct->pci_dev, baridx * 2);
	return 1;
}
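
/* Usage sketch: a device-specific setup routine typically maps the BARs it
 * needs at probe time and unwinds on failure (BAR indexes and the map
 * length constant below are illustrative, not the exact setup code):
 *
 *	if (octeon_map_pci_barx(oct, 0, 0))
 *		return 1;
 *
 *	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_MAP_LEN)) {
 *		octeon_unmap_pci_barx(oct, 0);
 *		return 1;
 *	}
 */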

/**
 * wait_for_sc_completion_timeout - wait for a soft command to complete
 * @oct_dev: Pointer to Octeon device
 * @sc: Pointer to the soft command
 * @timeout: Milliseconds to wait for a response; 0 means wait indefinitely
 *
 * Return: 0 if the command completed; -ETIME if the wait timed out;
 * -EINTR if the wait was interrupted by a signal; -EBUSY if the device
 * reported a fatal request timeout.
 */
static inline int
wait_for_sc_completion_timeout(struct octeon_device *oct_dev,
			       struct octeon_soft_command *sc,
			       unsigned long timeout)
{
	int errno = 0;
	long timeout_jiff;

	if (timeout)
		timeout_jiff = msecs_to_jiffies(timeout);
	else
		timeout_jiff = MAX_SCHEDULE_TIMEOUT;

	timeout_jiff =
		wait_for_completion_interruptible_timeout(&sc->complete,
							  timeout_jiff);
	if (timeout_jiff == 0) {
		dev_err(&oct_dev->pci_dev->dev, "%s: sc is timeout\n",
			__func__);
		WRITE_ONCE(sc->caller_is_done, true);
		errno = -ETIME;
	} else if (timeout_jiff == -ERESTARTSYS) {
		dev_err(&oct_dev->pci_dev->dev, "%s: sc is interrupted\n",
			__func__);
		WRITE_ONCE(sc->caller_is_done, true);
		errno = -EINTR;
	} else if (sc->sc_status == OCTEON_REQUEST_TIMEOUT) {
		dev_err(&oct_dev->pci_dev->dev, "%s: sc has fatal timeout\n",
			__func__);
		WRITE_ONCE(sc->caller_is_done, true);
		errno = -EBUSY;
	}

	return errno;
}
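
/* Usage sketch: a typical caller initializes the completion, posts the
 * soft command, waits for the response, and marks itself done once it has
 * consumed the response buffer (error codes and the response handling are
 * illustrative):
 *
 *	init_completion(&sc->complete);
 *	sc->sc_status = OCTEON_REQUEST_PENDING;
 *
 *	if (octeon_send_soft_command(oct, sc) == IQ_SEND_FAILED)
 *		return -EIO;
 *
 *	if (wait_for_sc_completion_timeout(oct, sc, 0))
 *		return -EIO;
 *
 *	... parse the response at sc->virtrptr ...
 *	WRITE_ONCE(sc->caller_is_done, true);
 */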

/* Round up to the next multiple of 4/8/16/128.  Note: the masks assume
 * 32-bit values.
 */
#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
#endif

#ifndef ROUNDUP8
#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
#endif

#ifndef ROUNDUP16
#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
#endif

#ifndef ROUNDUP128
#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
#endif
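
/* Worked example: each macro adds (alignment - 1) and masks off the low
 * bits, so ROUNDUP8(13) == 16 while ROUNDUP8(16) == 16 (already-aligned
 * values are unchanged).
 */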

#endif /* _OCTEON_MAIN_H_ */