/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef __ASM_SH_CHECKSUM_H
#define __ASM_SH_CHECKSUM_H

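/*
 * Copyright (C) 1999 by Kaz Kojima & Niibe Yutaka
 */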
#include <linux/in6.h>

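/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */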
asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);

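/*
 * the same as csum_partial, but copies from src while it checksums,
 * and handles user-space pointer exceptions correctly, when needed.
 *
 * here it is even more important to align src and dst on a 32-bit
 * (or even better 64-bit) boundary
 */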
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);

#define _HAVE_ARCH_CSUM_AND_COPY
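/*
 * Note: when you get a NULL pointer exception here this means someone
 * passed in an incorrect kernel address to one of these functions.
 *
 * If you use these functions directly please don't forget the
 * access_ok().
 */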
static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
	return csum_partial_copy_generic(src, dst, len);
}

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
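/*
 * Checksum and copy from user space in one pass.  Returns 0 when the
 * access_ok() check fails; callers must treat that as a fault.
 */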
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
	if (!access_ok(src, len))
		return 0;
	return csum_partial_copy_generic((__force const void *)src, dst, len);
}

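/*
 * Fold a partial checksum: add the two 16-bit halves of the 32-bit
 * accumulator (propagating the carry) and return the complement.
 */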
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int __dummy;
	__asm__("swap.w %0, %1\n\t"	/* __dummy = sum with halves swapped */
		"extu.w	%0, %0\n\t"	/* sum = low 16 bits */
		"extu.w	%1, %1\n\t"	/* __dummy = high 16 bits */
		"add	%1, %0\n\t"	/* sum = low + high (may carry into bit 16) */
		"swap.w	%0, %1\n\t"	/* low half of __dummy = that carry */
		"add	%1, %0\n\t"	/* fold the carry back in */
		"not	%0, %0\n\t"	/* one's complement */
		: "=r" (sum), "=&r" (__dummy)
		: "0" (sum)
		: "t");
	return (__force __sum16)sum;
}

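/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries, i.e. ihl >= 5.
 *
 * i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
 * for linux by Arnt Gulbrandsen.
 */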
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum, __dummy0, __dummy1;

	__asm__ __volatile__(
		"mov.l	@%1+, %0\n\t"	/* sum = first word of the header */
		"mov.l	@%1+, %3\n\t"	/* fetch the second word */
		"add	#-2, %2\n\t"	/* two words already consumed */
		"clrt\n\t"
		"1:\t"
		"addc	%3, %0\n\t"	/* accumulate with carry */
		"movt	%4\n\t"		/* save T (the carry) across dt */
		"mov.l	@%1+, %3\n\t"
		"dt	%2\n\t"		/* --ihl, sets T when it hits zero */
		"bf/s	1b\n\t"
		" cmp/eq #1, %4\n\t"	/* delay slot: restore the carry into T */
		"addc	%3, %0\n\t"
		"addc	%2, %0"		/* here %2 is 0, add carry-bit */
	/* Since the input registers which are loaded with iph and ihl
	   are modified, we must also specify them as outputs, or gcc
	   will assume they contain their original values. */
	: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (__dummy0), "=&z" (__dummy1)
	: "1" (iph), "2" (ihl)
	: "t", "memory");

	return csum_fold(sum);
}
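/*
 * Usage sketch (illustrative only, not part of this header): a valid
 * IPv4 header sums to zero, so a receive path can verify it with
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *
 *	if (ip_fast_csum(iph, iph->ihl))
 *		goto csum_error;
 *
 * where "skb" and the "csum_error" label are the caller's own.
 */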
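/*
 * Computes the 32-bit one's-complement sum of the TCP/UDP pseudo-header
 * (source address, destination address, protocol and length) without
 * folding it to 16 bits; feed the result to csum_fold(), or pass it as
 * the initial sum to csum_partial().
 */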
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
#ifdef __LITTLE_ENDIAN__
	unsigned long len_proto = (proto + len) << 8;	/* match network byte order */
#else
	unsigned long len_proto = proto + len;
#endif
	__asm__("clrt\n\t"
		"addc	%0, %1\n\t"	/* len_proto += sum */
		"addc	%2, %1\n\t"	/* len_proto += daddr, with carry */
		"addc	%3, %1\n\t"	/* len_proto += saddr, with carry */
		"movt	%0\n\t"		/* sum = final carry */
		"add	%1, %0"		/* sum += accumulated words */
		: "=r" (sum), "=r" (len_proto)
		: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum)
		: "t");

	return sum;
}

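/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */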
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					__u32 len, __u8 proto,
					__wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
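/*
 * Usage sketch (illustrative only, not part of this header): a UDP
 * transmit path would typically checksum the datagram with
 * csum_partial() and then fold in the pseudo-header:
 *
 *	udph->check = 0;
 *	udph->check = csum_tcpudp_magic(saddr, daddr, ulen, IPPROTO_UDP,
 *					csum_partial(udph, ulen, 0));
 *	if (udph->check == 0)
 *		udph->check = CSUM_MANGLED_0;
 *
 * where "udph", "ulen", "saddr" and "daddr" are the caller's own.
 */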
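/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */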
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
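/*
 * Computes the IPv6 pseudo-header checksum: the 128-bit source and
 * destination addresses are accumulated word by word, then the length
 * and protocol are added in network byte order and the total is folded.
 */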
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, __u8 proto, __wsum sum)
{
	unsigned int __dummy;
	__asm__("clrt\n\t"
		"mov.l	@(0,%2), %1\n\t"	/* the four words of saddr... */
		"addc	%1, %0\n\t"
		"mov.l	@(4,%2), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(8,%2), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(12,%2), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(0,%3), %1\n\t"	/* ...then the four words of daddr */
		"addc	%1, %0\n\t"
		"mov.l	@(4,%3), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(8,%3), %1\n\t"
		"addc	%1, %0\n\t"
		"mov.l	@(12,%3), %1\n\t"
		"addc	%1, %0\n\t"
		"addc	%4, %0\n\t"	/* + htonl(len) */
		"addc	%5, %0\n\t"	/* + htonl(proto) */
		"movt	%1\n\t"
		"add	%1, %0\n"	/* fold in the final carry */
		: "=r" (sum), "=&r" (__dummy)
		: "r" (saddr), "r" (daddr),
		  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
		: "t");

	return csum_fold(sum);
}
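/*
 * Copy and checksum to user
 */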
#define HAVE_CSUM_COPY_USER
static inline __wsum csum_and_copy_to_user(const void *src,
					   void __user *dst,
					   int len)
{
	if (!access_ok(dst, len))
		return 0;
	return csum_partial_copy_generic((__force const void *)src, dst, len);
}
#endif /* __ASM_SH_CHECKSUM_H */