/*
 * arch/arm/include/asm/checksum.h
 *
 * IP checksum routines
 */
#ifndef __ASM_ARM_CHECKSUM_H
#define __ASM_ARM_CHECKSUM_H

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum sum);
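
/*
 * Typical usage (a sketch only): partial sums can be chained by feeding
 * the previous result back in as "sum", e.g. when the data to checksum
 * is split across two buffers ("buf1"/"buf2" and their lengths are
 * hypothetical names):
 *
 *	__wsum sum;
 *
 *	sum = csum_partial(buf1, len1, 0);
 *	sum = csum_partial(buf2, len2, sum);
 */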

/*
 * the same as csum_partial, but copies from src while it checksums
 *
 * here it is even more important to align src and dst on a 32-bit (or
 * even better 64-bit) boundary
 */
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len);

__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
#define _HAVE_ARCH_CSUM_AND_COPY
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
{
	if (!access_ok(src, len))
		return 0;

	return csum_partial_copy_from_user(src, dst, len);
}

/*
 * Fold a partial checksum without adding pseudo headers
 */
static inline __sum16 csum_fold(__wsum sum)
{
	__asm__(
	"add	%0, %1, %1, ror #16	@ csum_fold"
	: "=r" (sum)
	: "r" (sum)
	: "cc");
	return (__force __sum16)(~(__force u32)sum >> 16);
}
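
/*
 * For reference only (a sketch, not compiled): the fold above is
 * equivalent to the following plain C, assuming ror32() from
 * <linux/bitops.h>.  The rotate-and-add leaves the end-around-carry
 * sum of the two halfwords in the upper 16 bits, which is then
 * complemented and shifted down:
 *
 *	u32 tmp = (__force u32)sum;
 *
 *	tmp += ror32(tmp, 16);
 *	return (__force __sum16)(~tmp >> 16);
 */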

/*
 *	This is a version of ip_compute_csum() optimized for IP headers,
 *	which always checksums on 4 octet boundaries.
 */
static inline __sum16
ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int tmp1;
	__wsum sum;

	__asm__ __volatile__(
	"ldr	%0, [%1], #4		@ ip_fast_csum		\n\
	ldr	%3, [%1], #4					\n\
	sub	%2, %2, #5					\n\
	adds	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
	adcs	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
1:	adcs	%0, %0, %3					\n\
	ldr	%3, [%1], #4					\n\
	tst	%2, #15			@ do this carefully	\n\
	subne	%2, %2, #1		@ without destroying	\n\
	bne	1b			@ the carry flag	\n\
	adcs	%0, %0, %3					\n\
	adc	%0, %0, #0"
	: "=r" (sum), "=r" (iph), "=r" (ihl), "=r" (tmp1)
	: "1" (iph), "2" (ihl)
	: "cc", "memory");
	return csum_fold(sum);
}
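
/*
 * Illustrative only (a sketch, not compiled): the loop above sums the
 * ihl 32-bit words of the IP header with end-around carry and then
 * folds the result.  A plain C equivalent, using a 64-bit accumulator
 * to absorb the carries:
 *
 *	const __be32 *word = iph;
 *	u64 tmp = 0;
 *	unsigned int i;
 *
 *	for (i = 0; i < ihl; i++)
 *		tmp += (__force u32)word[i];
 *	tmp = (tmp & 0xffffffff) + (tmp >> 32);
 *	tmp = (tmp & 0xffffffff) + (tmp >> 32);
 *	return csum_fold((__force __wsum)tmp);
 */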

static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
		   __u8 proto, __wsum sum)
{
	u32 lenprot = len + proto;

	if (__builtin_constant_p(sum) && sum == 0) {
		__asm__(
		"adds	%0, %1, %2	@ csum_tcpudp_nofold0	\n\t"
#ifdef __ARMEB__
		"adcs	%0, %0, %3				\n\t"
#else
		"adcs	%0, %0, %3, ror #8			\n\t"
#endif
		"adc	%0, %0, #0"
		: "=&r" (sum)
		: "r" (daddr), "r" (saddr), "r" (lenprot)
		: "cc");
	} else {
		__asm__(
		"adds	%0, %1, %2	@ csum_tcpudp_nofold	\n\t"
		"adcs	%0, %0, %3				\n\t"
#ifdef __ARMEB__
		"adcs	%0, %0, %4				\n\t"
#else
		"adcs	%0, %0, %4, ror #8			\n\t"
#endif
		"adc	%0, %0, #0"
		: "=&r" (sum)
		: "r" (sum), "r" (daddr), "r" (saddr), "r" (lenprot)
		: "cc");
	}
	return sum;
}
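
/*
 * Illustrative only (a sketch, not compiled): the assembly above adds
 * the pseudo-header fields into the running sum.  In plain C terms,
 * with "fold64" standing for folding a 64-bit accumulator back into
 * 32 bits with end-around carry:
 *
 *	u64 s = (__force u32)sum;
 *
 *	s += (__force u32)saddr;
 *	s += (__force u32)daddr;
 *  #ifdef __ARMEB__
 *	s += proto + len;
 *  #else
 *	s += (proto + len) << 8;
 *  #endif
 *	return (__force __wsum)fold64(s);
 */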

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
		  __u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
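
/*
 * Typical usage (a sketch; "th" and "len" are hypothetical names for a
 * TCP header pointer and segment length): checksum the segment with
 * csum_partial() and let csum_tcpudp_magic() add the pseudo-header and
 * fold the result:
 *
 *	th->check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP,
 *				      csum_partial(th, len, 0));
 */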

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16
ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
extern __wsum
__csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
		  __be32 len, __be32 proto, __wsum sum);

static inline __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
		__u32 len, __u8 proto, __wsum sum)
{
	return csum_fold(__csum_ipv6_magic(saddr, daddr, htonl(len),
					   htonl(proto), sum));
}
#endif