16a4e02ccec34600e2166c9edbfc87204db3d718
[reactos.git] / reactos / drivers / lib / ip / network / i386 / checksum.S
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * IP/TCP/UDP checksumming routines
7 *
8 * Authors: Jorge Cwik, <jorge@laser.satlink.net>
9 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
10 * Tom May, <ftom@netcom.com>
11 * Pentium Pro/II routines:
12 * Alexander Kjeldaas <astor@guardian.no>
13 * Finn Arne Gangstad <finnag@guardian.no>
14 * Lots of code moved from tcp.c and ip.c; see those files
15 * for more names.
16 *
17 * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception
18 * handling.
19 * Andi Kleen, add zeroing on error
20 * converted to pure assembler
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 */
27
28 /*
29 * computes a partial checksum, e.g. for TCP/UDP fragments
30 */
31
32 /*
33 unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
34 */
35
36 .text
37 .align 4
38 .globl _csum_partial
39
40 #ifndef CONFIG_X86_USE_PPRO_CHECKSUM
41
42 /*
43 * Experiments with Ethernet and SLIP connections show that buff
44 * is aligned on either a 2-byte or 4-byte boundary. We get at
45 * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
46 * Fortunately, it is easy to convert 2-byte alignment to 4-byte
47 * alignment for the unrolled loop.
48 */
# unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
# i386 / cdecl: all arguments on the stack, 32-bit partial one's-complement
# sum returned in %eax.  %esi and %ebx are callee-saved and preserved below;
# %ecx, %edx and the flags are clobbered.
#
# NOTE(review): the carry flag (CF) is load-bearing throughout this routine.
# `testl` is used deliberately to clear CF before each `adcl` chain, and
# `dec`/`inc` are used (instead of sub/add) because they leave CF untouched.
# Do not reorder instructions here without re-checking the flag dataflow.
49 _csum_partial:
50 pushl %esi
51 pushl %ebx
52 movl 20(%esp),%eax # Function arg: unsigned int sum
53 movl 16(%esp),%ecx # Function arg: int len
54 movl 12(%esp),%esi # Function arg: unsigned char *buff
55 testl $3, %esi # Check alignment.
56 jz 2f # Jump if alignment is ok.
57 testl $1, %esi # Check alignment.
58 jz 10f # Jump if alignment is boundary of 2bytes.
59
60 // buf is odd
# Consume one leading byte to reach 2-byte alignment.  CF is clear here
# (the testl above cleared it), so the adcl acts as a plain add.  The
# roll $8 keeps the byte lanes of the one's-complement sum in phase; the
# rotation is undone at label 7: below when the original pointer was odd.
61 dec %ecx
62 jl 8f
63 movzbl (%esi), %ebx
64 adcl %ebx, %eax
65 roll $8, %eax
66 inc %esi
67 testl $2, %esi
68 jz 2f
# Consume one leading 16-bit word to reach 4-byte alignment.
69 10:
70 subl $2, %ecx # Alignment uses up two bytes.
71 jae 1f # Jump if we had at least two bytes.
72 addl $2, %ecx # ecx was < 2. Deal with it.
73 jmp 4f
74 1: movw (%esi), %bx
75 addl $2, %esi
76 addw %bx, %ax
77 adcl $0, %eax
# Main loop: %esi is now 4-byte aligned.  Save the full remaining length
# in %edx; %ecx becomes the number of 32-byte chunks.
78 2:
79 movl %ecx, %edx
80 shrl $5, %ecx
81 jz 2f
82 testl %esi, %esi # clears CF before the adcl chain (value unused)
# Unrolled inner loop: 8 dword adds with carry per iteration (32 bytes).
# Carries propagate through the whole chain via adcl; dec/jne preserve CF
# across iterations, and the final `adcl $0` folds the last carry in.
83 1: movl (%esi), %ebx
84 adcl %ebx, %eax
85 movl 4(%esi), %ebx
86 adcl %ebx, %eax
87 movl 8(%esi), %ebx
88 adcl %ebx, %eax
89 movl 12(%esi), %ebx
90 adcl %ebx, %eax
91 movl 16(%esi), %ebx
92 adcl %ebx, %eax
93 movl 20(%esi), %ebx
94 adcl %ebx, %eax
95 movl 24(%esi), %ebx
96 adcl %ebx, %eax
97 movl 28(%esi), %ebx
98 adcl %ebx, %eax
99 lea 32(%esi), %esi
100 dec %ecx
101 jne 1b
102 adcl $0, %eax
# Remaining 0-7 full dwords (bits 2-4 of the saved length).
103 2: movl %edx, %ecx
104 andl $0x1c, %edx
105 je 4f
106 shrl $2, %edx # This clears CF
107 3: adcl (%esi), %eax
108 lea 4(%esi), %esi
109 dec %edx
110 jne 3b
111 adcl $0, %eax
# Trailing 1-3 bytes: build them into %ecx (word in the high half when
# 3 bytes remain, low byte otherwise) and fold into the sum.
112 4: andl $3, %ecx
113 jz 7f
114 cmpl $2, %ecx
115 jb 5f
116 movw (%esi),%cx
117 leal 2(%esi),%esi
118 je 6f
119 shll $16,%ecx
120 5: movb (%esi),%cl
121 6: addl %ecx,%eax
122 adcl $0, %eax
# If the original buff pointer (still at 12(%esp)) was odd, undo the
# byte-lane rotation applied at the top.
123 7:
124 testl $1, 12(%esp)
125 jz 8f
126 roll $8, %eax
127 8:
128 popl %ebx
129 popl %esi
130 ret
131
132 #else
133
134 /* Version for PentiumII/PPro */
135
#-----------------------------------------------------------------------
# unsigned int csum_partial(const unsigned char *buf, int len,
#                           unsigned int sum)
# PentiumII/PPro variant (selected by CONFIG_X86_USE_PPRO_CHECKSUM).
# i386 / cdecl: args on the stack, partial sum returned in %eax.
# %esi/%ebx are saved/restored; %ecx, %edx and flags are clobbered.
#
# FIX: the label was `csum_partial` without the leading underscore, so
# when this variant was built, the `.globl _csum_partial` symbol declared
# at the top of the file was never defined and linking failed with an
# unresolved `_csum_partial`.  The label now matches the non-PPro
# implementation.
#-----------------------------------------------------------------------
136 _csum_partial:
137 pushl %esi
138 pushl %ebx
139 movl 20(%esp),%eax # Function arg: unsigned int sum
140 movl 16(%esp),%ecx # Function arg: int len
141 movl 12(%esp),%esi # Function arg: const unsigned char *buf
142
143 testl $3, %esi
144 jnz 25f
# 4-byte aligned: compute a computed-goto entry into the unrolled loop.
# %edx keeps the full length for the 1-3 byte tail; %ebx = number of
# dwords handled by a partial first pass; each `adcl disp(%esi),%eax`
# below encodes to 3 bytes, hence the (%ebx,%ebx,2) scaling.
145 10:
146 movl %ecx, %edx
147 movl %ecx, %ebx
148 andl $0x7c, %ebx
149 shrl $7, %ecx
150 addl %ebx,%esi
151 shrl $2, %ebx
152 negl %ebx
153 lea 45f(%ebx,%ebx,2), %ebx
154 testl %esi, %esi # clears CF before entering the adcl chain
155 jmp *%ebx
156
157 # Handle 2-byte-aligned regions
158 20: addw (%esi), %ax
159 lea 2(%esi), %esi
160 adcl $0, %eax
161 jmp 10b
162 25:
163 testl $1, %esi
164 jz 30f
165 # buf is odd
# Fold the leading odd byte; roll $8 keeps byte lanes in phase and is
# undone at 80: below when the original pointer was odd.
166 dec %ecx
167 jl 90f
168 movzbl (%esi), %ebx
169 addl %ebx, %eax
170 adcl $0, %eax
171 roll $8, %eax
172 inc %esi
173 testl $2, %esi
174 jz 10b
175
176 30: subl $2, %ecx
177 ja 20b
178 je 32f
179 addl $2, %ecx
180 jz 80f
181 movzbl (%esi),%ebx # csumming 1 byte, 2-aligned
182 addl %ebx, %eax
183 adcl $0, %eax
184 jmp 80f
185 32:
186 addw (%esi), %ax # csumming 2 bytes, 2-aligned
187 adcl $0, %eax
188 jmp 80f
189
# Fully unrolled 128-byte loop; %esi already points one stride past the
# data, so all loads use negative displacements.  CF threads through the
# whole adcl chain and is folded in at 45:.
190 40:
191 addl -128(%esi), %eax
192 adcl -124(%esi), %eax
193 adcl -120(%esi), %eax
194 adcl -116(%esi), %eax
195 adcl -112(%esi), %eax
196 adcl -108(%esi), %eax
197 adcl -104(%esi), %eax
198 adcl -100(%esi), %eax
199 adcl -96(%esi), %eax
200 adcl -92(%esi), %eax
201 adcl -88(%esi), %eax
202 adcl -84(%esi), %eax
203 adcl -80(%esi), %eax
204 adcl -76(%esi), %eax
205 adcl -72(%esi), %eax
206 adcl -68(%esi), %eax
207 adcl -64(%esi), %eax
208 adcl -60(%esi), %eax
209 adcl -56(%esi), %eax
210 adcl -52(%esi), %eax
211 adcl -48(%esi), %eax
212 adcl -44(%esi), %eax
213 adcl -40(%esi), %eax
214 adcl -36(%esi), %eax
215 adcl -32(%esi), %eax
216 adcl -28(%esi), %eax
217 adcl -24(%esi), %eax
218 adcl -20(%esi), %eax
219 adcl -16(%esi), %eax
220 adcl -12(%esi), %eax
221 adcl -8(%esi), %eax
222 adcl -4(%esi), %eax
223 45:
224 lea 128(%esi), %esi
225 adcl $0, %eax
226 dec %ecx
227 jge 40b
228 movl %edx, %ecx
229 50: andl $3, %ecx
230 jz 80f
231
232 # Handle the last 1-3 bytes without jumping
233 notl %ecx # 1->2, 2->1, 3->0, higher bits are masked
234 movl $0xffffff,%ebx # by the shll and shrl instructions
235 shll $3,%ecx
236 shrl %cl,%ebx
237 andl -128(%esi),%ebx # esi is 4-aligned so should be ok
238 addl %ebx,%eax
239 adcl $0,%eax
# If the original buf pointer (still at 12(%esp)) was odd, undo the
# byte-lane rotation applied at 25:.
240 80:
241 testl $1, 12(%esp)
242 jz 90f
243 roll $8, %eax
244 90:
245 popl %ebx
246 popl %esi
247 ret
248
249 #endif