Annotation of OpenXM_contrib/gmp/mpn/x86/k6/k62mmx/rshift.asm, Revision 1.1.1.2
1.1 maekawa 1: dnl AMD K6-2 mpn_rshift -- mpn right shift.
2:
1.1.1.2 ! ohara 3: dnl Copyright 1999, 2000, 2002 Free Software Foundation, Inc.
1.1 maekawa 4: dnl
5: dnl This file is part of the GNU MP Library.
6: dnl
7: dnl The GNU MP Library is free software; you can redistribute it and/or
8: dnl modify it under the terms of the GNU Lesser General Public License as
9: dnl published by the Free Software Foundation; either version 2.1 of the
10: dnl License, or (at your option) any later version.
11: dnl
12: dnl The GNU MP Library is distributed in the hope that it will be useful,
13: dnl but WITHOUT ANY WARRANTY; without even the implied warranty of
14: dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15: dnl Lesser General Public License for more details.
16: dnl
17: dnl You should have received a copy of the GNU Lesser General Public
18: dnl License along with the GNU MP Library; see the file COPYING.LIB. If
19: dnl not, write to the Free Software Foundation, Inc., 59 Temple Place -
20: dnl Suite 330, Boston, MA 02111-1307, USA.
21:
22: include(`../config.m4')
23:
24:
1.1.1.2 ! ohara 25: C K6-2: 1.75 cycles/limb
! 26:
! 27:
1.1 maekawa 28: C mp_limb_t mpn_rshift (mp_ptr dst, mp_srcptr src, mp_size_t size,
29: C unsigned shift);
30: C
31:
dnl  Stack offsets of the four C arguments.  FRAME tracks bytes pushed
dnl  since function entry (0 at entry), so these are offsets from the
dnl  return address: dst at 4, src at 8, size at 12, shift at 16 --
dnl  defframe/FRAME_pushl come from GMP's x86 asm support macros.
32: defframe(PARAM_SHIFT,16)
33: defframe(PARAM_SIZE, 12)
34: defframe(PARAM_SRC, 8)
35: defframe(PARAM_DST, 4)
36: deflit(`FRAME',0)
37:
38: dnl Minimum 9, because the unrolled loop can't handle less.
39: dnl
40: deflit(UNROLL_THRESHOLD, 9)
41:
1.1.1.2 ! ohara 42: TEXT
1.1 maekawa 43: ALIGN(32)
44:
45: PROLOGUE(mpn_rshift)
46: deflit(`FRAME',0)
47:
C Contract (from the prototype above): shift src[0..size-1] right by
C "shift" bits into dst, returning the bits shifted out of the low limb,
C left-justified in a limb.  shift is presumably 1..31 as for all mpn
C shift routines -- the 32-shift and 64-shift values computed below
C would misbehave for shift==0; TODO confirm against the mpn docs.
C
48: C The 1 limb case can be done without the push %ebx, but it's then
49: C still the same speed. The push is left as a free helping hand for
50: C the two_or_more code.
51:
52: movl PARAM_SIZE, %eax
53: pushl %ebx FRAME_pushl()
54:
55: movl PARAM_SRC, %ebx
56: decl %eax
C %eax = size-1; zero flag set means the single-limb case, handled
C inline below without touching MMX state.
57:
58: movl PARAM_SHIFT, %ecx
59: jnz L(two_or_more)
60:
61: movl (%ebx), %edx C src limb
62: movl PARAM_DST, %ebx
63:
C %eax is 0 here (size-1), so shrdl leaves exactly the low "shift" bits
C of the src limb, left-justified -- the function's return value.
64: shrdl( %cl, %edx, %eax) C return value
65:
66: shrl %cl, %edx
67:
68: movl %edx, (%ebx) C dst limb
69: popl %ebx
70:
71: ret
72:
73:
74: C -----------------------------------------------------------------------------
75: ALIGN(16) C avoid offset 0x1f
76: L(two_or_more):
77: C eax size-1
78: C ebx src
79: C ecx shift
80: C edx
81:
82: movl (%ebx), %edx C src low limb
83: negl %ecx
84:
85: addl $32, %ecx
C %ecx = 32-shift from here on; %mm6 holds the shift count for psrlq.
86: movd PARAM_SHIFT, %mm6
87:
88: shll %cl, %edx
C %edx = return value: the low limb's shifted-out bits, left-justified.
89: cmpl $UNROLL_THRESHOLD-1, %eax
90:
91: jae L(unroll)
92:
93:
94: C eax size-1
95: C ebx src
96: C ecx 32-shift
97: C edx retval
98: C
99: C mm6 shift
100:
101: movl PARAM_DST, %ecx
102: leal (%ebx,%eax,4), %ebx
103:
C dst pointer biased by -4 so that after the incl in the loop the movd
C store of iteration i lands on dst[i], starting at dst[0].
104: leal -4(%ecx,%eax,4), %ecx
105: negl %eax
106:
107: C This loop runs at about 3 cycles/limb, which is the amount of
108: C decoding, and this is despite every second access being unaligned.
109:
110: L(simple):
111: C eax counter, -(size-1) to -1
112: C ebx &src[size-1]
113: C ecx &dst[size-1]
114: C edx retval
115: C
116: C mm0 scratch
117: C mm6 shift
118:
C Zdisp apparently forces an explicit zero displacement byte into the
C encoding (instruction-length padding) -- see GMP's x86 asm support
C macros; TODO confirm in asm-defs.m4.
119: Zdisp( movq, 0,(%ebx,%eax,4), %mm0)
120: incl %eax
121:
122: psrlq %mm6, %mm0
123:
124: Zdisp( movd, %mm0, 0,(%ecx,%eax,4))
125: jnz L(simple)
126:
127:
C Final qword store: low dword is dst[size-2], high dword is dst[size-1],
C which gets the zero fill from the plain right shift of the top qword.
128: movq %mm0, (%ecx)
129: movl %edx, %eax
130:
131: popl %ebx
132:
C femms: 3DNow! fast variant of emms, exiting MMX state (K6-2 specific).
133: femms
134: ret
135:
136:
137: C -----------------------------------------------------------------------------
138: ALIGN(16)
139: L(unroll):
140: C eax size-1
141: C ebx src
142: C ecx 32-shift
143: C edx retval
144: C
145: C mm6 shift
146:
147: addl $32, %ecx
C %ecx = 64-shift, the left-shift count (mm7) used to recombine
C adjacent source qwords in the unrolled loop.
148: subl $7, %eax C size-8
149:
150: movd %ecx, %mm7
151: movl PARAM_DST, %ecx
152:
153: movq (%ebx), %mm2 C src low qword
154: leal (%ebx,%eax,4), %ebx C src end - 32
155:
C Bit 2 of the dst address: if set, dst is only 4-byte aligned, so one
C low limb is emitted first to make the loop's movq stores 8-byte aligned.
156: testb $4, %cl
157: leal (%ecx,%eax,4), %ecx C dst end - 32
158:
159: notl %eax C -(size-7)
160: jz L(dst_aligned)
161:
162: psrlq %mm6, %mm2
163: incl %eax
164:
165: Zdisp( movd, %mm2, 0,(%ecx,%eax,4)) C dst low limb
166: movq 4(%ebx,%eax,4), %mm2 C new src low qword
167: L(dst_aligned):
168:
169: movq 12(%ebx,%eax,4), %mm0 C src second lowest qword
170: nop C avoid bad cache line crossing
171:
172:
173: C This loop is the important bit, the rest is just support for it.
174: C Four src limbs are held at the start, and four more will be read.
175: C Four dst limbs will be written. This schedule seems necessary for
176: C full speed.
177: C
178: C The use of -(size-7) lets the loop stop when %eax becomes >= 0 and
179: C and leaves 0 to 3 which can be tested with test $1 and $2.
180:
181: L(top):
182: C eax counter, -(size-7) step by +4 until >=0
183: C ebx src end - 32
184: C ecx dst end - 32
185: C edx retval
186: C
187: C mm0 src next qword
188: C mm1 scratch
189: C mm2 src prev qword
190: C mm6 shift
191: C mm7 64-shift
192:
C Each iteration: dst qword = (prev qword >> shift) | (next qword <<
C (64-shift)), done twice for four limbs per pass.
193: psrlq %mm6, %mm2
194: addl $4, %eax
195:
196: movq %mm0, %mm1
197: psllq %mm7, %mm0
198:
199: por %mm0, %mm2
200: movq 4(%ebx,%eax,4), %mm0
201:
202: psrlq %mm6, %mm1
203: movq %mm2, -12(%ecx,%eax,4)
204:
205: movq %mm0, %mm2
206: psllq %mm7, %mm0
207:
208: por %mm0, %mm1
209: movq 12(%ebx,%eax,4), %mm0
210:
211: movq %mm1, -4(%ecx,%eax,4)
212: ja L(top) C jump if no carry and not zero
213:
214:
215:
216: C Now have the four limbs in mm2 (low) and mm0 (high), and %eax is 0
217: C to 3 representing respectively 3 to 0 further limbs.
218:
219: testl $2, %eax C testl to avoid bad cache line crossings
220: jnz L(finish_nottwo)
221:
222: C Two or three extra limbs: rshift mm2, OR it with lshifted mm0, mm0
223: C becomes new mm2 and a new mm0 is loaded.
224:
225: psrlq %mm6, %mm2
226: movq %mm0, %mm1
227:
228: psllq %mm7, %mm0
229: addl $2, %eax
230:
231: por %mm0, %mm2
232: movq 12(%ebx,%eax,4), %mm0
233:
234: movq %mm2, -4(%ecx,%eax,4)
235: movq %mm1, %mm2
236: L(finish_nottwo):
237:
238:
C Zero or one further limb left; test $1 distinguishes (jnz below uses
C the flags from this testb, preserved across the MMX ops).
239: testb $1, %al
240: psrlq %mm6, %mm2
241:
242: movq %mm0, %mm1
243: psllq %mm7, %mm0
244:
245: por %mm0, %mm2
246: psrlq %mm6, %mm1
247:
248: movq %mm2, 4(%ecx,%eax,4)
249: jnz L(finish_even)
250:
251:
252: C one further extra limb to process
253:
254: movd 32-4(%ebx), %mm0 C src[size-1], most significant limb
255: popl %ebx
256:
257: movq %mm0, %mm2
258: psllq %mm7, %mm0
259:
260: por %mm0, %mm1
261: psrlq %mm6, %mm2
262:
263: movq %mm1, 32-12(%ecx) C dst[size-3,size-2]
264: movd %mm2, 32-4(%ecx) C dst[size-1]
265:
266: movl %edx, %eax C retval
267:
268: femms
269: ret
270:
271:
272: nop C avoid bad cache line crossing
273: L(finish_even):
274: C no further extra limbs
275:
276: movq %mm1, 32-8(%ecx) C dst[size-2,size-1]
277: movl %edx, %eax C retval
278:
279: popl %ebx
280:
281: femms
282: ret
283:
284: EPILOGUE()
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>