dnl  SPARC v9 32-bit mpn_addmul_1 -- Multiply a limb vector with a limb and
dnl  add the result to a second limb vector.

dnl  Copyright (C) 1998, 2000 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.

dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of the GNU Lesser General Public License as published
dnl  by the Free Software Foundation; either version 2.1 of the License, or (at
dnl  your option) any later version.

dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
dnl  License for more details.

dnl  You should have received a copy of the GNU Lesser General Public License
dnl  along with the GNU MP Library; see the file COPYING.LIB.  If not, write to
dnl  the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
dnl  MA 02111-1307, USA.


include(`../config.m4')

C INPUT PARAMETERS
C res_ptr	i0
C s1_ptr	i1
C size		i2
C s2_limb	i3

ASM_START()

TEXT
ALIGN(4)
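C A zero word.  It is kept loaded in %f10 so that the register pair
C %f10:%f11 always holds a zero-extended 64-bit integer; fxtod %f10 then
C converts whatever 32-bit word was last loaded into %f11.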
L(noll):
	.word	0

PROLOGUE(mpn_addmul_1)
	save	%sp,-256,%sp
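C The frame gives scratch space at [%fp-16]..[%fp-40]; 32-bit SPARC has
C no direct move between integer and FP registers, so every int<->fp
C transfer below goes through these slots (st/ld on the way in, std/ldx
C on the way out).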

ifdef(`PIC',
`L(pc):	rd	%pc,%o7
	ld	[%o7+L(noll)-L(pc)],%f10',
`	sethi	%hi(L(noll)),%g1
	ld	[%g1+%lo(L(noll))],%f10')
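C Both arms load the zero word at L(noll) into %f10, PIC code relative
C to %pc, non-PIC code through an absolute sethi/%lo address.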

	sethi	%hi(0xffff0000),%o0
	andn	%i3,%o0,%o0
	st	%o0,[%fp-16]
	ld	[%fp-16],%f11
	fxtod	%f10,%f6

	srl	%i3,16,%o0
	st	%o0,[%fp-16]
	ld	[%fp-16],%f11
	fxtod	%f10,%f8
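C Now %f6 = (double) (s2_limb & 0xffff) and %f8 = (double) (s2_limb >> 16),
C the two constant factors used by every fmuld below.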

	mov	0,%g3			C cy = 0

	ld	[%i1],%f11
	subcc	%i2,1,%i2
	be,pn	%icc,L(end1)
	add	%i1,4,%i1		C s1_ptr++
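C The first s1 limb is already in %f11; the branch above short-circuits
C size == 1.  The next three blocks prime a software pipeline whose FP
C results are double-buffered in [%fp-24]/[%fp-16] and [%fp-40]/[%fp-32],
C peeling off to L(end2)..L(end4) when the operand is short.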

	fxtod	%f10,%f2
	ld	[%i1],%f11
	add	%i1,4,%i1		C s1_ptr++
	fmuld	%f2,%f8,%f16
	fmuld	%f2,%f6,%f4
	fdtox	%f16,%f14
	std	%f14,[%fp-24]
	fdtox	%f4,%f12
	subcc	%i2,1,%i2
	be,pn	%icc,L(end2)
	std	%f12,[%fp-16]

	fxtod	%f10,%f2
	ld	[%i1],%f11
	add	%i1,4,%i1		C s1_ptr++
	fmuld	%f2,%f8,%f16
	fmuld	%f2,%f6,%f4
	fdtox	%f16,%f14
	std	%f14,[%fp-40]
	fdtox	%f4,%f12
	subcc	%i2,1,%i2
	be,pn	%icc,L(end3)
	std	%f12,[%fp-32]

	fxtod	%f10,%f2
	ld	[%i1],%f11
	add	%i1,4,%i1		C s1_ptr++
	ld	[%i0],%g5
	ldx	[%fp-24],%g2		C p16
	fmuld	%f2,%f8,%f16
	ldx	[%fp-16],%g1		C p0
	fmuld	%f2,%f6,%f4
	sllx	%g2,16,%g2		C align p16
	fdtox	%f16,%f14
	add	%g2,%g1,%g1		C add p16 to p0 (ADD1)
	std	%f14,[%fp-24]
	fdtox	%f4,%f12
	add	%i0,4,%i0		C res_ptr++
	subcc	%i2,1,%i2
	be,pn	%icc,L(end4)
	std	%f12,[%fp-16]

	b,a	L(loopm)

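C The loop is two-way unrolled and software pipelined: L(loop) retires
C the product pair buffered at [%fp-24]/[%fp-16] while L(loopm) retires
C [%fp-40]/[%fp-32], and each half meanwhile starts the FP multiplies
C whose results will be retired two iterations later.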
	.align	16
C BEGIN LOOP
L(loop):
	fxtod	%f10,%f2
	ld	[%i1],%f11
	add	%i1,4,%i1		C s1_ptr++
	add	%g5,%g1,%g1		C add *res_ptr to p0 (ADD2)
	add	%g3,%g1,%g4		C p += cy
	ld	[%i0],%g5
	srlx	%g4,32,%g3
	ldx	[%fp-24],%g2		C p16
	fmuld	%f2,%f8,%f16
	ldx	[%fp-16],%g1		C p0
	fmuld	%f2,%f6,%f4
	sllx	%g2,16,%g2		C align p16
	st	%g4,[%i0-4]
	fdtox	%f16,%f14
	add	%g2,%g1,%g1		C add p16 to p0 (ADD1)
	std	%f14,[%fp-24]
	fdtox	%f4,%f12
	std	%f12,[%fp-16]
	subcc	%i2,1,%i2
	be,pn	%icc,L(loope)
	add	%i0,4,%i0		C res_ptr++
L(loopm):
	fxtod	%f10,%f2
	ld	[%i1],%f11
	add	%i1,4,%i1		C s1_ptr++
	add	%g5,%g1,%g1		C add *res_ptr to p0 (ADD2)
	add	%g3,%g1,%g4		C p += cy
	ld	[%i0],%g5
	srlx	%g4,32,%g3
	ldx	[%fp-40],%g2		C p16
	fmuld	%f2,%f8,%f16
	ldx	[%fp-32],%g1		C p0
	fmuld	%f2,%f6,%f4
	sllx	%g2,16,%g2		C align p16
	st	%g4,[%i0-4]
	fdtox	%f16,%f14
	add	%g2,%g1,%g1		C add p16 to p0 (ADD1)
	std	%f14,[%fp-40]
	fdtox	%f4,%f12
	std	%f12,[%fp-32]
	subcc	%i2,1,%i2
	bne,pt	%icc,L(loop)
	add	%i0,4,%i0		C res_ptr++
C END LOOP
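C Wind down: the tails below compute any remaining products and drain
C the pairs still buffered in the scratch slots, sharing code through
C the join points L(xxx) and L(yyy) before falling into L(ret).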

	fxtod	%f10,%f2
	add	%g5,%g1,%g1		C add *res_ptr to p0 (ADD2)
	add	%g3,%g1,%g4		C p += cy
	ld	[%i0],%g5
	srlx	%g4,32,%g3
	ldx	[%fp-24],%g2		C p16
	fmuld	%f2,%f8,%f16
	ldx	[%fp-16],%g1		C p0
	fmuld	%f2,%f6,%f4
	sllx	%g2,16,%g2		C align p16
	st	%g4,[%i0-4]
	b,a	L(xxx)
L(loope):
L(end4):
	fxtod	%f10,%f2
	add	%g5,%g1,%g1		C add *res_ptr to p0 (ADD2)
	add	%g3,%g1,%g4		C p += cy
	ld	[%i0],%g5
	srlx	%g4,32,%g3
	ldx	[%fp-40],%g2		C p16
	fmuld	%f2,%f8,%f16
	ldx	[%fp-32],%g1		C p0
	fmuld	%f2,%f6,%f4
	sllx	%g2,16,%g2		C align p16
	st	%g4,[%i0-4]
	fdtox	%f16,%f14
	add	%g2,%g1,%g1		C add p16 to p0 (ADD1)
	std	%f14,[%fp-40]
	fdtox	%f4,%f12
	std	%f12,[%fp-32]
	add	%i0,4,%i0		C res_ptr++

	add	%g5,%g1,%g1		C add *res_ptr to p0 (ADD2)
	add	%g3,%g1,%g4		C p += cy
	ld	[%i0],%g5
	srlx	%g4,32,%g3
	ldx	[%fp-24],%g2		C p16
	ldx	[%fp-16],%g1		C p0
	sllx	%g2,16,%g2		C align p16
	st	%g4,[%i0-4]
	b,a	L(yyy)

L(end3):
	fxtod	%f10,%f2
	ld	[%i0],%g5
	ldx	[%fp-24],%g2		C p16
	fmuld	%f2,%f8,%f16
	ldx	[%fp-16],%g1		C p0
	fmuld	%f2,%f6,%f4
	sllx	%g2,16,%g2		C align p16
L(xxx):	fdtox	%f16,%f14
	add	%g2,%g1,%g1		C add p16 to p0 (ADD1)
	std	%f14,[%fp-24]
	fdtox	%f4,%f12
	std	%f12,[%fp-16]
	add	%i0,4,%i0		C res_ptr++

	add	%g5,%g1,%g1		C add *res_ptr to p0 (ADD2)
	add	%g3,%g1,%g4		C p += cy
	ld	[%i0],%g5
	srlx	%g4,32,%g3
	ldx	[%fp-40],%g2		C p16
	ldx	[%fp-32],%g1		C p0
	sllx	%g2,16,%g2		C align p16
	st	%g4,[%i0-4]
	add	%g2,%g1,%g1		C add p16 to p0 (ADD1)
	add	%i0,4,%i0		C res_ptr++

	add	%g5,%g1,%g1		C add *res_ptr to p0 (ADD2)
	add	%g3,%g1,%g4		C p += cy
	ld	[%i0],%g5
	srlx	%g4,32,%g3
	ldx	[%fp-24],%g2		C p16
	ldx	[%fp-16],%g1		C p0
	sllx	%g2,16,%g2		C align p16
	st	%g4,[%i0-4]
	add	%g2,%g1,%g1		C add p16 to p0 (ADD1)
	add	%i0,4,%i0		C res_ptr++
	b,a	L(ret)

L(end2):
	fxtod	%f10,%f2
	fmuld	%f2,%f8,%f16
	fmuld	%f2,%f6,%f4
	fdtox	%f16,%f14
	std	%f14,[%fp-40]
	fdtox	%f4,%f12
	std	%f12,[%fp-32]
	ld	[%i0],%g5
	ldx	[%fp-24],%g2		C p16
	ldx	[%fp-16],%g1		C p0
	sllx	%g2,16,%g2		C align p16
L(yyy):	add	%g2,%g1,%g1		C add p16 to p0 (ADD1)
	add	%i0,4,%i0		C res_ptr++

	add	%g5,%g1,%g1		C add *res_ptr to p0 (ADD2)
	add	%g3,%g1,%g4		C p += cy
	ld	[%i0],%g5
	srlx	%g4,32,%g3
	ldx	[%fp-40],%g2		C p16
	ldx	[%fp-32],%g1		C p0
	sllx	%g2,16,%g2		C align p16
	st	%g4,[%i0-4]
	add	%g2,%g1,%g1		C add p16 to p0 (ADD1)
	add	%i0,4,%i0		C res_ptr++
	b,a	L(ret)

L(end1):
	fxtod	%f10,%f2
	fmuld	%f2,%f8,%f16
	fmuld	%f2,%f6,%f4
	fdtox	%f16,%f14
	std	%f14,[%fp-24]
	fdtox	%f4,%f12
	std	%f12,[%fp-16]

	ld	[%i0],%g5
	ldx	[%fp-24],%g2		C p16
	ldx	[%fp-16],%g1		C p0
	sllx	%g2,16,%g2		C align p16
	add	%g2,%g1,%g1		C add p16 to p0 (ADD1)
	add	%i0,4,%i0		C res_ptr++

L(ret):	add	%g5,%g1,%g1		C add *res_ptr to p0 (ADD2)
	add	%g3,%g1,%g4		C p += cy
	srlx	%g4,32,%g3
	st	%g4,[%i0-4]

	ret
	restore %g0,%g3,%o0	C side effect: put cy in return reg
EPILOGUE(mpn_addmul_1)