Annotation of OpenXM_contrib2/asir2018/include/inline.h, Revision 1.3
1.1 noro 1: /*
2: * Copyright (c) 1994-2000 FUJITSU LABORATORIES LIMITED
3: * All rights reserved.
4: *
5: * FUJITSU LABORATORIES LIMITED ("FLL") hereby grants you a limited,
6: * non-exclusive and royalty-free license to use, copy, modify and
7: * redistribute, solely for non-commercial and non-profit purposes, the
8: * computer program, "Risa/Asir" ("SOFTWARE"), subject to the terms and
9: * conditions of this Agreement. For the avoidance of doubt, you acquire
10: * only a limited right to use the SOFTWARE hereunder, and FLL or any
11: * third party developer retains all rights, including but not limited to
12: * copyrights, in and to the SOFTWARE.
13: *
14: * (1) FLL does not grant you a license in any way for commercial
15: * purposes. You may use the SOFTWARE only for non-commercial and
16: * non-profit purposes only, such as academic, research and internal
17: * business use.
18: * (2) The SOFTWARE is protected by the Copyright Law of Japan and
19: * international copyright treaties. If you make copies of the SOFTWARE,
20: * with or without modification, as permitted hereunder, you shall affix
21: * to all such copies of the SOFTWARE the above copyright notice.
22: * (3) An explicit reference to this SOFTWARE and its copyright owner
23: * shall be made on your publication or presentation in any form of the
24: * results obtained by use of the SOFTWARE.
25: * (4) In the event that you modify the SOFTWARE, you shall notify FLL by
26: * e-mail at risa-admin@sec.flab.fujitsu.co.jp of the detailed specification
27: * for such modification or the source code of the modified part of the
28: * SOFTWARE.
29: *
30: * THE SOFTWARE IS PROVIDED AS IS WITHOUT ANY WARRANTY OF ANY KIND. FLL
31: * MAKES ABSOLUTELY NO WARRANTIES, EXPRESSED, IMPLIED OR STATUTORY, AND
32: * EXPRESSLY DISCLAIMS ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS
33: * FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT OF THIRD PARTIES'
34: * RIGHTS. NO FLL DEALER, AGENT, EMPLOYEES IS AUTHORIZED TO MAKE ANY
35: * MODIFICATIONS, EXTENSIONS, OR ADDITIONS TO THIS WARRANTY.
36: * UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, TORT, CONTRACT,
37: * OR OTHERWISE, SHALL FLL BE LIABLE TO YOU OR ANY OTHER PERSON FOR ANY
38: * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL
39: * DAMAGES OF ANY CHARACTER, INCLUDING, WITHOUT LIMITATION, DAMAGES
40: * ARISING OUT OF OR RELATING TO THE SOFTWARE OR THIS AGREEMENT, DAMAGES
41: * FOR LOSS OF GOODWILL, WORK STOPPAGE, OR LOSS OF DATA, OR FOR ANY
42: * DAMAGES, EVEN IF FLL SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF
43: * SUCH DAMAGES, OR FOR ANY CLAIM BY ANY OTHER PARTY. EVEN IF A PART
44: * OF THE SOFTWARE HAS BEEN DEVELOPED BY A THIRD PARTY, THE THIRD PARTY
45: * DEVELOPER SHALL HAVE NO LIABILITY IN CONNECTION WITH THE USE,
46: * PERFORMANCE OR NON-PERFORMANCE OF THE SOFTWARE.
47: *
1.3 ! noro 48: * $OpenXM: OpenXM_contrib2/asir2018/include/inline.h,v 1.2 2022/04/03 00:39:13 noro Exp $
1.1 noro 49: */
/* Generic double-precision primitives: these default versions defer to
 * out-of-line helper functions (dmb, dmab, dmar, dm_27, dma_27, dmab);
 * the target-specific sections below may #undef and replace them with
 * inline arithmetic or assembly.  Conventions: u = high/quotient word,
 * l = low/remainder word.  BASE27 is presumably 2^27 (the 27-bit digit
 * base used by the *27 variants) -- defined elsewhere in the project. */

/* DMB: u = (a1*a2) / base, l = (a1*a2) % base. */
#define DMB(base,a1,a2,u,l) (l)=dmb(base,a1,a2,&(u));
/* DMAB: u = (a1*a2+a3) / base, l = (a1*a2+a3) % base. */
#define DMAB(base,a1,a2,a3,u,l) (l)=dmab(base,a1,a2,a3,&(u));
/* DMAR: r = (a1*a2+a3) % d. */
#define DMAR(a1,a2,a3,d,r) (r)=dmar(a1,a2,a3,d);
/* DM27: split a1*a2 at bit 27 (u = product>>27, l = low 27 bits);
 * see the x86 asm version below for the reference semantics. */
#define DM27(a1,a2,u,l) (l)=dm_27(a1,a2,&(u));
/* DMA27: like DM27 but splits a1*a2+a3. */
#define DMA27(a1,a2,a3,u,l) (l)=dma_27(a1,a2,a3,&(u));
/* DSAB27: u,l = (a1*BASE27 + a2) divmod base, built on dmab. */
#define DSAB27(base,a1,a2,u,l) (l)=dmab(base,a1,BASE27,a2,&(u));
56:
1.3 ! noro 57: #if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__alpha) || defined(mips) || defined(_IBMR2) || defined(_WIN64) || defined(_M_ARM64)
1.1 noro 58: #define DM(a1,a2,u,l)\
59: {UL _t;\
60: _t=(UL)(a1)*(UL)(a2);\
61: (u)=(unsigned int)(_t>>BSH);\
62: (l)=(unsigned int)(_t&BMASK);}
63:
64: #define DMA(a1,a2,a3,u,l)\
65: {UL _t;\
66: _t=(UL)(a1)*(UL)(a2)+(UL)(a3);\
67: (u)=(unsigned int)(_t>>BSH);\
68: (l)=(unsigned int)(_t&BMASK);}
69:
70: #define DMA2(a1,a2,a3,a4,u,l)\
71: {UL _t;\
72: _t=(UL)(a1)*(UL)(a2)+(UL)(a3)+(UL)(a4);\
73: (u)=(unsigned int)(_t>>BSH);\
74: (l)=(unsigned int)(_t&BMASK);}
75:
76: #define DSAB(base,a1,a2,u,l)\
77: {UL _t;\
78: _t=(((UL)(a1))<<BSH)|((UL)(a2));\
79: (u)=(unsigned int)(_t/((UL)(base)));\
80: (l)=(unsigned int)(_t-((UL)(base)*(UL)(u)));}
1.3 ! noro 81:
! 82: #undef DMAR
! 83: #define DMAR(a1,a2,a3,d,r)\
! 84: {(r)=(unsigned int)(((UL)(a1)*(UL)(a2)+(UL)(a3))%(UL)(d));}
! 85:
1.1 noro 86: #else
87: #define DM(a1,a2,u,l) (l)=dm(a1,a2,&(u));
88: #define DMA(a1,a2,a3,u,l) (l)=dma(a1,a2,a3,&(u));
89: #define DMA2(a1,a2,a3,a4,u,l) (l)=dma2(a1,a2,a3,a4,&(u));
90: #define DSAB(base,a1,a2,u,l) (l)=dsab(base,a1,a2,&(u));
91: #endif
92:
/* DQR: integer division with remainder -- q = a/b, r = a - b*q.
 * The body is wrapped in a brace block (matching the DM/DMA macros
 * above) so that both assignments stay together when the macro is the
 * body of an if or a loop; the original two bare statements made
 * `if (cond) DQR(a,b,q,r);` assign r unconditionally.  Plain braces
 * rather than do{...}while(0) keep call sites written without a
 * trailing semicolon valid, since the body already ends in `;`.
 * Note q is re-read when computing r, so q must not alias a or b. */
#define DQR(a,b,q,r)\
{(q)=(a)/(b);\
(r)=(a)-(b)*(q);}
96:
/* SPARC: replace DSAB with inline assembly using the y-register/udiv
 * pair, and DSAB27 with an out-of-line helper. */
#if defined(sparc)
#undef DSAB
#undef DSAB27

/* DSAB: (u,l) <- (a1*2^32 + a2) divmod base.
 * "wr a1,%g0,%y" loads a1 (xor'd with the zero register) into %y, the
 * high word of the 64-bit dividend; udiv then divides (%y:a2) by base
 * giving the quotient in _t.  The remainder is recovered arithmetically
 * as a2 - quotient*base (mod 2^32). */
#define DSAB(base,a1,a2,u,l)\
{unsigned int _t;\
asm volatile("wr %0,%%g0,%%y" : : "r"(a1) );\
asm volatile("udiv %1,%2,%0" :"=r"(_t): "r"(a2),"r"(base) );\
(l)=(unsigned int)(a2)-(unsigned int)(_t)*(unsigned int)(base);\
(u)=(_t);}

/* DSAB27 falls back to the helper function on SPARC. */
#define DSAB27(base,a1,a2,u,l) (l)=dsab_27(base,a1,a2,&(u));
#endif
110:
/* 32-bit x86 with the Microsoft compiler (not MinGW): replace every
 * primitive with MSVC __asm versions built on the mul/div instructions,
 * which produce/consume the 64-bit value in the edx:eax register pair.
 * Arguments are copied into locals (_t1, _t2, ...) first because the
 * inline assembler addresses them as named memory operands. */
#if defined(_M_IX86) && !defined(__MINGW32__)

#undef DM
#undef DMA
#undef DMA2
#undef DMB
#undef DMAB
#undef DMAR
#undef DSAB
#undef DM27
#undef DMA27
#undef DSAB27

/* DSAB: (u,l) <- (a1*2^32 + a2) divmod base; div takes edx:eax and
 * leaves quotient in eax, remainder in edx. */
#define DSAB(base,a1,a2,u,l)\
{\
unsigned int _t1,_t2,_d;\
_t1=a1;_t2=a2;_d=base;\
__asm\
{\
__asm mov edx,_t1\
__asm mov eax,_t2\
__asm div _d\
__asm mov _t1,eax\
__asm mov _t2,edx\
}\
u=_t1;l=_t2;\
}

/* DM: (u,l) <- a1*a2 (mul leaves the product in edx:eax). */
#define DM(a1,a2,u,l)\
{\
unsigned int _t1,_t2;\
_t1=a1;_t2=a2;\
__asm\
{\
__asm mov eax,_t1\
__asm mul _t2\
__asm mov _t1,edx\
__asm mov _t2,eax\
}\
u=_t1;l=_t2;\
}

/* DMA: (u,l) <- a1*a2 + a3; the adc propagates the add's carry into
 * the high word. */
#define DMA(a1,a2,a3,u,l)\
{\
unsigned int _t1,_t2,_t3;\
_t1=a1;_t2=a2;_t3=a3;\
__asm\
{\
__asm mov eax,_t1\
__asm mul _t2\
__asm add eax,_t3\
__asm adc edx,0\
__asm mov _t1,edx\
__asm mov _t2,eax\
}\
u=_t1;l=_t2;\
}

/* DMA2: (u,l) <- a1*a2 + a3 + a4 (two add/adc rounds). */
#define DMA2(a1,a2,a3,a4,u,l)\
{\
unsigned int _t1,_t2,_t3,_t4;\
_t1=a1;_t2=a2;_t3=a3;_t4=a4;\
__asm\
{\
__asm mov eax,_t1\
__asm mul _t2\
__asm add eax,_t3\
__asm adc edx,0\
__asm add eax,_t4\
__asm adc edx,0\
__asm mov _t1,edx\
__asm mov _t2,eax\
}\
u=_t1;l=_t2;\
}

/* DMB: u = (a1*a2)/base, l = (a1*a2)%base. */
#define DMB(base,a1,a2,u,l)\
{\
unsigned int _t1,_t2,_d;\
_t1=a1;_t2=a2;_d=base;\
__asm\
{\
__asm mov eax,_t1\
__asm mul _t2\
__asm div _d\
__asm mov _t1,eax\
__asm mov _t2,edx\
}\
u=_t1;l=_t2;\
}

/* DMAB: u = (a1*a2+a3)/base, l = (a1*a2+a3)%base. */
#define DMAB(base,a1,a2,a3,u,l)\
{\
unsigned int _t1,_t2,_t3,_d;\
_t1=a1;_t2=a2;_t3=a3;_d=base;\
__asm\
{\
__asm mov eax,_t1\
__asm mul _t2\
__asm add eax,_t3\
__asm adc edx,0\
__asm div _d\
__asm mov _t1,eax\
__asm mov _t2,edx\
}\
u=_t1;l=_t2;\
}

/* DMAR: r <- (a1*a2+a3) mod d.
 * NOTE(review): `_t3=a3,_d=d;` below uses a comma where a semicolon was
 * surely intended; harmless (comma operator), kept byte-identical. */
#define DMAR(a1,a2,a3,d,r)\
{\
unsigned int _t1,_t2,_t3,_d;\
_t1=a1;_t2=a2;_t3=a3,_d=d;\
__asm\
{\
__asm mov eax,_t1\
__asm mul _t2\
__asm add eax,_t3\
__asm adc edx,0\
__asm div _d\
__asm mov _t1,edx\
}\
r=_t1;\
}

/* DSAB27: (u,l) <- (a1*2^27 + a2) divmod base.  The shl/or/shr
 * sequence assembles the 64-bit dividend a1*2^27+a2 in edx:eax before
 * the div. */
#define DSAB27(base,a1,a2,u,l)\
{\
unsigned int _t1,_t2,_d;\
_t1=a1;_t2=a2;_d=base;\
__asm\
{\
__asm mov edx,_t1\
__asm mov eax,_t2\
__asm mov ecx,edx\
__asm shl ecx,27\
__asm or eax,ecx\
__asm shr edx,5\
__asm div _d\
__asm mov _t1,eax\
__asm mov _t2,edx\
}\
u=_t1;l=_t2;\
}

/* DM27: split a1*a2 at bit 27: u = product>>27, l = product & (2^27-1)
 * (134217727 = 0x7ffffff). */
#define DM27(a1,a2,u,l)\
{\
unsigned int _t1,_t2;\
_t1=a1;_t2=a2;\
__asm\
{\
__asm mov eax,_t1\
__asm mul _t2\
__asm shl edx,5\
__asm mov ecx,eax\
__asm shr ecx,27\
__asm or edx,ecx\
__asm and eax,134217727\
__asm mov _t1,edx\
__asm mov _t2,eax\
}\
u=_t1;l=_t2;\
}

/* DMA27: like DM27 but splits a1*a2 + a3. */
#define DMA27(a1,a2,a3,u,l)\
{\
unsigned int _t1,_t2,_t3;\
_t1=a1;_t2=a2;_t3=a3;\
__asm\
{\
__asm mov eax,_t1\
__asm mul _t2\
__asm add eax,_t3\
__asm adc edx,0\
__asm shl edx,5\
__asm mov ecx,eax\
__asm shr ecx,27\
__asm or edx,ecx\
__asm and eax,134217727\
__asm mov _t1,edx\
__asm mov _t2,eax\
}\
u=_t1;l=_t2;\
}
#endif
294:
/* 32-bit x86 with GCC-style extended inline asm: override the
 * primitives with mul/div sequences on edx:eax.  Same conventions as
 * the MSVC section: u/q = high word or quotient, l/r = low word or
 * remainder. */
#if !defined(VISUAL) && defined(i386) && (defined(linux) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__CYGWIN__) || defined(__DARWIN__))

/* Disabled multi-statement variant of DMA, kept for reference. */
#if 0
#undef DMA
#define DMA(a1,a2,a3,u,l)\
asm volatile("movl %0,%%eax" : : "g" (a1) : "ax");\
asm volatile("mull %0" : : "g" (a2) : "ax","dx");\
asm volatile("addl %0,%%eax" : : "g" (a3) : "ax" );\
asm volatile("adcl $0,%%edx" : : : "dx" );\
asm volatile("movl %%edx,%0" : "=g" (u) : : "ax","dx");\
asm volatile("movl %%eax,%0" : "=g" (l) : : "ax" );
#endif

#undef DM
#undef DMA
#undef DMB
#undef DMAB
#undef DMAR
#undef DSAB
#undef DM27
#undef DMA27

/* DM27: split a1*a2 at bit 27 (u = product>>27, l = low 27 bits). */
#define DM27(a1,a2,u,l)\
asm volatile(" movl %2,%%eax; mull %3; shll $5,%%edx; movl %%eax,%%ecx; shrl $27,%%ecx; orl %%ecx,%%edx; andl $134217727,%%eax; movl %%edx,%0; movl %%eax,%1" :"=g"(u),"=g"(l) :"g"(a1),"g"(a2) :"ax","bx","cx","dx");

/* DMA27: like DM27 but splits a1*a2 + a3. */
#define DMA27(a1,a2,a3,u,l)\
asm volatile(" movl %2,%%eax; mull %3; addl %4,%%eax; adcl $0,%%edx; shll $5,%%edx; movl %%eax,%%ecx; shrl $27,%%ecx; orl %%ecx,%%edx; andl $134217727,%%eax; movl %%edx,%0; movl %%eax,%1" :"=g"(u),"=g"(l) :"g"(a1),"g"(a2),"g"(a3) :"ax","bx","cx","dx");

/* DSAB: (u,l) <- (a1*2^32 + a2) divmod base. */
#define DSAB(base,a1,a2,u,l)\
asm volatile(" movl %2,%%edx; movl %3,%%eax; divl %4; movl %%edx,%0; movl %%eax,%1" :"=g"(l),"=g"(u) :"g"(a1),"g"(a2),"g"(base) :"ax","dx");

/* DM: (u,l) <- a1*a2. */
#define DM(a1,a2,u,l)\
asm volatile(" movl %2,%%eax; mull %3; movl %%edx,%0; movl %%eax,%1" :"=g"(u),"=g"(l) :"g"(a1),"g"(a2) :"ax","dx");

/* DMA: (u,l) <- a1*a2 + a3. */
#define DMA(a1,a2,a3,u,l)\
asm volatile("movl %2,%%eax; mull %3; addl %4,%%eax; adcl $0,%%edx; movl %%edx,%0; movl %%eax,%1" :"=g"(u), "=g"(l) :"g"(a1),"g"(a2),"g"(a3) :"ax","dx");

/* DMB: u = (a1*a2)/base, l = (a1*a2)%base.
 * BUGFIX: the previous asm template had its string quoting scrambled
 * (" movl %2,%%eax;" mull %3;" ...), a syntax error on expansion, and
 * stored eax into operand %0 instead of %1, so u was never written.
 * Rewritten to mirror the working DMAB below. */
#define DMB(base,a1,a2,u,l)\
asm volatile(" movl %2,%%eax; mull %3; divl %4; movl %%edx,%0; movl %%eax,%1" :"=g"(l),"=g"(u) :"g"(a1),"g"(a2),"g"(base) :"ax","dx");

/* DMAB: u = (a1*a2+a3)/base, l = (a1*a2+a3)%base. */
#define DMAB(base,a1,a2,a3,u,l)\
asm volatile("movl %2,%%eax; mull %3; addl %4,%%eax; adcl $0,%%edx; divl %5; movl %%edx,%0; movl %%eax,%1" :"=g"(l),"=g"(u) :"g"(a1),"g"(a2),"g"(a3),"g"(base) :"ax","dx");

/* DMAR: r <- (a1*a2+a3) mod d. */
#define DMAR(a1,a2,a3,d,r)\
asm volatile("movl %1,%%eax; mull %2; addl %3,%%eax; adcl $0,%%edx; divl %4; movl %%edx,%0" :"=g"(r) :"g"(a1),"g"(a2),"g"(a3),"g"(d) :"ax","dx");
#endif
341:
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>