OpenXM_contrib2/asir2018/engine/PUM.c, Revision 1.3
/*
 * Copyright (c) 1994-2000 FUJITSU LABORATORIES LIMITED
 * All rights reserved.
 *
 * FUJITSU LABORATORIES LIMITED ("FLL") hereby grants you a limited,
 * non-exclusive and royalty-free license to use, copy, modify and
 * redistribute, solely for non-commercial and non-profit purposes, the
 * computer program, "Risa/Asir" ("SOFTWARE"), subject to the terms and
 * conditions of this Agreement. For the avoidance of doubt, you acquire
 * only a limited right to use the SOFTWARE hereunder, and FLL or any
 * third party developer retains all rights, including but not limited to
 * copyrights, in and to the SOFTWARE.
 *
 * (1) FLL does not grant you a license in any way for commercial
 * purposes. You may use the SOFTWARE only for non-commercial and
 * non-profit purposes only, such as academic, research and internal
 * business use.
 * (2) The SOFTWARE is protected by the Copyright Law of Japan and
 * international copyright treaties. If you make copies of the SOFTWARE,
 * with or without modification, as permitted hereunder, you shall affix
 * to all such copies of the SOFTWARE the above copyright notice.
 * (3) An explicit reference to this SOFTWARE and its copyright owner
 * shall be made on your publication or presentation in any form of the
 * results obtained by use of the SOFTWARE.
 * (4) In the event that you modify the SOFTWARE, you shall notify FLL by
 * e-mail at risa-admin@sec.flab.fujitsu.co.jp of the detailed specification
 * for such modification or the source code of the modified part of the
 * SOFTWARE.
 *
 * THE SOFTWARE IS PROVIDED AS IS WITHOUT ANY WARRANTY OF ANY KIND. FLL
 * MAKES ABSOLUTELY NO WARRANTIES, EXPRESSED, IMPLIED OR STATUTORY, AND
 * EXPRESSLY DISCLAIMS ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT OF THIRD PARTIES'
 * RIGHTS. NO FLL DEALER, AGENT, EMPLOYEES IS AUTHORIZED TO MAKE ANY
 * MODIFICATIONS, EXTENSIONS, OR ADDITIONS TO THIS WARRANTY.
 * UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, TORT, CONTRACT,
 * OR OTHERWISE, SHALL FLL BE LIABLE TO YOU OR ANY OTHER PERSON FOR ANY
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL
 * DAMAGES OF ANY CHARACTER, INCLUDING, WITHOUT LIMITATION, DAMAGES
 * ARISING OUT OF OR RELATING TO THE SOFTWARE OR THIS AGREEMENT, DAMAGES
 * FOR LOSS OF GOODWILL, WORK STOPPAGE, OR LOSS OF DATA, OR FOR ANY
 * DAMAGES, EVEN IF FLL SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF
 * SUCH DAMAGES, OR FOR ANY CLAIM BY ANY OTHER PARTY. EVEN IF A PART
 * OF THE SOFTWARE HAS BEEN DEVELOPED BY A THIRD PARTY, THE THIRD PARTY
 * DEVELOPER SHALL HAVE NO LIABILITY IN CONNECTION WITH THE USE,
 * PERFORMANCE OR NON-PERFORMANCE OF THE SOFTWARE.
 *
 * $OpenXM: OpenXM_contrib2/asir2018/engine/PUM.c,v 1.2 2018/09/28 08:20:28 noro Exp $
 */
#include "ca.h"

void gcdcmp(), sprsm();

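/*
 * detmp : determinant of the n x n polynomial matrix rmat over GF(mod),
 * by fraction-free (Bareiss-type) Gaussian elimination: each update is an
 * exact division by the previous pivot, the pivot row is chosen among the
 * candidates as the one with the fewest monomials, and row swaps are
 * tracked in sgn.
 */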
void detmp(VL vl,int mod,P **rmat,int n,P *dp)
{
  int i,j,k,sgn;
  P mjj,mij,t,s,u,d;
  P **mat;
  P *mi,*mj;

  mat = (P **)almat_pointer(n,n);
  for ( i = 0; i < n; i++ )
    for ( j = 0; j < n; j++ )
      mat[i][j] = rmat[i][j];
  for ( j = 0, d = (P)ONEM, sgn = 1; j < n; j++ ) {
    for ( i = j; (i < n) && !mat[i][j]; i++ );
    if ( i == n ) {
      *dp = 0; return;
    }
    for ( k = i; k < n; k++ )
      if ( mat[k][j] && (nmonop(mat[k][j]) < nmonop(mat[i][j]) ) )
        i = k;
    if ( j != i ) {
      mj = mat[j]; mat[j] = mat[i]; mat[i] = mj; sgn = -sgn;
    }
    for ( i = j + 1, mj = mat[j], mjj = mj[j]; i < n; i++ )
      for ( k = j + 1, mi = mat[i], mij = mi[j]; k < n; k++ ) {
        mulmp(vl,mod,mi[k],mjj,&t); mulmp(vl,mod,mj[k],mij,&s);
        submp(vl,mod,t,s,&u); divsmp(vl,mod,u,d,&mi[k]);
      }
    d = mjj;
  }
  if ( sgn < 0 )
    chsgnmp(mod,d,dp);
  else
    *dp = d;
}

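/*
 * resultmp : resultant of p1 and p2 with respect to v over GF(mod),
 * computed by a pseudo-remainder sequence (premmp) with exact divisions
 * by powers of the leading coefficients; adj collects the correction
 * factor coming from the initial ordering and sign adjustments.  If one
 * of the inputs does not involve v, it is simply raised to the degree
 * of the other in v.
 */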
void resultmp(VL vl,int mod,V v,P p1,P p2,P *pr)
{
  P q1,q2,m,m1,m2,lc,q,r,t,g1,g2,adj;
  int d,d1,d2,j,k;
  VL nvl;
  Z dq;
  MQ mq;

  if ( !p1 || !p2 ) {
    *pr = 0; return;
  }
  reordvar(vl,v,&nvl);
  reordermp(nvl,mod,vl,p1,&q1); reordermp(nvl,mod,vl,p2,&q2);

  if ( VR(q1) != v )
    if ( VR(q2) != v ) {
      *pr = 0;
      return;
    } else {
      d = deg(v,q2); STOZ(d,dq);
      pwrmp(vl,mod,q1,dq,pr);
      return;
    }
  else if ( VR(q2) != v ) {
    d = deg(v,q1); STOZ(d,dq);
    pwrmp(vl,mod,q2,dq,pr);
    return;
  }

  if ( ( VR(q1) != v ) || ( VR(q2) != v ) ) {
    *pr = 0;
    return;
  }

  d1 = deg(v,q1); d2 = deg(v,q2);
  if ( d1 > d2 ) {
    g1 = q1; g2 = q2; adj = (P)ONEM;
  } else if ( d1 < d2 ) {
    g2 = q1; g1 = q2;
    if ( (d1 % 2) && (d2 % 2) ) {
      STOMQ(-1,mq); adj = (P)mq;
    } else
      adj = (P)ONEM;
  } else {
    premmp(nvl,mod,q1,q2,&t);
    d = deg(v,t); STOZ(d,dq); pwrmp(nvl,mod,LC(q2),dq,&adj);
    g1 = q2; g2 = t;
    if ( d1 % 2 ) {
      chsgnmp(mod,adj,&t); adj = t;
    }
  }
  d1 = deg(v,g1); j = d1 - 1;

  for ( lc = (P)ONEM; ; ) {
    if ( ( k = deg(v,g2) ) < 0 ) {
      *pr = 0;
      return;
    }

    if ( k == j )
      if ( !k ) {
        divsmp(nvl,mod,g2,adj,pr);
        return;
      } else {
        premmp(nvl,mod,g1,g2,&r); mulmp(nvl,mod,lc,lc,&m);
        divsmp(nvl,mod,r,m,&q);
        g1 = g2; g2 = q;
        lc = LC(g1); /* g1 is not const */
        j = k - 1;
      }
    else {
      d = j - k; STOZ(d,dq);
      pwrmp(nvl,mod,(VR(g2)==v?LC(g2):g2),dq,&m);
      mulmp(nvl,mod,g2,m,&m1);
      pwrmp(nvl,mod,lc,dq,&m); divsmp(nvl,mod,m1,m,&t);
      if ( k == 0 ) {
        divsmp(nvl,mod,t,adj,pr);
        return;
      } else {
        premmp(nvl,mod,g1,g2,&r);
        mulmp(nvl,mod,lc,lc,&m1); mulmp(nvl,mod,m,m1,&m2);
        divsmp(nvl,mod,r,m2,&q);
        g1 = t; g2 = q;
        if ( d % 2 ) {
          chsgnmp(mod,g2,&t); g2 = t;
        }
        lc = LC(g1); /* g1 is not const */
        j = k - 1;
      }
    }
  }
}

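/*
 * premmp : pseudo-remainder of p1 by p2 over GF(mod).  The coefficients
 * of p1 are spread into the work array pw, repeatedly scaled by LC(p2)
 * and reduced by p2, and the remainder is rebuilt by plisttop().  If the
 * main variables differ, 0 or p1 is returned according to which variable
 * comes first in vl.
 */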
void premmp(VL vl,int mod,P p1,P p2,P *pr)
{
  P m,m1,m2;
  P *pw;
  DCP dc;
  V v1,v2;
  register int i,j;
  int n1,n2,d;

  if ( NUM(p1) )
    if ( NUM(p2) )
      *pr = 0;
    else
      *pr = p1;
  else if ( NUM(p2) )
    *pr = 0;
  else if ( ( v1 = VR(p1) ) == ( v2 = VR(p2) ) ) {
    n1 = deg(v1,p1); n2 = deg(v1,p2);
    pw = (P *)ALLOCA((n1+1)*sizeof(P));
    bzero((char *)pw,(int)((n1+1)*sizeof(P)));

    for ( dc = DC(p1); dc; dc = NEXT(dc) )
      pw[ZTOS(DEG(dc))] = COEF(dc);

    for ( i = n1; i >= n2; i-- ) {
      if ( pw[i] ) {
        chsgnmp(mod,pw[i],&m);
        for ( j = i; j >= 0; j-- ) {
          mulmp(vl,mod,pw[j],LC(p2),&m1); pw[j] = m1;
        }

        for ( dc = DC(p2), d = i - n2; dc; dc = NEXT(dc) ) {
          mulmp(vl,mod,COEF(dc),m,&m1);
          addmp(vl,mod,pw[ZTOS(DEG(dc))+d],m1,&m2);
          pw[ZTOS(DEG(dc))+d] = m2;
        }
      } else
        for ( j = i; j >= 0; j-- )
          if ( pw[j] ) {
            mulmp(vl,mod,pw[j],LC(p2),&m1); pw[j] = m1;
          }
    }
    plisttop(pw,v1,n2-1,pr);
  } else {
    while ( v1 != vl->v && v2 != vl->v )
      vl = NEXT(vl);
    if ( v1 == vl->v )
      *pr = 0;
    else
      *pr = p1;
  }
}

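/*
 * srchmp : resultant of p1 and p2 with respect to v over GF(mod) by
 * evaluation and interpolation: the next variable v0 is specialized at
 * successive points (skipping points that kill a leading coefficient),
 * the resultants of the images are computed recursively, and the result
 * is recovered by incremental interpolation in v0.  The univariate case
 * is delegated to srchump().
 */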
void srchmp(VL vl,int mod,V v,P p1,P p2,P *pr)
{
  P a,b,q1,q2,x,t,s,d,bg,c,c0,db;
  int i,m,k;
  V v0;
  VL nvl,tvl,nvl1,nvl2;
  Z dq;
  MQ q;

  if ( vl->v != v ) {
    reordvar(vl,v,&tvl);
    reordermp(tvl,mod,vl,p1,&q1); reordermp(tvl,mod,vl,p2,&q2);
  } else {
    q1 = p1; q2 = p2; tvl = vl;
  }
  clctv(tvl,q1,&nvl1); clctv(tvl,q2,&nvl2); mergev(tvl,nvl1,nvl2,&nvl);
  if ( VR(q1) != v )
    if ( VR(q2) != v )
      *pr = 0;
    else {
      m = getdeg(v,q2); STOZ(m,dq); pwrmp(vl,mod,q1,dq,pr);
    }
  else if ( VR(q2) != v ) {
    m = getdeg(v,q1); STOZ(m,dq); pwrmp(vl,mod,q2,dq,pr);
  } else if ( !NEXT(nvl) )
    srchump(mod,p1,p2,pr);
  else {
    v0 = NEXT(nvl)->v;
    k = getdeg(v,q1)*getdeg(v0,q2)+getdeg(v,q2)*getdeg(v0,q1)+1;
    for ( i = 0, c = 0, d = (P)ONEM, MKMV(v0,x);
      ( i < mod ) && (getdeg(v0,d) < k) ; i++ ) {
      STOMQ(i,q),bg = (P)q; substmp(nvl,mod,LC(q1),v0,bg,&t);
      if ( !t )
        continue;
      substmp(nvl,mod,LC(q2),v0,bg,&t);
      if ( !t )
        continue;
      substmp(nvl,mod,q1,v0,bg,&a); substmp(nvl,mod,q2,v0,bg,&b);
      srchmp(nvl,mod,v,a,b,&c0); substmp(nvl,mod,c,v0,bg,&t);
      submp(nvl,mod,c0,t,&s); mulmp(nvl,mod,s,d,&t);
      substmp(nvl,mod,d,v0,bg,&db);
      divsmp(nvl,mod,t,db,&s); addmp(nvl,mod,s,c,&t); c = t;
      submp(nvl,mod,x,bg,&t); mulmp(nvl,mod,d,t,&s); d = s;
    }
    if ( i == mod )
      error("srchmp : ???");
    *pr = c;
  }
}

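/*
 * ucmpp : comparison of two univariate polynomials with rational
 * coefficients; returns 0 if p and q coincide and a nonzero value
 * otherwise.
 */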
int ucmpp(P p,P q)
{
  DCP dcp,dcq;

  if ( !p )
    if ( !q )
      return ( 0 );
    else
      return ( 1 );
  else if ( !q )
    return ( 1 );
  else if ( NUM(p) )
    if ( !NUM(q) )
      return ( 1 );
    else
      return ( cmpq((Q)p,(Q)q) );
  else if ( NUM(q) )
    return ( 1 );
  else {
    for ( dcp = DC(p), dcq = DC(q); dcp && dcq;
      dcp = NEXT(dcp), dcq = NEXT(dcq) )
      if ( cmpz(DEG(dcp),DEG(dcq) ) )
        return ( 1 );
      else if ( cmpq((Q)COEF(dcp),(Q)COEF(dcq) ) )
        return ( 1 );
    if ( dcp || dcq )
      return ( 1 );
    else
      return ( 0 );
  }
}

#if 0
srchump(mod,p1,p2,pr)
int mod;
P p1,p2,*pr;
{
  int r;
  MQ mq;

  r = eucresum(mod,p1,p2);
  STOMQ(r,mq); *pr = (P)mq;
}
#endif

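/*
 * srchump : resultant of the univariate polynomials p1 and p2 over
 * GF(mod), on dense UM arrays.  g2 is first made monic and the factor
 * adj ( = lc(g2)^deg(g1) ) restores the scaling at the end; the main
 * loop is the same pseudo-remainder scheme as in resultmp().
 */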
void srchump(int mod,P p1,P p2,P *pr)
{
  UM m,m1,q,r,t,g1,g2;
  int lc,d,d1,d2,i,j,k,l,l1,l2,adj;
  unsigned int tmp;
  V v;

  v = VR(p1); d = MAX(UDEG(p1),UDEG(p2));
  g1 = W_UMALLOC(d); g2 = W_UMALLOC(d);
  bzero((char *)g1,(int)((d+2)*sizeof(int))); bzero((char *)g2,(int)((d+2)*sizeof(int)));
  if ( d == (int)UDEG(p1) ) {
    mptoum(p1,g1); mptoum(p2,g2);
  } else {
    mptoum(p2,g1); mptoum(p1,g2);
  }
  if ( ( d1 = DEG(g1) ) > ( d2 = DEG(g2) ) ) {
    j = d1 - 1; adj = 1;
  } else
    j = d2;
  lc = 1;
  r = W_UMALLOC(d1+d2); q = W_UMALLOC(d1+d2);
  m1 = W_UMALLOC(d1+d2); t = W_UMALLOC(d1+d2);
  bzero((char *)r,(int)((d1+d2+2)*sizeof(int))); bzero((char *)q,(int)((d1+d2+2)*sizeof(int)));
  bzero((char *)m1,(int)((d1+d2+2)*sizeof(int))); bzero((char *)t,(int)((d1+d2+2)*sizeof(int)));
  m = W_UMALLOC(0); bzero((char *)m,(int)(2*sizeof(int)));
  adj = pwrm(mod,COEF(g2)[DEG(g2)],DEG(g1));
  DEG(m) = 0; COEF(m)[0] = invm(COEF(g2)[DEG(g2)],mod);
  mulum(mod,g2,m,r); cpyum(r,g2);
  while ( 1 ) {
    if ( ( k = DEG(g2) ) < 0 ) {
      *pr = 0;
      return;
    }
    if ( k == j ) {
      if ( k == 0 ) {
        DEG(m) = 0; COEF(m)[0] = adj;
        mulum(mod,g2,m,r); umtomp(v,r,pr);
        return;
      } else {
        DEG(m) = 0;
        COEF(m)[0] = pwrm(mod,COEF(g2)[k],DEG(g1)-k+1);
        mulum(mod,g1,m,r); DEG(r) = divum(mod,r,g2,t);
        DEG(m) = 0; COEF(m)[0] = dmb(mod,lc,lc,&tmp);
        divum(mod,r,m,q); cpyum(g2,g1); cpyum(q,g2);
        lc = COEF(g1)[DEG(g1)]; j = k - 1;
      }
    } else {
      d = j - k;
      DEG(m) = 0; COEF(m)[0] = pwrm(mod,COEF(g2)[DEG(g2)],d);
      mulum(mod,g2,m,m1); l = pwrm(mod,lc,d);
      DEG(m) = 0; COEF(m)[0] = l; divum(mod,m1,m,t);
      if ( k == 0 ) {
        DEG(m) = 0; COEF(m)[0] = adj;
        mulum(mod,t,m,r); umtomp(v,r,pr);
        return;
      } else {
        DEG(m) = 0;
        COEF(m)[0] = pwrm(mod,COEF(g2)[k],DEG(g1)-k+1);
        mulum(mod,g1,m,r); DEG(r) = divum(mod,r,g2,q);
        l1 = dmb(mod,lc,lc,&tmp); l2 = dmb(mod,l,l1,&tmp);
        DEG(m) = 0; COEF(m)[0] = l2;
        divum(mod,r,m,q); cpyum(t,g1); cpyum(q,g2);
        if ( d % 2 )
          for ( i = DEG(g2); i >= 0; i-- )
            COEF(g2)[i] = ( mod - COEF(g2)[i] ) % mod;
        lc = COEF(g1)[DEG(g1)]; j = k - 1;
      }
    }
  }
}

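/*
 * substmp : substitute the polynomial p0 for the variable v0 in p over
 * GF(mod).  When v0 is the main variable of p the value is accumulated
 * Horner-style using the gaps between exponents; otherwise the
 * substitution recurses into the coefficients.
 */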
void substmp(VL vl,int mod,P p,V v0,P p0,P *pr)
{
  P x,t,m,c,s,a;
  DCP dc;
  Z d,z;

  if ( !p )
    *pr = 0;
  else if ( NUM(p) )
    *pr = p;
  else if ( VR(p) != v0 ) {
    MKMV(VR(p),x);
    for ( c = 0, dc = DC(p); dc; dc = NEXT(dc) ) {
      substmp(vl,mod,COEF(dc),v0,p0,&t);
      if ( DEG(dc) ) {
        pwrmp(vl,mod,x,DEG(dc),&s); mulmp(vl,mod,s,t,&m);
        addmp(vl,mod,m,c,&a);
        c = a;
      } else {
        addmp(vl,mod,t,c,&a);
        c = a;
      }
    }
    *pr = c;
  } else {
    dc = DC(p);
    c = COEF(dc);
    for ( d = DEG(dc), dc = NEXT(dc);
      dc; d = DEG(dc), dc = NEXT(dc) ) {
      subz(d,DEG(dc),&z); pwrmp(vl,mod,p0,z,&s);
      mulmp(vl,mod,s,c,&m);
      addmp(vl,mod,m,COEF(dc),&c);
    }
    if ( d ) {
      pwrmp(vl,mod,p0,d,&t); mulmp(vl,mod,t,c,&m);
      c = m;
    }
    *pr = c;
  }
}

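/*
 * reordermp : rewrite p, originally expressed with respect to the
 * variable order ovl, as a polynomial with respect to the new order
 * nvl (all arithmetic done mod mod).
 */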
void reordermp(VL nvl,int mod,VL ovl,P p,P *pr)
{
  DCP dc;
  P x,m,s,t,c;

  if ( !p )
    *pr = 0;
  else if ( NUM(p) )
    *pr = p;
  else {
    MKMV(VR(p),x);
    for ( s = 0, dc = DC(p); dc; dc = NEXT(dc) ) {
      reordermp(nvl,mod,ovl,COEF(dc),&c);
      if ( DEG(dc) ) {
        pwrmp(nvl,mod,x,DEG(dc),&t); mulmp(nvl,mod,c,t,&m);
        addmp(nvl,mod,m,s,&t);
      } else
        addmp(nvl,mod,s,c,&t);
      s = t;
    }
    *pr = s;
  }
}

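/*
 * chnremp : one Chinese remainder step for polynomials: given p with
 * coefficients known modulo q and its image c modulo mod, *r becomes a
 * polynomial congruent to p mod q and to c mod mod, the new part being
 * taken in the symmetric range via normalizemp().
 */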
void chnremp(VL vl,int mod,P p,Z q,P c,P *r)
{
  P tg,sg,ug;
  P t,u;
  MQ mq;

  ptomp(mod,p,&tg); submp(vl,mod,c,tg,&sg);
  UTOMQ(remqi((Q)q,mod),mq),tg = (P)mq; divsmp(vl,mod,sg,tg,&ug);
  normalizemp(mod,ug);
  mptop(ug,&u); mulp(vl,u,(P)q,&t); addp(vl,t,p,r);
}

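/*
 * chnrem : Chinese remainder step on coefficient arrays: the univariate
 * polynomial c (known modulo q) is combined with the UM image t (modulo
 * mod) into *cr modulo q*mod, using symmetric representatives; the new
 * modulus q*mod is returned in *qr.
 */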
/* XXX strange behavior of invm() on SPARC */

void chnrem(int mod,V v,P c,Z q,UM t,P *cr,Z *qr)
{
  int n,m,i,d,a,sd;
  unsigned int tmp;
  Z b,s,z;
  Z *pc,*pcr;
  DCP dc;

  if ( !c || NUM(c) )
    n = 0;
  else
    n = UDEG(c);
  m = DEG(t); d = MAX(n,m); W_CALLOC(n,Z,pc); W_CALLOC(d,Z,pcr);
  if ( !c )
    pc[0] = 0;
  else if ( NUM(c) )
    pc[0] = (Z)c;
  else
    for ( dc = DC(c); dc; dc = NEXT(dc) )
      pc[ZTOS(DEG(dc))] = (Z)COEF(dc);
  for ( i = 0; i <= d; i++ ) {
    b = (i>n?0:pc[i]); a = (i>m?0:COEF(t)[i]);
    if ( b )
      a = (a-((int)remqi((Q)pc[i],mod)))%mod;
    sd = dmb(mod,(a>=0?a:a+mod),invm(remqi((Q)q,mod),mod),&tmp);
    if ( ( 2 * sd ) > mod )
      sd -= mod;
    STOZ(sd,z); mulz(z,q,&s); addz(s,b,&pcr[i]);
  }
  STOZ(mod,z); mulz(q,z,qr); plisttop((P *)pcr,v,d,cr);
}

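/*
 * normalizemp : map every coefficient of g, a polynomial over GF(mod),
 * to its symmetric representative (subtract mod when 2*c > mod).
 */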
void normalizemp(int mod,P g)
{
  DCP dc;

  if ( !g )
    return;
  else if ( NUM(g) ) {
    if ( 2 * CONT((MQ)g) > mod )
      CONT((MQ)g) -= mod;
    return;
  } else
    for ( dc = DC(g); dc; dc = NEXT(dc) )
      normalizemp(mod,COEF(dc));
}

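/* norm : maximum of the absolute values of the coefficients of p (at least 1). */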
void norm(P p,Q *r)
{
  Q t,s;
  DCP dc;

  for ( dc = DC(p), t = (Q)ONE; dc; dc = NEXT(dc) ) {
    absq((Q)COEF(dc),&s);
    if ( cmpq(s,t) > 0 ) t = s;
  }
  *r = t;
}

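/* norm1 : sum of the absolute values of all numeric coefficients of p. */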
void norm1(P p,P *r)
{
  DCP dc;
  P t,s,u;
  Q q;

  if ( NUM(p) )
    absq((Q)p,(Q *)r);
  else {
    for ( t = 0, dc = DC(p); dc; dc = NEXT(dc) ) {
      norm1(COEF(dc),&s); addq((Q)t,(Q)s,(Q *)&u); t = u;
    }
    *r = t;
  }
}

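/* norm1c : maximum of norm1() over the coefficients of p (at least 1). */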
void norm1c(P p,Q *r)
{
  Q t,s;
  DCP dc;

  if ( NUM(p) )
    norm1(p,(P *)r);
  else {
    for ( dc = DC(p), t = (Q)ONE; dc; dc = NEXT(dc) ) {
      norm1(COEF(dc),(P *)&s);
      if ( cmpq(s,t) > 0 ) t = s;
    }
    *r = t;
  }
}

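/*
 * gcdprsmp : GCD of p1 and p2 over GF(mod): contents and primitive parts
 * are split off with gcdcmp()/divsmp(), sprsm() supplies the last element
 * of the subresultant PRS of the primitive parts, and the result is
 * assembled from its primitive part and the GCD of the contents.
 */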
void gcdprsmp(VL vl,int mod,P p1,P p2,P *pr)
{
  P g1,g2,gc1,gc2,gp1,gp2,g,gc,gp,gcr;
  V v1,v2;

  if ( !p1 )
    *pr = p2;
  else if ( !p2 )
    *pr = p1;
  else if ( NUM(p1) || NUM(p2) )
    *pr = (P)ONEM;
  else {
    g1 = p1; g2 = p2;
    if ( ( v1 = VR(g1) ) == ( v2 = VR(g2) ) ) {
      gcdcmp(vl,mod,g1,&gc1); divsmp(vl,mod,g1,gc1,&gp1);
      gcdcmp(vl,mod,g2,&gc2); divsmp(vl,mod,g2,gc2,&gp2);
      gcdprsmp(vl,mod,gc1,gc2,&gcr);
      sprsm(vl,mod,v1,gp1,gp2,&g);

      if ( VR(g) == v1 ) {
        gp = g;
        gcdcmp(vl,mod,gp,&gc); divsmp(vl,mod,gp,gc,&gp1);
        mulmp(vl,mod,gp1,gcr,pr);
      } else
        *pr = gcr;
    } else {
      while ( v1 != vl->v && v2 != vl->v )
        vl = NEXT(vl);
      if ( v1 == vl->v ) {
        gcdcmp(vl,mod,g1,&gc1); gcdprsmp(vl,mod,gc1,g2,pr);
      } else {
        gcdcmp(vl,mod,g2,&gc2); gcdprsmp(vl,mod,gc2,g1,pr);
      }
    }
  }
}

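/*
 * gcdcmp : content of p over GF(mod), i.e. the GCD (by gcdprsmp()) of the
 * coefficients of p with respect to its main variable.
 */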
void gcdcmp(VL vl,int mod,P p,P *pr)
{
  P g,g1;
  DCP dc;

  if ( NUM(p) )
    *pr = (P)ONEM;
  else {
    dc = DC(p);
    g = COEF(dc);
    for ( dc = NEXT(dc); dc; dc = NEXT(dc) ) {
      gcdprsmp(vl,mod,g,COEF(dc),&g1);
      g = g1;
    }
    *pr = g;
  }
}

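/*
 * sprsm : last element of the subresultant PRS of p1 and p2 with respect
 * to v over GF(mod) (signs are not tracked); used by gcdprsmp() as the
 * GCD candidate.
 */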
void sprsm(VL vl,int mod,V v,P p1,P p2,P *pr)
{
  P q1,q2,m,m1,m2,x,h,r,g1,g2;
  int d;
  Z dq;
  VL nvl;

  reordvar(vl,v,&nvl);
  reordermp(nvl,mod,vl,p1,&q1); reordermp(nvl,mod,vl,p2,&q2);

  if ( ( VR(q1) != v ) || ( VR(q2) != v ) ) {
    *pr = 0;
    return;
  }

  if ( deg(v,q1) >= deg(v,q2) ) {
    g1 = q1; g2 = q2;
  } else {
    g2 = q1; g1 = q2;
  }

  for ( h = (P)ONEM, x = (P)ONEM; ; ) {
    if ( !deg(v,g2) )
      break;

    premmp(nvl,mod,g1,g2,&r);
    if ( !r )
      break;

    d = deg(v,g1) - deg(v,g2); STOZ(d,dq);
    pwrmp(nvl,mod,h,dq,&m); mulmp(nvl,mod,m,x,&m1); g1 = g2;
    divsmp(nvl,mod,r,m1,&g2); x = LC(g1); /* g1 is not const w.r.t v */
    pwrmp(nvl,mod,x,dq,&m1); mulmp(nvl,mod,m1,h,&m2);
    divsmp(nvl,mod,m2,m,&h);
  }
  *pr = g2;
}