/*
 * Copyright (c) 1994-2000 FUJITSU LABORATORIES LIMITED
 * All rights reserved.
 *
 * FUJITSU LABORATORIES LIMITED ("FLL") hereby grants you a limited,
 * non-exclusive and royalty-free license to use, copy, modify and
 * redistribute, solely for non-commercial and non-profit purposes, the
 * computer program, "Risa/Asir" ("SOFTWARE"), subject to the terms and
 * conditions of this Agreement. For the avoidance of doubt, you acquire
 * only a limited right to use the SOFTWARE hereunder, and FLL or any
 * third party developer retains all rights, including but not limited to
 * copyrights, in and to the SOFTWARE.
 *
 * (1) FLL does not grant you a license in any way for commercial
 * purposes. You may use the SOFTWARE only for non-commercial and
 * non-profit purposes only, such as academic, research and internal
 * business use.
 * (2) The SOFTWARE is protected by the Copyright Law of Japan and
 * international copyright treaties. If you make copies of the SOFTWARE,
 * with or without modification, as permitted hereunder, you shall affix
 * to all such copies of the SOFTWARE the above copyright notice.
 * (3) An explicit reference to this SOFTWARE and its copyright owner
 * shall be made on your publication or presentation in any form of the
 * results obtained by use of the SOFTWARE.
 * (4) In the event that you modify the SOFTWARE, you shall notify FLL by
 * e-mail at risa-admin@sec.flab.fujitsu.co.jp of the detailed specification
 * for such modification or the source code of the modified part of the
 * SOFTWARE.
 *
 * THE SOFTWARE IS PROVIDED AS IS WITHOUT ANY WARRANTY OF ANY KIND. FLL
 * MAKES ABSOLUTELY NO WARRANTIES, EXPRESSED, IMPLIED OR STATUTORY, AND
 * EXPRESSLY DISCLAIMS ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT OF THIRD PARTIES'
 * RIGHTS. NO FLL DEALER, AGENT, EMPLOYEES IS AUTHORIZED TO MAKE ANY
 * MODIFICATIONS, EXTENSIONS, OR ADDITIONS TO THIS WARRANTY.
 * UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, TORT, CONTRACT,
 * OR OTHERWISE, SHALL FLL BE LIABLE TO YOU OR ANY OTHER PERSON FOR ANY
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL
 * DAMAGES OF ANY CHARACTER, INCLUDING, WITHOUT LIMITATION, DAMAGES
 * ARISING OUT OF OR RELATING TO THE SOFTWARE OR THIS AGREEMENT, DAMAGES
 * FOR LOSS OF GOODWILL, WORK STOPPAGE, OR LOSS OF DATA, OR FOR ANY
 * DAMAGES, EVEN IF FLL SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF
 * SUCH DAMAGES, OR FOR ANY CLAIM BY ANY OTHER PARTY. EVEN IF A PART
 * OF THE SOFTWARE HAS BEEN DEVELOPED BY A THIRD PARTY, THE THIRD PARTY
 * DEVELOPER SHALL HAVE NO LIABILITY IN CONNECTION WITH THE USE,
 * PERFORMANCE OR NON-PERFORMANCE OF THE SOFTWARE.
 *
 * $OpenXM: OpenXM_contrib2/asir2018/engine/PUM.c,v 1.1 2018/09/19 05:45:07 noro Exp $
 */
#include "ca.h"

void gcdcmp(VL vl,int mod,P p,P *pr);
void sprsm(VL vl,int mod,V v,P p1,P p2,P *pr);

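/*
 * detmp : determinant of an n x n polynomial matrix over GF(mod).
 * Computes det(rmat) into *dp by fraction-free (Bareiss-style) Gaussian
 * elimination: each elimination step divides exactly by the previous
 * pivot, and the pivot row is chosen so that the pivot has the fewest
 * monomials.  Row swaps are tracked in 'sgn' to fix the sign of the result.
 */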
void detmp(VL vl,int mod,P **rmat,int n,P *dp)
{
  int i,j,k,sgn;
  P mjj,mij,t,s,u,d;
  P **mat;
  P *mi,*mj;

  mat = (P **)almat_pointer(n,n);
  for ( i = 0; i < n; i++ )
    for ( j = 0; j < n; j++ )
      mat[i][j] = rmat[i][j];
  for ( j = 0, d = (P)ONEM, sgn = 1; j < n; j++ ) {
    for ( i = j; (i < n) && !mat[i][j]; i++ );
    if ( i == n ) {
      *dp = 0; return;
    }
    for ( k = i; k < n; k++ )
      if ( mat[k][j] && (nmonop(mat[k][j]) < nmonop(mat[i][j]) ) )
        i = k;
    if ( j != i ) {
      mj = mat[j]; mat[j] = mat[i]; mat[i] = mj; sgn = -sgn;
    }
    for ( i = j + 1, mj = mat[j], mjj = mj[j]; i < n; i++ )
      for ( k = j + 1, mi = mat[i], mij = mi[j]; k < n; k++ ) {
        mulmp(vl,mod,mi[k],mjj,&t); mulmp(vl,mod,mj[k],mij,&s);
        submp(vl,mod,t,s,&u); divsmp(vl,mod,u,d,&mi[k]);
      }
    d = mjj;
  }
  if ( sgn < 0 )
    chsgnmp(mod,d,dp);
  else
    *dp = d;
}

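/*
 * resultmp : resultant of p1 and p2 with respect to v over GF(mod).
 * The variable order is rearranged so that v becomes the main variable;
 * if only one of the inputs involves v, the result is the other input
 * raised to the corresponding degree.  Otherwise a pseudo-remainder
 * sequence (premmp) with exact divisions reduces the degree until a
 * constant remains, which is corrected by the adjustment factor 'adj'.
 */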
void resultmp(VL vl,int mod,V v,P p1,P p2,P *pr)
{
  P q1,q2,m,m1,m2,lc,q,r,t,g1,g2,adj;
  int d,d1,d2,j,k;
  VL nvl;
  Z dq;
  MQ mq;

  if ( !p1 || !p2 ) {
    *pr = 0; return;
  }
  reordvar(vl,v,&nvl);
  reordermp(nvl,mod,vl,p1,&q1); reordermp(nvl,mod,vl,p2,&q2);

  if ( VR(q1) != v )
    if ( VR(q2) != v ) {
      *pr = 0;
      return;
    } else {
      d = deg(v,q2); STOZ(d,dq);
      pwrmp(vl,mod,q1,dq,pr);
      return;
    }
  else if ( VR(q2) != v ) {
    d = deg(v,q1); STOZ(d,dq);
    pwrmp(vl,mod,q2,dq,pr);
    return;
  }

  if ( ( VR(q1) != v ) || ( VR(q2) != v ) ) {
    *pr = 0;
    return;
  }

  d1 = deg(v,q1); d2 = deg(v,q2);
  if ( d1 > d2 ) {
    g1 = q1; g2 = q2; adj = (P)ONEM;
  } else if ( d1 < d2 ) {
    g2 = q1; g1 = q2;
    if ( (d1 % 2) && (d2 % 2) ) {
      STOMQ(-1,mq); adj = (P)mq;
    } else
      adj = (P)ONEM;
  } else {
    premmp(nvl,mod,q1,q2,&t);
    d = deg(v,t); STOZ(d,dq); pwrmp(nvl,mod,LC(q2),dq,&adj);
    g1 = q2; g2 = t;
    if ( d1 % 2 ) {
      chsgnmp(mod,adj,&t); adj = t;
    }
  }
  d1 = deg(v,g1); j = d1 - 1;

  for ( lc = (P)ONEM; ; ) {
    if ( ( k = deg(v,g2) ) < 0 ) {
      *pr = 0;
      return;
    }

    if ( k == j )
      if ( !k ) {
        divsmp(nvl,mod,g2,adj,pr);
        return;
      } else {
        premmp(nvl,mod,g1,g2,&r); mulmp(nvl,mod,lc,lc,&m);
        divsmp(nvl,mod,r,m,&q);
        g1 = g2; g2 = q;
        lc = LC(g1); /* g1 is not const */
        j = k - 1;
      }
    else {
      d = j - k; STOZ(d,dq);
      pwrmp(nvl,mod,(VR(g2)==v?LC(g2):g2),dq,&m);
      mulmp(nvl,mod,g2,m,&m1);
      pwrmp(nvl,mod,lc,dq,&m); divsmp(nvl,mod,m1,m,&t);
      if ( k == 0 ) {
        divsmp(nvl,mod,t,adj,pr);
        return;
      } else {
        premmp(nvl,mod,g1,g2,&r);
        mulmp(nvl,mod,lc,lc,&m1); mulmp(nvl,mod,m,m1,&m2);
        divsmp(nvl,mod,r,m2,&q);
        g1 = t; g2 = q;
        if ( d % 2 ) {
          chsgnmp(mod,g2,&t); g2 = t;
        }
        lc = LC(g1); /* g1 is not const */
        j = k - 1;
      }
    }
  }
}

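/*
 * premmp : pseudo-remainder of p1 by p2 over GF(mod).
 * When p1 and p2 have the same main variable, the coefficient array of
 * p1 is repeatedly scaled by LC(p2) and reduced by p2 until its degree
 * drops below deg(p2); the result is returned in *pr.
 */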
void premmp(VL vl,int mod,P p1,P p2,P *pr)
{
  P m,m1,m2;
  P *pw;
  DCP dc;
  V v1,v2;
  register int i,j;
  int n1,n2,d;

  if ( NUM(p1) )
    if ( NUM(p2) )
      *pr = 0;
    else
      *pr = p1;
  else if ( NUM(p2) )
    *pr = 0;
  else if ( ( v1 = VR(p1) ) == ( v2 = VR(p2) ) ) {
    n1 = deg(v1,p1); n2 = deg(v1,p2);
    pw = (P *)ALLOCA((n1+1)*sizeof(P));
    bzero((char *)pw,(int)((n1+1)*sizeof(P)));

    for ( dc = DC(p1); dc; dc = NEXT(dc) )
      pw[ZTOS(DEG(dc))] = COEF(dc);

    for ( i = n1; i >= n2; i-- ) {
      if ( pw[i] ) {
        chsgnmp(mod,pw[i],&m);
        for ( j = i; j >= 0; j-- ) {
          mulmp(vl,mod,pw[j],LC(p2),&m1); pw[j] = m1;
        }

        for ( dc = DC(p2), d = i - n2; dc; dc = NEXT(dc) ) {
          mulmp(vl,mod,COEF(dc),m,&m1);
          addmp(vl,mod,pw[ZTOS(DEG(dc))+d],m1,&m2);
          pw[ZTOS(DEG(dc))+d] = m2;
        }
      } else
        for ( j = i; j >= 0; j-- )
          if ( pw[j] ) {
            mulmp(vl,mod,pw[j],LC(p2),&m1); pw[j] = m1;
          }
    }
    plisttop(pw,v1,n2-1,pr);
  } else {
    while ( v1 != vl->v && v2 != vl->v )
      vl = NEXT(vl);
    if ( v1 == vl->v )
      *pr = 0;
    else
      *pr = p1;
  }
}

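/*
 * srchmp : resultant of p1 and p2 with respect to v over GF(mod),
 * computed by evaluation and interpolation.  The next variable v0 is
 * given successive values i = 0,1,..., resultants of the images are
 * computed recursively (srchump in the univariate case), and the result
 * is rebuilt by incremental Newton-style interpolation in v0, skipping
 * evaluation points where a leading coefficient vanishes.  If mod does
 * not supply enough good points, error() is raised.
 */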
void srchmp(VL vl,int mod,V v,P p1,P p2,P *pr)
{
  P a,b,q1,q2,x,t,s,d,bg,c,c0,db;
  int i,m,k;
  V v0;
  VL nvl,tvl,nvl1,nvl2;
  Z dq;
  MQ q;

  if ( vl->v != v ) {
    reordvar(vl,v,&tvl);
    reordermp(tvl,mod,vl,p1,&q1); reordermp(tvl,mod,vl,p2,&q2);
  } else {
    q1 = p1; q2 = p2; tvl = vl;
  }
  clctv(tvl,q1,&nvl1); clctv(tvl,q2,&nvl2); mergev(tvl,nvl1,nvl2,&nvl);
  if ( VR(q1) != v )
    if ( VR(q2) != v )
      *pr = 0;
    else {
      m = getdeg(v,q2); STOZ(m,dq); pwrmp(vl,mod,q1,dq,pr);
    }
  else if ( VR(q2) != v ) {
    m = getdeg(v,q1); STOZ(m,dq); pwrmp(vl,mod,q2,dq,pr);
  } else if ( !NEXT(nvl) )
    srchump(mod,p1,p2,pr);
  else {
    v0 = NEXT(nvl)->v;
    k = getdeg(v,q1)*getdeg(v0,q2)+getdeg(v,q2)*getdeg(v0,q1)+1;
    for ( i = 0, c = 0, d = (P)ONEM, MKMV(v0,x);
      ( i < mod ) && (getdeg(v0,d) < k) ; i++ ) {
      STOMQ(i,q),bg = (P)q; substmp(nvl,mod,LC(q1),v0,bg,&t);
      if ( !t )
        continue;
      substmp(nvl,mod,LC(q2),v0,bg,&t);
      if ( !t )
        continue;
      substmp(nvl,mod,q1,v0,bg,&a); substmp(nvl,mod,q2,v0,bg,&b);
      srchmp(nvl,mod,v,a,b,&c0); substmp(nvl,mod,c,v0,bg,&t);
      submp(nvl,mod,c0,t,&s); mulmp(nvl,mod,s,d,&t);
      substmp(nvl,mod,d,v0,bg,&db);
      divsmp(nvl,mod,t,db,&s); addmp(nvl,mod,s,c,&t); c = t;
      submp(nvl,mod,x,bg,&t); mulmp(nvl,mod,d,t,&s); d = s;
    }
    if ( i == mod )
      error("srchmp : ???");
    *pr = c;
  }
}

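/*
 * ucmpp : compare two univariate polynomials with rational coefficients.
 * Returns 0 when p and q have identical degrees and coefficients term by
 * term, and a nonzero value otherwise.
 */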
int ucmpp(P p,P q)
{
  DCP dcp,dcq;

  if ( !p )
    if ( !q )
      return ( 0 );
    else
      return ( 1 );
  else if ( !q )
    return ( 1 );
  else if ( NUM(p) )
    if ( !NUM(q) )
      return ( 1 );
    else
      return ( cmpq((Q)p,(Q)q) );
  else if ( NUM(q) )
    return ( 1 );
  else {
    for ( dcp = DC(p), dcq = DC(q); dcp && dcq;
      dcp = NEXT(dcp), dcq = NEXT(dcq) )
      if ( cmpz(DEG(dcp),DEG(dcq) ) )
        return ( 1 );
      else if ( cmpq((Q)COEF(dcp),(Q)COEF(dcq) ) )
        return ( 1 );
    if ( dcp || dcq )
      return ( 1 );
    else
      return ( 0 );
  }
}

#if 0
srchump(mod,p1,p2,pr)
int mod;
P p1,p2,*pr;
{
  int r;
  MQ mq;

  r = eucresum(mod,p1,p2);
  STOMQ(r,mq); *pr = (P)mq;
}
#endif

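/*
 * srchump : resultant of the univariate polynomials p1 and p2 over
 * GF(mod), using dense UM arithmetic.  g2 is first made monic and the
 * scaling factor is absorbed into 'adj'; the main loop is a
 * pseudo-remainder sequence with exact divisions, analogous to resultmp.
 */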
void srchump(int mod,P p1,P p2,P *pr)
{
  UM m,m1,q,r,t,g1,g2;
  int lc,d,d1,d2,i,j,k,l,l1,l2,tmp,adj;
  V v;

  v = VR(p1); d = MAX(UDEG(p1),UDEG(p2));
  g1 = W_UMALLOC(d); g2 = W_UMALLOC(d);
  bzero((char *)g1,(int)((d+2)*sizeof(int))); bzero((char *)g2,(int)((d+2)*sizeof(int)));
  if ( d == (int)UDEG(p1) ) {
    mptoum(p1,g1); mptoum(p2,g2);
  } else {
    mptoum(p2,g1); mptoum(p1,g2);
  }
  if ( ( d1 = DEG(g1) ) > ( d2 = DEG(g2) ) ) {
    j = d1 - 1; adj = 1;
  } else
    j = d2;
  lc = 1;
  r = W_UMALLOC(d1+d2); q = W_UMALLOC(d1+d2);
  m1 = W_UMALLOC(d1+d2); t = W_UMALLOC(d1+d2);
  bzero((char *)r,(int)((d1+d2+2)*sizeof(int))); bzero((char *)q,(int)((d1+d2+2)*sizeof(int)));
  bzero((char *)m1,(int)((d1+d2+2)*sizeof(int))); bzero((char *)t,(int)((d1+d2+2)*sizeof(int)));
  m = W_UMALLOC(0); bzero((char *)m,(int)(2*sizeof(int)));
  adj = pwrm(mod,COEF(g2)[DEG(g2)],DEG(g1));
  DEG(m) = 0; COEF(m)[0] = invm(COEF(g2)[DEG(g2)],mod);
  mulum(mod,g2,m,r); cpyum(r,g2);
  while ( 1 ) {
    if ( ( k = DEG(g2) ) < 0 ) {
      *pr = 0;
      return;
    }
    if ( k == j ) {
      if ( k == 0 ) {
        DEG(m) = 0; COEF(m)[0] = adj;
        mulum(mod,g2,m,r); umtomp(v,r,pr);
        return;
      } else {
        DEG(m) = 0;
        COEF(m)[0] = pwrm(mod,COEF(g2)[k],DEG(g1)-k+1);
        mulum(mod,g1,m,r); DEG(r) = divum(mod,r,g2,t);
        DEG(m) = 0; COEF(m)[0] = dmb(mod,lc,lc,&tmp);
        divum(mod,r,m,q); cpyum(g2,g1); cpyum(q,g2);
        lc = COEF(g1)[DEG(g1)]; j = k - 1;
      }
    } else {
      d = j - k;
      DEG(m) = 0; COEF(m)[0] = pwrm(mod,COEF(g2)[DEG(g2)],d);
      mulum(mod,g2,m,m1); l = pwrm(mod,lc,d);
      DEG(m) = 0; COEF(m)[0] = l; divum(mod,m1,m,t);
      if ( k == 0 ) {
        DEG(m) = 0; COEF(m)[0] = adj;
        mulum(mod,t,m,r); umtomp(v,r,pr);
        return;
      } else {
        DEG(m) = 0;
        COEF(m)[0] = pwrm(mod,COEF(g2)[k],DEG(g1)-k+1);
        mulum(mod,g1,m,r); DEG(r) = divum(mod,r,g2,q);
        l1 = dmb(mod,lc,lc,&tmp); l2 = dmb(mod,l,l1,&tmp);
        DEG(m) = 0; COEF(m)[0] = l2;
        divum(mod,r,m,q); cpyum(t,g1); cpyum(q,g2);
        if ( d % 2 )
          for ( i = DEG(g2); i >= 0; i-- )
            COEF(g2)[i] = ( mod - COEF(g2)[i] ) % mod;
        lc = COEF(g1)[DEG(g1)]; j = k - 1;
      }
    }
  }
}

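/*
 * substmp : substitute p0 for the variable v0 in p, over GF(mod).
 * If v0 is not the main variable of p, the substitution is applied
 * recursively to the coefficients; if it is, the value is accumulated
 * by a Horner-like scheme over the gaps between exponents.
 */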
void substmp(VL vl,int mod,P p,V v0,P p0,P *pr)
{
  P x,t,m,c,s,a;
  DCP dc;
  Z d,z;

  if ( !p )
    *pr = 0;
  else if ( NUM(p) )
    *pr = p;
  else if ( VR(p) != v0 ) {
    MKMV(VR(p),x);
    for ( c = 0, dc = DC(p); dc; dc = NEXT(dc) ) {
      substmp(vl,mod,COEF(dc),v0,p0,&t);
      if ( DEG(dc) ) {
        pwrmp(vl,mod,x,DEG(dc),&s); mulmp(vl,mod,s,t,&m);
        addmp(vl,mod,m,c,&a);
        c = a;
      } else {
        addmp(vl,mod,t,c,&a);
        c = a;
      }
    }
    *pr = c;
  } else {
    dc = DC(p);
    c = COEF(dc);
    for ( d = DEG(dc), dc = NEXT(dc);
      dc; d = DEG(dc), dc = NEXT(dc) ) {
      subz(d,DEG(dc),&z); pwrmp(vl,mod,p0,z,&s);
      mulmp(vl,mod,s,c,&m);
      addmp(vl,mod,m,COEF(dc),&c);
    }
    if ( d ) {
      pwrmp(vl,mod,p0,d,&t); mulmp(vl,mod,t,c,&m);
      c = m;
    }
    *pr = c;
  }
}

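/*
 * reordermp : rewrite p with respect to the new variable order nvl,
 * with arithmetic over GF(mod).  The polynomial is expanded term by term
 * and re-accumulated with mulmp/addmp so that the recursive internal
 * representation follows nvl instead of ovl.
 */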
void reordermp(VL nvl,int mod,VL ovl,P p,P *pr)
{
  DCP dc;
  P x,m,s,t,c;

  if ( !p )
    *pr = 0;
  else if ( NUM(p) )
    *pr = p;
  else {
    MKMV(VR(p),x);
    for ( s = 0, dc = DC(p); dc; dc = NEXT(dc) ) {
      reordermp(nvl,mod,ovl,COEF(dc),&c);
      if ( DEG(dc) ) {
        pwrmp(nvl,mod,x,DEG(dc),&t); mulmp(nvl,mod,c,t,&m);
        addmp(nvl,mod,m,s,&t);
      } else
        addmp(nvl,mod,s,c,&t);
      s = t;
    }
    *pr = s;
  }
}

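/*
 * chnremp : Chinese remainder lifting for polynomials.  Given p with
 * integer coefficients known modulo q and its image c modulo mod,
 * computes r with r = p (mod q) and r = c (mod mod); the correction
 * term is put into the symmetric range by normalizemp.
 */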
void chnremp(VL vl,int mod,P p,Z q,P c,P *r)
{
  P tg,sg,ug;
  P t,u;
  MQ mq;

  ptomp(mod,p,&tg); submp(vl,mod,c,tg,&sg);
  UTOMQ(remqi((Q)q,mod),mq),tg = (P)mq; divsmp(vl,mod,sg,tg,&ug);
  normalizemp(mod,ug);
  mptop(ug,&u); mulp(vl,u,(P)q,&t); addp(vl,t,p,r);
}

/* XXX strange behavior of invm() on SPARC */

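/*
 * chnrem : univariate Chinese remainder step.  c holds the coefficients
 * modulo q and t (a UM) holds them modulo mod; the combined coefficients
 * modulo q*mod are returned in *cr with symmetric representatives, and
 * the new modulus q*mod in *qr.
 */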
void chnrem(int mod,V v,P c,Z q,UM t,P *cr,Z *qr)
{
  int n,m,i,d,a,sd,tmp;
  Z b,s,z;
  Z *pc,*pcr;
  DCP dc;

  if ( !c || NUM(c) )
    n = 0;
  else
    n = UDEG(c);
  m = DEG(t); d = MAX(n,m); W_CALLOC(n,Z,pc); W_CALLOC(d,Z,pcr);
  if ( !c )
    pc[0] = 0;
  else if ( NUM(c) )
    pc[0] = (Z)c;
  else
    for ( dc = DC(c); dc; dc = NEXT(dc) )
      pc[ZTOS(DEG(dc))] = (Z)COEF(dc);
  for ( i = 0; i <= d; i++ ) {
    b = (i>n?0:pc[i]); a = (i>m?0:COEF(t)[i]);
    if ( b )
      a = (a-((int)remqi((Q)pc[i],mod)))%mod;
    sd = dmb(mod,(a>=0?a:a+mod),invm(remqi((Q)q,mod),mod),&tmp);
    if ( ( 2 * sd ) > mod )
      sd -= mod;
    STOZ(sd,z); mulz(z,q,&s); addz(s,b,&pcr[i]);
  }
  STOZ(mod,z); mulz(q,z,qr); plisttop((P *)pcr,v,d,cr);
}

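/*
 * normalizemp : replace every coefficient of g, an element of GF(mod)
 * stored in [0,mod), by its symmetric representative in place, i.e.
 * subtract mod whenever 2*coefficient > mod.
 */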
void normalizemp(int mod,P g)
{
  DCP dc;

  if ( !g )
    return;
  else if ( NUM(g) ) {
    if ( 2 * CONT((MQ)g) > mod )
      CONT((MQ)g) -= mod;
    return;
  } else
    for ( dc = DC(g); dc; dc = NEXT(dc) )
      normalizemp(mod,COEF(dc));
}

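/*
 * norm : maximum of the absolute values of the (rational) coefficients
 * of p, but at least 1 since the maximum starts from ONE.
 */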
void norm(P p,Q *r)
{
  Q t,s;
  DCP dc;

  for ( dc = DC(p), t = (Q)ONE; dc; dc = NEXT(dc) ) {
    absq((Q)COEF(dc),&s);
    if ( cmpq(s,t) > 0 ) t = s;
  }
  *r = t;
}

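/*
 * norm1 : sum of the absolute values of all numeric coefficients of p
 * (the 1-norm), computed recursively; for a number, its absolute value.
 */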
void norm1(P p,P *r)
{
  DCP dc;
  P t,s,u;
  Q q;

  if ( NUM(p) )
    absq((Q)p,(Q *)r);
  else {
    for ( t = 0, dc = DC(p); dc; dc = NEXT(dc) ) {
      norm1(COEF(dc),&s); addq((Q)t,(Q)s,(Q *)&u); t = u;
    }
    *r = t;
  }
}

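/*
 * norm1c : maximum over the top-level coefficients of p of their 1-norms
 * (norm1), but at least 1 since the maximum starts from ONE.
 */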
void norm1c(P p,Q *r)
{
  Q t,s;
  DCP dc;

  if ( NUM(p) )
    norm1(p,(P *)r);
  else {
    for ( dc = DC(p), t = (Q)ONE; dc; dc = NEXT(dc) ) {
      norm1(COEF(dc),(P *)&s);
      if ( cmpq(s,t) > 0 ) t = s;
    }
    *r = t;
  }
}

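/*
 * gcdprsmp : gcd of p1 and p2 over GF(mod) by the subresultant PRS
 * algorithm.  The contents with respect to the main variable are split
 * off with gcdcmp, sprsm computes the remainder sequence of the
 * primitive parts, and the content gcd is multiplied back in.
 */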
void gcdprsmp(VL vl,int mod,P p1,P p2,P *pr)
{
  P g1,g2,gc1,gc2,gp1,gp2,g,gc,gp,gcr;
  V v1,v2;

  if ( !p1 )
    *pr = p2;
  else if ( !p2 )
    *pr = p1;
  else if ( NUM(p1) || NUM(p2) )
    *pr = (P)ONEM;
  else {
    g1 = p1; g2 = p2;
    if ( ( v1 = VR(g1) ) == ( v2 = VR(g2) ) ) {
      gcdcmp(vl,mod,g1,&gc1); divsmp(vl,mod,g1,gc1,&gp1);
      gcdcmp(vl,mod,g2,&gc2); divsmp(vl,mod,g2,gc2,&gp2);
      gcdprsmp(vl,mod,gc1,gc2,&gcr);
      sprsm(vl,mod,v1,gp1,gp2,&g);

      if ( VR(g) == v1 ) {
        gp = g;
        gcdcmp(vl,mod,gp,&gc); divsmp(vl,mod,gp,gc,&gp1);
        mulmp(vl,mod,gp1,gcr,pr);
      } else
        *pr = gcr;
    } else {
      while ( v1 != vl->v && v2 != vl->v )
        vl = NEXT(vl);
      if ( v1 == vl->v ) {
        gcdcmp(vl,mod,g1,&gc1); gcdprsmp(vl,mod,gc1,g2,pr);
      } else {
        gcdcmp(vl,mod,g2,&gc2); gcdprsmp(vl,mod,gc2,g1,pr);
      }
    }
  }
}

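/*
 * gcdcmp : content of p with respect to its main variable over GF(mod),
 * i.e. the gcd of all its coefficients, obtained by repeated gcdprsmp.
 */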
void gcdcmp(VL vl,int mod,P p,P *pr)
{
  P g,g1;
  DCP dc;

  if ( NUM(p) )
    *pr = (P)ONEM;
  else {
    dc = DC(p);
    g = COEF(dc);
    for ( dc = NEXT(dc); dc; dc = NEXT(dc) ) {
      gcdprsmp(vl,mod,g,COEF(dc),&g1);
      g = g1;
    }
    *pr = g;
  }
}

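/*
 * sprsm : subresultant polynomial remainder sequence of p1 and p2 with
 * respect to v over GF(mod).  *pr receives the last element of the
 * sequence, which gcdprsmp uses as a gcd candidate; the h and x factors
 * keep the intermediate divisions exact.
 */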
void sprsm(VL vl,int mod,V v,P p1,P p2,P *pr)
{
  P q1,q2,m,m1,m2,x,h,r,g1,g2;
  int d;
  Z dq;
  VL nvl;

  reordvar(vl,v,&nvl);
  reordermp(nvl,mod,vl,p1,&q1); reordermp(nvl,mod,vl,p2,&q2);

  if ( ( VR(q1) != v ) || ( VR(q2) != v ) ) {
    *pr = 0;
    return;
  }

  if ( deg(v,q1) >= deg(v,q2) ) {
    g1 = q1; g2 = q2;
  } else {
    g2 = q1; g1 = q2;
  }

  for ( h = (P)ONEM, x = (P)ONEM; ; ) {
    if ( !deg(v,g2) )
      break;

    premmp(nvl,mod,g1,g2,&r);
    if ( !r )
      break;

    d = deg(v,g1) - deg(v,g2); STOZ(d,dq);
    pwrmp(nvl,mod,h,dq,&m); mulmp(nvl,mod,m,x,&m1); g1 = g2;
    divsmp(nvl,mod,r,m1,&g2); x = LC(g1); /* g1 is not const w.r.t v */
    pwrmp(nvl,mod,x,dq,&m1); mulmp(nvl,mod,m1,h,&m2);
    divsmp(nvl,mod,m2,m,&h);
  }
  *pr = g2;
}