/*
 * Copyright (c) 1994-2000 FUJITSU LABORATORIES LIMITED
 * All rights reserved.
 *
 * FUJITSU LABORATORIES LIMITED ("FLL") hereby grants you a limited,
 * non-exclusive and royalty-free license to use, copy, modify and
 * redistribute, solely for non-commercial and non-profit purposes, the
 * computer program, "Risa/Asir" ("SOFTWARE"), subject to the terms and
 * conditions of this Agreement. For the avoidance of doubt, you acquire
 * only a limited right to use the SOFTWARE hereunder, and FLL or any
 * third party developer retains all rights, including but not limited to
 * copyrights, in and to the SOFTWARE.
 *
 * (1) FLL does not grant you a license in any way for commercial
 * purposes. You may use the SOFTWARE only for non-commercial and
 * non-profit purposes only, such as academic, research and internal
 * business use.
 * (2) The SOFTWARE is protected by the Copyright Law of Japan and
 * international copyright treaties. If you make copies of the SOFTWARE,
 * with or without modification, as permitted hereunder, you shall affix
 * to all such copies of the SOFTWARE the above copyright notice.
 * (3) An explicit reference to this SOFTWARE and its copyright owner
 * shall be made on your publication or presentation in any form of the
 * results obtained by use of the SOFTWARE.
 * (4) In the event that you modify the SOFTWARE, you shall notify FLL by
 * e-mail at risa-admin@sec.flab.fujitsu.co.jp of the detailed specification
 * for such modification or the source code of the modified part of the
 * SOFTWARE.
 *
 * THE SOFTWARE IS PROVIDED AS IS WITHOUT ANY WARRANTY OF ANY KIND. FLL
 * MAKES ABSOLUTELY NO WARRANTIES, EXPRESSED, IMPLIED OR STATUTORY, AND
 * EXPRESSLY DISCLAIMS ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT OF THIRD PARTIES'
 * RIGHTS. NO FLL DEALER, AGENT, EMPLOYEES IS AUTHORIZED TO MAKE ANY
 * MODIFICATIONS, EXTENSIONS, OR ADDITIONS TO THIS WARRANTY.
 * UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, TORT, CONTRACT,
 * OR OTHERWISE, SHALL FLL BE LIABLE TO YOU OR ANY OTHER PERSON FOR ANY
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL
 * DAMAGES OF ANY CHARACTER, INCLUDING, WITHOUT LIMITATION, DAMAGES
 * ARISING OUT OF OR RELATING TO THE SOFTWARE OR THIS AGREEMENT, DAMAGES
 * FOR LOSS OF GOODWILL, WORK STOPPAGE, OR LOSS OF DATA, OR FOR ANY
 * DAMAGES, EVEN IF FLL SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF
 * SUCH DAMAGES, OR FOR ANY CLAIM BY ANY OTHER PARTY. EVEN IF A PART
 * OF THE SOFTWARE HAS BEEN DEVELOPED BY A THIRD PARTY, THE THIRD PARTY
 * DEVELOPER SHALL HAVE NO LIABILITY IN CONNECTION WITH THE USE,
 * PERFORMANCE OR NON-PERFORMANCE OF THE SOFTWARE.
 *
 * $OpenXM: OpenXM_contrib2/asir2000/engine/PUM.c,v 1.3 2000/08/22 05:04:04 noro Exp $
 */
#include "ca.h"

void gcdcmp(), sprsm();
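
/*
 * detmp : determinant of the n x n polynomial matrix rmat over GF(mod),
 * returned in *dp.  Works on a local copy of rmat and uses fraction-free
 * (Bareiss-style) Gaussian elimination: each 2x2 cross-multiplication is
 * divided exactly by the previous pivot, pivots are chosen to have few
 * monomials (nmonop), and row swaps are tracked in sgn.
 */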
void detmp(vl,mod,rmat,n,dp)
VL vl;
int mod;
P **rmat;
int n;
P *dp;
{
  int i,j,k,sgn;
  P mjj,mij,t,s,u,d;
  P **mat;
  P *mi,*mj;

  mat = (P **)almat_pointer(n,n);
  for ( i = 0; i < n; i++ )
    for ( j = 0; j < n; j++ )
      mat[i][j] = rmat[i][j];
  for ( j = 0, d = (P)ONEM, sgn = 1; j < n; j++ ) {
    for ( i = j; (i < n) && !mat[i][j]; i++ );
    if ( i == n ) {
      *dp = 0; return;
    }
    for ( k = i; k < n; k++ )
      if ( mat[k][j] && (nmonop(mat[k][j]) < nmonop(mat[i][j]) ) )
        i = k;
    if ( j != i ) {
      mj = mat[j]; mat[j] = mat[i]; mat[i] = mj; sgn = -sgn;
    }
    for ( i = j + 1, mj = mat[j], mjj = mj[j]; i < n; i++ )
      for ( k = j + 1, mi = mat[i], mij = mi[j]; k < n; k++ ) {
        mulmp(vl,mod,mi[k],mjj,&t); mulmp(vl,mod,mj[k],mij,&s);
        submp(vl,mod,t,s,&u); divsmp(vl,mod,u,d,&mi[k]);
      }
    d = mjj;
  }
  if ( sgn < 0 )
    chsgnmp(mod,d,dp);
  else
    *dp = d;
}
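
/*
 * resultmp : resultant of p1 and p2 with respect to v over GF(mod),
 * apparently computed by a reduced polynomial remainder sequence:
 * repeated pseudo-division (premmp) with exact removal of the
 * accumulated leading-coefficient powers, with sign and degree
 * adjustments collected in adj.
 */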
void resultmp(vl,mod,v,p1,p2,pr)
VL vl;
int mod;
V v;
P p1,p2,*pr;
{
  P q1,q2,m,m1,m2,lc,q,r,t,g1,g2,adj;
  int d,d1,d2,j,k;
  VL nvl;
  Q dq;
  MQ mq;

  if ( !p1 || !p2 ) {
    *pr = 0; return;
  }
  reordvar(vl,v,&nvl);
  reordermp(nvl,mod,vl,p1,&q1); reordermp(nvl,mod,vl,p2,&q2);

  if ( VR(q1) != v )
    if ( VR(q2) != v ) {
      *pr = 0;
      return;
    } else {
      d = deg(v,q2); STOQ(d,dq);
      pwrmp(vl,mod,q1,dq,pr);
      return;
    }
  else if ( VR(q2) != v ) {
    d = deg(v,q1); STOQ(d,dq);
    pwrmp(vl,mod,q2,dq,pr);
    return;
  }

  if ( ( VR(q1) != v ) || ( VR(q2) != v ) ) {
    *pr = 0;
    return;
  }

  d1 = deg(v,q1); d2 = deg(v,q2);
  if ( d1 > d2 ) {
    g1 = q1; g2 = q2; adj = (P)ONEM;
  } else if ( d1 < d2 ) {
    g2 = q1; g1 = q2;
    if ( (d1 % 2) && (d2 % 2) ) {
      STOMQ(-1,mq); adj = (P)mq;
    } else
      adj = (P)ONEM;
  } else {
    premmp(nvl,mod,q1,q2,&t);
    d = deg(v,t); STOQ(d,dq); pwrmp(nvl,mod,LC(q2),dq,&adj);
    g1 = q2; g2 = t;
    if ( d1 % 2 ) {
      chsgnmp(mod,adj,&t); adj = t;
    }
  }
  d1 = deg(v,g1); j = d1 - 1;

  for ( lc = (P)ONEM; ; ) {
    if ( ( k = deg(v,g2) ) < 0 ) {
      *pr = 0;
      return;
    }

    if ( k == j )
      if ( !k ) {
        divsmp(nvl,mod,g2,adj,pr);
        return;
      } else {
        premmp(nvl,mod,g1,g2,&r); mulmp(nvl,mod,lc,lc,&m);
        divsmp(nvl,mod,r,m,&q);
        g1 = g2; g2 = q;
        lc = LC(g1); /* g1 is not const */
        j = k - 1;
      }
    else {
      d = j - k; STOQ(d,dq);
      pwrmp(nvl,mod,(VR(g2)==v?LC(g2):g2),dq,&m);
      mulmp(nvl,mod,g2,m,&m1);
      pwrmp(nvl,mod,lc,dq,&m); divsmp(nvl,mod,m1,m,&t);
      if ( k == 0 ) {
        divsmp(nvl,mod,t,adj,pr);
        return;
      } else {
        premmp(nvl,mod,g1,g2,&r);
        mulmp(nvl,mod,lc,lc,&m1); mulmp(nvl,mod,m,m1,&m2);
        divsmp(nvl,mod,r,m2,&q);
        g1 = t; g2 = q;
        if ( d % 2 ) {
          chsgnmp(mod,g2,&t); g2 = t;
        }
        lc = LC(g1); /* g1 is not const */
        j = k - 1;
      }
    }
  }
}
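
/*
 * premmp : pseudo-remainder of p1 divided by p2 over GF(mod), taken with
 * respect to their common main variable; the coefficients of p1 are kept
 * in the dense array pw and eliminated from degree n1 down to n2.
 * If the main variables differ, the result is 0 or p1 according to the
 * variable order in vl.
 */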
void premmp(vl,mod,p1,p2,pr)
VL vl;
int mod;
P p1,p2,*pr;
{
  P m,m1,m2;
  P *pw;
  DCP dc;
  V v1,v2;
  register int i,j;
  int n1,n2,d;

  if ( NUM(p1) )
    if ( NUM(p2) )
      *pr = 0;
    else
      *pr = p1;
  else if ( NUM(p2) )
    *pr = 0;
  else if ( ( v1 = VR(p1) ) == ( v2 = VR(p2) ) ) {
    n1 = deg(v1,p1); n2 = deg(v1,p2);
    pw = (P *)ALLOCA((n1+1)*sizeof(P));
    bzero((char *)pw,(int)((n1+1)*sizeof(P)));

    for ( dc = DC(p1); dc; dc = NEXT(dc) )
      pw[QTOS(DEG(dc))] = COEF(dc);

    for ( i = n1; i >= n2; i-- ) {
      if ( pw[i] ) {
        chsgnmp(mod,pw[i],&m);
        for ( j = i; j >= 0; j-- ) {
          mulmp(vl,mod,pw[j],LC(p2),&m1); pw[j] = m1;
        }

        for ( dc = DC(p2), d = i - n2; dc; dc = NEXT(dc) ) {
          mulmp(vl,mod,COEF(dc),m,&m1);
          addmp(vl,mod,pw[QTOS(DEG(dc))+d],m1,&m2);
          pw[QTOS(DEG(dc))+d] = m2;
        }
      } else
        for ( j = i; j >= 0; j-- )
          if ( pw[j] ) {
            mulmp(vl,mod,pw[j],LC(p2),&m1); pw[j] = m1;
          }
    }
    plisttop(pw,v1,n2-1,pr);
  } else {
    while ( v1 != vl->v && v2 != vl->v )
      vl = NEXT(vl);
    if ( v1 == vl->v )
      *pr = 0;
    else
      *pr = p1;
  }
}
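
/*
 * srchmp : resultant of p1 and p2 with respect to v over GF(mod),
 * computed recursively by evaluation and interpolation: the next
 * variable v0 is substituted by 0,1,2,... (skipping points where a
 * leading coefficient vanishes), the resultants of the images are
 * combined by Newton interpolation in c and d, and srchump handles the
 * univariate base case.
 */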
void srchmp(vl,mod,v,p1,p2,pr)
VL vl;
int mod;
V v;
P p1,p2,*pr;
{
  P a,b,q1,q2,x,t,s,d,bg,c,c0,db;
  int i,m,k;
  V v0;
  VL nvl,tvl,nvl1,nvl2;
  Q dq;
  MQ q;

  if ( vl->v != v ) {
    reordvar(vl,v,&tvl);
    reordermp(tvl,mod,vl,p1,&q1); reordermp(tvl,mod,vl,p2,&q2);
  } else {
    q1 = p1; q2 = p2; tvl = vl;
  }
  clctv(tvl,q1,&nvl1); clctv(tvl,q2,&nvl2); mergev(tvl,nvl1,nvl2,&nvl);
  if ( VR(q1) != v )
    if ( VR(q2) != v )
      *pr = 0;
    else {
      m = getdeg(v,q2); STOQ(m,dq); pwrmp(vl,mod,q1,dq,pr);
    }
  else if ( VR(q2) != v ) {
    m = getdeg(v,q1); STOQ(m,dq); pwrmp(vl,mod,q2,dq,pr);
  } else if ( !NEXT(nvl) )
    srchump(mod,p1,p2,pr);
  else {
    v0 = NEXT(nvl)->v;
    k = getdeg(v,q1)*getdeg(v0,q2)+getdeg(v,q2)*getdeg(v0,q1)+1;
    for ( i = 0, c = 0, d = (P)ONEM, MKMV(v0,x);
      ( i < mod ) && (getdeg(v0,d) < k) ; i++ ) {
      STOMQ(i,q),bg = (P)q; substmp(nvl,mod,LC(q1),v0,bg,&t);
      if ( !t )
        continue;
      substmp(nvl,mod,LC(q2),v0,bg,&t);
      if ( !t )
        continue;
      substmp(nvl,mod,q1,v0,bg,&a); substmp(nvl,mod,q2,v0,bg,&b);
      srchmp(nvl,mod,v,a,b,&c0); substmp(nvl,mod,c,v0,bg,&t);
      submp(nvl,mod,c0,t,&s); mulmp(nvl,mod,s,d,&t);
      substmp(nvl,mod,d,v0,bg,&db);
      divsmp(nvl,mod,t,db,&s); addmp(nvl,mod,s,c,&t); c = t;
      submp(nvl,mod,x,bg,&t); mulmp(nvl,mod,d,t,&s); d = s;
    }
    if ( i == mod )
      error("srchmp : ???");
    *pr = c;
  }
}
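
/*
 * ucmpp : compare two univariate polynomials with rational coefficients
 * term by term; returns 0 if p and q are identical, the cmpq() result
 * when both are constants, and 1 otherwise.
 */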
int ucmpp(p,q)
P p,q;
{
  DCP dcp,dcq;

  if ( !p )
    if ( !q )
      return ( 0 );
    else
      return ( 1 );
  else if ( !q )
    return ( 1 );
  else if ( NUM(p) )
    if ( !NUM(q) )
      return ( 1 );
    else
      return ( cmpq((Q)p,(Q)q) );
  else if ( NUM(q) )
    return ( 1 );
  else {
    for ( dcp = DC(p), dcq = DC(q); dcp && dcq;
      dcp = NEXT(dcp), dcq = NEXT(dcq) )
      if ( cmpq(DEG(dcp),DEG(dcq) ) )
        return ( 1 );
      else if ( cmpq((Q)COEF(dcp),(Q)COEF(dcq) ) )
        return ( 1 );
    if ( dcp || dcq )
      return ( 1 );
    else
      return ( 0 );
  }
}
#if 0
srchump(mod,p1,p2,pr)
int mod;
P p1,p2,*pr;
{
  int r;
  MQ mq;

  r = eucresum(mod,p1,p2);
  STOMQ(r,mq); *pr = (P)mq;
}
#endif
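
/*
 * srchump : resultant of the univariate polynomials p1 and p2 over
 * GF(mod), working on dense UM representations; g2 is first made monic
 * with the correction factor recorded in adj, and the remainder sequence
 * then appears to mirror the reduced PRS used in resultmp.
 */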
void srchump(mod,p1,p2,pr)
int mod;
P p1,p2,*pr;
{
  UM m,m1,q,r,t,g1,g2;
  int lc,d,d1,d2,i,j,k,l,l1,l2,tmp,adj;
  V v;

  v = VR(p1); d = MAX(UDEG(p1),UDEG(p2));
  g1 = W_UMALLOC(d); g2 = W_UMALLOC(d);
  bzero((char *)g1,(int)((d+2)*sizeof(int))); bzero((char *)g2,(int)((d+2)*sizeof(int)));
  if ( d == (int)UDEG(p1) ) {
    mptoum(p1,g1); mptoum(p2,g2);
  } else {
    mptoum(p2,g1); mptoum(p1,g2);
  }
  if ( ( d1 = DEG(g1) ) > ( d2 = DEG(g2) ) ) {
    j = d1 - 1; adj = 1;
  } else
    j = d2;
  lc = 1;
  r = W_UMALLOC(d1+d2); q = W_UMALLOC(d1+d2);
  m1 = W_UMALLOC(d1+d2); t = W_UMALLOC(d1+d2);
  bzero((char *)r,(int)((d1+d2+2)*sizeof(int))); bzero((char *)q,(int)((d1+d2+2)*sizeof(int)));
  bzero((char *)m1,(int)((d1+d2+2)*sizeof(int))); bzero((char *)t,(int)((d1+d2+2)*sizeof(int)));
  m = W_UMALLOC(0); bzero((char *)m,(int)(2*sizeof(int)));
  adj = pwrm(mod,COEF(g2)[DEG(g2)],DEG(g1));
  DEG(m) = 0; COEF(m)[0] = invm(COEF(g2)[DEG(g2)],mod);
  mulum(mod,g2,m,r); cpyum(r,g2);
  while ( 1 ) {
    if ( ( k = DEG(g2) ) < 0 ) {
      *pr = 0;
      return;
    }
    if ( k == j ) {
      if ( k == 0 ) {
        DEG(m) = 0; COEF(m)[0] = adj;
        mulum(mod,g2,m,r); umtomp(v,r,pr);
        return;
      } else {
        DEG(m) = 0;
        COEF(m)[0] = pwrm(mod,COEF(g2)[k],DEG(g1)-k+1);
        mulum(mod,g1,m,r); DEG(r) = divum(mod,r,g2,t);
        DEG(m) = 0; COEF(m)[0] = dmb(mod,lc,lc,&tmp);
        divum(mod,r,m,q); cpyum(g2,g1); cpyum(q,g2);
        lc = COEF(g1)[DEG(g1)]; j = k - 1;
      }
    } else {
      d = j - k;
      DEG(m) = 0; COEF(m)[0] = pwrm(mod,COEF(g2)[DEG(g2)],d);
      mulum(mod,g2,m,m1); l = pwrm(mod,lc,d);
      DEG(m) = 0; COEF(m)[0] = l; divum(mod,m1,m,t);
      if ( k == 0 ) {
        DEG(m) = 0; COEF(m)[0] = adj;
        mulum(mod,t,m,r); umtomp(v,r,pr);
        return;
      } else {
        DEG(m) = 0;
        COEF(m)[0] = pwrm(mod,COEF(g2)[k],DEG(g1)-k+1);
        mulum(mod,g1,m,r); DEG(r) = divum(mod,r,g2,q);
        l1 = dmb(mod,lc,lc,&tmp); l2 = dmb(mod,l,l1,&tmp);
        DEG(m) = 0; COEF(m)[0] = l2;
        divum(mod,r,m,q); cpyum(t,g1); cpyum(q,g2);
        if ( d % 2 )
          for ( i = DEG(g2); i >= 0; i-- )
            COEF(g2)[i] = ( mod - COEF(g2)[i] ) % mod;
        lc = COEF(g1)[DEG(g1)]; j = k - 1;
      }
    }
  }
}
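
/*
 * substmp : substitute the polynomial p0 for the variable v0 in p over
 * GF(mod); *pr = p with v0 replaced by p0.  When v0 is the main variable
 * of p the coefficients are combined Horner-style using the degree gaps;
 * otherwise the substitution recurses into the coefficients.
 */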
void substmp(vl,mod,p,v0,p0,pr)
VL vl;
int mod;
V v0;
P p,p0;
P *pr;
{
  P x,t,m,c,s,a;
  DCP dc;
  Q d;

  if ( !p )
    *pr = 0;
  else if ( NUM(p) )
    *pr = p;
  else if ( VR(p) != v0 ) {
    MKMV(VR(p),x);
    for ( c = 0, dc = DC(p); dc; dc = NEXT(dc) ) {
      substmp(vl,mod,COEF(dc),v0,p0,&t);
      if ( DEG(dc) ) {
        pwrmp(vl,mod,x,DEG(dc),&s); mulmp(vl,mod,s,t,&m);
        addmp(vl,mod,m,c,&a);
        c = a;
      } else {
        addmp(vl,mod,t,c,&a);
        c = a;
      }
    }
    *pr = c;
  } else {
    dc = DC(p);
    c = COEF(dc);
    for ( d = DEG(dc), dc = NEXT(dc);
      dc; d = DEG(dc), dc = NEXT(dc) ) {
      subq(d,DEG(dc),(Q *)&t); pwrmp(vl,mod,p0,(Q)t,&s);
      mulmp(vl,mod,s,c,&m);
      addmp(vl,mod,m,COEF(dc),&c);
    }
    if ( d ) {
      pwrmp(vl,mod,p0,d,&t); mulmp(vl,mod,t,c,&m);
      c = m;
    }
    *pr = c;
  }
}
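
/*
 * reordermp : rebuild p, originally ordered according to ovl, as a
 * polynomial with respect to the new variable order nvl, doing all
 * coefficient arithmetic over GF(mod).
 */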
void reordermp(nvl,mod,ovl,p,pr)
VL nvl,ovl;
int mod;
P p;
P *pr;
{
  DCP dc;
  P x,m,s,t,c;

  if ( !p )
    *pr = 0;
  else if ( NUM(p) )
    *pr = p;
  else {
    MKMV(VR(p),x);
    for ( s = 0, dc = DC(p); dc; dc = NEXT(dc) ) {
      reordermp(nvl,mod,ovl,COEF(dc),&c);
      if ( DEG(dc) ) {
        pwrmp(nvl,mod,x,DEG(dc),&t); mulmp(nvl,mod,c,t,&m);
        addmp(nvl,mod,m,s,&t);
      } else
        addmp(nvl,mod,s,c,&t);
      s = t;
    }
    *pr = s;
  }
}
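
/*
 * chnremp : one Chinese-remainder lifting step for polynomial
 * coefficients.  p has integer coefficients known modulo q, c is the
 * image modulo `mod', and *r receives the combined polynomial modulo
 * q*mod, with the correction kept in the symmetric range via
 * normalizemp.
 */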
void chnremp(vl,mod,p,q,c,r)
VL vl;
int mod;
P p;
Q q;
P c;
P *r;
{
  P tg,sg,ug;
  P t,u;
  MQ mq;

  ptomp(mod,p,&tg); submp(vl,mod,c,tg,&sg);
  UTOMQ(rem(NM(q),mod),mq),tg = (P)mq; divsmp(vl,mod,sg,tg,&ug);
  normalizemp(mod,ug);
  mptop(ug,&u); mulp(vl,u,(P)q,&t); addp(vl,t,p,r);
}

/* XXX strange behavior of invm() on SPARC */
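
/*
 * chnrem : univariate Chinese-remainder step on a coefficient array.
 * c holds integer coefficients modulo q, t is the image modulo `mod'
 * in UM form; *cr receives the lifted polynomial (coefficients in the
 * symmetric range with respect to mod) and *qr the new modulus q*mod.
 */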
void chnrem(mod,v,c,q,t,cr,qr)
int mod;
V v;
UM t;
Q q,*qr;
P c,*cr;
{
  int n,m,i,d,a,sd,tmp;
  Q b,s,z;
  Q *pc,*pcr;
  DCP dc;

  if ( !c || NUM(c) )
    n = 0;
  else
    n = UDEG(c);
  m = DEG(t); d = MAX(n,m); W_CALLOC(n,Q,pc); W_CALLOC(d,Q,pcr);
  if ( !c )
    pc[0] = 0;
  else if ( NUM(c) )
    pc[0] = (Q)c;
  else
    for ( dc = DC(c); dc; dc = NEXT(dc) )
      pc[QTOS(DEG(dc))] = (Q)COEF(dc);
  for ( i = 0; i <= d; i++ ) {
    b = (i>n?0:pc[i]); a = (i>m?0:COEF(t)[i]);
    if ( b )
      a = (a-SGN(pc[i])*((int)rem(NM(pc[i]),mod)))%mod;
    sd = dmb(mod,(a>=0?a:a+mod),invm(rem(NM(q),mod),mod),&tmp);
    if ( ( 2 * sd ) > mod )
      sd -= mod;
    STOQ(sd,z); mulq(z,q,&s); addq(s,b,&pcr[i]);
  }
  STOQ(mod,z); mulq(q,z,qr); plisttop((P *)pcr,v,d,cr);
}
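
/*
 * normalizemp : map every numerical coefficient of g from the range
 * [0,mod) into the symmetric range around 0 (coefficients larger than
 * mod/2 have mod subtracted), modifying g in place.
 */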
void normalizemp(mod,g)
int mod;
P g;
{
  DCP dc;

  if ( !g )
    return;
  else if ( NUM(g) ) {
    if ( 2 * CONT((MQ)g) > mod )
      CONT((MQ)g) -= mod;
    return;
  } else
    for ( dc = DC(g); dc; dc = NEXT(dc) )
      normalizemp(mod,COEF(dc));
}
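
/*
 * norm : infinity norm of the univariate polynomial p with rational
 * integer coefficients, i.e. the largest absolute value of a coefficient
 * (at least 1, since the scan starts from ONEN), returned in *r.
 */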
void norm(p,r)
P p;
Q *r;
{
  N t;
  DCP dc;

  for ( dc = DC(p), t = ONEN; dc; dc = NEXT(dc) )
    if ( cmpn(NM((Q)COEF(dc)),t) > 0 ) {
      t = NM((Q)COEF(dc));
    }
  NTOQ(t,1,*r);
}
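
/*
 * norm1 : sum of the absolute values of all numerical coefficients of p,
 * computed recursively over every variable; presumably used as a rough
 * 1-norm bound on the coefficients.
 */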
void norm1(p,r)
P p,*r;
{
  DCP dc;
  P t,s,u;
  Q q;

  if ( NUM(p) )
    NTOQ(NM((Q)p),1,q),*r = (P)q;
  else {
    for ( t = 0, dc = DC(p); dc; dc = NEXT(dc) ) {
      norm1(COEF(dc),&s); addq((Q)t,(Q)s,(Q *)&u); t = u;
    }
    *r = t;
  }
}
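
/*
 * norm1c : maximum of norm1 over the coefficients of p with respect to
 * its main variable (at least 1); for a constant p it reduces to norm1.
 */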
void norm1c(p,r)
P p;
Q *r;
{
  N t;
  Q s;
  DCP dc;

  if ( NUM(p) )
    norm1(p,(P *)r);
  else {
    for ( dc = DC(p), t = ONEN; dc; dc = NEXT(dc) ) {
      norm1(COEF(dc),(P *)&s);
      if ( cmpn(NM(s),t) > 0 )
        t = NM(s);
    }
    NTOQ(t,1,*r);
  }
}
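
/*
 * gcdprsmp : GCD of p1 and p2 over GF(mod) by the content /
 * primitive-part scheme: the contents are split off with gcdcmp, the
 * primitive parts are handled by the subresultant PRS routine sprsm,
 * and the content GCD is multiplied back in.  When the main variables
 * differ, the routine recurses with the content of the polynomial whose
 * main variable comes first in vl.
 */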
void gcdprsmp(vl,mod,p1,p2,pr)
VL vl;
int mod;
P p1,p2,*pr;
{
  P g1,g2,gc1,gc2,gp1,gp2,g,gc,gp,gcr;
  V v1,v2;

  if ( !p1 )
    *pr = p2;
  else if ( !p2 )
    *pr = p1;
  else if ( NUM(p1) || NUM(p2) )
    *pr = (P)ONEM;
  else {
    g1 = p1; g2 = p2;
    if ( ( v1 = VR(g1) ) == ( v2 = VR(g2) ) ) {
      gcdcmp(vl,mod,g1,&gc1); divsmp(vl,mod,g1,gc1,&gp1);
      gcdcmp(vl,mod,g2,&gc2); divsmp(vl,mod,g2,gc2,&gp2);
      gcdprsmp(vl,mod,gc1,gc2,&gcr);
      sprsm(vl,mod,v1,gp1,gp2,&g);

      if ( VR(g) == v1 ) {
        gp = g;
        gcdcmp(vl,mod,gp,&gc); divsmp(vl,mod,gp,gc,&gp1);
        mulmp(vl,mod,gp1,gcr,pr);
      } else
        *pr = gcr;
    } else {
      while ( v1 != vl->v && v2 != vl->v )
        vl = NEXT(vl);
      if ( v1 == vl->v ) {
        gcdcmp(vl,mod,g1,&gc1); gcdprsmp(vl,mod,gc1,g2,pr);
      } else {
        gcdcmp(vl,mod,g2,&gc2); gcdprsmp(vl,mod,gc2,g1,pr);
      }
    }
  }
}
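
/*
 * gcdcmp : content of p with respect to its main variable over GF(mod),
 * i.e. the GCD (via gcdprsmp) of all of its coefficients.
 */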
void gcdcmp(vl,mod,p,pr)
VL vl;
int mod;
P p,*pr;
{
  P g,g1;
  DCP dc;

  if ( NUM(p) )
    *pr = (P)ONEM;
  else {
    dc = DC(p);
    g = COEF(dc);
    for ( dc = NEXT(dc); dc; dc = NEXT(dc) ) {
      gcdprsmp(vl,mod,g,COEF(dc),&g1);
      g = g1;
    }
    *pr = g;
  }
}
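
/*
 * sprsm : last element of the subresultant polynomial remainder sequence
 * of p1 and p2 with respect to v over GF(mod), with h and x carrying the
 * usual subresultant correction factors; gcdprsmp uses the result as the
 * GCD candidate of the primitive parts.
 */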
void sprsm(vl,mod,v,p1,p2,pr)
VL vl;
int mod;
V v;
P p1,p2,*pr;
{
  P q1,q2,m,m1,m2,x,h,r,g1,g2;
  int d;
  Q dq;
  VL nvl;

  reordvar(vl,v,&nvl);
  reordermp(nvl,mod,vl,p1,&q1); reordermp(nvl,mod,vl,p2,&q2);

  if ( ( VR(q1) != v ) || ( VR(q2) != v ) ) {
    *pr = 0;
    return;
  }

  if ( deg(v,q1) >= deg(v,q2) ) {
    g1 = q1; g2 = q2;
  } else {
    g2 = q1; g1 = q2;
  }

  for ( h = (P)ONEM, x = (P)ONEM; ; ) {
    if ( !deg(v,g2) )
      break;

    premmp(nvl,mod,g1,g2,&r);
    if ( !r )
      break;

    d = deg(v,g1) - deg(v,g2); STOQ(d,dq);
    pwrmp(nvl,mod,h,dq,&m); mulmp(nvl,mod,m,x,&m1); g1 = g2;
    divsmp(nvl,mod,r,m1,&g2); x = LC(g1); /* g1 is not const w.r.t v */
    pwrmp(nvl,mod,x,dq,&m1); mulmp(nvl,mod,m1,h,&m2);
    divsmp(nvl,mod,m2,m,&h);
  }
  *pr = g2;
}