% $OpenXM: OpenXM/doc/ascm2001p/homogeneous-network.tex,v 1.2 2001/06/20 01:43:12 noro Exp $
|
|
\subsection{Distributed computation with homogeneous servers}
\label{section:homog}

As our model of distributed computation does not
include communication between servers, one cannot attain
the maximal parallel speedup. However it is possible to execute
several types of distributed computation as follows.
|
|
\subsubsection{Competitive distributed computation by various strategies}

SINGULAR \cite{Singular} implements {\it MP} interface for distributed
computation and a competitive Gr\"obner basis computation is
illustrated as an example of distributed computation.
Such a distributed computation is also possible on OpenXM as follows:
The client creates two servers and it requests
Gr\"obner basis computations by the Buchberger algorithm and the $F_4$ algorithm
to the servers for the same input.
The client watches the streams by {\tt ox\_select()}
and the result which is returned first is taken. Then the remaining
server is reset.
|
\begin{verbatim} |
|
extern Proc1,Proc2$ |
|
Proc1 = -1$ Proc2 = -1$ |
|
/* G:set of polys; V:list of variables */ |
|
/* Mod: the Ground field GF(Mod); O:type of order */ |
|
def dgr(G,V,Mod,O) |
|
{ |
|
/* invoke servers if necessary */ |
|
if ( Proc1 == -1 ) Proc1 = ox_launch(); |
|
if ( Proc2 == -1 ) Proc2 = ox_launch(); |
|
P = [Proc1,Proc2]; |
|
map(ox_reset,P); /* reset servers */ |
|
/* P0 executes Buchberger algorithm over GF(Mod) */ |
|
ox_cmo_rpc(P[0],"dp_gr_mod_main",G,V,0,Mod,O); |
|
/* P1 executes F4 algorithm over GF(Mod) */ |
|
ox_cmo_rpc(P[1],"dp_f4_mod_main",G,V,Mod,O); |
|
map(ox_push_cmd,P,262); /* 262 = OX_popCMO */ |
|
F = ox_select(P); /* wait for data */ |
|
/* F[0] is a server's id which is ready */ |
|
R = ox_get(F[0]); |
|
if ( F[0] == P[0] ) { Win = "Buchberger"; Lose = P[1]; } |
|
else { Win = "F4"; Lose = P[0]; } |
|
ox_reset(Lose); /* reset the loser */ |
|
return [Win,R]; |
|
} |
|
\end{verbatim} |
|
|
|
%\subsubsection{Nesting of client-server communication} |
|
% |
|
%Under OpenXM-RFC 100 an OpenXM server can be a client of other servers. |
|
%Figure \ref{tree} illustrates a tree-like structure of an OpenXM |
|
%client-server communication. |
|
%\begin{figure} |
|
%\label{tree} |
|
%\begin{center} |
|
%\begin{picture}(200,70)(0,0) |
|
%\put(70,70){\framebox(40,15){client}} |
|
%\put(20,30){\framebox(40,15){server}} |
|
%\put(70,30){\framebox(40,15){server}} |
|
%\put(120,30){\framebox(40,15){server}} |
|
%\put(0,0){\framebox(40,15){server}} |
|
%\put(50,0){\framebox(40,15){server}} |
|
%\put(150,0){\framebox(40,15){server}} |
|
% |
|
%\put(90,70){\vector(-2,-1){43}} |
|
%\put(90,70){\vector(0,-1){21}} |
|
%\put(90,70){\vector(2,-1){43}} |
|
%\put(40,30){\vector(-2,-1){22}} |
|
%\put(40,30){\vector(2,-1){22}} |
|
%\put(140,30){\vector(2,-1){22}} |
|
%\end{picture} |
|
%\caption{Tree-like structure of client-server communication} |
|
%\end{center} |
|
%\end{figure} |
|
%Such a computational model is useful for parallel implementation of |
|
%algorithms whose task can be divided into subtasks recursively. |
|
% |
%A typical example is {\it quicksort}, where an array to be sorted is
%partitioned into two sub-arrays and the algorithm is applied to each
%sub-array. In each level of recursion, two subtasks are generated
% }
%}
%\end{verbatim}
|
%
%A typical example is a parallelization of the Cantor-Zassenhaus
%algorithm for polynomial factorization over finite fields.
%which is a recursive algorithm.
%At each level of the recursion, a given polynomial can be
%divided into two non-trivial factors with some probability by using
%a randomly generated polynomial as a {\it separator}.
%We can apply the following simple parallelization:
%When two non-trivial factors are generated on a server,
%one is sent to another server and the other factor is factorized on the server
%itself.
%\begin{verbatim}
%/* factorization of F */
%/* E = degree of irreducible factors in F */