\documentclass[11pt]{article}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amsthm}
\newtheorem{theorem}{Theorem}
\newtheorem{example}[theorem]{Example}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{algorithm}[theorem]{Algorithm}
\begin{document}
\begin{center}{\Large Varieties in $(\mathbf{P}^1(\overline{\mathbf{F}}))^n$\\
by Elimination and Extension}\end{center}
\begin{center}{\large Douglas A. Leonard\\
Department of Mathematics and Statistics\\
Auburn University}\end{center}
\begin{abstract}
This paper contains a theory of elimination and extension to compute varieties symbolically,
based on using {\em coordinates} from $(\mathbf{P}^1(\overline{\mathbf{F}}))^n$
and disjoint {\em parts} of varieties,
leading to a recursive algorithm to compute said varieties
by extension at the level of {\em parts} of a variety.
{\sc Macaulay2} code for this is included below.
This is a first step in the author's project of giving a purely algebraic
theory of desingularization of function fields,
in that it relies heavily on using this type of coordinates for function field elements
and on partitioning the set of valuations into disjoint sets.
% (though based on differing valuations instead of whether some polynomial evaluates to $0$ or not).
\end{abstract}
\section{Introduction}
Given an ordered set of coordinate functions $(x_n,\ldots,x_1)$,
and an ideal $I:=I(x_n,\ldots,x_1)\subseteq \overline{\mathbf{F}}[x_n,\ldots,x_1]$
of all the polynomial relations among them,
it is of interest to consider the variety
\[V(I):=\{ (\overline{x}_n,\ldots,\overline{x}_1)\in T^n\ :\
b(\overline{x}_n,\ldots,\overline{x}_1)=0\mbox{ for all }b\in I\}\]
For any affine coordinate functions used here $T=\overline{\mathbf{F}}$, an algebraically closed field,
but for rational coordinate functions (elements of a function field),
$T=\mathbf{P}^1(\overline{\mathbf{F}})$, the projective line over that algebraically closed field,
in that for $x_j=g_j/h_j$, it is natural to expect
$\overline{g_j/h_j}\in T$ to be the inverse of $\overline{h_j/g_j}\in T$,
even in the case that one is $0/1$ and the other $1/0$.
We will primarily be dealing with coordinate values from
$(\mathbf{P}^1(\overline{\mathbf{F}}))^n$ in this paper,
though we will embed this problem into an affine problem with coordinates in
$\overline{\mathbf{F}}^{2n}$ to do the extension.
The philosophy behind determining {\em all} the elements of a variety
by elimination and extension is to work one coordinate at a time,
finding all possibilities for coordinate $\overline{x}_1\in T$ first,
and then recursively finding all possibilities
for coordinates $(\overline{x}_{j+1},\ldots,\overline{x}_1)\in T^{j+1}$
given $(\overline{x}_j,\ldots,\overline{x}_1)\in T^j$.
This is analogous to row-reduction and back-substitution in linear algebra.
It should be expected to produce exactly the elements of the variety,
and it should produce the same set of elements
for any choice (out of $n!$ possibilities) of the ordering of the variables in the lex monomial ordering used.
Extension doesn't really work at the level of varieties,
but rather at the level of disjoint parts $S$ of a partition of the variety,
each part defined by a (finite) set of polynomial equality constraints $EQ(S)$
and a (finite, possibly empty) set of inequality constraints $NEQ(S)$.
Such partitions are crucial in doing desingularization of function fields as well, \cite{Leon}.
For instance, the Whitney umbrella, Example 3.6.1 in \cite{Ko}:
\[V:=\{(\overline{x}_3,\overline{x}_2,\overline{x}_1)\in\overline{\mathbf{F}}^3\ :\ \overline{x}_3\overline{x}_2^2-\overline{x}_1^2=0\}\]
is {\em singular} along the line
\[L:=\{(\overline{x}_3,\overline{x}_2,\overline{x}_1)\in\overline{\mathbf{F}}^3\ :\ \overline{x}_2=0=\overline{x}_1\},\]
but has a more complicated {\em singularity} at the point $P$ with $\overline{x}_3=\overline{x}_2=\overline{x}_1=0$.
The discussion ensuing in \cite{Ko} is then in terms of whether to {\em blow up} the variety, $L$ or the variety $P$,
rather than dealing with the disjoint parts $P$, $L\backslash P$, and even the part $L^c$ consisting of the non-singular points.
So we'll start with notation to describe what elimination and extension should look like in general,
then consider how to deal with this relative to partitioning the variety.
The actual theorem and its proof are relatively short, just explaining how $(\overline{x}_j,\ldots,\overline{x}_1)\in T^j$
satisfying the constraints of a part $S$ extend to $(\overline{x}_{j+1},\ldots,\overline{x}_1)\in T^{j+1}$
satisfying the constraints of a part $S^*$.
Even the {\sc Macaulay2} code given to implement this is not very long by code standards.
\newpage
\section{Notation for elimination and extension}
Let $\overline{\mathbf{F}}$ be an algebraically closed field
(here for computational reasons with $\mathbf{F}$ restricted to being
the rationals, $\mathbf{Q}$, in characteristic $0$
or the finite field of $p$ elements, $\mathbf{F}_p$, in characteristic $p>0$).
Let $R:=\overline{\mathbf{F}}[x_n,\ldots,x_1]$
with {\em lex} $x_n\succ\cdots\succ x_1$ monomial ordering
(an example of an {\em elimination order}).
Let $B$ be a minimal, reduced (hence finite) (lex) Gr\"obner basis
for the ideal $I$ of $R$ that it generates.
Define
\[R_j:=\overline{\mathbf{F}}[x_j,\ldots,x_1];\]
\[I_j:=I\cap R_j;\]
\[B_j:=B\cap R_j;\]
\[V_j:=\{(\overline{x}_j,\ldots,\overline{x}_1)\in T^j\ :\
b(\overline{x}_j,\ldots,\overline{x}_1)=0\mbox{ for all }b\in B_j\}\]
What $T$ is is a central point of this paper.
Then the general form of elimination and extension would be roughly as follows.
\begin{theorem}[Elimination]
$\ $
\begin{enumerate}
\item $I_j$ is an $($elimination$)$ ideal of $R_j$, $1\leq j\leq n$.
\item $B_j$ is a $($lex$)$ Gr\"obner basis for $I_j$, $1\leq j\leq n$.
\item $\{ (\overline{x}_j,\ldots,\overline{x}_1)\in T^j\ :\ (\overline{x}_n,\ldots,\overline{x}_1)\in V_n \}\subseteq V_j$, $1\leq j\leq n$.
\end{enumerate}
\end{theorem}
The proof should be a straightforward exercise.
[A proof given for the affine case in \cite{CLO} is rather short,
but the advantage of the reader trying this is to see
where the lex ordering is used
and in trying to understand that the third item is not always an equality,
though it will be for the coordinates used here.]
\begin{theorem}[Extension]
$\ $
\begin{enumerate}
\item If $(\overline{x}_j,\ldots,\overline{x}_1)\in V_j\subseteq T^j$,
then there is at least one $\overline{x}_{j+1}\in T$
such that $(\overline{x}_{j+1},\ldots,\overline{x}_1)\in V_{j+1}
\subseteq T^{j+1}$.
\item All such $\overline{x}_{j+1}$ can be computed symbolically.
\item $V_j=\{ (\overline{x}_j,\ldots,\overline{x}_1)\in T^j\ :\
(\overline{x}_n,\ldots,\overline{x}_1)\in V_n\}$.
\end{enumerate}
\end{theorem}
The proof of extension is another matter altogether,
in that this is not always the case for affine varieties
(meaning $V_n\subseteq \overline{\mathbf{F}}^n$).
The simple example $B:=(x_2x_1-1)=B_2$, $B_1=\emptyset$
has $V_1=\overline{\mathbf{F}}$,
and $(0)\in V_1$ does not extend to $(\overline{x}_2,0)\in V_2$
since $\overline{x}_2\cdot 0-1=-1\neq 0$.
[Of course, it is the claim here that
$((0:1))\in(\mathbf{P}^1(\overline{\mathbf{F}}))^1$ extends to
$((1:0),(0:1))\in(\mathbf{P}^1(\overline{\mathbf{F}}))^2$.]
Another such simple example $B:=(x_2x_1)=B_2$, $B_1=\emptyset$
has $V_1=\overline{\mathbf{F}}$,
and $(0)\in V_1$ should extend to $(\overline{x}_2,0)\in V_2$
for any $\overline{x}_2\in\overline{\mathbf{F}}$
(with $\overline{x}_1\neq 0$ extending to $(0,\overline{x}_1)\in V_2$).
But a theorem such as \cite{CLO} [Theorem 3.1.3]
that tries to deal with this example
by trying only to extend if $\overline{x}_1\neq 0$, would miss the former case.
So here varieties will be subsets of $(\mathbf{P}^1(\overline{\mathbf{F}}))^n$.
Then such varieties will be partitioned into (disjoint) {\em parts}, with part $S$,
defined by a finite set $EQ(S)$ of equality constraints on the coordinates $((g_n:h_n),\ldots,(g_1:h_1))$
and a finite (possibly empty) set $NEQ(S)$ of inequality constraints as well
(again as opposed to having varieties $V$ only defined by equality constraints given by $I(V)$).
$EQ(S)$ will include the non-homogeneous equality constraints
$h_i(h_i-1),\ (g_i-1)(h_i-1)$ for each $1\leq i\leq n$
that force a canonical representative $(1:0)$ or $(\overline{g}_i:1)$
for each point of the projective line.
The only other ingredients will be a mapping
\[ \phi\ :\ \overline{\mathbf{F}}[g_n,h_n,\ldots,g_1,h_1]\to
\overline{\mathbf{F}}[y_{2n},y_{2n-1},\ldots,y_2,y_1]\]
to blur the distinction between the $g_j$s and the $h_j$s in doing extension;
and the further mappings
\[ \phi_j\ :\ \overline{\mathbf{F}}[y_{2n},\ldots,y_1]\to
\overline{\mathbf{F}}[\overline{y}_j,\ldots \overline{y}_1][y_{2n},\ldots,y_{j+1}]\]
used to identify leading coefficients $lc(f)\in \overline{\mathbf{F}}[\overline{y}_j,\ldots, \overline{y}_1]$
that lead to different extensions depending on whether $lc(f)$ can be $0$ or not.
Some parts $S$ will then be partitioned into two (disjoint) parts
by appending the constraint $lc(f)$ to $EQ(S)$ or $NEQ(S)$,
based on whether such leading coefficient takes on the value $0$ or not,
if $lc(f)$ is not already known to be non-zero.
(This leads to computing a (finite) Gr\"obner basis for
either $\langle EQ(S)\rangle +\langle lc(f)\rangle$
or $saturation(\langle EQ(S)\rangle,\langle lc(f)\rangle)$ respectively to get the new equality constraints,
and/or appending $lc(f)$ to $NEQ(S)$ in the latter to get the new inequality constraints.)
So, given a variety $V=\mathbf{V}(I)$ for $I$ an ideal of $\overline{\mathbf{F}}[x_n,\ldots,x_1]$,
first replace each $x_j$ by $g_j/h_j$ to symbolically view $x_j$ as a rational function.
Then turn the generator polynomials $b$ of $I$ into polynomials:
\[ b^*(g_n,h_n,\ldots,g_1,h_1):=\left(\prod_{j=1}^nh_j^{deg(b,x_j)}\right)b(g_n/h_n,\ldots,g_1/h_1)\]
that are homogeneous in each pair $(g_k,h_k)$, $1\leq k\leq n$.
Use the map
\[ \phi\ :\ \overline{\mathbf{F}}[g_n,h_n,\ldots,g_1,h_1]\to \overline{\mathbf{F}}[y_{2n},\ldots,y_1]\]
defined by $\phi(g_j):=y_{2j}$ and $\phi(h_j):=y_{2j-1}$ for $1\leq j\leq n$.
Append the non-homogeneous equality constraints $y_{2j-1}(y_{2j-1}-1)=0$, and
$(y_{2j}-1)(y_{2j-1}-1)=0$ for $1\leq j\leq n$ to force a canonical choice for
representatives of the elements of the projective line as either $(1:0)$ or $(\overline{y_{k}}:1)$ for $1\leq k\leq 2n$.
Consider the further maps
\[ \phi_j\ :\ \overline{\mathbf{F}}[y_{2n},\ldots,y_1]\to
\overline{\mathbf{F}}[\overline{y}_{j},\ldots,\overline{y}_1][y_{2n},\ldots,y_{j+1}]\]
defined by $\phi_j(y_k):=\overline{y}_k$ for $k\leq j$ and $\phi_j(y_k):=y_k$ for $k> j$.
Computations will be done symbolically in these subrings
\[R_j:=\overline{\mathbf{F}}[\overline{y}_{j},\ldots,\overline{y}_1][y_{2n},\ldots,y_{j+1}]\]
though ultimately any $(\overline{y}_{2n},\ldots,\overline{y}_1)\in\overline{\mathbf{F}}^{2n}$
will have to be reinterpreted as an element of $(\mathbf{P}^1(\overline{\mathbf{F}}))^n$
by viewing each $(y_{2k},y_{2k-1})\in\overline{\mathbf{F}}^2$
as $(y_{2k}:y_{2k-1})\in\mathbf{P}^1(\overline{\mathbf{F}})$ for $1\leq k\leq n$.
[Actually, computationally we can get away with using only the ring
\[R:=\overline{\mathbf{F}}[z_{2n},\ldots,z_1][y_{2n},\ldots,y_1]\]
so as to cut down on the number of rings and ring maps needed.]
\newpage
\section{Theorem}
\begin{theorem}[The Extension Theorem for coordinates in $(\mathbf{P}^1(\overline{\mathbf{F}}))^n$]
$\ $
Given the preceding setup, suppose that for some part $S$,
\[ S|_{R_j}:=\{(\overline{y}_j,\ldots,\overline{y}_1)\in\overline{\mathbf{F}}^j\ :\]
\[ b(\overline{y}_j,\ldots,\overline{y}_1)=0,\mbox{ for all } b\in(EQ(S)\cap R_j)\]
\[\mbox{ and }
b(\overline{y}_j,\ldots,\overline{y}_1)\neq 0,\mbox{ for all } b\in(NEQ(S)\cap R_j)\}\]
is known, and is to be extended to one or more parts of the form
\[ S^*|_{R_{j+1}}:=\{(\overline{y}_{j+1},\ldots,\overline{y}_1)\in\overline{\mathbf{F}}^{j+1}\ :\]
\[
b(\overline{y}_{j+1},\ldots,\overline{y}_1)=0,\mbox{ for all } b\in(EQ(S^*)\cap R_{j+1})\]
\[\mbox{ and }
b(\overline{y}_{j+1},\ldots,\overline{y}_1)\neq 0,\mbox{ for all } b\in(NEQ(S^*)\cap R_{j+1})\}\]
by finding polynomial restrictions on the choice of $\overline{y}_{j+1}$ for each such part $S^*$.
This can be done as follows:
\begin{enumerate}
\item Consider those $b_i(y_{j+1})\in (\phi_j(EQ(S))\cap\phi_j(R_{j+1}))\backslash \phi_j(R_j)$
in increasing lex monomial order, with $d_i:=degree(b_i,y_{j+1})$.
\item Let $lc_i:=LC(b_i(\overline{y}_j,\ldots,\overline{y}_1))\in \phi_j(R_j)$.
\item If $lc_1$ could take on a non-zero or a zero value, then $S$ needs to be partitioned
into two $($disjoint$)$ parts relative to $lc_1$ being non-zero or not before proceeding.
But assuming that $lc_1$ can only take on non-zero values,
either because it is explicitly a non-zero field element
or because it is a factor of an element in $NEQ(S)$,
choose $\overline{y}_{j+1}$ to be a $($symbolic$)$ root of $b_1(y_{j+1})$
$($even if the explicit roots could be computed$)$.
\end{enumerate}
\end{theorem}
\begin{proof} Suppose there were some $b_s(y_{j+1})$ for which $b_s(\overline{y}_{j+1})\neq 0$.
Assume $s$ is chosen smallest relative to this.
Then $lc_1b_s(y_{j+1})-lc_sb_1(y_{j+1})$ has degree less than $d_s$, so is reducible to $0$
using only elements of $EQ(S)$ preceding $b_s$ in the lex monomial ordering.
But all of these are $0$ at $(\overline{y}_{j+1},\ldots,\overline{y}_1)$, as is $b_1$.
So $lc_1b_s$ is $0$ as well.
But $lc_1\neq 0$, forcing $b_s(\overline{y}_{j+1})= 0$, a contradiction.
\end{proof}
\newpage
\begin{example}
Consider the ideal $I=\langle x_1(x_3^2x_2+x_3+1),x_3(x_3^2x_2+x_3+1)\rangle$,
and its $($affine$)$ variety $V$.
Since $B_1=B_2=\emptyset$,
$V_1=\overline{\mathbf{F}}^1$ and $V_2=\overline{\mathbf{F}}^2$.
If $\overline{x}_2\neq 0$, then the affine extension theorem in \cite{CLO} would extend this
correctly for $\overline{x}_3\ :\ \overline{x}_3^2\overline{x}_2+\overline{x}_3+1=0$.
But it does not apply to the case $\overline{x}_2=0$.
In this case, $(0,0)$ should extend to either $(0,0,0)$ or $(-1,0,0)$, while
$(0,\overline{x}_1)$ with $\overline{x}_1\neq 0$ can be extended to $(-1,0,\overline{x}_1)$ only.
This example is worked out using the {\sc Macaulay2} code below, with the edited result given at the end. The affine result can then be singled out.
\end{example}
\newpage
\section{Macaulay2 code}
What follows is the author's {\sc Macaulay2} code and its application to this example
(with $z_i$ for $\overline{y}_i$, and $EQ\# i$ and $NEQ\# i$ for $EQ(S_i)$ and $NEQ(S_i)$).
Everything happens inside the one ring $R$ to save having to map elements and ideals of one ring into
another all the time.
The part numbered $17$ is the affine part that the affine CLO theorem 3.1.3
mentioned above doesn't deal with;
$14,16$ and half of $15$ are the other affine parts that it would deal with;
and $8,10,11,12,18$ and the other half of $15$
have at least one non-affine coordinate.
\begin{verbatim}
--GB(I): the Gr\"obner basis of the ideal I, repackaged as an ideal
--(gens gb I is a one-row matrix; flatten its entries and rebuild an ideal)
GB:=(I)->ideal flatten entries gens gb I
---------------------------------------------------
--redCoeff(LC,NEQk): reduce a symbolic leading coefficient LC modulo the
--inequality constraints NEQk (a list of polynomials known to be non-zero
--on the current part).  Saturating (LC) by each constraint divides out
--the factors already known non-zero; a single generator of the result is
--returned, so the answer is 0 only if LC can actually vanish on the part.
redCoeff:=(LC,NEQk)->(
if NEQk !={} then(
--move LC into the ring of the constraints before saturating
ilc=ideal(promote(LC,ring(NEQk#0)));
for i to #NEQk-1 do(
--divide out each factor already known to be non-zero
ilc=saturate(ilc,ideal(NEQk#i));
);
lc=(gens(ilc))_(0,0);
)
else(
--no inequality constraints: return LC unchanged
lc=LC;
);
lc
)
-------------------------------------------------------------
--rad(f,R): generators of the radical of the ideal (f), viewed in ring R
rad:=(f,R)->flatten entries gens radical promote(ideal(f),R)
-------------------------------------------------------------
--multihomRing(n,field): build the ambient ring
--R=field[z_(2n),...,z_1][y_(2n),...,y_1], lex order at both levels,
--where the y_k are the variables and the z_k the symbolic coordinate
--values (the ring R of the paper's final remark in Section 2)
multihomRing:=(n,field)->(
--subscripts 2n,...,1 in decreasing (reverse) order
l=for i to 2*n-1 list 2*n-i;
--subscripted variables y_(2n),...,y_1
ll=for i to #l-1 list y_(l#i);
--subscripted coordinate values z_(2n),...,z_1
lll=for i to #l-1 list z_(l#i);
--coefficient ring of subscripted coordinate values
F=field[lll,MonomialOrder=>Lex];
--ring of subscripted variables over that coefficient ring
R=F[ll,MonomialOrder=>Lex]
);
---------------------------------------------------------
--multihomVariety(n,R,multihom): recursively partition and extend the
--variety of the multi-homogenized ideal multihom inside the ring R built
--by multihomRing(n,field).  Returns (EQ,NEQ,PREV): parallel lists giving,
--for each part, its equality ideal, its list of inequality constraints,
--and the index of the parent part it was split from (-1 for the root).
multihomVariety:=(n,R,multihom)->(
--non-homogeneous constraints to force canonical reps for elements of P^1
--(each point represented as (1:0) or (g:1), as in Section 2)
nonhom=ideal(
for i to 2*n-1 list
if i%2==0 then y_(i+1)*(y_(i+1)-1)
else (y_(i+1)-1)*(y_i-1));
--part 0 is the whole variety: no inequality constraints, no parent
EQ={GB radical (multihom+nonhom)};
NEQ={{}};
PREV={-1};
----------------------------------------------
--phi(j): substitute the coordinate value z_k for the variable y_k for
--k<=j, leaving y_k fixed for k>j (the maps phi_j of the paper)
phi:=(j)->map(R,R,matrix{
for i to 2*n-1 list(
if i>= 2*n-j
then z_(2*n-i)
else y_(2*n-i)
)}
);
---------------------------------------------------
--psi: ring map sending each coordinate value z_i back to the variable
--y_i (the second copy of gens(R) supplies images for the z_i), undoing
--any earlier specialization so a part can be re-examined afresh
psi=map(R,R,matrix{gens(R)}|matrix{gens(R)});
---------------------------------------------------------------------------------------------
--breadth-first pass over the list of parts; new parts are appended to
--EQ/NEQ/PREV, so the loop runs until no part produces a further split
currentnode=0;
nextnode=1;
sizeEQ=1;
while currentnode < sizeEQ do(
varno=1;
found=0;
--scan variables in increasing order for a generator whose leading
--coefficient (after specializing the first varno variables) could
--separate this part into two sub-parts
while found==0 and varno< 2*n do(
EQk=psi(EQ#currentnode);
NEQk=NEQ#currentnode;
p=(phi(varno))(EQk);
for i to numgens(p)-1 do(
if leadMonomial(p_i)!=1 then(
--reduce the leading coefficient by the known non-zero constraints
m=redCoeff(leadCoefficient(p_i),NEQk);
if m!=0 then(
degm=for i from 1 to 2*n list degree(z_i,lift(m,F));
--split only if m actually involves some coordinate value z_i
--(a non-zero constant coefficient forces no case distinction)
if degm!=for i to 2*n-1 list 0 then(
J=(gens radical ideal promote(m,R))_(0,0);
found=varno;
break;
);
);
);
if found>0 then break;
);
varno=varno+1;
);
if found >0 and found < 2*n then(
--sub-part where the leading coefficient J vanishes: adjoin J to EQ
I=GB(radical((phi(found))(EQk+ideal(J))));
NEQk=unique(for i to #NEQk-1 list (
gens radical saturate(ideal(NEQk#i),I))_(0,0));
NEQkk=for i to #NEQk-1 list (NEQk#i)%I;
--discard the sub-part if some inequality constraint reduces to 0
--modulo I (the constraints would then be inconsistent)
if member(0,NEQkk) == false then(
EQ=append(EQ,I);
NEQ=append(NEQ,NEQk);
PREV=append(PREV,currentnode);
nextnode=nextnode+1;
);
--sub-part where J is non-zero: saturate EQ by J and adjoin J to NEQ
I=GB(radical(saturate((phi(found))(EQk),ideal(J))));
NEQk=NEQk|{J};
NEQk=unique(for i to #NEQk-1 list (
gens radical saturate(ideal(NEQk#i),I))_(0,0));
NEQkk=for i to #NEQk-1 list (NEQk#i)%I;
if member(0,NEQkk) ==false then(
EQ=append(EQ,I);
NEQ=append(NEQ,NEQk);
PREV=append(PREV,currentnode);
nextnode=nextnode+1;
);
);
currentnode=currentnode+1;
sizeEQ=#EQ;
);
(EQ,NEQ,PREV)
);
--------------------------------------------------------------
--multihomPrint(V,R,n): print the non-trivial parts of the output of
--multihomVariety, one line per part, in the format
--(parent node, node, equality ideal, inequality constraints)
multihomPrint:=(V,R,n)->(
eq=V#0;
neq=V#1;
prev=V#2;
--drop the uninformative constraints -z_(2j+1)+1 (the canonical h=1
--representative choice) from the inequality lists before printing
for j to n-1 do(neq=for i to #neq-1 list
delete(promote(-z_(2*j+1)+1,R),neq#i));
--skip parts whose equality ideal is the unit ideal (empty parts)
for i to #(eq)-1 do if eq#i!=1 then
print(prev#i,i,toString(eq#i),toString(neq#i))
)
---------------------------------------------------------------------
--Example 4 above----------------------------------------------------
--I=<x_1(x_3^2x_2+x_3+1),x_3(x_3^2x_2+x_3+1)> with x_j=g_j/h_j,
--g_j mapped to y_(2j) and h_j to y_(2j-1), multi-homogenized in
--each pair (y_(2j),y_(2j-1)) as in Section 2
R=multihomRing(3,QQ);
V=multihomVariety(3,R,
ideal(y_2*(y_6^2*y_4+y_6*y_5*y_3+y_5^2*y_3),
y_6*(y_6^2*y_4+y_6*y_5*y_3+y_5^2*y_3)));
multihomPrint(V,R,3)
\end{verbatim}
\begin{verbatim}
(0, 2, 6, ideal(z_1,z_2-1,
z_3,y_4-1,
y_5-1,y_6),
{})
(0, 1, 3, 8, ideal(z_1-1,z_2,
z_3,y_4-1,
y_5-1,y_6),
{})
(0, 1, 4, 10, ideal(z_1-1,
z_3,y_4-1,
y_5-1,y_6),
{z_2})
(0, 2, 5, 11, ideal(z_1,z_2-1,
z_3-1,z_4,
y_5^2-y_5,y_6+2*y_5-1),
{})
(0, 2, 5, 12, ideal(z_1,z_2-1,
z_3-1,
y_5-1,z_4*y_6^2+y_6+1),
{z_4})
(0, 1, 3 ,7, 14, ideal(z_1-1,z_2,
z_3-1,
y_5-1,z_4*y_6^3+y_6^2+y_6),
{z_4})
(0, 1, 4 ,9, 15, ideal(z_1-1,
z_3-1,z_4,
y_5^2-y_5,y_6+2*y_5-1),
{z_2})
(0, 1, 4, 9, 16, ideal(z_1-1,
z_3-1,
y_5-1,z_4*y_6^2+y_6+1),
{z_2, z_4})
(0, 1, 3, 7, 13, 17, ideal(z_1-1,z_2,
z_3-1,z_4,
z_5-1,y_6^2+y_6),
{})
(0, 1, 3, 7 ,13, 18, ideal(z_1-1,z_2,
z_3-1,z_4,
z_5,y_6-1),
{})
\end{verbatim}
So from node 6,
$(\overline{y}_1:\overline{y}_2)=(0:1)$,
$(\overline{y}_3:\overline{y}_4)=(0:1)$,
$(\overline{y}_5:\overline{y}_6)=(1:0)$.
From node 8,
$(\overline{y}_2:\overline{y}_1)=(0:1)$,
$(\overline{y}_4:\overline{y}_3)=(1:0)$,
$(\overline{y}_6:\overline{y}_5)=(0:1)$.
From node 10,
$(\overline{y}_2:\overline{y}_1)=(\overline{y}_2:1)\ :\ \overline{y}_2\neq 0$,
$(\overline{y}_4:\overline{y}_3)=(1:0)$,
$(\overline{y}_6:\overline{y}_5)=(0:1)$.
From node 11,
$(\overline{y}_2:\overline{y}_1)=(1:0)$,
$(\overline{y}_4:\overline{y}_3)=(0:1)$,
$(\overline{y}_6:\overline{y}_5)\ :\
\overline{y}_5(\overline{y}_5-1)=0=\overline{y}_6+2\overline{y}_5-1$.
From node 12,
$(\overline{y}_2:\overline{y}_1)=(1:0)$,
$(\overline{y}_4:\overline{y}_3)=(\overline{y}_4:1)\ :\
\overline{y}_4\neq 0$,
$(\overline{y}_6:\overline{y}_5)=(\overline{y}_6:1)\ :\
\overline{y}_4\overline{y}_6^2+\overline{y}_6+1=0$.
From node 14,
$(\overline{y}_2:\overline{y}_1)=(0:1)$,
$(\overline{y}_4:\overline{y}_3)=(\overline{y}_4:1)\ :\
\overline{y}_4\neq 0$,
$(\overline{y}_6:\overline{y}_5)=(\overline{y}_6:1)\ :\
\overline{y}_4\overline{y}_6^3+\overline{y}_6^2+\overline{y}_6=0$.
From node 15,
$(\overline{y}_2:\overline{y}_1)=(\overline{y}_2:1)\ :\
\overline{y}_2\neq 0$,
$(\overline{y}_4:\overline{y}_3)=(0:1)$,
$(\overline{y}_6:\overline{y}_5)\ :\
\overline{y}_5(\overline{y}_5-1)=0=\overline{y}_6+2\overline{y}_5-1$.
From node 16,
$(\overline{y}_2:\overline{y}_1)=(\overline{y}_2:1)\ :\
\overline{y}_2\neq 0$,
$(\overline{y}_4:\overline{y}_3)=(\overline{y}_4:1)\ :\
\overline{y}_2,\overline{y}_4\neq 0$,
$(\overline{y}_6:\overline{y}_5)=(\overline{y}_6:1)\ :\
\overline{y}_4\overline{y}_6^2+\overline{y}_6+1=0$.
From node 17,
$(\overline{y}_2:\overline{y}_1)=(0:1)$,
$(\overline{y}_4:\overline{y}_3)=(0:1)$,
$(\overline{y}_6:\overline{y}_5)=(\overline{y}_6:1)\ :\
\overline{y}_6^2+\overline{y}_6=0$.
From node 18,
$(\overline{y}_2:\overline{y}_1)=(0:1)$,
$(\overline{y}_4:\overline{y}_3)=(0:1)$,
$(\overline{y}_6:\overline{y}_5)=(1:0)$.
\newpage
\begin{thebibliography}{99}
\bibitem{CLO} David Cox, John Little, and Donal O'Shea
{\em Ideals, Varieties, and Algorithms}
Springer-Verlag, 1992
\bibitem{M2} Daniel R. Grayson and Michael E. Stillman
{\em Macaulay2, a software system for research in algebraic geometry}
Available at {https://faculty.math.illinois.edu/Macaulay2}
%\bibitem{Harr} Joe Harris
% {\em Algebraic Geometry : a first course}
%Graduate texts in mathematics : 133
%Springer-Verlag, 1992
%\bibitem{Hart} Robin Hartshorne
% {\em Algebraic Geometry}
%Graduate texts in mathematics : 52
%Springer-Verlag, 1977
\bibitem{Ko} J\'anos Koll\'ar
{\em Lectures on Resolution of Singularities}
Annals of Mathematical Studies, 166,
Princeton University Press, 2007
\bibitem{Leon} Douglas A. Leonard
Desingularization of Function Fields,
Arxiv
\end{thebibliography}
\end{document}
\newpage
\section{Addendum}
So what is wrong with using other types of coordinates?
\begin{enumerate}
\item Our first claim is that by considering them as
coordinates, values of variables in our polynomial ring,
we are already ruling out so-called {\em projective coordinates},
as the $j$th entry is not a value of the $j$th variable
as for affine coordinates, but rather the ratio of the $i$th
entry and the $j$th entry might be a ratio of the value of the $i$th
variable and the $j$th variable.
\item Even if we overlook this, a hybrid of projective coordinates
and affine coordinates is worse in that it means that certain variables
are treated differently from others.
[We do do this for, say, polar coordinates since the variables themselves
have a different flavor, but algebraically, this should be a no-no.]
This is done for blow-ups, of which I am not a fan.
Just try to find a worked out example of a sequence of blow-ups in the literature to realize that one must switch from a hybrid projective, affine object
to an affine cover by considering those elements with $i$th projective coordinate non-zero, before attempting a further blow-up.
\item Projective coordinates were not meant to be recursive.
It is easy to find examples wherein some element with coordinates $(-1:1)$
would extend to an element with coordinates $(1:0:0)$ instead of $(\infty:-1:1)$. \cite{CLO} example 8.5.1 is one such, wherein $(1:0:0)$ should have been considered as a double point representing both $(\infty:-1:1)$ and $(\infty:1:1)$.
\item Would that affine coordinates were sufficient, but the so-called
``missing points'' in \cite{CLO} should be called non-affine points
to highlight what is really wrong.
\item Sometimes it is instructive to work in positive characteristic.
Example 8.5.1 really describes a genus $0$ curve, so should have $q+1$
points rational over $\mathbf{F}_q$. For $q=5$ the points $(0,1)$, $(2,3)$
and $(3,3)$ are affine, CLO might have found $(\infty,1)$ and $(\infty,4)$,
but would have missed $(0,\infty)$ by restricting the $y$-coordinate to $\mathbf{F}_5$ while allowing the $x$-coordinate to live in $\mathbf{P}^1(\mathbf{F}_5)$.
\item Not choosing canonical representatives for elements of $\mathbf{P}^1$
is a further problem. Example 8.5.3 can be easily explained
as $y_4y_1+y_3y_2=0$, $y_4(y_2+y_1)=0$ by using $(y_4-1)(y_3-1)=0$
$y_3(y_3-1)=0$, $(y_2-1)(y_1-1)=0$, and $y_1(y_1-1)=0$, even if you
decide that you want $y_1=1$.
This would make most of Chapter 8 section 5 moot.
\end{enumerate}
As a motivational example to understand the problem at hand, consider
the affine variety
\[ V:=\{(a_2,a_1)\in\overline{\mathbf{F}}^2\ :\ a_2^5(a_1^5+1)+a_1^5=0\}\]
of the ideal
\[ I:=\langle x_2^5(x_1^5+1)+x_1^5\rangle\subset \overline{\mathbf{F}}[x_2,x_1]\]
with a lex $x_2\succ x_1$ monomial order.
Elimination produces
\[ I_1:=I\cap\overline{\mathbf{F}}[x_1]=\langle 0\rangle\subset \overline{\mathbf{F}}[x_1]\mbox{ and }I_2:=I.\]
So $V(I_1):=\{a_1\in\overline{\mathbf{F}}\ :\ 0=0\}=\overline{\mathbf{F}}$,
but extension is different for $a_1^5+1=0$ than for $a_1^5+1\neq 0$.
In the first case there is no affine extension, whereas there is for any $a_1$
in the second case.
If one thinks that elimination and extension should always allow extension
and that all extensions should be described, then one thought might be to use
$\mathbf{P}^2(\overline{\mathbf{F}})$. But in Cox, Little, and O'Shea,
chapter 8, section 5, there is a supposedly preferable choice of
$\mathbf{P}^1(\overline{\mathbf{F}})\times\overline{\mathbf{F}}$.
Our choice will be to use $(\mathbf{P}^1(\overline{\mathbf{F}}))^2$
because coordinates should all be of the same flavor and should be values
that a rational function could take on.
Let's see how each of these three options does on this small example,
before writing out the general theory proposed.
\newpage
First, projective coordinates can't really be a good choice for this
type of problem in that $(a_1:a_0)\in\mathbf{P}^1(\overline{\mathbf{F}})$
shouldn't really be expected to extend to
$(a_2:a_1:a_0)\in\mathbf{P}^2(\overline{\mathbf{F}})$, and
certainly $(1:0:0)$ should project onto $(0:0)$.
Even if we tried
\[ V^{hom}:=\{(a_2:a_1:a_0)\in\mathbf{P}^2(\overline{\mathbf{F}})\ :\
a_2^5(a_1^5+a_0^5)+a_1^5a_0^5=0\}\]
$a_1^5+a_0^5=0$ would imply that $a_1^5a_0^5=0$, so $a_1=0$ or $a_0=0$; hence
$a_1=0$ and $a_0=0$. This doesn't give any new extension.
If we try the hybrid coordinates of CLO, then
\[ V^{CLO}:=\{((a_2:a_0),a_1)\in\mathbf{P}^1(\overline{\mathbf{F}})\times\overline{\mathbf{F}}\ :\
a_2^5(a_1^5+a_0^5)+a_1^5a_0^5=0\}\]
$a_1^5+a_0^5=0$ would imply that $a_1^5a_0^5=0$, so $a_1=0$ or $a_0=0$; hence
$a_1=0$, $a_0=0$, and $a_2=1$. This doesn't give any extension
in the troublesome part of the affine problem either.
But if we try multi-homogeneous coordinates, then
\[ V^{MH}:=\{((\alpha_2:\beta_2),(\alpha_1:\beta_1))\in(\mathbf{P}^1(\overline{\mathbf{F}}))^2 :\
\alpha_2^5(\alpha_1^5+\beta_1^5)+\beta_2^5\alpha_1^5=0\}\]
$\alpha_1^5+\beta_1^5=0$ would imply that $\beta_2^5\alpha_1^5=0$, so $\alpha_1=0$ or $\beta_2=0$.
This does give the extension of $(\alpha_1:1)$ to $((1:0),(\alpha_1:1))$
when $\alpha_1^5+1=0$.
It also gives the extension of $(1:0)$ to $((\alpha_2:1),(1:0))$
with $\alpha_2^5+1=0$.
Moreover, what we have done is partition the projective line into three
disjoint parts and extend each the way they should be expected to extend.
This suggests that were we to do an example with more variables, and
hence recursive steps, we should be applying those steps to partitioning a part
and extending that. So maybe instead of defining varieties as is traditional,
we should be defining {\em parts} $S$ by having not only a finite set
$EQ(S)$ of equality constraints but also a finite (possibly empty) set
$NEQ(S)$ of inequality constraints. (In our example above, we produced $3$
parts with $EQ(S_1):=\{ \beta_1,\alpha_1-1, \beta_2-1, \alpha_2^5+1\}$,
$EQ(S_2):=\{ \beta_2,\alpha_2-1, \beta_1-1, \alpha_1^5+1\}$, and
$EQ(S_3):=\{ \beta_2-1, \beta_1-1, \alpha_2^5(\alpha_1^5+1)+\alpha_1^5\}$ and
$NEQ(S_3):=\{ \alpha_1^5+1\}$.
[If you just can't live without varieties, think of each part as describing
the part of a variety described by $EQ(S)$ not contained in the union of finitely many other varieties, each described by one element of $NEQ(S)$.]
Consider the extension theorem from CLO 3.1.
Written in our notation this is
{\bf Theorem}[CLO: The Extension Theorem]
Write the elements of a Gr\"obner basis for {$I:=I_n\subset R:=R_n:=\overline{\mathbf{F}}[x_n,\ldots,x_1]$}
as {$f_{j,k}\in R_j\backslash R_{j-1}$} for {$1\leq k\leq k(j)$},
each viewed as an element of {$R_{j-1}[x_j]$} with leading coefficient {$g_{j,k}\in R_{j-1}$}.
If {$(a_{j-1},\ldots,a_1)\in V(I_{j-1})$} and {$g_{j,k}(a_{j-1},\ldots,a_1)\neq 0$} for some {$k$},
then there is at least one {$a_j\in\overline{\mathbf{F}}$} such that {$(a_j,\ldots,a_1)\in V(I_j)$}.
What is wrong with this?
First, it is not constructive, and it doesn't really say that {$V(I_n)$} is gotten by extension as our theorem does.
The example considered there has {$f_{2,1}:=x_2-x_1$}, {$f_{3,1}:=x_3x_1-1$} and {$f_{3,2}:=x_3x_2-1$}.
While any {$a_1\in\overline{\mathbf{F}}$} extends to {$a_2=a_1$}, this only extends to {$a_3=1/a_1$} when {$a_1\neq 0$}.
Were this done in rational terms, {$f_{2,1}:=x_2h_1-x_1h_2$}, {$f_{3,1}:=x_3x_1-h_3h_1$} and {$f_{3,2}:=x_3x_2-h_3h_2$}.
Any {$(a_1:b_1)\in\mathbf{P}^1(\overline{\mathbf{F}})$} extends to {$(a_2:b_2)=(a_1:b_1)$},
then to {$(a_3:b_3)=(b_1:a_1)$}.
But consider another example, with
{$f_{2,1}:=x_2^3+x_2x_1+x_1^5$}, {$f_{3,1}:=x_3x_1-x_2^2$}, and {$f_{3,2}:=x_3x_2+x_2+x_1^4$}.
Then any {$a_1\in\overline{\mathbf{F}}$} extends to {$a_2$} such that {$a_2^3+a_2a_1+a_1^5=0$},
and there are between 1 and 3 such values.
But the CLO extension theorem would say that this extends to {$a_3:=a_2^2/a_1$} for {$a_1\neq 0$} (and {$a_2\neq 0$}).
It says nothing whatsoever about what happens when {$a_1=0=a_2$}, yet this should extend to any {$a_3\in\overline{\mathbf{F}}$}.
Our theorem says that constructively
{$f_{2,1}(x_2)=x_2^3+x_2a_1+a_1^5=0$} produces the values {$a_2$} extending {$a_1$}.
Then for {$a_1=0=a_2$}, {$f_{3,1}(x_3,0,0)=0=f_{3,2}(x_3,0,0)$} means any {$a_3\in\overline{\mathbf{F}}$} extends;
while for {$a_1\neq 0\neq a_2$}, {$f_{3,1}(x_3,a_2,a_1)=x_3a_1-a_2^2=0$} and {$f_{3,2}(x_3,a_2,a_1)=x_3a_2+a_2+a_1^4=0$}
have common solution {$a_3=a_2^2/a_1=-(a_2+a_1^4)/a_2$} as extension.
So were one to write a theorem of this flavor but constructively,
one should do it in rational terms (with the statement essentially being the proof).
{\bf Theorem}[{Constructive rational extension}]
Let \[{d_j(x_j,h_j):=\gcd\{ f_{j,k}(x_j,h_j,a_{j-1},b_{j-1},\ldots,a_1,b_1)\ :\ 1\leq k\leq k(j)\}}.\]
\begin{itemize}
\item
If {$d_j(x_j,h_j)$} depends on {$x_j$},
then there is extension to {$(a_j:1)$} for {$a_j$} any root of {$d_j(x_j,1)$}.
\item If it doesn't depend on {$x_j$} but does depend on {$h_j$},
then there is extension to {$(a_j:b_j)=(1:0)$}.
%and there is extension to \txb{$(a_j:b_j)$} when \txb{$d_j(x_j,1)=0$}.
\item If it is independent of both,
then there is extension to any {$(a_j:b_j)\in\mathbf{P}^1(\overline{\mathbf{F}})$}.
\end{itemize}
\end{document}
\end{document}
Let $\overline{\mathbf{F}}$ be an algebraically closed field
(here with $\mathbf{F}$ the rationals $\mathbf{Q}$ in characteristic $0$
or the finite field of $p$ elements $\mathbf{F}_p$ in characteristic $p>0$
for computational purposes).
Let $R:=\overline{\mathbf{F}}[x_n,\ldots,x_1]$
with {\em lex} $x_n\succ\cdots\succ x_1$ monomial ordering.
Let $B$ be a (finite) Gr\"obner basis for the ideal $I$ of $R$
that it generates.
Define $R_j:=\overline{\mathbf{F}}[x_j,\ldots,x_1]$,
$I_j:=I\cap R_j$,
$B_j:=B\cap R_j$ (or $0$ if $B\cap R_j=\emptyset$),
and $V_j:=\{(\overline{x}_j,\ldots,\overline{x}_1)\ :\
b(\overline{x}_j,\ldots,\overline{x}_1)=0\mbox{ for all }b\in B_j\}$.
{\em Elimination} merely means that $I_j$ is an (elimination) ideal
with lex Gr\"obner basis $B_j$ and variety $V_j$.
{\em Extension} means that for
$(\overline{x}_j,\ldots,\overline{x}_1)\in V_j$
there exists $\overline{x}_{j+1}$ such that
$(\overline{x}_{j+1},\ldots,\overline{x}_1)\in V_{j+1}$.
Would that this could be applied to $V_j\subseteq \overline{\mathbf{F}}^j$,
but the simple example $B:=(x_2x_1-1)=B_2$, $B_1=(0)$
has $V_1=\overline{\mathbf{F}}$,
and $(0)\in V_1$ does not extend to $(\overline{x}_2,0)\in V_2$
since $\overline{x}_2\cdot 0-1=-1\neq 0$.
\newpage
%Of course if $\overline{x}_2=1/0$ were allowed,
%then maybe $(1/0,0)$ would be an extension.
%This suggests that
%$\overline{x}_j\in \mathbf{P}^1(\overline{\mathbf{F}})$
%and $V_j\subseteq (\mathbf{P}^1(\overline{\mathbf{F}}))^j$.
%This has all the advantages that affine coordinates afford
%in that it is easy to move between
%$(\mathbf{P}^1(\overline{\mathbf{F}}))^j$ and
%$(\mathbf{P}^1(\overline{\mathbf{F}}))^{j+1}$
%by appending or deleting a coordinate,
%and that $\overline{x}_j\in \mathbf{P}^1(\overline{\mathbf{F}})$
%is a value that a (rational) function could be given.
%We'll call elements of $(\mathbf{P}^1(\overline{\mathbf{F}}))^n$
%{\em multi-homogeneous coordinates}.
We can find out a lot about our mindset (the things we forgot to question)
by trying to generalize this to get extension to work always.
There are at least three things to question here.
\begin{itemize}
\item
If one starts from a standard exposition it is easy to think that
projective varieties are the obvious generalization of affine varieties.
Harris has lecture 1 titled ``Affine and Projective varieties''.
Hartshorne has first chapter titled ``Varieties'', with section 1
titled ``Affine varieties'' and section 2 titled ``Projective Varieties''.
Cox, Little and O'Shea stick with affine varieties and related topics for
seven chapters before introducing anything projective in chapter 8.
And, more importantly, projective varieties have a name, unlike
the varieties based on what we shall be calling
{\em multihomogeneous coordinates} for lack of a better name.
\item
One can then question the idea of a variety as a basic building block.
This seems obvious in that a variety is a closed set in the Zariski topology.
But when considering things such as a subset of singular points,
only one of the set of singular points and the complement of that set is
a variety.
So if both are of interest, maybe the partition into (disjoint) {\em parts} $S$,
based on both equality constraints $EQ(S)$ and inequality constraints
$NEQ(S)$ is better than just focusing on the equality constraints and
barely mentioning the inequality constraints.
\item
The third problem is that of a proper model
of the coordinate system being used.
There would seem to be many compelling reasons for allowing
only homogeneous polynomials for projective space,
whether it is about graded rings, Rees algebras, or Hilbert-Samuel theory.
But then one has to deal with the baggage encountered in computations,
by homogenizing before versus after the computation.
Most, if not all, of this disappears if one considers adding the appropriate
non-homogeneous equality constraints that force the choice of a canonical
representative for each projective point.
\end{itemize}
\newpage
So we shall choose {\em multihomogeneous coordinates},
elements of $(\mathbf{P}^1(\overline{\mathbf{F}}))^n$,
as the generalization of affine coordinates,
elements of $(\overline{\mathbf{F}})^n$.
We'll then choose partitioning sets into {\em parts} $S$
defined by a finite set $EQ(S)$ of equality constraints
and a finite (possibly empty) set $NEQ(S)$ of inequality constraints.
And we'll choose to work with non-homogeneous equality constraints
\[ h_j(h_j-1)=0,\ (g_j-1)(h_j-1)=0,\ 1\leq j\leq n,\]
analogously to the way we would have used
\[x_0(x_0-1)=0,x_1(x_1-1)(x_0-1)=0,\ldots,\]
\[x_{n-1}(x_{n-1}-1)\cdots(x_0-1)=0,(x_n-1)\cdots(x_0-1)=0.\]
for projective space.
The only other ingredient will be a mapping of
$ \overline{\mathbf{F}}[g_n,h_n,\ldots,g_1,h_1]$
into $ \overline{\mathbf{F}}[y_{2n},y_{2n-1},\ldots,y_2,y_1]$
to simplify the more important mapping of
$ \overline{\mathbf{F}}[y_{j+1},\ldots,y_1]$
to $ \overline{\mathbf{F}}[y_j,\ldots,y_1][y_{j+1}]$.
This will allow us to identify factors of $LC(f)\in \overline{\mathbf{F}}[y_j,\ldots,y_1]$ that cause generically different extension.
We'll then be partitioning parts by adding constraints based on these factors
taking on the value $0$ or not.
\newpage
{\bf Addendum}
Take for example the statement in CLO chapter 8 section 5 that ``Ideally, there should be a purely algebraic method of eliminating $u$ and $v$ from (2) to obtain $y(1+y)=0$''.
This is in reference to a misguided example using $(u,v,y)$-coordinates for $\mathbf{P}^1\times k$ with $u+vy=0$ and $u+uy=0$.
We could embed this into $k^3$ using $(u-1)(v-1)=0$ and $v(v-1)=0$. This gives
a Gr\"obner basis of $(v-1,u+y,y(y+1))$ directly, making pretty much everything
in that section moot, since this works equally well in general.
\newpage
The pseudocode for the algorithm is:
1) Given a multi-homogeneous lex Gr\"obner basis,
append the non-homogeneous equalities
to make this into an affine problem in $(\overline{\mathbf{F}})^{2n}$.
2) Compute a lex $B_{2n}$ Gr\"obner basis for this ideal and break it up into
sets $B_{j+1}\backslash B_j$ for all $0\leq j<2n$.
3) Start with the single part $S$ defined by the equality constraints in
$EQ(S):=B_1$, and $NEQ(S)=\emptyset$.
4) Consider all current parts, and for each, map
$\overline{\mathbf{F}}[y_{k+1},\ldots,y_1]$ to $\overline{\mathbf{F}}[\overline{y_k},\ldots,\overline{y_1}][y_{k+1}]$.
5) Consider the ideal generated by all the leading coefficients that are in
$\overline{\mathbf{F}}[\overline{y_k},\ldots,\overline{y_1}]$
but not in $\overline{\mathbf{F}}$, and compute a Gr\"obner basis.
6) Pick a generator $g$ of that basis and append $g$ to $EQ(S)$, respectively
$NEQ(S)$, to partition $S$ into two (disjoint) parts.
7) Repeat steps 5) and 6) until there are no such generators. Then increase $k$ by $1$
and go back to step 4) when $k\leq 2n$; otherwise stop and output the
current parts as elements of $(\mathbf{P}^1(\overline{\mathbf{F}}))^n$.
\newpage
{\bf References}
Cox, Little, and O'Shea
Harris
Hartshorne
Leonard
\end{document}
As a motivational example to understand the problem at hand, consider
the affine variety
\[ V:=\{(a_2,a_1)\in\overline{\mathbf{F}}^2\ :\ a_2^5(a_1^5+1)+a_1^5=0\}\]
of the ideal
\[ I:=\langle x_2^5(x_1^5+1)+x_1^5\rangle\subset \overline{\mathbf{F}}[x_2,x_1]\]
with a lex $x_2\succ x_1$ monomial order.
Elimination produces
\[ I_1:=I\cap\overline{\mathbf{F}}[x_1]=\langle 0\rangle\subset \overline{\mathbf{F}}[x_1]\mbox{ and }I_2:=I.\]
So $V(I_1):=\{a_1\in\overline{\mathbf{F}}\ :\ 0=0\}=\overline{\mathbf{F}}$,
but extension is different for $a_1^5+1=0$ than for $a_1^5+1\neq 0$.
In the first case there is no affine extension, whereas there is for any $a_1$
in the second case.
If one thinks that elimination and extension should always allow extension
and that all extensions should be described, then one thought might be to use
$\mathbf{P}^2(\overline{\mathbf{F}})$. But in Cox, Little, and O'Shea,
chapter 8, section 5, there is a supposedly preferable choice of
$\mathbf{P}^1(\overline{\mathbf{F}})\times\overline{\mathbf{F}}$.
Our choice will be to use $(\mathbf{P}^1(\overline{\mathbf{F}}))^2$
because coordinates should all be of the same flavor and should be values
that a rational function could take on.
Let's see how each of these three options does on this small example,
before writing out the general theory proposed.
\newpage
First, projective coordinates can't really be a good choice for this
type of problem in that $(a_1:a_0)\in\mathbf{P}^1(\overline{\mathbf{F}})$
shouldn't really be expected to extend to
$(a_2:a_1:a_0)\in\mathbf{P}^2(\overline{\mathbf{F}})$, and
certainly $(1:0:0)$ should project onto $(0:0)$.
Even if we tried
\[ V^{hom}:=\{(a_2:a_1:a_0)\in\mathbf{P}^2(\overline{\mathbf{F}})\ :\
a_2^5(a_1^5+a_0^5)+a_1^5a_0^5=0\}\]
$a_1^5+a_0^5=0$ would imply that $a_1^5a_0^5=0$, so $a_1=0$ or $a_0=0$; hence
$a_1=0$ and $a_0=0$. This doesn't give any new extension.
If we try the hybrid coordinates of CLO, then
\[ V^{CLO}:=\{((a_2:a_0),a_1)\in\mathbf{P}^1(\overline{\mathbf{F}})\times\overline{\mathbf{F}}\ :\
a_2^5(a_1^5+a_0^5)+a_1^5a_0^5=0\}\]
$a_1^5+a_0^5=0$ would imply that $a_1^5a_0^5=0$, so $a_1=0$ or $a_0=0$; hence
$a_1=0$, $a_0=0$, and $a_2=1$. This doesn't give any extension
in the troublesome part of the affine problem either.
But if we try multi-homogeneous coordinates, then
\[ V^{MH}:=\{((\alpha_2:\beta_2),(\alpha_1:\beta_1))\in(\mathbf{P}^1(\overline{\mathbf{F}}))^2 :\
\alpha_2^5(\alpha_1^5+\beta_1^5)+\beta_2^5\alpha_1^5=0\}\]
$\alpha_1^5+\beta_1^5=0$ would imply that $\beta_2^5\alpha_1^5=0$, so $\alpha_1=0$ or $\beta_2=0$.
This does give the extension of $(\alpha_1:1)$ to $((1:0),(\alpha_1:1))$
when $\alpha_1^5+1=0$.
It also gives the extension of $(1:0)$ to $((\alpha_2:1),(1:0))$
with $\alpha_2^5+1=0$.
Moreover, what we have done is partition the projective line into three
disjoint parts and extend each the way they should be expected to extend.
This suggests that were we to do an example with more variables, and
hence recursive steps, we should be applying those steps to partitioning a part
and extending that. So maybe instead of defining varieties as is traditional,
we should be defining {\em parts} $S$ by having not only a finite set
$EQ(S)$ of equality constraints but also a finite (possibly empty) set
$NEQ(S)$ of inequality constraints. (In our example above, we produced $3$
parts with $EQ(S_1):=\{ \beta_1,\alpha_1-1, \beta_2-1, \alpha_2^5+1\}$,
$EQ(S_2):=\{ \beta_2,\alpha_2-1, \beta_1-1, \alpha_1^5+1\}$, and
$EQ(S_3):=\{ \beta_2-1, \beta_1-1, \alpha_2^5(\alpha_1^5+1)+\alpha_1^5\}$ and
$NEQ(S_3):=\{ \alpha_1^5+1\}$.
[If you just can't live without varieties, think of each part as describing
the part of a variety described by $EQ(S)$ not contained in the union of finitely many other varieties, each described by one element of $NEQ(S)$.]
\newpage
{\bf Definition}
Let {$(R_j:=\overline{\mathbf{F}}[x_j,\ldots,x_1]\ :\ 1\leq j\leq n)$}
be a nested sequence of {\em multivariate polynomial rings}
with {\em lex monomial ordering} with {$x_n\succ\cdots\succ x_1$}.
If {$I$} is an {ideal} of {$R:=R_n$},
then the {\em elimination ideals} are
\[ {I_j:=I\cap \overline{\mathbf{F}}[x_j,\ldots,x_1],\ 1\leq j\leq n}.\]
{\bf Theorem}
If {$B$} is a {Gr\"obner basis} for {$I$} then
\[ {B_j:=B\cap R_j,\ 1\leq j\leq n}\]
is a {Gr\"obner basis} for {$I_j$}.
{\bf Proof} Clearly {$\langle B_j\rangle\subseteq I_j$}.
If there were {$f\in I_j$} but {$f\notin \langle B_j\rangle$},
then {$LM(f)$} could not be divisible by {$LM(b)$} for any {$b\in B$}.
{\bf Theorem}
If {$(a_n,\ldots,a_1)\in V(I)$}, then
{$(a_j,\ldots,a_1)\in V(I_j)$} {\em extends} to {$(a_{j+1},a_j,\ldots,a_1)\in V(I_{j+1})$}.
Computationally it is relatively easy to change either {projective}
or {rational} problems into {affine} ones.
The ideal
\[{I^{(h)}:=\langle (x_n-1)(x_{n-1}-1)\cdots(x_0-1),\ x_{n-1}(x_{n-1}-1)\cdots
(x_0-1),\ \ldots,\ x_0(x_0-1)\rangle}\]
can be appended to restrict {$\overline{\mathbf{F}}^{n+1}$} to {$\mathbf{P}^n(\overline{\mathbf{F}})$};
while
\[{I^{(H)}:=\langle (x_n-1)(h_n-1),h_n(h_n-1),\cdots,(x_1-1)(h_1-1), h_1(h_1-1)\rangle}\]
can be appended to restrict {$\overline{\mathbf{F}}^{2n}$} to {$(\mathbf{P}^1(\overline{\mathbf{F}}))^n$}.
Consider the extension theorem from CLO 3.1.
Written in our notation this is
{\bf Theorem}[CLO: The Extension Theorem]
Write the elements of a Gr\"obner basis for {$I:=I_n\subset R:=R_n:=\overline{\mathbf{F}}[x_n,\ldots,x_1]$}
as {$f_{j,k}\in R_j\backslash R_{j-1}$} for {$1\leq k\leq k(j)$},
each viewed as an element of {$R_{j-1}[x_j]$} with leading coefficient {$g_{j,k}\in R_{j-1}$}.
If {$(a_{j-1},\ldots,a_1)\in V(I_{j-1})$} and {$g_{j,k}(a_{j-1},\ldots,a_1)\neq 0$} for some {$k$},
then there is at least one {$a_j\in\overline{\mathbf{F}}$} such that {$(a_j,\ldots,a_1)\in V(I_j)$}.
What is wrong with this?
First, it is not constructive, and it doesn't really say that {$V(I_n)$} is gotten by extension as our theorem does.
The example considered there has {$f_{2,1}:=x_2-x_1$}, {$f_{3,1}:=x_3x_1-1$} and {$f_{3,2}:=x_3x_2-1$}.
While any {$a_1\in\overline{\mathbf{F}}$} extends to {$a_2=a_1$}, this only extends to {$a_3=1/a_1$} when {$a_1\neq 0$}.
Were this done in rational terms, {$f_{2,1}:=x_2h_1-x_1h_2$}, {$f_{3,1}:=x_3x_1-h_3h_1$} and {$f_{3,2}:=x_3x_2-h_3h_2$}.
Any {$(a_1:b_1)\in\mathbf{P}^1(\overline{\mathbf{F}})$} extends to {$(a_2:b_2)=(a_1:b_1)$},
then to {$(a_3:b_3)=(b_1:a_1)$}.
But consider another example, with
{$f_{2,1}:=x_2^3+x_2x_1+x_1^5$}, {$f_{3,1}:=x_3x_1-x_2^2$}, and {$f_{3,2}:=x_3x_2+x_2+x_1^4$}.
Then any {$a_1\in\overline{\mathbf{F}}$} extends to {$a_2$} such that {$a_2^3+a_2a_1+a_1^5=0$},
and there are between 1 and 3 such values.
But the CLO extension theorem would say that this extends to {$a_3:=a_2^2/a_1$} for {$a_1\neq 0$} (and {$a_2\neq 0$}).
It says nothing whatsoever about what happens when {$a_1=0=a_2$}, yet this should extend to any {$a_3\in\overline{\mathbf{F}}$}.
Our theorem says that constructively
{$f_{2,1}(x_2)=x_2^3+x_2a_1+a_1^5=0$} produces the values {$a_2$} extending {$a_1$}.
Then for {$a_1=0=a_2$}, {$f_{3,1}(x_3,0,0)=0=f_{3,2}(x_3,0,0)$} means any {$a_3\in\overline{\mathbf{F}}$} extends;
while for {$a_1\neq 0\neq a_2$}, {$f_{3,1}(x_3,a_2,a_1)=x_3a_1-a_2^2=0$} and {$f_{3,2}(x_3,a_2,a_1)=x_3a_2+a_2+a_1^4=0$}
have common solution {$a_3=a_2^2/a_1=-(a_2+a_1^4)/a_2$} as extension.
So were one to write a theorem of this flavor but constructively,
one should do it in rational terms (with the statement essentially being the proof).
{\bf Theorem}[{Constructive rational extension}]
Let \[{d_j(x_j,h_j):=\gcd\{ f_{j,k}(x_j,h_j,a_{j-1},b_{j-1},\ldots,a_1,b_1)\ :\ 1\leq k\leq k(j)\}}.\]
\begin{itemize}
\item
If {$d_j(x_j,h_j)$} depends on {$x_j$},
then there is extension to {$(a_j:1)$} for {$a_j$} any root of {$d_j(x_j,1)$}.
\item If it doesn't depend on {$x_j$} but does depend on {$h_j$},
then there is extension to {$(a_j:b_j)=(1:0)$}.
%and there is extension to \txb{$(a_j:b_j)$} when \txb{$d_j(x_j,1)=0$}.
\item If it is independent of both,
then there is extension to any {$(a_j:b_j)\in\mathbf{P}^1(\overline{\mathbf{F}})$}.
\end{itemize}
\end{document}
\end{document}