\section{Notational Conventions}
This section provides a fairly terse overview of the foundational
mathematical concepts used in this paper.
While we try to reason about \tame{} in terms of algebra,
first-order logic,
and set theory,
notation varies even within those branches.
To avoid ambiguity,
especially while introducing our own notation,
core operators and concepts are explicitly defined below.

This section begins its numbering at~0.
This is not only a hint that \tame{} (and this paper) uses 0-indexing,
but also because equations, definitions, theorems, corollaries, and the
like are all numbered relative to their section.
When you see any of these prefixed with ``0.'',
that prefix sets the reference aside as a foundational mathematical concept
that is not part of the theory and operation of \tame{} itself.
\subsection{Propositional Logic}
\index{logic!propositional}
We reproduce here certain axioms and corollaries of propositional logic for
convenience and to clarify our interpretation of certain concepts.
The use of the symbols $\land$, $\lor$, and~$\neg$ is standard.
\indexsym\infer{infer}
\index{infer (\ensuremath\infer)}
The symbol $\infer$ means ``infer''.
We use $\implies$ in place of $\rightarrow$ for implication,
since the latter is used to denote the mapping of a domain to a codomain
in reference to functions.
We further use $\equiv$ in place of $\leftrightarrow$ to represent material
equivalence.

\indexsym\land{conjunction}
\index{conjunction (\ensuremath{\land})}
\begin{definition}[Logical Conjunction]
$p,q \infer (p\land q)$.
\end{definition}

\indexsym\lor{disjunction}
\index{disjunction (\ensuremath{\lor})}
\begin{definition}[Logical Disjunction]
$p \infer (p\lor q)$ and $q \infer (p\lor q)$.
\end{definition}

\begin{definition}[$\land$-Associativity]\dfnlabel{conj-assoc}
$(p \land (q \land r)) \infer ((p \land q) \land r)$.
\end{definition}

\begin{definition}[$\lor$-Associativity]\dfnlabel{disj-assoc}
$(p \lor (q \lor r)) \infer ((p \lor q) \lor r)$.
\end{definition}

\begin{definition}[$\land$-Commutativity]\dfnlabel{conj-commut}
$(p \land q) \infer (q \land p)$.
\end{definition}

\begin{definition}[$\lor$-Commutativity]\dfnlabel{disj-commut}
$(p \lor q) \infer (q \lor p)$.
\end{definition}

\begin{definition}[$\land$-Simplification]\dfnlabel{conj-simpl}
$p \land q \infer p$.
\end{definition}

\begin{definition}[Double Negation]\dfnlabel{double-neg}
$\neg\neg p \infer p$.
\end{definition}

\indexsym\neg{negation}
\index{negation (\ensuremath{\neg})}
\index{law of excluded middle}
\begin{definition}[Law of Excluded Middle]
$\infer (p \lor \neg p)$.
\end{definition}

\index{law of non-contradiction}
\begin{definition}[Law of Non-Contradiction]
$\infer \neg(p \land \neg p)$.
\end{definition}

\index{De Morgan's theorem}
\begin{definition}[De Morgan's Theorem]\dfnlabel{demorgan}
$\neg(p \land q) \infer (\neg p \lor \neg q)$
and $\neg(p \lor q) \infer (\neg p \land \neg q)$.
\end{definition}

\indexsym\equiv{equivalence}
\index{equivalence!material (\ensuremath{\equiv})}
\begin{definition}[Material Equivalence]
$p\equiv q \infer \big((p \land q) \lor (\neg p \land \neg q)\big)$.
\end{definition}

$\equiv$ denotes a logical identity.
Consequently,
it will often be used as a definition operator.

\indexsym{\!\!\implies\!\!}{implication}
\index{implication (\ensuremath{\implies})}
\begin{definition}[Implication]
$p\implies q \infer (\neg p \lor q)$.
\end{definition}
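
As a brief worked example of how these definitions compose---%
an illustration only, not an additional axiom---%
implication can equivalently be expressed in terms of conjunction by
applying \dfnref{double-neg} and \dfnref{demorgan} to the definition above:

\begin{alignat*}{3}
  p\implies q &\equiv \neg p \lor q &&\text{by definition} \\
              &\equiv \neg\neg(\neg p \lor q)\qquad
                &&\text{by \dfnref{double-neg}} \\
              &\equiv \neg(\neg\neg p \land \neg q)
                &&\text{by \dfnref{demorgan}} \\
              &\equiv \neg(p \land \neg q). &&\text{by \dfnref{double-neg}}
\end{alignat*}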
\begin{definition}[Tautologies]\dfnlabel{prop-taut}
$p\equiv (p\land p)$ and $p\equiv (p\lor p)$.
\end{definition}

\indexsym{\true}{boolean, true}
\indexsym{\false}{boolean, false}
\index{boolean!FALSE@\tamefalse{} (\false)}%
\index{boolean!TRUE@\tametrue{} (\true)}%
\begin{definition}[Truth Values]\dfnlabel{truth-values}
$\infer\true$ and $\infer\neg\false$.
\end{definition}

\indexsym\Int{integer}
\index{integer (\Int)}%
\begin{definition}[Boolean/Integer Equivalency]\dfnlabel{bool-int}
$\Set{0,1}\subset\Int$, $\false \equiv 0$, and $\true \equiv 1$.
\end{definition}
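
As a small illustration of \dfnref{truth-values} and
\dfnref{bool-int}---an example rather than an additional axiom---we have

\begin{equation*}
  (\true \land \false) \equiv \false \equiv 0,
  \qquad
  (\true \lor \false) \equiv \true \equiv 1.
\end{equation*}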
\subsection{First-Order Logic and Set Theory}
\index{logic!first-order}
\indexsym\emptyset{set empty}
\indexsym{\Set{}}{set}
\index{set!empty (\ensuremath{\emptyset, \{\}})}
The symbol $\emptyset$ represents the empty set---%
the set containing no elements.
We assume that the axioms of ZFC~set theory hold,
but define $\in$ here for clarity.

\todo{Introduce set-builder notation, $\union$, $\intersect$.}
\indexsym\in{set membership}
\indexsym\union{set, union}
\indexsym\intersect{set, intersection}
\index{set!membership@membership (\ensuremath\in)}
\index{set!union (\ensuremath\union)}
\index{set!intersection (\ensuremath\intersect)}
\begin{definition}[Set Membership]
$x \in S \equiv \Set{x} \intersect S \not= \emptyset.$
\end{definition}

\index{domain of discourse}
$\forall$ denotes first-order universal quantification (``for all''),
and $\exists$ first-order existential quantification (``there exists''),
over some domain of discourse.

\indexsym\exists{quantification, existential}
\index{quantification!existential (\ensuremath\exists)}
\begin{definition}[Existential Quantification]\dfnlabel{exists}
$\Exists{x\in X}{P(x)} \equiv
\true \in \Set{P(x) \mid x\in X}$.
\end{definition}
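
For example,
taking the illustrative predicate $P(x) \equiv x > 1$ over the
domain $\Set{1,2}$,
\dfnref{exists} gives

\begin{equation*}
  \Exists{x\in\Set{1,2}}{x>1}
    \equiv \true \in \Set{x>1 \mid x\in\Set{1,2}}
    \equiv \true \in \Set{\false,\true}
    \equiv \true.
\end{equation*}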
\indexsym\forall{quantification, universal}
\index{quantification!universal (\ensuremath\forall)}
\begin{definition}[Universal Quantification]\dfnlabel{forall}
$\Forall{x\in X}{P(x)} \equiv \neg\Exists{x\in X}{\neg P(x)}$.
\end{definition}

\index{quantification!vacuous truth}
\begin{remark}[Vacuous Truth]\remlabel{vacuous-truth}
By \dfnref{exists}, $\Exists{x\in\emptyset}P \equiv \false$
and by \dfnref{forall}, $\Forall{x\in\emptyset}P \equiv \true$.
And so we also have the tautologies $\infer \neg\Exists{x\in\emptyset}P$
and $\infer \Forall{x\in\emptyset}P$.
Empty domains lead to undesirable consequences---%
in particular,
we must carefully guard against them in \dfnref{quant-conn} and
\dfnref{quant-elim} to maintain soundness.
\end{remark}

We also have this shorthand notation:

\index{quantification!\ensuremath{\forall x,y,z}}
\index{quantification!\ensuremath{\exists x,y,z}}
\begin{align}
\Forall{x,y,z\in S}P \equiv
\Forall{x\in S}{\Forall{y\in S}{\Forall{z\in S}P}}, \\
\Exists{x,y,z\in S}P \equiv
\Exists{x\in S}{\Exists{y\in S}{\Exists{z\in S}P}}.
\end{align}

\begin{definition}[Quantifiers Over Connectives]\dfnlabel{quant-conn}
Assuming that $x$ is not free in $\varphi$,
\begin{alignat*}{3}
\varphi\land\Exists{x\in X}{P(x)}
&\equiv \Exists{x\in X}{\varphi\land P(x)}, \\
\varphi\lor\Exists{x\in X}{P(x)}
&\equiv \Exists{x\in X}{\varphi\lor P(x)}
\qquad&&\text{assuming $X\neq\emptyset$}.
\end{alignat*}
\end{definition}

\begin{definition}[Quantifier Elimination]\dfnlabel{quant-elim}
$\Exists{x\in X}{\varphi} \equiv \varphi$ assuming $X\neq\emptyset$
and $x$ is not free in~$\varphi$.
\end{definition}
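
To see why the guard $X\neq\emptyset$ in \dfnref{quant-elim} is necessary,
consider the illustrative case $\varphi = \true$ with an empty domain:
by \dfnref{exists},
$\Exists{x\in\emptyset}{\true} \equiv \true\in\emptyset \equiv \false$,
which is not equivalent to~$\varphi$.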
\subsection{Functions}
\indexsym{f, g}{function}
\indexsym\mapsto{function, map}
\indexsym\rightarrow{function, domain map}
\index{function}
\index{function!map (\ensuremath\mapsto)}
\index{map|see {function}}
\index{function!domain}
\index{function!codomain}
\index{domain|see {function, domain}}
\index{function!domain map (\ensuremath\rightarrow)}
The notation $f = x \mapsto x' : A\rightarrow B$ represents a function~$f$
that maps $x$ to~$x'$,
where $x\in A$ (the domain of~$f$) and $x'\in B$ (the codomain of~$f$).

\indexsym\times{set, Cartesian product}
\index{set!Cartesian product (\ensuremath\times)}
A function $A\rightarrow B$ can be represented as a subset of the Cartesian
product of its domain and codomain, $A\times B$.
For example,
$x\mapsto x^2 : \Int\rightarrow\Int$ is represented by the set of ordered
pairs $\Set{(x,x^2) \mid x\in\Int}$, which looks something like

\begin{equation*}
\Set{\ldots,\,(0,0),\,(1,1),\,(2,4),\,(3,9),\,\ldots}.
\end{equation*}

\indexsym{[\,]}{function, image}
\index{function!image (\ensuremath{[\,]})}
\index{function!as a set}
The set of values over which some function~$f$ ranges is its \dfn{image},
which is a subset of its codomain.
In the example above,
both the domain and codomain are the set of integers~$\Int$,
but the image is $\Set{x^2 \mid x\in\Int}$,
which is clearly a subset of~$\Int$.

We therefore have

\begin{align}
A \rightarrow B &\subset A\times B, \\
f : A \rightarrow B &\infer f \subset A\times B, \\
f = \alpha \mapsto \alpha' : A \rightarrow B
&= \Set{(\alpha,\alpha')
\mid \alpha\in A \land \alpha'\in B}, \\
f[D\subseteq A] &= \Set{f(\alpha) \mid \alpha\in D} \subset B, \\
f[] &= f[A].
\end{align}
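
Continuing the squaring example above---%
purely as an illustration of the image notation---%
if $f = x\mapsto x^2 : \Int\rightarrow\Int$,
then

\begin{equation*}
  f[\Set{2,3}] = \Set{f(2),\,f(3)} = \Set{4,9},
  \qquad
  f[] = f[\Int] = \Set{x^2 \mid x\in\Int}.
\end{equation*}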
\indexsym{()}{tuple}
\index{tuple (\ensuremath{()})}
\index{relation|see {function}}
An ordered pair $(x,y)$ is also called a \dfn{$2$-tuple}.
Generally,
an \dfn{$n$-tuple} is used to represent the arguments of an $n$-ary
function,
where by convention we have $(x)=x$.
So, for a binary function $f = (x,y) \mapsto x+y$,
we have $f(x,y) = f((x,y)) = x+y$.
If we let $t=(x,y)$,
then we also have $f(x,y) = ft$,
which we'll sometimes write as a subscript~$f_t$ where disambiguation is
necessary and where parentheses may add too much noise;
this notation is especially well-suited to indexes,
as in $f_1$.
Binary functions are often written using \dfn{infix} notation;
for example, we have $x+y$ rather than $+(x,y)$.

Relating subscript notation back to the representation of a function as a
set of ordered pairs,
we have

\begin{equation}
f_x = f(x) \in \Set{b \mid (x,b) \in f}
\end{equation}
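
For example,
for the squaring function $f = x\mapsto x^2$ from earlier,
$f_3 = f(3) = 9$,
since $(3,9)\in f$.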
\subsubsection{Binary Operations On Functions}
\indexsym{R}{relation}
Consider two unary functions $f$ and~$g$,
and a binary relation~$R$.
\indexsym{\bicomp{R}}{function, binary composition}
\index{function!binary composition (\ensuremath{\bicomp{R}})}
We introduce a notation~$\bicomp R$ to denote the composition of a binary
function with two unary functions.

\begin{align}
f &: A \rightarrow B \\
g &: A \rightarrow D \\
R &: B\times D \rightarrow F \\
f \bicomp{R} g &= \alpha \mapsto f_\alpha R g_\alpha : A \rightarrow F
\end{align}
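
For example---%
using functions chosen purely for illustration---%
let $f = \alpha\mapsto\alpha+1$ and $g = \alpha\mapsto 2\alpha$,
both $\Int\rightarrow\Int$,
with $+$ as the binary function;
then

\begin{equation*}
  f \bicomp+ g
    = \alpha \mapsto f_\alpha + g_\alpha
    = \alpha \mapsto 3\alpha + 1
    : \Int\rightarrow\Int.
\end{equation*}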
\indexsym\circ{function, composition}
\index{function!composition (\ensuremath\circ)}
Note that $f$ and~$g$ must share the same domain~$A$.
In that sense,
this is the mapping of the operation~$R$ over the domain~$A$.
This is analogous to unary function composition~$f\circ g$.

\index{function!constant}
A scalar value~$x$ can be mapped onto some function~$f$ using a constant
function.
For example,
consider adding some number~$x$ to each element in the image of~$f$:

\begin{equation*}
f \bicomp+ (\_\mapsto x) = \alpha \mapsto f_\alpha + x.
\end{equation*}

\indexsym{\_}{variable, wildcard}
\index{variable!wildcard/hole (\ensuremath{\_})}
The symbol~$\_$ is used to denote a variable that matches anything but is
never referenced,
and is often referred to as a ``wildcard'' (since it matches anything)
or a ``hole'' (since its value goes nowhere).

Note that we consider the bracket notation for the image of a function
$(f:A\rightarrow B)[A]$ itself to be a binary function.
Given that, we have $f\bicomp{[]} = f\bicomp{[A]}$ for functions returning
functions (such as vectors of vectors in \secref{vec}).
\subsection{Monoids and Sequences}\seclabel{monoids}
\index{abstract algebra!monoid}
\index{monoid|see abstract algebra, monoid}
\begin{definition}[Monoid]\dfnlabel{monoid}
Let $S$ be some set. A \dfn{monoid} is a triple $\Monoid S\bullet e$
with the axioms

\begin{align}
\bullet &: S\times S \rightarrow S
\tag{Monoid Binary Closure} \\
\Forall{a,b,c\in S&}{a\bullet(b\bullet c) = (a\bullet b)\bullet c},
\tag{Monoid Associativity} \\
\Exists{e\in S&}{\Forall{a\in S}{e\bullet a = a\bullet e = a}}.
\tag{Monoid Identity}\label{eq:monoid-identity}
\end{align}
\end{definition}

\index{abstract algebra}
\index{abstract algebra!semigroup}
Monoids originate from abstract algebra.
A monoid is a semigroup with an added identity element~$e$.
Only the identity element is required to commute with every element of~$S$,
but if the binary operation~$\bullet$ is \emph{also} commutative,
then the monoid is a \dfn{commutative monoid}.\footnote{%
A commutative monoid is less frequently referred to as an
\dfn{abelian monoid},
related to the common term \dfn{abelian group}.}

Consider some sequence of operations
$x_0 \bullet\cdots\bullet x_n \in S$.
Intuitively,
a monoid tells us how to combine that sequence into a single element
of~$S$.
When the sequence has one or zero elements,
we then use the identity element $e\in S$:
as $x_0 \bullet e = x_0$ in the case of one element
or $e \bullet e = e$ in the case of zero.

\indexsym\cdots{sequence}
\index{sequence}
\begin{definition}[Monoidic Sequence]\dfnlabel{monoid-seq}
Generally,
given some monoid $\Monoid S\bullet e$ and a sequence $\Fam{x}jJ\in S$
where $n<|J|$,
we let
$x_0\bullet x_1\bullet\cdots\bullet x_{n-1}\bullet x_n$
represent the successive application of the binary operation to all
indexed elements of~$x$.
When it's clear from context that the index is increasing by a constant
of~$1$,
that notation is shortened to $x_0\bullet\cdots\bullet x_n$ to save
space.
When $|J|=1$, then $n=0$ and we have the sequence $x_0$.
When $|J|=0$, then $n=-1$,
and no such sequence exists,
in which case we expand into the identity element~$e$.
\end{definition}

For example,
given the monoid~$\Monoid\Int+0$,
the sequence $1+2+\cdots+4+5$ can be shortened to
$1+\cdots+5$ and represents the sum
$1+2+3+4+5=15$.
If $x=\Set{1,2,3,4,5}$,
$x_0+\cdots+x_n$ represents the same sequence.
If $x=\Set{1}$,
the sequence is simply $x_0=1$.
If $x=\Set{}$,
we have the identity element~$0$.

\index{conjunction!monoid}
\begin{lemma}\lemlabel{monoid-land}
$\Monoid\Bool\land\true$ is a commutative monoid.
\end{lemma}
\begin{proof}
$\Monoid\Bool\land\true$ is associative by \dfnref{conj-assoc}
and commutative by \dfnref{conj-commut}.
The identity element is~$\true\in\Bool$ by \dfnref{conj-simpl}.
\end{proof}

\index{disjunction!monoid}
\begin{lemma}\lemlabel{monoid-lor}
$\Monoid\Bool\lor\false$ is a commutative monoid.
\end{lemma}
\begin{proof}
$\Monoid\Bool\lor\false$ is associative by \dfnref{disj-assoc}
and commutative by \dfnref{disj-commut}.
The identity $\false\in\Bool$ follows from

\begin{alignat*}{3}
\false \lor p &\equiv p \lor \false &&\text{by \dfnref{disj-commut}} \\
&\equiv \neg(\neg p \land \neg\false)\qquad
&&\text{by \dfnref{demorgan}} \\
&\equiv \neg(\neg p) &&\text{by \dfnref{conj-simpl}} \\
&\equiv p. &&\text{by \dfnref{double-neg}} \tag*\qedhere
\end{alignat*}
\end{proof}
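
For example---%
applying \dfnref{monoid-seq} to these two monoids---%
the sequence $\true\land\true\land\false$ combines into~$\false$,
an empty sequence of conjuncts expands into the identity~$\true$ of
$\Monoid\Bool\land\true$,
and an empty sequence of disjuncts expands into the identity~$\false$ of
$\Monoid\Bool\lor\false$.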
\goodbreak% Fits well on its own page, if we're near a page boundary
\subsection{Vectors and Index Sets}\seclabel{vec}
\tame{} supports scalar, vector, and matrix values.
Unfortunately,
its implementation history leaves those concepts a bit tortured.

A vector is a sequence of values, defined as a function of
an index~set.

% TODO: font changes in index, making langle unavailable
%\indexsym{\Vector{}}{vector}
\index{vector!definition (\ensuremath{\Vector{}})}
\index{sequence|see vector}
\indexsym\Vectors{vector}
\index{real number (\ensuremath\Real)}
\indexsym\Real{real number}
\indexsym{\Fam{a}jJ}{index set}
\index{family|see {index set}}
\index{index set (\ensuremath{\Fam{a}jJ})}
\begin{definition}[Vector]\dfnlabel{vec}
Let $J\subset\Int$ represent an index set.
A \dfn{vector}~$v\in\Vectors^\Real$ is a totally ordered sequence of
elements represented as a function of an element of its index set:
\begin{equation}\label{vec}
v = \Vector{v_0,\ldots,v_j}^{\Real}_{j\in J}
= j \mapsto v_j : J \rightarrow \Real.
\end{equation}
\end{definition}

This definition means that $v_j = v(j)$,
making the subscript a notational convenience.
We may omit the superscript such that $\Vectors^\Real=\Vectors$
and $\Vector{\ldots}^\Real=\Vector{\ldots}$.
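
For example---%
using values chosen purely for illustration---%
the vector $v = \Vector{3,5,7}$ has index set $J = \Set{0,1,2}$,
so $v_1 = v(1) = 5$,
and as a function it is the set of ordered pairs
$\Set{(0,3),\,(1,5),\,(2,7)}$.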
\index{vector!matrix}
\begin{definition}[Matrix]\dfnlabel{matrix}
Let $J\subset\Int$ represent an index set.
A \dfn{matrix}~$M\in\Matrices$ is a totally ordered sequence of
elements represented as a function of an element of its index set:
\begin{equation}
M = \Vector{M_0,\ldots,M_j}^{\Vectors^\Real}_{j\in J}
= j \mapsto M_j : J \rightarrow \Vectors^\Real.
\end{equation}
\end{definition}

The consequences of \dfnref{matrix}---%
defining a matrix as a vector of independent vectors---%
are important.
This defines a matrix to be more like a multidimensional array,
with no requirement that the lengths of the vectors be equal.

\begin{corollary}[Matrix Row Length Variance]\corlabel{matrix-row-len}
$\infer \Exists{M\in\Matrices}{\neg\Forall*{j}{\Forall{k}{\len{M_j} = \len{M_k}}}}$.
\end{corollary}

\corref{matrix-row-len} can be read ``there exists some matrix~$M$ such that
not all row lengths of~$M$ are equal''.
In other words---%
the inner vectors of a matrix can vary in length.
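
For example---%
an illustration of \corref{matrix-row-len} with arbitrary values---%
the matrix $M = \Vector{\Vector{1,2,3},\Vector{4}}$ is perfectly valid,
with $\len{M_0} = 3$ and $\len{M_1} = 1$.
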
Since a vector is a function,
a vector or matrix can be converted into a set of unique elements like so:

\begin{alignat*}{2}
\bigcup\Vector{\Vector{0,1},\Vector{2,2},\Vector{2,0}}\!\bicomp{[]}
&\mapsto &&\bigcup\Vector{\Vector{0,1}\![],\Vector{2,2}\![],\Vector{2,0}\![]}\![] \\
&\mapsto &&\bigcup\Vector{\Set{0,1},\Set{2},\Set{2,0}}\![] \\
&\mapsto &&\bigcup\Set{\Set{0,1},\Set{2},\Set{2,0}} \\
&= &&\Set{0,1,2}.
\end{alignat*}
\subsection{XML Notation}
\indexsym{\xml{<>}}{XML}
\index{XML!notation (\xml{<>})}
The grammar of \tame{} is XML.
Equivalence relations will be used to map source expressions to an
underlying mathematical expression.
For example,

\begin{equation*}
\xml{<match on="$x$" value="$y$" />} \equiv x = y
\end{equation*}

\noindent
defines that pattern of \xmlnode{match} expression to be materially
equivalent to~$x=y$---%
anywhere an equality relation appears,
you could equivalently replace it with that XML representation without
changing the meaning of the mathematical expression.