authorAndreas Müller <andreas.mueller@ost.ch>2022-08-27 02:58:28 +0200
committerGitHub <noreply@github.com>2022-08-27 02:58:28 +0200
commit876dadd350483c251fd82c45be0620178122295c (patch)
treedd145c9d0c532742c22eaa39405f3fb2ad173357 /buch
parentMerge pull request #72 from haddoucher/master (diff)
parentsome minor corrections (diff)
Merge pull request #73 from canuelmattaneo/master
Recurrence relations + Spherical harmonic expansion
Diffstat (limited to '')
-rw-r--r--buch/papers/kugel/packages.tex4
-rw-r--r--buch/papers/kugel/preliminaries.tex7
-rw-r--r--buch/papers/kugel/proofs.tex4
-rw-r--r--buch/papers/kugel/references.bib9
-rw-r--r--buch/papers/kugel/sections/Introduction.tex318
-rw-r--r--buch/papers/kugel/spherical-harmonics.tex355
6 files changed, 639 insertions, 58 deletions
diff --git a/buch/papers/kugel/packages.tex b/buch/papers/kugel/packages.tex
index ead7653..c02589f 100644
--- a/buch/papers/kugel/packages.tex
+++ b/buch/papers/kugel/packages.tex
@@ -16,5 +16,5 @@
\node[gray, anchor = center] at ({#1 / 2}, {#2 / 2}) {\Huge \ttfamily \bfseries TODO};
\end{tikzpicture}}
-\DeclareMathOperator{\sphlaplacian}{\nabla^2_{\mathit{S}}}
-\DeclareMathOperator{\surflaplacian}{\nabla^2_{\partial \mathit{S}}}
+\DeclareMathOperator{\sphlaplacian}{\nabla^2_{S}}
+\DeclareMathOperator{\surflaplacian}{\nabla^2_{\partial S}}
diff --git a/buch/papers/kugel/preliminaries.tex b/buch/papers/kugel/preliminaries.tex
index e48abe4..1fa78d7 100644
--- a/buch/papers/kugel/preliminaries.tex
+++ b/buch/papers/kugel/preliminaries.tex
@@ -1,6 +1,6 @@
% vim:ts=2 sw=2 et spell tw=78:
-\section{Preliminaries}
+\section{Preliminaries}\label{kugel:sec:preliminaries}
The purpose of this section is to dust off some concepts that will become
important later on. This will enable us to be able to get a richer and more
@@ -318,11 +318,12 @@ convergence.
\end{definition}
\begin{theorem}[Fourier Theorem]
- \[
+ \label{fourier-theorem-1D}
+ \begin{equation*}
\lim_{N \to \infty} \left \|
f(x) - \sum_{n = -N}^N \hat{f}(n) E_n(x)
\right \|_2 = 0
- \]
+ \end{equation*}
\end{theorem}
\begin{lemma}
diff --git a/buch/papers/kugel/proofs.tex b/buch/papers/kugel/proofs.tex
index 143caa8..93b3857 100644
--- a/buch/papers/kugel/proofs.tex
+++ b/buch/papers/kugel/proofs.tex
@@ -1,5 +1,5 @@
% vim:ts=2 sw=2 et spell tw=80:
-\section{Proofs}
+\section{(long) Proofs}
\subsection{Legendre Functions} \label{kugel:sec:proofs:legendre}
@@ -166,7 +166,7 @@
\end{proof}
-\begin{lemma}
+\begin{lemma}\label{kugel:lemma:sol_associated_leg_eq}
If $Z_n(z)$ is a solution of the Legendre equation \eqref{kugel:eqn:legendre},
then
\begin{equation*}
diff --git a/buch/papers/kugel/references.bib b/buch/papers/kugel/references.bib
index e5d6452..984d555 100644
--- a/buch/papers/kugel/references.bib
+++ b/buch/papers/kugel/references.bib
@@ -17,6 +17,15 @@
file = {Submitted Version:/Users/npross/Zotero/storage/SN4YUNQC/Carvalhaes and de Barros - 2015 - The surface Laplacian technique in EEG Theory and.pdf:application/pdf},
}
+@article{usecase_recursion_paper,
+ title = {New Implementation of Legendre Polynomials for Solving Partial Differential Equations},
+	url = {https://www.researchgate.net/publication/272767969_New_Implementation_of_Legendre_Polynomials_for_Solving_Partial_Differential_Equations},
+	shorttitle = {Implementation of Legendre Polynomials},
+	date = {2013-12},
+	author = {Davari, Ali and Ahmadi, Abozar}
+}
+
@video{minutephysics_better_2021,
title = {A Better Way To Picture Atoms},
url = {https://www.youtube.com/watch?v=W2Xb2GFK2yc},
diff --git a/buch/papers/kugel/sections/Introduction.tex b/buch/papers/kugel/sections/Introduction.tex
new file mode 100644
index 0000000..cdefea7
--- /dev/null
+++ b/buch/papers/kugel/sections/Introduction.tex
@@ -0,0 +1,318 @@
+
+\section{Introduction \label{kugel:section:intro}}
+This is the part of the book devoted to the set of functions called spherical harmonics.
+However, before we dive into the topic, we want to make a few preliminary remarks that will avoid ``upsetting'' certain types of readers. \newline
+Since this is a purely mathematical topic, we felt it was appropriate to specify the mathematical style with which we will approach the various topics covered.\newline
+While writing, we decided to try to give a proof for every theorem and statement we make. However, we would like to specify that the authors of this chapter are not mathematicians and our aim is not to prove as rigorously as possible everything we say. \newline
+A proof, to be rigorous, should consist of several simple steps using fundamental axioms. In our case, proofs are often not carried out in this way; rather, we try to convince the reader that what we are saying stays well within the boundaries of logical reasoning.\newline
+Sometimes we might also come across handier arguments based on intuition, which can serve as a guide for the reader but would never be accepted in a mathematical context.\newline
+That being said, we can get on with the interesting topics.\newline
+When talking about Spherical Harmonics, one could start by describing the name. The latter could cause some confusion because of the various misleading translations into other languages, which do not fully reflect the meaning implied by the English name.\newline
+As an example, the German name for this function set is ``Kugelfunktionen'', which might imply functions defined in a spherical context, since ``Kugel'' is the German word for sphere.\newline
+In contrast, the English name contains the concept of ``harmonic,'' which fits well in this context.\newline
+In fact, harmonic analysis is the branch of mathematics that deals with the representation of functions using other fundamental ones, which are often easier to consider. These are called harmonics, and during the course of this chapter, you will learn that spherical harmonics belong to this class of functions, as indeed the name suggests.\newline
+The structure of this chapter is organized in such a way that some mathematical concepts introduced at the beginning will later help the reader to understand the big picture of the subject discussed.\newline
+We could have performed the whole derivation without writing section \ref{kugel:ssection:preleminary}, but we thought it would enrich the theory with interesting insights.\newline
+The first introductory sub-chapter is devoted to the topic of vector spaces.\newline
+In this section, we will consider vectors of finite dimension, together with various mathematical operations defined in this space, including linear transformations.\newline
+We will then try to extend the concept of vectors to functions. It may sound strange that we call a function a vector, but in this context it should not be understood only in terms of its geometric sense, as we will try to explain.\newline
+Having established these theoretical foundations, we may go on to the sub-chapter devoted to a well-known problem, namely, the calculation of the Eigenfunctions of the Laplace operator. This will in fact be the mathematical derivation of the spherical harmonics.\newline
+This derivation will allow us to understand in a deeper way some concepts underlying the Fourier theorem, a beloved and extensively used theorem in engineering.\newline
+In the third chapter we will study in more detail these special functions, defined in the previous chapter as spherical harmonics.\newline
+Some of the properties we found most beautiful, interesting and useful will be thoroughly presented.\newline
+To conclude this journey we decided to include some real-world applications of these functions, since being an engineer, as we are, usually means loving to make bridges between theory and practice.
+
+\subsection{Preliminaries \label{kugel:ssection:preleminary}}
+The purpose of this section is to dust off some of the basic concepts that underlie what you will read in the following subsections.\newline
+This will enable the reader to get a richer view of the topic presented in this chapter of the book, without limiting it to the specific example we will cover.
+
+\subsubsection{Vector space \label{kugel:ssection:vector_space}}
+A vector space is a mathematical space in which there are entities, which we will call vectors, and some very simple rules that govern life in this world. These rules are called axioms.
+
+Moreover, in this space, some mathematical operations are also defined, namely, addition, subtraction and linear transformations in general.\newline
+The basic axioms, listed below, are responsible for ruling the behavior and interaction of these vectors.\newline
+A vector space is a non-empty set $\mathcal{V}$ with a special element $\mathbf{0}$. The objects contained in this set are called vectors.\newline
+We can define mathematical operations on it as well.
+\begin{enumerate}
+ \item \textbf{Addition}\newline
+ Given two vectors $v_1, v_2 \in \mathcal{V}$, then
+ \begin{equation*}
+ v_3 = v_2 + v_1 \implies v_3 \in \mathcal{V}
+ \end{equation*}
+
+ \item \textbf{Scalar multiplication}\newline
+ Given a vector $v_1 \in \mathcal{V}$ and a scalar $\alpha \in \mathbb{R}$, then
+ \begin{equation*}
+ v_2 = \alpha v_1 \implies v_2 \in \mathcal{V}
+ \end{equation*}
+\end{enumerate}
+
+These operations have to satisfy the following axioms, for every $v_1,v_2,v_3 \in \mathcal{V}$ and $\alpha, \beta \in \mathbb{R}$
+\begin{enumerate}
+ \item \textbf{Additive axioms}
+ \begin{itemize}
+ \item $v_1 + v_2 = v_2 + v_1$
+ \item $(v_1+v_2)+v_3 = v_1+(v_2+v_3)$
+ \item $v_1 + \mathbf{0} = v_1$
+ \item $v_1 + (-v_1) = \mathbf{0}$
+ \end{itemize}
+
+ \item \textbf{Multiplicative axioms}
+ \begin{itemize}
+		\item $0 v_1 = \mathbf{0}$
+		\item $1 v_1 = v_1$
+ \item $(\alpha \beta) v_1 = \alpha (\beta v_1)$
+ \end{itemize}
+
+ \item \textbf{Distributive axioms}
+ \begin{itemize}
+ \item $\alpha (v_1 + v_2) = \alpha v_1 + \alpha v_2$
+ \item $(\alpha + \beta)v_1 = \alpha v_1 + \beta v_1$
+ \end{itemize}
+\end{enumerate}
+Therefore any mathematical environment in which these rules are met can be called a vector space.
+
+For this sub-chapter we will use vectors in the sense of geometric vectors but, as written earlier, this concept is not limited to geometry and can easily be extended. In the next two points we want to define two fundamental concepts present in this world, namely span and independence.
+
+\paragraph{Span}
+The span of a set of basis vectors $v_1, v_2, \hdots, v_N$ can be seen as the set of all vectors that we can build using a linear combination of them.
+\begin{figure}[!h]
+\centering
+\begin{tikzpicture}
+ \draw[thin,gray!40] (-1,-1) grid (4,4);
+ \draw[->] (-1,0)--(4,0);
+ \draw[->] (0,-1)--(0,4);
+	\draw[line width=1pt,-stealth](0,0)--(0,1) node[anchor=north west]{$\hat{\mathbf{y}}$};
+	\draw[line width=1pt,-stealth](0,0)--(1,0) node[anchor=north east]{$\hat{\mathbf{x}}$};
+ \draw[line width=1pt, gray,-stealth](0,0)--(1,1) node[anchor=north west]{$\hat{\mathbf{x}}'$};
+ \draw[line width=1pt, gray,-stealth](0,0)--(-1,1) node[anchor=north east]{$\hat{\mathbf{y}}'$};
+ \draw[line width=2pt,-stealth, blue](0,0)--(2,3) node[anchor=south east]{$\mathbf{P}$};
+ \draw [blue, decorate,decoration={brace, amplitude=5pt,mirror,raise=4ex}] (0,0) -- (2,0) node[midway,yshift=-3em]{2};
+ \draw [blue, decorate,decoration={brace, amplitude=5pt,raise=4ex}] (0,0) -- (0,3) node[midway,xshift=-3em]{3};
+\end{tikzpicture}
+\caption{Example of the two bases $\{\hat{\mathbf{x}}, \hat{\mathbf{y}}\}$ and $\{\hat{\mathbf{x}}', \hat{\mathbf{y}}'\}$ with a generic vector $\mathbf{P}$. \label{fig:span}}
+\end{figure}
+If we consider Fig.\ref{fig:span}, an example in $\mathbb{R}^2$ can be seen. In this case, the vector $\mathbf{P}$ can in fact be constructed using a linear combination of $\hat{\mathbf{x}}$ and $\hat{\mathbf{y}}$. One can write
+\begin{equation*}
+ \mathbf{P} = 2\hat{\mathbf{x}} + 3\hat{\mathbf{y}}
+\end{equation*}
+Potentially, the vector $\mathbf{P}$ can also be represented by a linear combination of the vectors $\hat{\mathbf{x}}'$ and $\hat{\mathbf{y}}'$.
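+As a small illustration (reading the primed vectors off Fig.\ref{fig:span} as $\hat{\mathbf{x}}' = \hat{\mathbf{x}} + \hat{\mathbf{y}}$ and $\hat{\mathbf{y}}' = -\hat{\mathbf{x}} + \hat{\mathbf{y}}$, i.e.\ without normalizing them), one such decomposition would be
+\begin{equation*}
+	\mathbf{P} = \tfrac{5}{2}\hat{\mathbf{x}}' + \tfrac{1}{2}\hat{\mathbf{y}}',
+\end{equation*}
+obtained by solving $\alpha_1' - \alpha_2' = 2$ and $\alpha_1' + \alpha_2' = 3$ for the primed coefficients.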
+We can further extend this reasoning, because any point described using $\{\hat{\mathbf{x}},\hat{\mathbf{y}}\}$, can be described using $\{\hat{\mathbf{x}}',\hat{\mathbf{y}}'\}$ as well.
+More generally, every point in $\mathbb{R}^2$ can be reached using both bases. We can therefore state that
+\begin{equation*}
+ \text{span}\{\hat{\mathbf{x}},\hat{\mathbf{y}}\} = \text{span}\{\hat{\mathbf{x}}',\hat{\mathbf{y}}'\}.
+\end{equation*}
+This means that a span does not uniquely determine its basis; multiple bases can lead to the same span.\newline
+To summarize, we can say that a vector $v_P$ belongs to a span if it can be constructed using a linear combination of its basis vectors. In mathematical terms:
+\begin{equation}
+	v_P \in \text{span}\{v_1,v_2, \hdots, v_N\} \iff v_P = \sum_{i=1}^N \alpha_i v_i.
+	\label{eq:def:span}
+\end{equation}
+Thus, we can say that a span is the set of all the vectors that satisfy the summation in Eq.(\ref{eq:def:span}).\newline
+An interesting remark is that, according to Eq.(\ref{eq:def:span}), a span always contains the $\mathbf{0}$ vector. That is because by setting $\alpha_i = 0$, $\forall i$, we get $v_P=\mathbf{0}$.
+
+\paragraph{Independence}
+If we define a span of $N$ dimensions, consisting of $\{v_1,v_2, \hdots,v_{N}\}$, and the vector $v_N$ can be constructed using the other vectors of the span, i.e., if the vector $v_N$ does not provide any extra degree of freedom, we say that $v_N$ is linearly dependent, and it can be proved that
+\begin{equation*}
+ \text{span}\{v_1,v_2,..,v_{N-1}, v_{N}\} = \text{span}\{v_1,v_2,..,v_{N-1}\}.
+\end{equation*}
+Furthermore:
+\begin{equation*}
+ \#\text{dimensions of a span} = \#\text{linearly independent vectors}.
+\end{equation*}
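+As a small example based on Fig.\ref{fig:span}: the set $\{\hat{\mathbf{x}}, \hat{\mathbf{y}}, \hat{\mathbf{x}}'\}$ contains three vectors, but if we read $\hat{\mathbf{x}}'$ as $\hat{\mathbf{x}} + \hat{\mathbf{y}}$ it is linearly dependent on the other two, so
+\begin{equation*}
+	\text{span}\{\hat{\mathbf{x}}, \hat{\mathbf{y}}, \hat{\mathbf{x}}'\} = \text{span}\{\hat{\mathbf{x}}, \hat{\mathbf{y}}\},
+\end{equation*}
+which has two dimensions, matching the number of linearly independent vectors.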
+
+\paragraph{Inner product}
+This operation was already introduced in chapter \ref{}. However, in this sub-section, we want to recall some fundamental concepts.\newline
+So far we have only considered the operations of addition and multiplication within a vector space. However, we can now introduce a third operation, namely the inner product.\newline
+In this case we will no longer speak of a simple vector space but of an \emph{inner product space}.
+This new operation is simply a function that receives two vectors as input and maps them to a scalar. This mathematical operation is represented as follows
+\begin{equation*}
+ \langle v_1,v_2 \rangle = k, \quad k \in \mathbb{R}
+\end{equation*}
+The scalar product allows us to introduce some new concepts
+\begin{itemize}
+\item \textbf{The norm}\newline
+The norm is a way to calculate the length of a vector. It can indeed be seen as a general measure of length, defined as
+\begin{equation*}
+ ||v_p|| := \sqrt{\langle v_p,v_p \rangle}
+\end{equation*}
+\item \textbf{Orthogonality}\newline
+This is a concept that will be very important in the next sections.\newline
+Two vectors, $v_1$ and $v_2$, are said to be orthogonal if and only if the inner product of them is equal to zero. More formally:
+\begin{equation*}
+ \text{$v_1$ is orthogonal to $v_2$} \iff \langle v_1,v_2 \rangle = 0
+\end{equation*}
+\end{itemize}
+From the concept of orthogonality, it follows that an orthogonal basis can be defined as a set of vectors, whereby
+\begin{equation*}
+\{v_1,v_2,..., v_N\} \text{ is an orthogonal basis } \iff \langle v_n, v_m \rangle = 0, \quad \text{if } m \neq n.
+\end{equation*}
+We can also consider a more restrictive case. For example, if we are dealing with a set of orthogonal \emph{unit} vectors, we can speak of an \emph{orthonormal} basis. The conditions will then become
+\begin{equation*}
+\{v_1,v_2,..., v_N\} \text{ is an orthonormal basis } \iff
+\begin{cases}
+	\langle v_n, v_m \rangle = 0, &\text{if } m \neq n \\
+	\langle v_n, v_m \rangle = 1, &\text{if } m = n
+\end{cases}.
+\end{equation*}
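+To give a concrete example with the vectors of Fig.\ref{fig:span} (again read off the drawing, using the usual dot product of $\mathbb{R}^2$ that will be formally introduced below): $\{\hat{\mathbf{x}}, \hat{\mathbf{y}}\}$ is an orthonormal basis, while $\{\hat{\mathbf{x}}', \hat{\mathbf{y}}'\}$ is only an orthogonal one, since $\langle \hat{\mathbf{x}}', \hat{\mathbf{y}}' \rangle = -1 + 1 = 0$ but $||\hat{\mathbf{x}}'|| = ||\hat{\mathbf{y}}'|| = \sqrt{2} \neq 1$.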
+
+\paragraph{Projection of a space into a subspace}
+A subspace is simply a vector space that is a subset of a larger (higher-dimensional) vector space.\newline
+For example, all vectors that are on a line passing through the origin form a subspace of $\mathbb{R}^2$. The same holds for all vectors defined on a plane passing through the origin, which itself forms a subspace of $\mathbb{R}^3$.\newline
+It can be shown that a subspace is also a vector space, which can consequently be represented with an orthonormal vector span.\newline
+Suppose now that we have a vector $\mathbf{P}$ in three dimensions (we still remain in the geometric context to give examples) and suppose further that we want to compute its projection in a subspace of two dimensions.\newline
+In a nutshell we want to take a vector, represented with the basis vectors $\{\hat{\mathbf{x}}', \hat{\mathbf{y}}', \hat{\mathbf{z}}'\}$, and project it into the plane spanned by $\{\hat{\mathbf{x}}, \hat{\mathbf{y}}\}$.\newline
+It can easily be seen that $\{\hat{\mathbf{x}}', \hat{\mathbf{y}}', \hat{\mathbf{z}}'\}$ spans $\mathbb{R}^3$. Thus we want to go from a span in three dimensions to one in two.
+\begin{figure}[!h]
+\centering
+\begin{tikzpicture}[scale=3]
+ \filldraw[
+ draw=gray,%
+ fill=gray!20,%
+ ] (0,0,0)
+ -- (1.5,0,0)
+ -- (1.5,0,1.5)
+ -- (0,0,1.5)
+ -- cycle;
+ \draw[thick,->] (0,0,0) -- (1.7,0,0) node[anchor=north east]{$y$};
+ \draw[thick,->] (0,0,0) -- (0,1,0) node[anchor=north west]{$z$};
+ \draw[thick,->] (0,0,0) -- (0,0,1.7) node[anchor=south, xshift=-0.5em]{$x$};
+ \draw[line width=1.6pt, -stealth] (0,0,0)--(1,1,1) node[anchor=south]{$\mathbf{P}$};
+ \draw[line width=0.8pt, -stealth] (0,0,0)--(1,0,1) node[anchor=north west, yshift=0.5em]{$\tilde{\mathbf{P}}$};
+ \draw[line width=1.1pt, -stealth] (0,0,0)--(0,0,1) node[anchor=south east]{$\alpha_1 \hat{\mathbf{x}}$};
+ \draw[line width=1.1pt, -stealth] (0,0,0)--(1,0,0) node[anchor=south]{$\alpha_2 \hat{\mathbf{y}}$};
+ \draw[dashed, -] (1,0,1)--(0,0,1);
+ \draw[dashed, -] (1,0,1)--(1,0,0);
+ \draw[dashed, -] (1,0,1)--(1,1,1);
+
+ \draw[line width=1.1pt, -stealth, blue] (0,0,0)--(2*0.15,0.15,0) node[anchor=west]{$\hat{\mathbf{y}}'$};
+ \draw[line width=1.1pt, -stealth, blue] (0,0,0)--(0,0.15,2*0.15) node[anchor=east]{$\hat{\mathbf{x}}'$};
+ \draw[line width=1.1pt, -stealth, blue] (0,0,0)--(0.15,2*0.15,0) node[anchor=south]{$\hat{\mathbf{z}}'$};
+\end{tikzpicture}
+\caption{Projection of the vector $\mathbf{P}$ onto the subspace spanned by $\{\hat{\mathbf{x}}, \hat{\mathbf{y}}\}$. \label{fig:projection_example}}
+\end{figure}
+If we consider Fig.(\ref{fig:projection_example}), we can visualize the problem by asking ourselves: having $\mathbf{P}, \hat{\mathbf{x}}$ and $\hat{\mathbf{y}}$, how do we calculate $\alpha_1$ and $\alpha_2$?\newline
+Let's say the vector $\mathbf{P}$ in $\mathbb{R}^3$ is defined as follows:
+\begin{equation*}
+\mathbf{P} = \alpha_1' \hat{\mathbf{x}}' + \alpha_2' \hat{\mathbf{y}}' + \alpha_3' \hat{\mathbf{z}}',
+\end{equation*}
+with
+\begin{align*}
+ \hat{\mathbf{x}}' &= \frac{\hat{\mathbf{x}}+\hat{\mathbf{z}}}{\sqrt{2}},\\
+ \hat{\mathbf{y}}' &= \frac{2\hat{\mathbf{y}}+\hat{\mathbf{z}}}{\sqrt{3}},\\
+ \hat{\mathbf{z}}' &= \frac{\hat{\mathbf{y}}+2\hat{\mathbf{z}}}{\sqrt{3}}.
+\end{align*}
+Then, a way to project the vector $\mathbf{P}$ without knowing the coefficients of its basis vectors a priori (in this case the coefficients $\alpha_i'$) must be found.\newline
+This can be done using the inner product defined above.\newline
+The idea is to take $\mathbf{P}$, and project it onto the various axes we have available. Assuming that the basis we want to use in $\mathbb{R}^2$ is also orthonormal, we can write
+\begin{align*}
+\tilde{\mathbf{P}} &= \langle \mathbf{P}, \hat{\mathbf{x}} \rangle \hat{\mathbf{x}} + \langle \mathbf{P}, \hat{\mathbf{y}} \rangle \hat{\mathbf{y}}\\
+&= \alpha_1 \hat{\mathbf{x}} + \alpha_2 \hat{\mathbf{y}}
+\end{align*}
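+As a small made-up numerical example: choosing for instance $\alpha_1' = \sqrt{2}$, $\alpha_2' = \sqrt{3}$ and $\alpha_3' = 0$, the vector becomes $\mathbf{P} = (\hat{\mathbf{x}} + \hat{\mathbf{z}}) + (2\hat{\mathbf{y}} + \hat{\mathbf{z}}) = \hat{\mathbf{x}} + 2\hat{\mathbf{y}} + 2\hat{\mathbf{z}}$, and by linearity and the orthonormality of $\{\hat{\mathbf{x}}, \hat{\mathbf{y}}, \hat{\mathbf{z}}\}$ the two inner products give
+\begin{equation*}
+	\alpha_1 = \langle \mathbf{P}, \hat{\mathbf{x}} \rangle = 1, \quad
+	\alpha_2 = \langle \mathbf{P}, \hat{\mathbf{y}} \rangle = 2, \quad
+	\text{hence} \quad \tilde{\mathbf{P}} = \hat{\mathbf{x}} + 2\hat{\mathbf{y}}.
+\end{equation*}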
+In an informal way we might say that we want to know ``how much of each axis'' is contained in $\mathbf{P}$. That is, how much of the information contained in $\mathbf{P}$ we can describe using $\hat{\mathbf{x}}$ and $\hat{\mathbf{y}}$, respectively.\newline
+It can be shown that the projection we obtain is the representation in fewer dimensions that is closest to the original vector $\mathbf{P}$, meaning
+\begin{equation*}
+\text{min} \big\{ ||\mathbf{P}-v|| \big\} = ||\mathbf{P}-\tilde{\mathbf{P}}||,\quad v \in \text{span}\{ \hat{\mathbf{x}},\hat{\mathbf{y}} \}.
+\end{equation*}
+
+The theory just explained applies to any projection of a space into its subspace, as long as the vectors of both bases are orthonormal.\newline
+In case they are only orthogonal, the same result can be obtained with minor adjustments.
+In the most general case we can say that
+a vector $\mathbf{v}$ can be represented in any orthogonal basis $\{v_1,v_2,\hdots,v_N\}$ as follows:
+\begin{equation}
+\mathbf{v} = \sum_{i=1}^N \alpha_i v_i
+\label{eq:projection}
+\end{equation}
+To calculate the coefficient $\alpha_j$, we can take the inner product with $v_j$ on both sides of Eq.(\ref{eq:projection}), obtaining
+\begin{align*}
+\langle \mathbf{v}, v_j \rangle &= \left\langle \sum_{i=1}^N \alpha_i v_i, v_j \right\rangle \\
+&= \sum_{i=1}^N \langle \alpha_i v_i, v_j \rangle \\
+&= \alpha_j \langle v_j, v_j \rangle \implies \alpha_j = \frac{\langle \mathbf{v}, v_j \rangle}{\langle v_j, v_j \rangle}
+\end{align*}
+We then have a way to represent a vector in $n$ dimensions, using fewer dimensions, in the closest possible way.
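+As a quick check (anticipating the concrete dot product defined below), we can apply this to the example of Fig.\ref{fig:span} with the merely orthogonal basis $\{\hat{\mathbf{x}}', \hat{\mathbf{y}}'\}$ read off the drawing: $\alpha_1 = \langle \mathbf{P}, \hat{\mathbf{x}}' \rangle / \langle \hat{\mathbf{x}}', \hat{\mathbf{x}}' \rangle = (2 + 3)/2 = 5/2$ and $\alpha_2 = \langle \mathbf{P}, \hat{\mathbf{y}}' \rangle / \langle \hat{\mathbf{y}}', \hat{\mathbf{y}}' \rangle = (-2 + 3)/2 = 1/2$, which matches the decomposition $\mathbf{P} = \tfrac{5}{2}\hat{\mathbf{x}}' + \tfrac{1}{2}\hat{\mathbf{y}}'$ given earlier.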
+
+Up to this point we have not yet defined the specific operation of inner product, that is, how it is practically calculated.\newline
+As written earlier, the inner product is an operation that maps two vectors to a scalar. It is additionally defined according to these axioms
+\begin{enumerate}
+\item \textbf{Linearity}
+\begin{align*}
+\langle \alpha v_1 + \beta v_2, v_3 \rangle &= \alpha \langle v_1, v_3 \rangle + \beta \langle v_2, v_3 \rangle \\
+\langle v_1, \alpha v_2 + \beta v_3 \rangle &= \overline{\alpha} \langle v_1, v_2 \rangle + \overline{\beta} \langle v_1, v_3 \rangle
+\end{align*}
+\item \textbf{Conjugate Symmetry}
+\begin{equation*}
+\langle v_1, v_2 \rangle = \overline{ \langle v_2, v_1 \rangle}
+\end{equation*}
+\item \textbf{Positive-definiteness}
+\begin{align*}
+\langle v_1, v_1 \rangle &\geq 0 \\
+\langle v_1, v_1 \rangle &= 0 \iff v_1= \mathbf{0}
+\end{align*}
+\end{enumerate}
+These axioms do not imply a uniqueness of the inner product. We can therefore define it as we think best.\newline
+One possible definition, which meets all the axioms in the case of vectors of finite dimension, is the dot product, i.e.
+\begin{equation*}
+\langle v_1,v_2 \rangle := v_1 \cdot \overline{v_2}
+\end{equation*}
+We might note that when we talk about functions this operation will have to be redefined.
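+As a preview of that redefinition (anticipating definition \ref{kugel:def:inner-product-s2} later in this paper), for two functions $f$ and $g$ the finite sum hidden in the dot product is typically replaced by an integral, e.g.
+\begin{equation*}
+	\langle f, g \rangle := \int f(x) \overline{g(x)} \, dx,
+\end{equation*}
+which satisfies the same three axioms listed above.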
+
+\subsubsection{Eigenvector \label{kugel:ssection:eigenvector}}
+We do not want to spend much time on this concept, since at the bachelor's level we assume it has been explained and used extensively in almost all engineering fields.
+In a nutshell, an eigenvector of a linear transformation $\mathcal{T}\{\cdot\}$, in the context of linear algebra, is a nonzero vector that, when the linear transformation $\mathcal{T}\{\cdot\}$ is applied to it, satisfies the following equation
+\begin{equation}
+\mathcal{T}\{v_1\} = \lambda v_1
+\label{eq:eigvec}
+\end{equation}
+where $\lambda$ is called an eigenvalue.\newline
+Linear transformations can be viewed in general as a mapping between two vector spaces. This mapping preserves the operations of vector addition and scalar multiplication.\newline
+Recall that, if we consider a finite dimensional vector space, a linear transformation can be represented by a matrix, let's say $\mathbf{A}$.
+Thus, finding the eigenvalues $\lambda$ for which Eq.(\ref{eq:eigvec}) has a nonzero solution $\mathbf{v}$ is equivalent to solving the characteristic equation
+\begin{equation*}
+\text{det}(\mathbf{A} - \lambda \mathbf{I} ) = 0,
+\end{equation*}
+after which the corresponding eigenvectors are obtained by solving $(\mathbf{A} - \lambda \mathbf{I})\mathbf{v} = \mathbf{0}$.
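+As a small worked example (our own, purely for illustration), take $\mathbf{A} = \begin{pmatrix} 2 & 1 \\ 1 & 2 \end{pmatrix}$: then $\text{det}(\mathbf{A} - \lambda \mathbf{I}) = (2-\lambda)^2 - 1 = 0$ gives the eigenvalues $\lambda_1 = 1$ and $\lambda_2 = 3$, with eigenvectors $v_1 = (1, -1)^T$ and $v_2 = (1, 1)^T$ respectively.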
+This concept will then be extended to vector spaces of infinite dimensions in the next subsection.
+
+\subsubsection{Function space \label{kugel:ssection:function_space}}
+Up to this point, for each example, we have considered vectors in a geometric context. However, if we consider each axiom defined above, it is very general and not at all specific, as mathematicians like.\newline
+We can see that not only geometric vectors satisfy these axioms. In fact, another kind of mathematical object that satisfies them is the function.\newline
+So we can say that the set of all mathematical functions is a vector space.\newline
+We can at least check whether this statement makes sense.\newline
+Let us consider two functions $f_1(x), f_2(x)$, both defined on $\mathbb{R}$; then $f(x) = f_1(x) + f_2(x)$ is still a function defined on $\mathbb{R}$.\newline
+The same is true if we multiply $f(x)$ by a constant: we remain in the space of functions defined on $\mathbb{R}$.\newline
+With these two statements we have verified that the operations of addition and scalar multiplication still have the closure property. The additive, multiplicative and distributive axioms can also be verified. However, we will not do that here.\newline
+Therefore, the power of linear algebra allows us to consider functions as vectors. It follows that all the concepts defined earlier in \ref{kugel:ssection:vector_space} can be extended to functions.\newline
+We can then have a set of basis functions, we can project functions into subspaces, have the concept of orthogonality, etc.\newline
+The most famous application of this generalization of vector spaces into function spaces is probably \emph{Fourier} (at least for engineers).\newline
+What is done with \emph{Fourier} is to take a function, defined in a function space with specific basis functions and project it to another basis, where the basis functions are sines and cosines. Fourier is thus a simple change of basis.
+
+\subsubsection{Eigenfunction \label{kugel:ssection:eigenfuntion}}
+As in the case of vector spaces we have the possibility of defining linear operators.\newline
+Suppose we have two vector spaces $\mathcal{A}$ and $\mathcal{B}$ (complex or real). A mapping between $\mathcal{A}$ and $\mathcal{B}$, defined as $\mathcal{T}\{\cdot\}$, is called linear if
+\begin{itemize}
+\item it is homogeneous, i.e:
+\begin{equation*}
+ \mathcal{T}\{\lambda x\} = \lambda \mathcal{T}\{x\},
+\end{equation*}
+\item it is additive, i.e:
+\begin{equation*}
+ \mathcal{T}\{x+y\} = \mathcal{T}\{x\}+\mathcal{T}\{y\}.
+\end{equation*}
+\end{itemize}
+In the case of finite-dimensional vector spaces, as written earlier, we can consider these linear transformations as matrices; in fact we can map, for example, a vector space $\mathbb{R}^n$ onto $\mathbb{R}^m$ using a matrix of dimension $m \times n$.\newline
+However, we can define operators for vector spaces of infinite dimensions (in this case, function spaces) too.\newline
+For example, the derivative is an operator that maps functions from $C^1$ to $C^0$, where $C^1$ is the set of all once continuously differentiable functions and $C^0$ denotes the set of continuous real functions.\newline
+In the case of operators defined in finite-dimensional vector spaces, we can compute eigenvectors. In function spaces we have mathematical objects with the same properties, we will refer to them as \emph{eigenfunctions}.\newline
+An eigenfunction of an operator $\mathcal{T}\{\cdot\}$, analogous to the eigenvector, is a function that satisfies the following equation:
+\begin{equation*}
+\mathcal{T}\{f(x)\} = \lambda f(x).
+\end{equation*}
+A couple of examples are
+\begin{itemize}
+\item For the differential operator $\dfrac{d}{dx}\{\cdot\}$, the function $e^{ax}$ is an eigenfunction.
+\item For the Fourier operator $\mathcal{F}\{\cdot\}$, the \emph{Gaussian function} $e^{-\frac{x^2}{2}}$ is an eigenfunction.
+\item $\hdots$
+\end{itemize}
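+As a quick check of the first item: $\frac{d}{dx} e^{ax} = a e^{ax}$, so $e^{ax}$ is indeed an eigenfunction of the differential operator, with eigenvalue $\lambda = a$.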
+Another example of a linear operator is the \emph{Laplace operator} $\nabla^2$, which is very important in engineering and mathematics. Its eigenfunctions will not be discussed in this subsection because the next section is devoted entirely to them.
\ No newline at end of file
diff --git a/buch/papers/kugel/spherical-harmonics.tex b/buch/papers/kugel/spherical-harmonics.tex
index bff91ef..9349b61 100644
--- a/buch/papers/kugel/spherical-harmonics.tex
+++ b/buch/papers/kugel/spherical-harmonics.tex
@@ -111,7 +111,10 @@ that satisfy the equation
\surflaplacian f = -\lambda f.
\end{equation}
Perhaps it may not be obvious at first glance, but we are in fact dealing with a
-partial differential equation (PDE) \kugeltodo{Boundary conditions?}. If we
+partial differential equation (PDE)\footnote{
+	Considering the fact that we are dealing with a PDE,
+	you may be wondering what the boundary conditions are. Well, since this eigenvalue problem is posed on
+	the spherical surface (the boundary of a sphere), the boundary in this case is empty, i.e.\ no boundary condition has to be considered.}. If we
unpack the notation of the operator $\nabla^2_{\partial S}$ according to
definition
\ref{kugel:def:surface-laplacian}, we get:
@@ -283,7 +286,7 @@ representation} which are
\end{equation*}
respectively, both of which we will not prove (see chapter 3 of
\cite{bell_special_2004} for a proof). Now that we have a solution for the
-Legendre equation, we can make use of the following lemma patch the solutions
+Legendre equation, we can make use of the following lemma to patch the solutions
such that they also become solutions of the associated Legendre equation
\eqref{kugel:eqn:associated-legendre}.
@@ -313,24 +316,19 @@ obtain the \emph{associated Legendre functions}.
The functions
\begin{equation}
P^m_n (z) = (1-z^2)^{\frac{m}{2}}\frac{d^{m}}{dz^{m}} P_n(z)
- = \frac{1}{2^n n!}(1-z^2)^{\frac{m}{2}}\frac{d^{m+n}}{dz^{m+n}}(1-z^2)^n
+		= \frac{1}{2^n n!}(1-z^2)^{\frac{m}{2}}\frac{d^{m+n}}{dz^{m+n}}(1-z^2)^n, \quad |m| \leq n
\end{equation}
are known as Ferrers or associated Legendre functions.
\end{definition}
+The constraint $|m| \leq n$ can be justified by considering eq.\eqref{kugel:eq:associated_leg_func}, where we differentiate $m+n$ times. For this derivative to be well defined, its order must be non-negative. Furthermore, it can be seen that this derivative is applied to a polynomial of degree $2n$. As is known from Calculus 1, if you differentiate a polynomial of degree $2n$ more than $2n$ times, you get zero, which would be a trivial solution. This is the power of zero: it is almost always a (boring) solution.
-\kugeltodo{Discuss $|m| \leq n$.}
-
-\if 0
-The constraint $|m|<n$, can be justified by considering Eq.\eqref{kugel:eq:associated_leg_func}, in which the derivative of degree $m+n$ is present. A derivative to be well defined must have an order that is greater than zero. Furthermore, it can be seen that this derivative is applied on a polynomial of degree $2n$. As is known from Calculus 1, if you derive a polynomial of degree $2n$ more than $2n$ times, you get zero, which is a trivial solution in which we are not interested.\newline
We can thus summarize these two conditions by writing:
\begin{equation*}
\begin{rcases}
m+n \leq 2n &\implies m \leq n \\
m+n \geq 0 &\implies m \geq -n
- \end{rcases} |m| \leq n.
+ \end{rcases} \; |m| \leq n.
\end{equation*}
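+As a short worked example of the definition above, take $n = 1$ and $m = 1$: since $P_1(z) = z$, we get
+\begin{equation*}
+	P^1_1(z) = (1 - z^2)^{\frac{1}{2}} \frac{d}{dz} P_1(z) = \sqrt{1 - z^2},
+\end{equation*}
+while for the excluded choice $m = 2 > n$ the same formula gives the trivial result $P^2_1(z) = (1 - z^2) \frac{d^2}{dz^2} z = 0$, illustrating the constraint above.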
-The set of functions in Eq.\eqref{kugel:eq:sph_harm_0} is named \emph{Spherical Harmonics}, which are the eigenfunctions of the Laplace operator on the \emph{spherical surface domain}, which is exactly what we were looking for at the beginning of this section.
-\fi
\subsection{Spherical Harmonics}
@@ -339,13 +337,13 @@ section \ref{kugel:sec:construction:eigenvalue}. We had left off in the middle
of the separation, were we had used the Ansatz $f(\vartheta, \varphi) =
\Theta(\vartheta) \Phi(\varphi)$ to find that $\Phi(\varphi) = e^{im\varphi}$,
and we were solving for $\Theta(\vartheta)$. As you may recall, previously we
-performed the substitution $z = \cos \vartheta$. Now we can finally to bring back the
+performed the substitution $z = \cos \vartheta$. Now we can finally bring back the
solution to the associated Legendre equation $P^m_n(z)$ into the $\vartheta$
domain and combine it with $\Phi(\varphi)$ to get the full result:
\begin{equation*}
f(\vartheta, \varphi)
= \Theta(\vartheta)\Phi(\varphi)
- = P^m_n (\cos \vartheta) e^{im\varphi}.
+    = P^m_n (\cos \vartheta) e^{im\varphi}, \quad |m| \leq n.
\end{equation*}
This family of functions, which recall are the solutions of the eigenvalue
problem of the surface spherical Laplacian, are the long anticipated
@@ -356,9 +354,9 @@ $Y^m_n(\vartheta, \varphi)$.
\label{kugel:def:spherical-harmonics}
The functions
\begin{equation*}
- Y^m_n (\vartheta, \varphi) = P^m_n(\cos \vartheta) e^{im\varphi},
+    Y^m_n (\vartheta, \varphi) = P^m_n(\cos \vartheta) e^{im\varphi}, \quad |m| \leq n
\end{equation*}
- where $m, n \in \mathbb{Z}$ and $|m| < n$ are called (unnormalized) spherical
+ where $m, n \in \mathbb{Z}$ are called (unnormalized) spherical
harmonics.
\end{definition}
@@ -507,7 +505,7 @@ product:
\begin{definition}[Inner product in $S^2$]
\label{kugel:def:inner-product-s2}
- For 2 complex valued functions $f(\vartheta, \varphi)$ and $g(\vartheta,
+ For two complex valued functions $f(\vartheta, \varphi)$ and $g(\vartheta,
\varphi)$ on the surface of the sphere the inner product is defined to be
\begin{equation*}
\langle f, g \rangle
@@ -520,36 +518,35 @@ product:
\begin{theorem} For the (unnormalized) spherical harmonics
\label{kugel:thm:spherical-harmonics-ortho}
- \begin{align*}
+ \begin{align}
\langle Y^m_n, Y^{m'}_{n'} \rangle
&= \int_{0}^\pi \int_0^{2\pi}
Y^m_n(\vartheta, \varphi) \overline{Y^{m'}_{n'}(\vartheta, \varphi)}
\sin \vartheta \, d\varphi \, d\vartheta
- \\
+ \label{kugel:eq:spherical-harmonics-inner-prod} \\
&= \frac{4\pi}{2n + 1} \frac{(m + n)!}{(n - m)!} \delta_{nn'} \delta_{mm'}
= \begin{cases}
\frac{4\pi}{2n + 1} \frac{(m + n)!}{(n - m)!}
- & \text{if } n = n' \text{ and } m = m', \\
+ & \text{if } n = n' \text{ and } m = m', \nonumber \\
0 & \text{otherwise}.
\end{cases}
- \end{align*}
+ \end{align}
\end{theorem}
\begin{proof}
We will begin by doing a bit of algebraic maipulaiton:
\begin{align*}
\int_{0}^\pi \int_0^{2\pi}
- Y^m_n(\vartheta, \varphi) \overline{Y^{m'}_{n'}(\vartheta, \varphi)}
+ Y^m_n(\vartheta, \varphi) \overline{Y^{m'}_{n'}(\vartheta, \varphi)}
\sin \vartheta \, d\varphi \, d\vartheta
&= \int_{0}^\pi \int_0^{2\pi}
e^{im\varphi} P^m_n(\cos \vartheta)
e^{-im'\varphi} P^{m'}_{n'}(\cos \vartheta)
- \, d\varphi \sin \vartheta \, d\vartheta
+ \, d\varphi \sin \vartheta \, d\vartheta
\\
&= \int_{0}^\pi
- P^m_n(\cos \vartheta) P^{m'}_{n'}(\cos \vartheta)
+ P^m_n(\cos \vartheta) P^{m'}_{n'}(\cos \vartheta) \sin \vartheta \, d\vartheta
\int_0^{2\pi} e^{i(m - m')\varphi}
- \, d\varphi \sin \vartheta \, d\vartheta
- .
+ \, d\varphi.
\end{align*}
First, notice that the associated Legendre polynomials are assumed to be real,
and are thus unaffected by the complex conjugation. Then, we can see that when
@@ -564,12 +561,15 @@ product:
\end{equation*}
where in the second step we performed the substitution $z = \cos\vartheta$;
$d\vartheta = \frac{d\vartheta}{dz} dz= - dz / \sin \vartheta$, and then we
- used lemma \ref{kugel:thm:associated-legendre-ortho}. We are allowed to use
- the lemma because $m = m'$.
-
+ used lemma \ref{kugel:thm:associated-legendre-ortho}.
+ We are allowed to use
+  the lemma because $m = m'$. After the substitution we just mentioned, we can write eq.\eqref{kugel:eq:spherical-harmonics-inner-prod} in the form
+ \begin{equation*}
+ \langle Y^m_n, Y^{m'}_{n'} \rangle_{\partial S} = \langle P^m_n, P^{m'}_{n'} \rangle_z \; \langle e^{im\varphi}, e^{-im'\varphi} \rangle_\varphi.
+ \end{equation*}
Now we just need look at the case when $m \neq m'$. Fortunately this is
easier: the inner integral is $\int_0^{2\pi} e^{i(m - m')\varphi} d\varphi$,
- or in other words we are integrating a complex exponetial over the entire
+ or in other words we are integrating a complex exponential over the entire
period, which always results in zero. Thus, we do not need to do anything and
the proof is complete.
\end{proof}
@@ -619,11 +619,9 @@ regrettably sometimes even ourselves, would write instead:
reader.
\end{proof}
-Lemma \ref{kugel:thm:legendre-poly-ortho} has a very similar
-proof, while the theorem \ref{kugel:thm:spherical-harmonics-ortho} for the
-spherical harmonics is proved by the following argument. The spherical harmonics
-are the solutions to the eigenvalue problem $\surflaplacian f = -\lambda f$,
-which as discussed in the previous section is solved using separation. So to
+Lemma \ref{kugel:thm:legendre-poly-ortho} has a very similar proof, while the theorem \ref{kugel:thm:spherical-harmonics-ortho} for the spherical harmonics is proved by the following argument.
+The spherical harmonics are the solutions to the eigenvalue problem $\surflaplacian f = -\lambda f$,
+which as discussed in the previous section is solved using the separation Ansatz. So to
prove their orthogonality using the Sturm-Liouville theory we argue that
\begin{equation*}
\surflaplacian = L_\vartheta L_\varphi \iff
@@ -687,26 +685,196 @@ harmonics, so from now on, unless specified otherwise when we say spherical
harmonics or write $Y^m_n$, we mean the orthonormal spherical harmonics of
definition \ref{kugel:def:spherical-harmonics-orthonormal}.
-\subsection{Recurrence Relations}
+\subsection{Recurrence Relations}
+The idea of this subsection is to first introduce some recurrence relations for the associated Legendre functions, defined in eq.\eqref{kugel:def:ferrers-functions}. Subsequently we will extend them, in order to derive recurrence formulas for the spherical harmonics as well.
+\subsubsection{Associated Legendre Functions}
+To start this journey, we can first write the following equations, which relate associated Legendre functions of different indices $m$ and $n$ recursively:
+\begin{subequations}
+ \begin{align}
+		P^m_n(z) &= \dfrac{1}{(2n+1)z} \left[ (m+n) P^m_{n-1}(z) + (n-m+1) P^m_{n+1}(z) \right] \label{kugel:eq:rec-leg-1} \\
+ P^m_n(z) &= \dfrac{\sqrt{1-z^2}}{2mz} \left[ P^{m+1}_n(z) + [n(n+1)-m(m-1)] P^{m-1}_n(z) \right] \label{kugel:eq:rec-leg-2} \\
+ P^m_n(z) &= \dfrac{1}{(2n+1)\sqrt{1-z^2}} \left[ P^{m+1}_{n+1}(z) - P^{m+1}_{n-1}(z) \right] \label{kugel:eq:rec-leg-3} \\
+ P^m_n(z) &= \dfrac{1}{(2n+1)\sqrt{1-z^2}} \left[ (n+m)(n+m-1)P^{m-1}_{n-1}(z) - (n-m+1)(n-m+2)P^{m-1}_{n+1}(z) \right] \label{kugel:eq:rec-leg-4}
+ \end{align}
+\end{subequations}
+Much of the effort will go into proving these equalities. Then, in the second part, where we derive the recursion equations for $Y^m_n(\vartheta,\varphi)$, we will basically reuse the results presented above.
+
+Maybe it is worth mentioning at least one use case for these relations: they are widely used in some software implementations (including lighting computations in computer graphics, antenna modelling software, 3-D modelling in medical applications, etc.),
+as they lead to better numerical accuracy and a computational cost lower by a factor of six \cite{usecase_recursion_paper}.
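+As a quick sanity check of eq.\eqref{kugel:eq:rec-leg-1} (ours, not a proof), take $m = 0$ and $n = 1$, so that $P^m_n = P_n$: the right hand side gives
+\begin{equation*}
+	\frac{1}{3z} \left[ P_0(z) + 2 P_2(z) \right]
+	= \frac{1}{3z} \left[ 1 + 3z^2 - 1 \right] = z = P_1(z),
+\end{equation*}
+as expected.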
+\begin{enumerate}[(i)]
+ \item
+ \begin{proof}
+ This is the relation that links the associated Legendre functions with the same $m$ index but different $n$. Using \ref{} \kugeltodo{search the general equation of recursion for orthogonal polynomials (is somewhere in the book)}, we have
+ \begin{equation*}
+			(n+1)P_{n+1}(z)-(2n+1)zP_n(z)+nP_{n-1}(z)=0,
+ \end{equation*}
+ that can be differentiated $m$ times, obtaining
+ \begin{equation}\label{kugel:eq:rec_1}
+			(n+1)\frac{d^mP_{n+1}}{dz^m}-(2n+1) \left[z \frac{d^m P_n}{dz^m}+ m\frac{d^{m-1}P_{n}}{dz^{m-1}} \right] + n\frac{d^m P_{n-1}}{dz^m}=0.
+ \end{equation}
+ To continue this derivation, we need the following relation:
+ \begin{equation}\label{kugel:eq:rec_2}
+ \frac{dP_{n+1}}{dz} - \frac{dP_{n-1}}{dz} = (2n+1)P_n.
+ \end{equation}
+			The latter will not be derived, because it suffices to use the definition of the Legendre polynomials $P_n(z)$ to check it.
+
+			We can now differentiate the just presented eq.\eqref{kugel:eq:rec_2} $m-1$ times, which will become
+			\begin{equation}\label{kugel:eq:rec_3}
+				\frac{d^mP_{n+1}}{dz^m} - \frac{d^mP_{n-1}}{dz^m} = (2n+1)\frac{d^{m-1}P_n}{dz^{m-1}}.
+			\end{equation}
+			Then, using eq.\eqref{kugel:eq:rec_3} in eq.\eqref{kugel:eq:rec_1}, we will have
+			\begin{equation}\label{kugel:eq:rec_4}
+				(n+1)\frac{d^mP_{n+1}}{dz^m}- (2n+1)z\frac{d^mP_{n}}{dz^m} -m\left[\frac{d^m P_{n+1}}{dz^m}- \frac{d^{m}P_{n-1}}{dz^m}\right] + n\frac{d^m P_{n-1}}{dz^m}=0.
+			\end{equation}
+			Finally, multiplying both sides by $(1-z^2)^{\frac{m}{2}}$ and simplifying the expression, we can rewrite eq.\eqref{kugel:eq:rec_4} in terms of $P^m_n(z)$, namely
+			\begin{equation*}
+				(n+1-m)P^m_{n+1}(z)-(2n+1)zP^m_n(z)+(m+n)P^m_{n-1}(z)=0,
+			\end{equation*}
+			that, rearranged, will be
+			\begin{equation*}
+				(2n+1) z P^m_n(z)= (m+n) P^m_{n-1}(z) + (n-m+1) P^m_{n+1}(z).
+			\end{equation*}
+ \end{proof}
+
+ \item
+ \begin{proof}
+		This relation, unlike the previous one, links three expressions with the same $n$ index but different $m$.
+
+		In the proof of Lemma \ref{kugel:lemma:sol_associated_leg_eq}, at some point we ran into the expression
+		\begin{equation*}
+			(1-z^2)\frac{d^{m+2}P_n}{dz^{m+2}} - 2(m+1)z \frac{d^{m+1}P_n}{dz^{m+1}} + [n(n+1)-m(m+1)]\frac{d^mP_n}{dz^m} = 0,
+		\end{equation*}
+		that, if multiplied by $(1-z^2)^{\frac{m}{2}}$, will be
+		\begin{equation*}
+			(1-z^2)^{\frac{m}{2}+1}\frac{d^{m+2}P_n}{dz^{m+2}} - 2(m+1)z (1-z^2)^{\frac{m}{2}}\frac{d^{m+1}P_n}{dz^{m+1}} + [n(n+1)-m(m+1)](1-z^2)^{\frac{m}{2}}\frac{d^mP_n}{dz^m} = 0.
+		\end{equation*}
+		Therefore, as before, expressing it in terms of $P^m_n(z)$:
+		\begin{equation*}
+			P^{m+2}_n(z) - \frac{2(m+1)z}{\sqrt{1-z^2}}P^{m+1}_n(z) + [n(n+1)-m(m+1)]P^m_n(z)=0.
+		\end{equation*}
+		Further, we can adjust the indices and terms, obtaining
+		\begin{equation*}
+			\frac{2mz}{\sqrt{1-z^2}} P^m_n(z) = P^{m+1}_n(z) + [n(n+1)-m(m-1)] P^{m-1}_n(z).
+		\end{equation*}
+
+ \end{proof}
+
+ \item
+ \begin{proof}
+		To derive this expression, we can multiply eq.\eqref{kugel:eq:rec_3} by $(1-z^2)^{\frac{m}{2}}$ and, as always, express it in terms of $P^m_n(z)$:
+		\begin{equation*}
+			P^m_{n+1}(z) - P^m_{n-1}(z) = (2n+1)\sqrt{1-z^2}P^{m-1}_n(z).
+		\end{equation*}
+		After that we can divide by $2n+1$, resulting in
+		\begin{equation}\label{kugel:eq:helper}
+			\frac{1}{2n+1}[P^m_{n+1}(z) - P^m_{n-1}(z)] = \sqrt{1-z^2}P^{m-1}_n(z).
+		\end{equation}
+		To conclude, we arrange the indices differently:
+		\begin{equation*}
+			\sqrt{1-z^2}P^{m}_n(z)=\frac{1}{2n+1}[P^{m+1}_{n+1}(z) - P^{m+1}_{n-1}(z)].
+		\end{equation*}
+ \end{proof}
+
+ \item
+ \begin{proof}
+		For this proof we can rely on eq.\eqref{kugel:eq:rec-leg-1}, and therefore rewrite eq.\eqref{kugel:eq:rec-leg-2} as
+		\begin{equation*}
+			\frac{2m}{(2n+1)\sqrt{1-z^2}} \left[ (m+n)P^m_{n-1}(z) + (n-m+1)P^m_{n+1}(z) \right] = P^{m+1}_n(z) + [ n(n+1)-m(m-1) ]P^{m-1}_n(z).
+		\end{equation*}
+		Rewriting then $P^{m-1}_n(z)$ using eq.\eqref{kugel:eq:helper}, we will have
+		\begin{align*}
+			\frac{2m}{(2n+1)\sqrt{1-z^2}} &\left[ (m+n)P^m_{n-1}(z) + (n-m+1)P^m_{n+1}(z) \right] = P^{m+1}_n(z) \\
+			&+ \frac{n(n+1)-m(m-1)}{(2n+1)\sqrt{1-z^2}} \left[ P^m_{n+1}(z)-P^m_{n-1}(z) \right].
+		\end{align*}
+		After some algebraic rearrangements, it is easy to show that the last equation is equivalent to
+		\begin{equation*}
+			\sqrt{1-z^2} P^m_n(z) = \dfrac{1}{2n+1} \left[ (n+m)(n+m-1)P^{m-1}_{n-1}(z) - (n-m+1)(n-m+2)P^{m-1}_{n+1}(z) \right].
+		\end{equation*}
+ \end{proof}
+
+\end{enumerate}
+
+\subsubsection{Spherical Harmonics}
+The goal of this part of the subsection is to apply the recurrence relations of the $P^m_n(z)$ functions to the spherical harmonics.
+With some small adjustments we will be able to obtain recursion equations for them too. As previously written, most of the work is already done. Now it is only a matter of minor mathematical operations/rearrangements.
+
+We can start by listing all of them:
+\begin{subequations}
+ \begin{align}
+		Y^m_n(\vartheta, \varphi) &= \dfrac{1}{(2n+1)\cos \vartheta} \left[ (m+n)Y^m_{n-1}(\vartheta, \varphi) + (n-m+1)Y^m_{n+1}(\vartheta, \varphi) \right] \label{kugel:eq:rec-sph_harm-1} \\
+ Y^m_n(\vartheta, \varphi) &= \dfrac{\tan \vartheta}{2m}\left[ Y^{m+1}_n(\vartheta, \varphi)e^{-i\varphi} + [n(n+1)-m(m-1)]Y^{m-1}_n(\vartheta, \varphi)e^{i\varphi} \right] \label{kugel:eq:rec-sph_harm-2} \\
+ Y^m_n(\vartheta, \varphi) &= \dfrac{e^{-i\varphi}}{ (2n+1)\sin \vartheta } \left[ Y^{m+1}_{n+1}(\vartheta, \varphi) - Y^{m+1}_{n-1}(\vartheta, \varphi) \right] \label{kugel:eq:rec-sph_harm-3} \\
+ Y^m_n(\vartheta, \varphi) &= \dfrac{e^{i\varphi}}{(2n+1)\sin \vartheta} \left[ (n+m)(n+m-1)Y^{m-1}_{n-1}(\vartheta, \varphi) - (n-m+1)(n-m+2)Y^{m-1}_{n+1}(\vartheta, \varphi) \right] \label{kugel:eq:rec-sph_harm-4}
+ \end{align}
+\end{subequations}
-\section{Series Expansions in $L^2(S^2)$}
+\begin{enumerate}[(i)]
+ \item
+ \begin{proof}
+		We can multiply both sides of eq.\eqref{kugel:eq:rec-leg-1} by $e^{im \varphi}$ and perform the substitution $z=\cos \vartheta$. After a few simple algebraic steps, we obtain the relation we are looking for.
+ \end{proof}
+ \item
+ \begin{proof}
+ In this proof, as before, we can perform the substitution $z=\cos \vartheta$, and notice that $\sqrt{1-z^2}=\sin \vartheta$, hence, the relation in eq.\eqref{kugel:eq:rec-leg-2} will be
+ \begin{equation*}
+			\frac{2m \cos \vartheta}{\sin \vartheta} P^m_n(\cos \vartheta) = P^{m+1}_n(\cos \vartheta) + [n(n+1)-m(m-1)]P^{m-1}_n(\cos \vartheta).
+ \end{equation*}
+ The latter, multiplied by $e^{im\varphi}$, becomes
+ \begin{align*}
+			\frac{2m \cos \vartheta}{\sin \vartheta} P^m_n(\cos \vartheta)e^{im\varphi} &= P^{m+1}_n(\cos \vartheta)e^{im\varphi} + [n(n+1)-m(m-1)]P^{m-1}_n(\cos \vartheta)e^{im\varphi} \\
+ &= P^{m+1}_n(\cos \vartheta)e^{i(m+1)\varphi}e^{-i\varphi} + [n(n+1)-m(m-1)]P^{m-1}_n (\cos \vartheta)e^{i(m-1)\varphi}e^{i\varphi} \\
+ &= Y^{m+1}_n(\vartheta, \varphi)e^{-i\varphi} + [n(n+1)-m(m-1)]Y^{m-1}_n(\vartheta, \varphi)e^{i\varphi}.
+ \end{align*}
+			Finally, after some ``cleaning'', we get
+			\begin{equation*}
+				Y^m_n(\vartheta, \varphi) = \frac{\tan \vartheta}{2m} \left[ Y^{m+1}_n(\vartheta, \varphi)e^{-i\varphi} + [n(n+1)-m(m-1)]Y^{m-1}_n(\vartheta, \varphi)e^{i\varphi} \right].
+			\end{equation*}
+ \end{proof}
+ \item
+ \begin{proof}
+ Now we can consider eq.\eqref{kugel:eq:rec-leg-3}, and multiply it by $e^{im\varphi}$. After the usual substitution $z=\cos \vartheta$, we have
+ \begin{align*}
+ \sin \vartheta P^m_n(\cos \vartheta)e^{im\varphi} &= \dfrac{e^{im\varphi}}{2n+1}\left[ P^{m+1}_{n+1}(\cos \vartheta) - P^{m+1}_{n-1}(\cos \vartheta)\right] \\
+ &= \dfrac{e^{-i\varphi}}{2n+1}\left[ P^{m+1}_{n+1}(\cos \vartheta)e^{i(m+1)\varphi} - P^{m+1}_{n-1}(\cos \vartheta)e^{i(m+1)\varphi}\right].
+ \end{align*}
+ A few manipulations later, we will obtain
+ \begin{equation*}
+ Y^m_n(\vartheta, \varphi) = \frac{e^{-i\varphi}}{(2n+1)\sin \vartheta} \left[ Y^{m+1}_{n+1}(\vartheta, \varphi)-Y^{m+1}_{n-1}(\vartheta, \varphi) \right].
+ \end{equation*}
+ \end{proof}
+ \item
+ \begin{proof}
+		This proof is very similar to the previous one. As always, we perform the substitution $z = \cos \vartheta$ and multiply both sides by $e^{im\varphi}$, writing it on the right-hand side in the form $e^{im\varphi}=e^{i(m-1)\varphi}e^{i\varphi}$. Then it is only a question of recalling the definition of $Y^m_n(\vartheta, \varphi)$.
+ \end{proof}
+\end{enumerate}
-We have now reached a point were we have all of the tools that are necessary to
-build something truly amazing: a general series expansion formula for functions
-on the surface of the sphere. Using the jargon: we will now see that the
-spherical harmonics together with the inner product of definition
-\ref{kugel:def:inner-product-s2}
+\section{Series Expansions in $L^2(S^2)$}
+We have now reached a point where we have all the tools that are necessary to build something truly amazing: a general series expansion formula for
+functions on the surface of the sphere.
+Before starting, we want to recall the definition of the inner product on the spherical surface from definition \ref{kugel:def:inner-product-s2}:
\begin{equation*}
\langle f, g \rangle
= \int_{0}^\pi \int_0^{2\pi}
f(\vartheta, \varphi) \overline{g(\vartheta, \varphi)}
- \sin \vartheta \, d\varphi \, d\vartheta
+ \sin \vartheta \, d\varphi \, d\vartheta.
\end{equation*}
-form a Hilbert space over the space of complex valued $L^2$ functions $S^2 \to
-\mathbb{C}$. We will see later that this fact is very consequential and is
-extremely useful for many types of applications. If the jargon was too much, no
-need to worry, we will now go back to normal words and explain it again in more
-detail.
+To be a bit technical, we can say that the set of spherical harmonic functions, together with the inner product just shown,
+forms something that we call a Hilbert space\footnote{For more details about Hilbert spaces you can take a look at section \ref{kugel:sec:preliminaries}}.
+This function space is defined over the space of ``well-behaved''\footnote{The definition of ``well-behaved'' is pretty ambiguous, even for mathematicians.
+It basically depends on the context.
+You can summarize it by saying: functions for which the theory we are considering (the Fourier theorem) is always true. In our case we can say that well-behaved functions
+are functions that satisfy some convergence constraints (pointwise, uniform, absolute, ...) that we do not want to consider further anyway.} functions.
+We can say that the theory we are about to show applies to all square integrable complex valued functions,
+or to be more concise: complex valued $L^2$ functions $S^2 \to \mathbb{C}$.
+
+All this jargon is not really necessary for the practical applications of us mere mortals, namely physicists and engineers.
+From now on we will therefore assume that the functions we are dealing with fulfill these ``minor'' conditions.
+
+Insiders might turn up their noses, but we do not want to dwell too much on the concepts of Hilbert spaces, convergence, metrics, well-behaved functions, etc.
+We simply think that this rigor could come at the expense of the possibility to appreciate the beauty and elegance of this theory.
+Furthermore, the risk of writing 300+ pages to prove that $1+1=2$ \cite{principia-mathematica} is just around the corner (we apologize in advance to Mr. Whitehead and Mr. Russell for using their effort with a negative connotation).
+
+Despite all this, if you desire definitions that are a bit more rigorous (as rigorous as two engineers can be), you can take a look at chapter \ref{}.
\subsection{Spherical Harmonics Series}
@@ -714,11 +882,96 @@ To talk about a \emph{series expansion} we first need a series, so we shall
build one using the spherical harmonics.
\begin{definition}[Spherical harmonic series]
+ \label{kugel:definition:spherical-harmonics-series}
+ \begin{equation}
+ f(\vartheta, \varphi)
+ = \sum_{n=0}^\infty \sum_{m =-n}^n
+		c_{m,n} Y^m_n(\vartheta, \varphi).
+ \end{equation}
+\end{definition}
+
+With this definition we are basically saying that any function defined on the spherical surface can be represented as a linear combination of spherical harmonics.
+Does eq.\eqref{kugel:definition:spherical-harmonics-series} sound familiar? Well, that is perfectly normal, since this is analogous to the classical Fourier theory.
+The latter states that ``any'' $T$-periodic function $f(x)$, on any interval $[x_0-T/2,x_0+T/2]$, can be represented as a linear combination of complex exponentials. More compactly:
+\begin{equation*}
+	f(x) = \sum_{n \in \mathbb{Z}} c_n e^{i n \omega_0 x}, \quad \omega_0=\frac{2\pi}{T}.
+\end{equation*}
+In the case of definition \ref{kugel:definition:spherical-harmonics-series} the kernels, instead of $e^{in\omega_0x}$, have become $Y^m_n$. In addition, the sum now runs over the two indices $m$ and $n$.
+
+\begin{lemma}[Spherical harmonic coefficients]
+ \label{kugel:lemma:spherical-harmonic-coefficient}
+ \begin{align*}
+ c_{m,n}
+ &= \langle f, Y^m_n \rangle_{\partial S} \\
+ &= \int_0^\pi \int_0^{2\pi} f(\vartheta,\varphi) \overline{Y^m_n(\vartheta,\varphi)} \sin\vartheta \,d\varphi\,d\vartheta
+ \end{align*}
+\end{lemma}
+\begin{proof}
+	To develop this proof we will take advantage of the orthogonality property of the spherical harmonics. We can start and finish by taking the inner product with $Y^m_n$ on both sides of eq.\eqref{kugel:definition:spherical-harmonics-series}:
+ \begin{align*}
+ \langle f, Y^{m}_{n} \rangle_{\partial S}
+		&= \left\langle \sum_{n'=0}^\infty \sum_{m' =-n'}^{n'}
+		c_{m',n'} Y^{m'}_{n'}(\vartheta, \varphi), Y^{m}_{n} \right\rangle_{\partial S} \\
+ &= \sum_{n'=0}^\infty \sum_{m' =-n'}^{n'}
+ \langle c_{m',n'} Y^{m'}_{n'}, Y^{m}_{n} \rangle_{\partial S} \\
+ &= \sum_{n'=0}^\infty \sum_{m' =-n'}^{n'} c_{m',n'} \langle Y^{m'}_{n'}, Y^{m}_{n} \rangle_{\partial S} = c_{m,n}
+ \end{align*}
+ We omitted the $\vartheta, \varphi$ dependency to avoid overloading the notation.
+\end{proof}
+Thanks to Lemma \ref{kugel:lemma:spherical-harmonic-coefficient} we can now calculate the series expansion defined in \ref{kugel:definition:spherical-harmonics-series}.
+
+It can be shown that, for the famous ``well-behaved'' functions $f(\vartheta, \varphi)$ mentioned before, theorem \ref{fourier-theorem-spherical-surface} holds, where $\| \cdot \|_2$ denotes the norm induced by the inner product of definition \ref{kugel:def:inner-product-s2}.
+\begin{theorem}[Fourier Theorem on $\partial S$]
+ \label{fourier-theorem-spherical-surface}
\begin{equation*}
- \hat{f}(\vartheta, \varphi)
- = \sum_{n \in \mathbb{Z}} \sum_{m \in \mathbb{Z}}
- c_{m,n} Y^m_n(\vartheta, \varphi)
+		\lim_{N \to \infty}
+		\left\| f(\vartheta,\varphi) - \sum_{n=0}^N\sum_{m=-n}^n c_{m,n} Y^m_n(\vartheta,\varphi)
+		\right\|_2 = 0.
\end{equation*}
-\end{definition}
+\end{theorem}
+The connection to Theorem \ref{fourier-theorem-1D} is pretty obvious.
+
+\subsection{Spectrum}
+
+\begin{figure}
+ \centering
+ \kugelplaceholderfig{.8\textwidth}{5cm}
+	\caption{\kugeltodo{Rectangular signal and its spectrum.}}
+ \label{kugel:fig:1d-fourier}
+\end{figure}
+
+In the case of the classical one-dimensional Fourier theory, we call \emph{spectrum} the relation between the Fourier coefficients $c_n$ and the multiples
+of the fundamental frequency $2\pi/T$, namely $n 2\pi/T$. In the most general case the $c_n$ are complex numbers, so we divide the concept of spectrum into
+\emph{amplitude spectrum} and \emph{phase spectrum}. In fig.\ref{kugel:fig:1d-fourier} a function $f(x)$ is presented along with its amplitude spectrum.
+
+\begin{figure}
+ \centering
+ \kugelplaceholderfig{.8\textwidth}{7cm}
+	\caption{\kugeltodo{Comparison between an image reconstructed only from the phase and one only from the amplitude.}}
+	\label{kugel:fig:phase-2d-fourier}
+\end{figure}
+
+The thing that is easiest for us humans to visualize and understand is often the amplitude spectrum.
+This is a huge limitation, since for example in image processing it can be shown in a nice way that much more information is contained in the phase part (see fig.\ref{kugel:fig:phase-2d-fourier}).
+
+\begin{figure}
+ \centering
+ \kugelplaceholderfig{.8\textwidth}{9cm}
+ \caption{\kugeltodo{fig that show fourier style reconstruction on sphere (with increasing index)}}
+ \label{kugel:fig:fourier-on-sphere-increasing-index}
+\end{figure}
+
+The same logic can be extended to the spherical harmonic coefficients $c_{m,n}$. In fig.\ref{kugel:fig:fourier-on-sphere-increasing-index} you can see the same concept as in fig.\ref{kugel:fig:1d-fourier}
+but with a spherical function $f(\vartheta, \varphi)$.
+
+\subsection{Energy of a function $f(\vartheta, \varphi)$}
+
+\begin{lemma}[Energy of a spherical function (\emph{Parseval's theorem})]
+ \begin{equation*}
+		\int_0^{2\pi}\int_0^\pi |f(\vartheta, \varphi)|^2 \sin\vartheta \, d\vartheta \, d\varphi = \sum_{n=0}^\infty \sum_{m=-n}^n |c_{m,n}|^2.
+ \end{equation*}
+\end{lemma}
+\begin{proof}
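+	A possible sketch (assuming the orthonormal convention of definition \ref{kugel:def:spherical-harmonics-orthonormal}): the left hand side is $\langle f, f \rangle$, so inserting the series of definition \ref{kugel:definition:spherical-harmonics-series} and using lemma \ref{kugel:lemma:spherical-harmonic-coefficient} together with the linearity and conjugate symmetry of the inner product gives
+	\begin{equation*}
+		\langle f, f \rangle
+		= \left\langle \sum_{n=0}^\infty \sum_{m=-n}^n c_{m,n} Y^m_n, f \right\rangle
+		= \sum_{n=0}^\infty \sum_{m=-n}^n c_{m,n} \overline{\langle f, Y^m_n \rangle}
+		= \sum_{n=0}^\infty \sum_{m=-n}^n |c_{m,n}|^2.
+	\end{equation*}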
+\end{proof}
-\subsection{Fourier on $S^2$}
+\subsection{Visualization}
\ No newline at end of file