% !TeX program = xelatex
% !TeX encoding = utf8
% !TeX root = DigSig1.tex
% vim: set ts=2 sw=2 et:

\documentclass[margin=small]{tex/hsrzf}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Packages

%% Font configuration
\usepackage{fontspec}
\usepackage{fouriernc}

%% Own packages
% \usepackage{tex/hsrstud}
\usepackage{tex/docmacros}

%% Mathematics
\usepackage{amssymb}

%% Frames
\usepackage{framed}

%% Language configuration
\usepackage{polyglossia}
\setdefaultlanguage{english}

%% License configuration
\usepackage[
    type={CC},
    modifier={by-nc-sa},
    version={4.0},
]{doclicense}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Metadata

\course{Electrical Engineering}
\module{DigSig1}
\semester{Fall Semester 2021}

\authoremail{naoki.pross@ost.ch}
\author{\textsl{Naoki Pross} -- \texttt{\theauthoremail}}

\title{Digital Signal Processing Lecture Notes}
\date{\thesemester}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Document

\begin{document}

\pagenumbering{roman}
\maketitle
\tableofcontents

\section*{License}
\doclicenseThis

\twocolumn
\setcounter{page}{1}
\pagenumbering{arabic}


\section{Probability and stochastics}

\subsection{Random variables}

A \emph{random variable} (RV) is a function \(x : \Omega \to \mathbb{R}\). The \emph{distribution function} of a RV is a function \(F_x : \mathbb{R} \to [0,1]\) that is always monotonically increasing and given by
\[
  F_x(\alpha) = \Pr{x \leq \alpha}.
\]
The \emph{probability density function} (PDF) of a RV is
\[
  f_x(\alpha) = \frac{dF_x}{d\alpha}.
\]
The \emph{expectation} of a RV is
\[
  \E{x} = \int_\mathbb{R} \alpha f_x(\alpha) \,d\alpha,
\]
and in the case of a discrete RV
\[
  \E{x} = \sum_k \alpha_k \Pr{x = \alpha_k}.
\]
In general it holds that
\[
  \E{g(x)} = \int_\mathbb{R} g(\alpha) f_x(\alpha) \,d\alpha,
\]
for example
\begin{align*}
  \E{x^2} &= \int_\mathbb{R} \alpha^2 f_x(\alpha) \,d\alpha \\
  \E{|x|} &= \int_\mathbb{R} |\alpha| f_x(\alpha) \,d\alpha \\
    &= \int_0^\infty \alpha \left[ f_x(\alpha) + f_x(-\alpha) \right] \,d\alpha
\end{align*}
The \emph{variance} of a RV is
\[
  \Var{x} = \sigma^2 = \E{(x - \E{x})^2} = \E{x^2} - \E{x}^2,
\]
where \(\sigma\) is called the \emph{standard deviation}. The variance is also known as the \emph{second central moment} of a RV; more generally, the \emph{\(n\)-th moment} of a RV is \(\E{x^n}\) and the \(n\)-th \emph{central moment} is \(\E{(x - \E{x})^n}\).
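
As a quick numerical check (a minimal sketch in Python with NumPy; the fair-die example and all variable names are our own choice, not from the lecture), the definitions above give \(\E{x} = 3.5\) and \(\Var{x} \approx 2.92\) for a fair six-sided die:
\begin{verbatim}
import numpy as np

# fair six-sided die
vals = np.arange(1, 7)
probs = np.full(6, 1 / 6)

mean = np.sum(vals * probs)    # E{x} = 3.5
var = np.sum(vals**2 * probs) - mean**2
print(mean, var)               # 3.5 2.9167

# empirical check with simulated throws
rng = np.random.default_rng(0)
throws = rng.integers(1, 7, size=100_000)
print(throws.mean(), throws.var())
\end{verbatim}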

\subsection{Jointly distributed RVs}

\section{Analog signals}

In this document we will use the notation \(\Omega = 2\pi f\) for physical analog frequencies (in radians / second), and \(\omega\) for digital frequencies (in radians / sample).

\subsection{Transformations}
Recall the three important operations for the analysis of analog signals.
\begin{flalign*}
  \textit{Fourier Transform} &&
  X(\Omega) &= \int_\mathbb{R} x(t) e^{-j\Omega t} \,dt \\
  %
  \textit{Inverse Fourier Transform} &&
  x(t) &= \int_\mathbb{R} X(\Omega) e^{j\Omega t} \,\frac{d\Omega}{2\pi} \\
  %
  \textit{Laplace Transform} &&
  X(s) &= \int_\mathbb{R} x(t) e^{-st} \,dt
\end{flalign*}
The Laplace transform reduces to the Fourier transform under the substitution \(s = j\Omega\).
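For example, for the one-sided exponential \(x(t) = e^{-at} u(t)\) with \(a > 0\), where \(u(t)\) is the unit step, the Laplace transform is
\[
  X(s) = \int_0^\infty e^{-at} e^{-st} \,dt = \frac{1}{s + a},
  \quad \operatorname{Re} s > -a,
\]
and substituting \(s = j\Omega\) yields the Fourier transform \(X(\Omega) = 1/(a + j\Omega)\).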

\subsection{Linear Systems}
Recall that for a linear time-invariant (LTI) system superposition holds, so the system is completely characterized by its impulse response \(h(t)\). The output in the time domain \(y(t)\) is given by the convolution product
\[
  y(t) = h(t) * x(t) = \int_\mathbb{R} h(t - t') x(t') \,dt',
\]
and in the frequency domain \(Y(\Omega) = H(\Omega) X(\Omega)\), where \(H(\Omega)\) is the Fourier transform of \(h(t)\).
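
A minimal numerical sketch of this relation (our own example, not from the lecture: a decaying exponential impulse response \(h(t) = e^{-t}\) driven by a unit step, whose exact output is \(y(t) = 1 - e^{-t}\)). The convolution integral is approximated by a discrete sum scaled by the time step:
\begin{verbatim}
import numpy as np

dt = 1e-3                 # time step
t = np.arange(0, 5, dt)

h = np.exp(-t)            # impulse response
x = np.ones_like(t)       # unit step input

# y(t) ~ sum_k h(t - k dt) x(k dt) dt
y = np.convolve(h, x)[:t.size] * dt

y_exact = 1 - np.exp(-t)
err = np.max(np.abs(y - y_exact))
print(err)                # small, O(dt)
\end{verbatim}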

% Analog signals:
% TODO: FT of eigenfunctions e^{j\Omega_k t\}

\section{Sampling and reconstruction}

To sample a signal \(x(t)\) means to measure (take) its value periodically, every \(T\) seconds. \(T\) is thus called the \emph{sample interval} and \(f_s = 1/T\) is the \emph{sampling frequency}.

\subsection{Sampling theorem}

To represent a signal \(x(t)\) by its samples \(\hat{x}(nT)\) two conditions must be met:
\begin{enumerate}
  \item \(x(t)\) must be \emph{bandlimited}, i.e. there must be a frequency \(f_\text{max}\) above which the spectrum of \(x(t)\) is zero.
  \item The sampling rate \(f_s\) must be chosen so that
    \[
      f_s \geq 2 f_\text{max}.
    \]
\end{enumerate}
In other words, at least 2 samples per period are needed to reconstruct a signal. The edge case \(f_s = 2 f_\text{max}\) is called the \emph{Nyquist rate}. The interval \(\left[-f_s / 2, f_s / 2\right]\) and its multiples are called \emph{Nyquist intervals}, as they are bounded by the Nyquist frequencies. Ideally the sampling frequency would be arbitrarily high, but in practice there is an upper limit \(f_\text{proc} = 1/T_\text{proc}\) given by the processing time \(T_\text{proc}\). Thus \(2f_\text{max} \leq f_s \leq f_\text{proc}\).
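
As a small illustration (a minimal sketch assuming the ideal reconstruction formula \(x(t) = \sum_n x(nT) \operatorname{sinc}\big((t - nT)/T\big)\), which we only state here; the signal and all names are our own choice): a \(3\,\text{Hz}\) sine sampled at \(f_s = 10\,\text{Hz} > 2 f_\text{max}\) can be rebuilt on a dense time grid from its samples alone.
\begin{verbatim}
import numpy as np

fs, T = 10.0, 0.1         # rate, interval
f0 = 3.0                  # below fs / 2

n = np.arange(40)         # 4 s of samples
xn = np.sin(2 * np.pi * f0 * n * T)

# sinc interpolation, evaluated away
# from the edges of the sampled block
t = np.linspace(1.0, 3.0, 500)
xr = sum(x * np.sinc((t - k * T) / T)
         for k, x in enumerate(xn))

err = np.abs(xr - np.sin(2 * np.pi * f0 * t))
print(err.max())          # truncation error,
                          # shrinks with more samples
\end{verbatim}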

\subsection{Discrete-Time Fourier Transform}

Mathematically speaking, sampling a signal is equivalent to multiplying it with the so called \emph{impulse train distribution} (also known as the Dirac comb)
\[
  s(t) = \sum_{n = -\infty}^{\infty} \delta(t - nT),
\]
so we write \(\hat{x}(t) = s(t)\cdot x(t)\) to represent a sampled signal. Because of the sifting property of the Dirac delta, the spectrum of a sampled function \(\hat{x}\) is
\[
  \hat{X}(f) = \sum_{n = -\infty}^{\infty} x(nT) e^{-2\pi jfTn}.
\]
This can be thought of as a numerical approximation of the real spectrum \(X(f)\) which gets better as \(T \to 0\), i.e.
\[
  X(f) = \lim_{T \to 0} T\hat{X}(f).
\]
If we only have a finite number \(L\) of samples to work with, we truncate the sum and obtain the length-\(L\) \emph{Discrete-Time Fourier Transform} (DTFT), i.e.
\[
  \hat{X}(f) \approx \hat{X}_L(f) = \sum_{n = 0}^{L -1} x(nT) e^{-2\pi jfTn}.
\]
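This sum can be evaluated directly at any set of frequencies. A minimal sketch (our own example: samples of \(x(t) = e^{-t}u(t)\), whose spectrum \(X(f) = 1/(1 + 2\pi j f)\) is known in closed form) checks that \(T\hat{X}_L(f)\) approximates \(X(f)\):
\begin{verbatim}
import numpy as np

T, L = 0.01, 1000         # interval, length
n = np.arange(L)
x = np.exp(-n * T)        # samples of e^{-t}

f = np.linspace(-5, 5, 201)   # Hz

# X_L(f) = sum_n x(nT) e^{-2 pi j f T n}
E = np.exp(-2j * np.pi * np.outer(f, n) * T)
XL = E @ x

X_true = 1 / (1 + 2j * np.pi * f)
err = np.max(np.abs(T * XL - X_true))
print(err)                # small, shrinks with T
\end{verbatim}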

\subsection{Spectrum replication and aliasing}

Notice that the impulse train is periodic and thus has a Fourier series, whose coefficients are all equal to \(1/T\) (\(= f_s\), the sampling rate). So the Fourier transform of a comb is again a comb. Because the Fourier transform of the product \(x(t)\cdot s(t)\) in the time domain becomes a convolution \(X(f) * S(f)\) in the frequency domain, where \(S(f)\) is an impulse train of Dirac deltas spaced \(1/T = f_s\) apart, so called \emph{spectrum replication} occurs, mathematically
\[
  \hat{X}(f)
    = \sum_{n = -\infty}^{\infty} x(nT) e^{-2\pi jfTn}
    = \frac{1}{T}\sum_{m = -\infty}^\infty X(f - mf_s).
\]
In other words, by the modulation property of the Fourier transform, sampling copies the baseband spectrum to every integer multiple of the sampling frequency. This is why \(f_s \geq 2f_\text{max}\) is required: otherwise the replicas overlap and aliasing occurs. The important result is that
\[
  X(f) = T \hat{X}(f), \quad 
  \text{for} \quad -\frac{f_s}{2} \leq f \leq \frac{f_s}{2},
\]
and if the sampling theorem is satisfied the exact original spectrum can be recovered with a low pass filter.
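
Conversely, when the theorem is violated the replicas overlap and a frequency above \(f_s/2\) becomes indistinguishable from one inside the Nyquist interval. A minimal sketch (our own example, frequencies chosen only for illustration): with \(f_s = 10\,\text{Hz}\), a \(7\,\text{Hz}\) sine produces exactly the same samples as a \(-3\,\text{Hz}\) one, since \(7 - 10 = -3\).
\begin{verbatim}
import numpy as np

fs = 10.0                 # sampling rate in Hz
n = np.arange(100)
t = n / fs                # sample instants

x7 = np.sin(2 * np.pi * 7 * t)     # 7 Hz
xa = np.sin(2 * np.pi * (-3) * t)  # 7 - fs

print(np.allclose(x7, xa))         # True
\end{verbatim}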


% Alias frequency \(f_a = f \pmod{f_s}\).
% Anti-aliasing: analog LP prefilter cutoff \@ \(f_s/2\)

\section{Quantization}


\end{document}