\input{decls.tex}

\title{Linear Algebra}

\begin{document}

\maketitle

\section*{Allowable Operations on a Linear System}

The following operations leave the solution set of a linear system invariant:
\begin{itemize}
\item Multiply an equation by a non-zero scalar
\item Swap two equations
\item Add a multiple of one equation to another
\end{itemize}

\subsection*{Example}
\begin{align*}
& \systeme{
x - 2y + 2z = 6,
-x + 3y + 4z = 2,
2x + y - 2z = -2
} \\\\
E_2 & \implies E_2 + E_1 \\
E_3 & \implies E_3 - 2E_1 \\
& \systeme{
x - 2y + 2z = 6,
y + 6z = 8,
5y - 6z = -14
} \\\\
E_3 & \implies E_3 - 5E_2 \\
E_3 & \implies -36^{-1}E_3 \\
& \systeme{
x - 2y + 2z = 6,
y + 6z = 8,
z = \frac{3}{2}
} \\\\
E_1 & \implies E_1 - 2E_3 \\
E_2 & \implies E_2 - 6E_3 \\
& \systeme{
x - 2y = 3,
y = -1,
z = \frac{3}{2}
} \\\\
E_1 & \implies E_1 + 2E_2 \\
& \systeme{
x = 1,
y = -1,
z = \frac{3}{2}
}
\end{align*}
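As a quick check, substituting $(x, y, z) = \paren{1, -1, \frac{3}{2}}$ into the original system recovers each right-hand side:

\begin{align*}
1 - 2(-1) + 2\paren{\tfrac{3}{2}} & = 6 \\
-1 + 3(-1) + 4\paren{\tfrac{3}{2}} & = 2 \\
2(1) + (-1) - 2\paren{\tfrac{3}{2}} & = -2
\end{align*}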
\section*{As Matrices}
\begin{align*}
& \systeme{
x + 2y = 1,
2x - y = 3
}
\quad=\quad
\begin{pmatrix}[cc|c]
1 & 2 & 1 \\
2 & -1 & 3
\end{pmatrix} \\
& \systeme{
x - y + z = -2,
2x + 3y + z = 7,
x - 2y - z = -2
} \quad=\quad \begin{pmatrix}[ccc|c]
1 & -1 & 1 & -2 \\
2 & 3 & 1 & 7 \\
1 & -2 & -1 & -2
\end{pmatrix} \\
\grstep[R_3 - R_1]{R_2 - 2R_1} & \begin{pmatrix}[ccc|c]
1 & -1 & 1 & -2 \\
0 & 5 & -1 & 11 \\
0 & -1 & -2 & 0
\end{pmatrix} \\
\grstep{5R_3 + R_2} & \begin{pmatrix}[ccc|c]
1 & -1 & 1 & -2 \\
0 & 5 & -1 & 11 \\
0 & 0 & -11 & 11
\end{pmatrix} \\
\grstep{-11^{-1}R_3} & \begin{pmatrix}[ccc|c]
1 & -1 & 1 & -2 \\
0 & 5 & -1 & 11 \\
0 & 0 & 1 & -1
\end{pmatrix} \\
\grstep[R_1 - R_3]{R_2 + R_3} & \begin{pmatrix}[ccc|c]
1 & -1 & 0 & -1 \\
0 & 5 & 0 & 10 \\
0 & 0 & 1 & -1
\end{pmatrix} \\
\grstep{5^{-1}R_2} & \begin{pmatrix}[ccc|c]
1 & -1 & 0 & -1 \\
0 & 1 & 0 & 2 \\
0 & 0 & 1 & -1
\end{pmatrix} \\
\grstep{R_1 + R_2} & \begin{pmatrix}[ccc|c]
1 & 0 & 0 & 1 \\
0 & 1 & 0 & 2 \\
0 & 0 & 1 & -1
\end{pmatrix} \\
= & \quad
\left\{
\subalign{
x & ~= ~1 \\
y & ~= ~2 \\
z & ~= ~-1
}
\right.
\end{align*}
\section*{Row-Echelon Form}

\begin{description}
\item[Row-Echelon Form] The leading entry in each row is 1 and lies further to the right than the leading entry of the row above;
all zero rows are at the bottom
\item[Reduced Row-Echelon Form] every other entry in a column containing a leading 1 is 0
\item[Theorem:] A matrix can be transformed to reduced row-echelon form using a finite number of allowable row operations
\end{description}

\subsection*{Example}
\begin{align*}
& \systeme{3x_1 + 2x_2 = 1,
x_1 - x_2 = 4,
2x_1 + x_2 = 5} = \begin{pmatrix}[cc|c]
3 & 2 & 1 \\
1 & -1 & 4 \\
2 & 1 & 5
\end{pmatrix} \\
\grstep{R_1\swap R_2} & \begin{pmatrix}[cc|c]
1 & -1 & 4 \\
3 & 2 & 1 \\
2 & 1 & 5
\end{pmatrix} \\
\grstep[R_2 - 3R_1]{R_3 - 2R_1} & \begin{pmatrix}[cc|c]
1 & -1 & 4 \\
0 & 5 & -11 \\
0 & 3 & -3
\end{pmatrix} \\
\grstep{5^{-1}R_2} & \begin{pmatrix}[cc|c]
1 & -1 & 4 \\
0 & 1 & \frac{-11}{5} \\
0 & 3 & -3
\end{pmatrix} \\
\grstep{R_3 - 3R_2} & \begin{pmatrix}[cc|c]
1 & -1 & 4 \\
0 & 1 & \frac{-11}{5} \\
0 & 0 & \frac{18}{5}
\end{pmatrix} \\
= & \systeme{
x_1 - x_2 = 4,
x_2 = \frac{-11}{5},
0x_1 + 0x_2 = \frac{18}{5}
}
\end{align*}

The final equation $0 = \frac{18}{5}$ is unsatisfiable, so this system has no solution.
\begin{align*}
& \begin{pmatrix}[cccc|c]
1 & -1 & 1 & 1 & 6 \\
-1 & 1 & -2 & 1 & 3 \\
2 & 0 & 1 & 4 & 1
\end{pmatrix} \\
\grstep[R_2 + R_1]{R_3 - 2R_1} & \begin{pmatrix}[cccc|c]
1 & -1 & 1 & 1 & 6 \\
0 & 0 & -1 & 2 & 9 \\
0 & 2 & -1 & 2 & -11
\end{pmatrix} \\
\grstep[R_2\swap R_3]{2^{-1}R_2} & \begin{pmatrix}[cccc|c]
1 & -1 & 1 & 1 & 6 \\
0 & 1 & \frac{-1}{2} & 1 & \frac{-11}{2} \\
0 & 0 & -1 & 2 & 9
\end{pmatrix} \\
\grstep[R_1 + R_3]{R_2 - 2^{-1}R_3} & \begin{pmatrix}[cccc|c]
1 & -1 & 0 & 3 & 15 \\
0 & 1 & 0 & 0 & -10 \\
0 & 0 & -1 & 2 & 9
\end{pmatrix} \\
\grstep[-R_3]{R_1 + R_2} & \begin{pmatrix}[cccc|c]
1 & 0 & 0 & 3 & 5 \\
0 & 1 & 0 & 0 & -10 \\
0 & 0 & 1 & -2 & -9
\end{pmatrix} \\
= & \systeme{
x_1 + 3x_4 = 5,
x_2 = -10,
x_3 - 2x_4 = -9
} \\
= & \left\{\substack{
x_1 = 5 - 3t \\
x_2 = -10 \\
x_3 = -9 + 2t \\
x_4 = t
}\right.
\end{align*}
\section*{Determinants}

The determinant of a matrix is defined only for square matrices.

\[\det{A} \neq 0 \iff \exists \text{ a unique solution to the linear system represented by } A\]

Let
\[A = \begin{pmatrix}
a_{11} & \ldots & a_{1n} \\
\vdots & \ddots & \vdots \\
a_{n1} & \ldots & a_{nn}
\end{pmatrix}
\]
\begin{description}
\item[$i, j$ minor of $A$] an $(n-1)$x$(n-1)$ matrix constructed by removing the $i^\text{th}$ row and $j^\text{th}$ column of $A$ \\
Denoted by $A_{ij}$
\end{description}
\begin{align*}
& \det{A} = a_{11} \text{ for } n = 1 \\
& \det{A} = a_{11}\det{A_{11}} - a_{12}\det{A_{12}} + ... + (-1)^{n+1}a_{1n}\det{A_{1n}} \tag{Laplace expansion of the first row} \\
& \qquad \text{or Laplace expansion along any other row or column} \\
\text{For } n = 2: & \\
& \det{A} = a_{11}\cdot a_{22} - a_{12}\cdot a_{21}
\end{align*}
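For instance, expanding a $3$x$3$ determinant along the first row:

\begin{align*}
\det{\begin{pmatrix} 1 & 2 & 3 \\ 4 & 5 & 6 \\ 7 & 8 & 10 \end{pmatrix}}
& = 1\det{\begin{pmatrix} 5 & 6 \\ 8 & 10 \end{pmatrix}} - 2\det{\begin{pmatrix} 4 & 6 \\ 7 & 10 \end{pmatrix}} + 3\det{\begin{pmatrix} 4 & 5 \\ 7 & 8 \end{pmatrix}} \\
& = 1(50 - 48) - 2(40 - 42) + 3(32 - 35) = 2 + 4 - 9 = -3
\end{align*}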
\begin{description}
\item[Upper Triangular] lower left triangle is 0 - $d_{ij} = 0 \quad \forall{i > j}$
\item[Lower Triangular] upper right triangle is 0 - $d_{ij} = 0 \quad \forall{i < j}$
\item[Diagonal] only values on the diagonal - $d_{ij} = 0 \quad \forall{i \neq j}$ \\
$\det{A} = \prod^{n}_{i=1}~a_{ii} \quad \forall~\text{triangular (in particular, row-echelon) }A$
\end{description}
\begin{itemize}
\item Multiplying a row of a square matrix $A$ by $r$ multiplies $\det{A}$ by $r$
\item Swapping two rows of a square matrix $A$ multiplies $\det{A}$ by $-1$
\item Adding a multiple of one row to another does not affect the determinant
\end{itemize}
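These rules give another route to a determinant: row-reduce to triangular form and track each operation's effect. A small example:

\begin{align*}
\det{\begin{pmatrix} 0 & 2 \\ 3 & 1 \end{pmatrix}}
= -\det{\begin{pmatrix} 3 & 1 \\ 0 & 2 \end{pmatrix}}
= -(3)(2) = -6 \tag{$R_1 \swap R_2$}
\end{align*}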
\section*{Transposition}

\begin{description}
\item[$A^T$] $a^T_{ij} = a_{ji}~ \forall~i,j$
\end{description}

Note: $\det{A} = \det{A^T}~\forall~A$
\section*{Matrix Multiplication}

Defined when the LHS has as many columns as the RHS has rows. \\
Each entry is the dot product of a row of the LHS with a column of the RHS:

\[AB = (a_{i1}b_{1j} + a_{i2}b_{2j} + \ldots + a_{im}b_{mj})_{ij}\]
\begin{align*}
\begin{pmatrix}[c|c|c]
2 & 1 + 1 & 3 + 6 \\
4(2) & 4 + 1 & 3(4) + 6 \\
0 & 2 & 2(6) \\
\end{pmatrix} = \begin{pmatrix}
2 & 2 & 9 \\
8 & 5 & 18 \\
0 & 2 & 12
\end{pmatrix}
\end{align*}

\begin{align*}
\begin{pmatrix}1 \\ 2 \\ 3 \end{pmatrix}\begin{pmatrix}1 & 2 & 3 & 4\end{pmatrix} + \begin{pmatrix}
1 & 2 & 3 & 4 \\
5 & 6 & 7 & 8 \\
9 & 10 & 11 & 12 \\
\end{pmatrix}
\end{align*}
\[A\vec{x} = \vec{b}\]

where $A$ is the coefficient matrix, $\vec{x}$ is the vector of variables, and $\vec{b}$ is the vector of right-hand sides of a linear system.
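For example, the first system of the \emph{As Matrices} section becomes:

\[ \begin{pmatrix} 1 & 2 \\ 2 & -1 \end{pmatrix}\begin{pmatrix} x \\ y \end{pmatrix} = \begin{pmatrix} 1 \\ 3 \end{pmatrix} \]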
\subsection*{Inverse Matrices}

The identity matrix exists as $I_n$ for size $n$.

\[AA^{-1} = I_n = A^{-1}A \quad \forall~\text{invertible matrices }A \text{ of size } n\]

Assume that $A$ has two distinct inverses, $B$ and $C$.
\begin{align*}
& \text{matrix multiplication is associative} \\
\therefore~ & C(AB) = (CA)B \\
\therefore~ & C I_n = I_n B \\
\therefore~ & C = B \\
& \text{
As $B = C$ contradicts the assumption that $B$ and $C$ are distinct, a matrix has at most one inverse
}
\end{align*}
A matrix $A$ is invertible $\iff \det{A} \neq 0$

\[\det{AB} = \det{A}\det{B}\]

\[\therefore~ \det{A}\det{A^{-1}} = \det{I_n} = 1\]

\[\therefore~ \det{A} \neq 0 \]

\begin{align*}
\begin{pmatrix} a & b \\ c & d \end{pmatrix}^{-1} = \frac{1}{ad - bc}\begin{pmatrix} d & -b \\ -c & a \end{pmatrix}
\end{align*}
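Applying the formula to a concrete matrix:

\begin{align*}
\begin{pmatrix} 1 & 2 \\ 3 & 4 \end{pmatrix}^{-1} = \frac{1}{1(4) - 2(3)}\begin{pmatrix} 4 & -2 \\ -3 & 1 \end{pmatrix} = \begin{pmatrix} -2 & 1 \\ \frac{3}{2} & \frac{-1}{2} \end{pmatrix}
\end{align*}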
\subsubsection*{Computation thereof}

\[\det{A} = \sum_{j = 1}^{n}~a_{ij}(-1)^{i+j}\det{A_{ij}} \quad \text{ for any $i$}\]

\begin{description}
\item[Matrix of Cofactors: $C$] $c_{ij} = (-1)^{i+j}\det{A_{ij}}$: the determinants of the minors with the signs of the Laplace expansion \\
ie. summing any row of $A \odot C$ (the entrywise product) gives $\det{A}$
\item[$\adj{A}$ Adjugate of $A$ =] $C^T$
\end{description}
\begin{align*}
A & = \begin{pmatrix}
1 & 0 & 1 \\
-1 & 1 & 2 \\
2 & 0 & 1
\end{pmatrix} \\
C(A) & = \begin{pmatrix}
1 & 5 & -2 \\
0 & -1 & 0 \\
-1 & -3 & 1
\end{pmatrix}
\end{align*}
\[ A^{-1} = \frac{\adj{A}}{\det{A}} \]
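Continuing the example above: expanding along the first row with the cofactors already computed gives $\det{A} = 1(1) + 0(5) + 1(-2) = -1$, so

\[ A^{-1} = \frac{C(A)^T}{-1} = \begin{pmatrix} -1 & 0 & 1 \\ -5 & 1 & 3 \\ 2 & 0 & -1 \end{pmatrix} \]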
Gaussian elimination can also be used: augment the matrix with $I_n$ on the right and
reduce to reduced row-echelon form. If the left half is $I_n$, the right half is
the inverse. If a zero row appears, $\det{A} = 0$ and $A$ has no inverse.
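The same $2$x$2$ matrix as above, done by elimination (the right half ends as the inverse found earlier):

\begin{align*}
& \begin{pmatrix}[cc|cc]
1 & 2 & 1 & 0 \\
3 & 4 & 0 & 1
\end{pmatrix} \\
\grstep{R_2 - 3R_1} & \begin{pmatrix}[cc|cc]
1 & 2 & 1 & 0 \\
0 & -2 & -3 & 1
\end{pmatrix} \\
\grstep{-2^{-1}R_2} & \begin{pmatrix}[cc|cc]
1 & 2 & 1 & 0 \\
0 & 1 & \frac{3}{2} & \frac{-1}{2}
\end{pmatrix} \\
\grstep{R_1 - 2R_2} & \begin{pmatrix}[cc|cc]
1 & 0 & -2 & 1 \\
0 & 1 & \frac{3}{2} & \frac{-1}{2}
\end{pmatrix}
\end{align*}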
\section*{Linear Transformations}

\begin{align*}
f: & ~ \R^n \to \R^m \\
f & (x_1, \cdots, x_n) = (f_1(x_1, \cdots, x_n), f_2(x_1, \cdots, x_n), \cdots, f_m(x_1, \cdots, x_n))
\end{align*}

$f$ is a linear transformation if \(\forall i.~f_i(x_1, \cdots, x_n)\) is a
linear polynomial in $x_1, \cdots, x_n$ with a zero constant term
\begin{align*}
f(x_1,~ x_2) & = (x_1 + x_2,~ 3x_1 - x_2,~ 10x_2) \tag{is a linear transformation} \\
g(x_1,~ x_2,~ x_3) & = (x_1 x_2,~ x_3^2) \tag{not a linear transformation} \\
h(x_1,~ x_2) & = (3x_1 + 4,~ 2x_2 - 4) \tag{not a linear transformation}
\end{align*}

\[f: \R^n \to \R^m,\quad \vec{x} \mapsto A\vec{x} \]

\[\exists \text{ a matrix $A$ of dimension $m$x$n$ } \forall\text{ linear transforms } f: \R^n \to \R^m \]

\[\forall \text{ matrices $A$ of dimension $m$x$n$ } \exists \text{ a linear transform } f: \R^n \to \R^m \text{ such that } f(\vec{x}) = A\vec{x} \]

Function composition of linear transformations is just matrix multiplication:
\begin{align*}
f(\vec{x}) & = A\vec{x} \\
g(\vec{y}) & = B\vec{y} \\
(f\cdot g)(\vec{x}) & = f(g(\vec{x})) = AB\vec{x}
\end{align*}
A function \(f: \R^n \to \R^m\) is a linear transformation iff:

\begin{enumerate}
\item $f(\vec{x} + \vec{y}) = f(\vec{x}) + f(\vec{y}) \quad \forall~\vec{x},~\vec{y} \in \R^n $
\item $f(r\vec{x}) = r\cdot f(\vec{x}) \quad \forall~\vec{x} \in \R^n, r \in \R $
\end{enumerate}
\subsection*{Building the matrix of a linear transform}

\[ f(\vec{x}) = f(x_1\vec{e}_1 + x_2\vec{e}_2) = f(x_1\vec{e}_1) + f(x_2\vec{e}_2) = x_1f(\vec{e}_1) + x_2f(\vec{e}_2) \]

\[ A = \begin{pmatrix} f(\vec{e}_1) & f(\vec{e}_2) \end{pmatrix} \]

\begin{align*}
& \vec{e}_1 = \begin{pmatrix} 1 \\ 0 \end{pmatrix}
\\ & \vec{e}_2 = \begin{pmatrix} 0 \\ 1 \end{pmatrix}
\\ & \vdots
\\ & \forall \vec{x}.~ \vec{x} = \sum_{i=1}^{n}~\vec{e}_i x_i
\end{align*}
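Applying this to the linear transformation $f(x_1,~ x_2) = (x_1 + x_2,~ 3x_1 - x_2,~ 10x_2)$ above: $f(\vec{e}_1) = (1, 3, 0)$ and $f(\vec{e}_2) = (1, -1, 10)$, so

\[ A = \begin{pmatrix} 1 & 1 \\ 3 & -1 \\ 0 & 10 \end{pmatrix} \]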
\subsection*{Composition}

\[ \paren{f \cdot g}\paren{\vec{x}} = f(g(\vec{x})) = AB\vec{x} \]

where: $f(\vec{x}) = A\vec{x}$, $g(\vec{x}) = B\vec{x}$
\subsection*{Geometry}

\begin{description}
\item[rotation of $x$ by $\theta$ anticlockwise] \( = R_\theta = \begin{pmatrix} \cos{\theta} & -\sin{\theta} \\ \sin{\theta} & \cos{\theta} \end{pmatrix} \)
\item[reflection about a line at angle $\alpha$ from the $x$-axis] \( = T_\alpha = R_{\alpha}T_0R_{-\alpha}\) where \( T_0 = \begin{pmatrix} 1 & 0 \\ 0 & -1 \end{pmatrix} \)
\item[uniform scaling by $\lambda \in \R$] \( = S_\lambda = \lambda I_n\)
\item[stretch by $\alpha$ in $x$ and $\gamma$ in $y$] \( \begin{pmatrix} \alpha & 0 \\ 0 & \gamma \end{pmatrix}\)
\end{description}
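For example, an anticlockwise rotation by $\frac{\pi}{2}$ sends $\vec{e}_1 \to \vec{e}_2$ and $\vec{e}_2 \to -\vec{e}_1$:

\[ R_{\pi/2} = \begin{pmatrix} \cos{\frac{\pi}{2}} & -\sin{\frac{\pi}{2}} \\ \sin{\frac{\pi}{2}} & \cos{\frac{\pi}{2}} \end{pmatrix} = \begin{pmatrix} 0 & -1 \\ 1 & 0 \end{pmatrix} \]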
The image of the unit square under the linear transform $A$ is a parallelogram with vertices $(0, 0)$, $(a_{11}, a_{21})$, $(a_{12}, a_{22})$, $(a_{11} + a_{12}, a_{21} + a_{22})$, with area $ \abs{\det{A}} $

\subsection*{Inversion}

Inversion of a linear transformation is equivalent to inversion of its representative matrix
\subsection*{Eigen\{values, vectors\}}

\[ \begin{pmatrix} a & 0 \\ 0 & b \end{pmatrix}\begin{pmatrix} 1 \\ 0 \end{pmatrix} = \begin{pmatrix} a \\ 0 \end{pmatrix} = a\vec{e}_1\]

\[ \begin{pmatrix} a & 0 \\ 0 & b \end{pmatrix}\begin{pmatrix} 0 \\ 1 \end{pmatrix} = \begin{pmatrix} 0 \\ b \end{pmatrix} = b\vec{e}_2\]

\[ T_\alpha \vec{x} = \vec{x} \text{ for $\vec{x}$ along the line of reflection }\]

\begin{description}
\item[Eigenvector (of some transformation $f$)] A non-zero vector $\vec{x}$ such that $f(\vec{x}) = \lambda\vec{x}$ for some value $\lambda$
\item[Eigenvalue] $\lambda$ as above
\end{description}

\[ \forall \text{ eigenvectors } \vec{x} \text{ of } A \text{ with eigenvalue } \lambda,~ c \in \R,~ c \neq 0 .~ c\vec{x} \text{ is an eigenvector with eigenvalue } \lambda\]
\[ \forall A: \text{$n$x$n$ matrix}.\quad P_A\paren{\lambda} = \det{\paren{A - \lambda I_n}} \tag{characteristic polynomial in $\lambda$}\]

Eigenvalues of $A$ are the solutions of $P_A\paren{\lambda} = 0$

\begin{align*}
& A\vec{x} = \lambda\vec{x} & \vec{x} \neq 0 \\
\iff & A\vec{x} - \lambda\vec{x} = 0 \\
\iff & (A - \lambda I_n)\vec{x} = 0 \\
\iff & \det{\paren{A - \lambda I_n}} = 0 \\
& \quad \text{ otherwise $\paren{A - \lambda I_n}$ is invertible and $\vec{x} = 0$ is the only solution }
\end{align*}

\[ P_{R_\theta}(\lambda) \text{ has roots } \frac{2\cos{\theta} \pm \sqrt{-4\sin^2{\theta}}}{2} \]

\[ R_\theta \text{ has real eigenvalues }\iff \sin{\theta} = 0 \]
\subsubsection*{Example}

\begin{align*}
A & = \begin{pmatrix} 4 & 0 & 1 \\ -2 & 1 & 0 \\ -2 & 0 & 1 \end{pmatrix} \\
P_A(\lambda) & = \det{A - \lambda I_3} = \det{\begin{pmatrix} 4 - \lambda & 0 & 1 \\ -2 & 1 - \lambda & 0 \\ -2 & 0 & 1 - \lambda \end{pmatrix}} = (1 - \lambda)\det{\begin{pmatrix}4 - \lambda & 1 \\ -2 & 1 - \lambda \end{pmatrix}} \\
& = (1 - \lambda)\paren{(4 - \lambda)(1 - \lambda) + 2} = (1 - \lambda)(\lambda^2 - 5\lambda + 6) \\
& = (1 - \lambda)(2 - \lambda)(3 - \lambda) \\
\lambda & = 1, 2, 3 \\
A\vec{x} & = \lambda\vec{x} \text{ for all eigenvectors } \vec{x} \\
(A - \lambda I_3)\vec{x} & = 0 \\
\text{For } \lambda = 1: & \begin{pmatrix} 3 & 0 & 1 \\ -2 & 0 & 0 \\ -2 & 0 & 0 \end{pmatrix}\vec{x} = \begin{pmatrix} 0 \\ 0 \\ 0 \end{pmatrix} \\
& \text{ eigenvectors with eigenvalue 1 are } s\vec{e}_2~~\forall~s \in \R,~ s \neq 0 \\
(A - 2I_3)\vec{x} & = 0 \\
& \begin{pmatrix} 2 & 0 & 1 \\ -2 & -1 & 0 \\ -2 & 0 & -1 \end{pmatrix}\vec{x} = \begin{pmatrix} 0 \\ 0 \\ 0\end{pmatrix} \\
& \systeme{
2x_1 + x_3 = 0,
-2x_1 - x_2 = 0,
-2x_1 -x_3 = 0
}: s\begin{pmatrix} 1 \\ -2 \\ -2 \end{pmatrix} \\ \\
B & = \begin{pmatrix} 5 & 3 & 3 \\ -3 & -1 & -3 \\ -1 & -3 & -1 \end{pmatrix}
\end{align*}
Repeated roots of the characteristic polynomial lead to multiple free parameters in the corresponding eigenvectors.

\[ \forall \text{ matrices } A.~A \text{ is invertible } \iff 0 \text{ is not an eigenvalue }\]

If $0$ is an eigenvalue, then $P_A(0) = 0 \therefore \det{\paren{A - 0I_n}} = \det{A} = 0$, so $A$ is not invertible.
\[ P_A(\lambda) = \det{\paren{(-I_n)(\lambda I_n - A)}} = \det{-I_n}\det{\lambda I_n - A}\]

\[ = (-1)^n \lambda^n + c_{n-1}\lambda^{n-1} + \ldots \]

\begin{description}
\item[Trace] The sum of the diagonal of a matrix
\[ c_{n - 1} = (-1)^{n+1}\operatorname{tr}A \]
\item[Cayley-Hamilton Theorem:] \( P_A(A) = 0_n \)
\end{description}
\subsubsection*{Example}

\begin{align*}
A & = \begin{pmatrix} 1 & 4 \\ 3 & 2 \end{pmatrix} \\
P_A(\lambda) & = (1 - \lambda)(2 - \lambda) - 12 \\
& = 2 - 3\lambda + \lambda^2 - 12 \\
& = \lambda^2 - 3\lambda - 10 \\
P_A(5) & = P_A(-2) = 0 \\
P_A(A) & = 0 \\
A^2 & = 3A + 10I_2 \\
A^3 & = (3A + 10I_2)A \\
A^{n + 2} & = 3A^{n+1} + 10A^n
\end{align*}
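A quick numerical check of $A^2 = 3A + 10I_2$:

\begin{align*}
A^2 = \begin{pmatrix} 1 & 4 \\ 3 & 2 \end{pmatrix}\begin{pmatrix} 1 & 4 \\ 3 & 2 \end{pmatrix} = \begin{pmatrix} 13 & 12 \\ 9 & 16 \end{pmatrix} = \begin{pmatrix} 3 & 12 \\ 9 & 6 \end{pmatrix} + \begin{pmatrix} 10 & 0 \\ 0 & 10 \end{pmatrix} = 3A + 10I_2
\end{align*}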
\subsection*{Diagonalization}

\begin{description}
\item[Similarity of matrices] $A$ and $B$ are \emph{similar} iff there exists an invertible matrix $P$ such that $B = P^{-1}AP$
\[ T_\alpha \text{ is similar to } T_0 \quad (P = R_{-\alpha}) \]
\end{description}
For similar $A$, $B$:

\begin{itemize}
\item $\det{A} = \det{B}$
\item $P_A(\lambda) = P_B(\lambda)$
\[\det{\paren{A - \lambda I_n}} = \det{\paren{PBP^{-1} - \lambda PP^{-1}}} = \det{\paren{P(B - \lambda I_n)P^{-1}}}\]
\[ (A - \lambda I_n) \text{ and } (B - \lambda I_n) \text{ are similar }\]
\item eigenvalues are the same
\item trace is the same
\end{itemize}
\begin{description}
\item[Diagonalizable Matrix] a square matrix that is similar to a diagonal matrix
\[ P^{-1}AP = D \]
$P$ diagonalizes $A$ \quad
($P$ is not necessarily unique)
\end{description}

An $n$x$n$ matrix $A$ is diagonalizable iff there exists an invertible matrix $P = (\vec{x}_1~ \vec{x}_2~ \cdots~ \vec{x}_n)$ whose columns $\vec{x}_i$ are eigenvectors of $A$

\[ P^{-1}AP = \begin{pmatrix} \lambda_1 & 0 & \cdots & 0 \\ 0 & \lambda_2 & \cdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & \cdots & \lambda_n \end{pmatrix} \]

i.e. it is diagonal, with the $i^\text{th}$ diagonal element equal to the eigenvalue corresponding to $\vec{x}_i$
\subsubsection*{Example}

\begin{align*}
A & = \begin{pmatrix} 4 & 0 & 1 \\ -2 & 1 & 0 \\ -2 & 0 & 1 \end{pmatrix} \\
\vec{x}_1 & = \begin{pmatrix} 0 \\ s \\ 0 \end{pmatrix} \tag{$\lambda_1 = 1$} \\
\vec{x}_2 & = \begin{pmatrix} t \\ -2t \\ -2t \end{pmatrix} \tag{$\lambda_2 = 2$} \\
\vec{x}_3 & = \begin{pmatrix} u \\ -u \\ -u \end{pmatrix} \tag{$\lambda_3 = 3$} \\
P & = \begin{pmatrix}
0 & 1 & 1 \\
1 & -2 & -1 \\
0 & -2 & -1
\end{pmatrix} \\
P^{-1} & = \begin{pmatrix}
0 & 1 & -1 \\ -1 & 0 & -1 \\ 2 & 0 & 1
\end{pmatrix} \\
P^{-1}AP & = \begin{pmatrix}
1 & 0 & 0 \\ 0 & 2 & 0 \\ 0 & 0 & 3
\end{pmatrix}
\end{align*}
\subsection*{Linear Independence}

For any set of vectors $V = \{ \vec{v}_1, \ldots, \vec{v}_m \}$ in $\R^n$, the $\vec{v}_i$ are linearly independent if

\[ \sum_i k_i \vec{v}_i = 0 \implies \forall i.~ k_i = 0 \quad (k_i \in \R) \]

This implies that no vector in $V$ can be written as a linear combination of the others.
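A quick illustration in $\R^2$:

\begin{align*}
& \begin{pmatrix} 1 \\ 0 \end{pmatrix}, \begin{pmatrix} 1 \\ 1 \end{pmatrix} \text{ linearly independent} \tag{$k_1 + k_2 = 0,~ k_2 = 0 \implies k_1 = k_2 = 0$} \\
& \begin{pmatrix} 1 \\ 2 \end{pmatrix}, \begin{pmatrix} 2 \\ 4 \end{pmatrix} \text{ linearly dependent} \tag{$2(1, 2) - (2, 4) = 0$}
\end{align*}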
A square matrix is invertible iff its columns are linearly independent in $\R^n$ \\
An $n$x$n$ matrix $A$ is diagonalizable iff it admits $n$ linearly independent eigenvectors \\
For $\vec{x}_i$ eigenvectors of $A$ corresponding to distinct eigenvalues $\lambda_i$, $\{ \vec{x}_i \}$ is linearly independent. \\
Note: the converse is \emph{not} true: there exist matrices $A$ with repeated eigenvalues and linearly independent eigenvectors.

\[ \forall k \in \N, A = PDP^{-1}.~A^k = (PDP^{-1})^k = PD^kP^{-1} \]

Exponentiation of a diagonal matrix is elementwise
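Combined with the diagonalization example above, where $P^{-1}AP$ is diagonal with entries $1, 2, 3$, powers of that $A$ follow directly:

\[ A^k = P \begin{pmatrix} 1 & 0 & 0 \\ 0 & 2^k & 0 \\ 0 & 0 & 3^k \end{pmatrix} P^{-1} \]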
\end{document}