\Section[
\eng{Machine Learning}
\ger{Maschinelles Lernen}
]{ml}
\Subsection[
\eng{Performance metrics}
\ger{Metriken zur Leistungsmessung}
]{performance}
\eng[cp]{correct predictions}
\ger[cp]{richtige Vorhersagen}
\eng[fp]{false predictions}
\ger[fp]{falsche Vorhersagen}
\eng[y]{ground truth}
\eng[yhat]{prediction}
\ger[y]{Wahrheit}
\ger[yhat]{Vorhersage}
\begin{formula}{accuracy}
\desc{Accuracy}{}{}
\desc[german]{Genauigkeit}{}{}
\eq{a = \frac{\tgt{cp}}{\tgt{fp} + \tgt{cp}}}
\end{formula}
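% Worked example for the formula above: 90 correct and 10 false predictions give $a = \frac{90}{10 + 90} = 0.9$.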
\begin{formula}{mean_abs_error}
\desc{Mean absolute error (MAE)}{}{$y$ \gt{y}, $\hat{y}$ \gt{yhat}, $n$ number of predictions}
\desc[german]{Mittlerer absoluter Fehler (MAE)}{}{}
\eq{\text{MAE} = \frac{1}{n} \sum_{i=1}^n \abs{y_i - \hat{y}_i}}
\end{formula}
\begin{formula}{root_mean_square_error}
\desc{Root mean squared error (RMSE)}{}{$y$ \gt{y}, $\hat{y}$ \gt{yhat}, $n$ number of predictions}
\desc[german]{Standardfehler der Regression}{Quadratwurzel des mittleren quadratischen Fehlers (RMSE)}{}
\eq{\text{RMSE} = \sqrt{\frac{1}{n} \sum_{i=1}^n \left(y_i - \hat{y}_i\right)^2}}
\end{formula}
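% Worked example contrasting the two error metrics: absolute errors $(1, 1, 4)$ give $\text{MAE} = \frac{6}{3} = 2$ and $\text{RMSE} = \sqrt{\frac{18}{3}} \approx 2.45$; the RMSE penalizes large errors more strongly and is never smaller than the MAE.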
\Subsection[
\eng{Regression}
\ger{Regression}
]{reg}
\Subsubsection[
\eng{Linear Regression}
\ger{Lineare Regression}
]{linear}
\begin{formula}{eq}
\desc{Linear regression}{Fits the data under the assumption of \hyperref[f:math:pt:distributions:cont:normal]{normally distributed errors}}{$\mat{x}\in\R^{N\times M}$ input data, $\mat{y}\in\R^{N\times L}$ output data, $\mat{b}$ bias, $\vec{W}$ weights, $N$ samples, $M$ features, $L$ output variables}
\desc[german]{Lineare Regression}{Fittet die Daten unter der Annahme \hyperref[f:math:pt:distributions:cont:normal]{normalverteilter Fehler}}{}
\eq{\mat{y} = \mat{b} + \mat{x} \cdot \vec{W}}
\end{formula}
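% The assumption of normally distributed errors written out (notation as in the formula above, the error term $\mat{\epsilon}$ is introduced here): $\mat{y} = \mat{b} + \mat{x} \cdot \vec{W} + \mat{\epsilon}$ with the entries of $\mat{\epsilon}$ drawn i.i.d.\ from $\mathcal{N}(0, \sigma^2)$; maximizing the likelihood under this model is equivalent to minimizing the squared error.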
\begin{formula}{design_matrix}
\desc{Design matrix}{Prepend a column of ones to the feature matrix\\Useful when the bias $b$ is a scalar}{$x_{ij}$ feature $j$ of sample $i$}
\desc[german]{Designmatrix}{}{}
\eq{
\mat{X} = \begin{pmatrix} 1 & x_{11} & \ldots & x_{1M} \\ \vdots & \vdots & \ddots & \vdots \\ 1 & x_{N1} & \ldots & x_{NM} \end{pmatrix}
}
\end{formula}
\begin{formula}{scalar_bias}
\desc{Linear regression with scalar bias}{Using the design matrix, the scalar bias is absorbed into the weight vector}{$\mat{y}$ output data, $\mat{X}$ \fqEqRef{comp:ml:reg:linear:design_matrix}, $\vec{W}$ weights}
\desc[german]{Lineare Regression mit skalarem Bias}{Durch die Designmatrix wird der Bias in den Gewichtsvektor absorbiert}{}
\eq{\mat{y} = \mat{X} \cdot \vec{W}}
\end{formula}
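% Illustration (single output variable): a design-matrix row $(1, x_1, \ldots, x_M)$ multiplied with $\vec{W} = (b, w_1, \ldots, w_M)^\T$ gives $b + \sum_{j=1}^{M} w_j x_j$, i.e.\ the first weight takes the role of the bias.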
\begin{formula}{normal_equation}
\desc{Normal equation}{Least-squares solution of \fqEqRef{comp:ml:reg:linear:scalar_bias}}{$\mat{y}$ output data, $\mat{X}$ \fqEqRef{comp:ml:reg:linear:design_matrix}, $\vec{W}$ weights}
\desc[german]{Normalengleichung}{Kleinste-Quadrate-Lösung von \fqEqRef{comp:ml:reg:linear:scalar_bias}}{}
\eq{\vec{W} = \left(\mat{X}^\T \mat{X}\right)^{-1} \mat{X}^\T \mat{y}}
\end{formula}
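% Derivation sketch (single output column for readability): minimizing the squared error $(\mat{y} - \mat{X}\vec{W})^\T (\mat{y} - \mat{X}\vec{W})$ and setting its gradient with respect to $\vec{W}$ to zero gives $\mat{X}^\T \mat{X} \vec{W} = \mat{X}^\T \mat{y}$; the inverse exists whenever $\mat{X}$ has full column rank.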
\Subsubsection[
\eng{Ridge regression}
\ger{Ridge Regression}
]{ridge}
\TODO{ridge reg, Kernel ridge reg, gaussian process reg}
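% Sketch toward the TODO above: the standard closed-form ridge solution, written with the same macros (\mat, \vec, \T) and design matrix as the linear-regression formulas; the formula key and the symbols $\lambda$, $\mat{I}$ are placeholders introduced here.
\begin{formula}{eq}
\desc{Ridge regression}{Least squares with an additional L2 penalty $\lambda$ on the squared weights; shrinks the weights and keeps $\mat{X}^\T \mat{X} + \lambda \mat{I}$ invertible}{$\mat{y}$ output data, $\mat{X}$ \fqEqRef{comp:ml:reg:linear:design_matrix}, $\vec{W}$ weights, $\lambda \ge 0$ regularization strength, $\mat{I}$ identity matrix}
\desc[german]{Ridge Regression}{Kleinste Quadrate mit zusätzlichem L2-Strafterm auf den Gewichten}{}
\eq{\vec{W} = \left(\mat{X}^\T \mat{X} + \lambda \mat{I}\right)^{-1} \mat{X}^\T \mat{y}}
\end{formula}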
% \Subsection[
% \eng{Bayesian probability theory}
% % \ger{}
% ]{bayesian}
\Subsection[
\eng{Gradient descent}
\ger{Gradientenverfahren}
]{gd}
\TODO{in lecture 30 CMP}
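% Generic update rule as a placeholder until the lecture content is added; the symbols $\theta$ (parameters), $\eta$ (learning rate) and $L$ (loss) are introduced here and not defined elsewhere in this file.
\begin{formula}{update}
\desc{Gradient descent}{Iteratively step against the gradient of the loss until convergence}{$\theta_t$ parameters after step $t$, $\eta$ learning rate, $L$ loss function}
\desc[german]{Gradientenverfahren}{Iterative Schritte entgegen dem Gradienten der Verlustfunktion}{}
\eq{\theta_{t+1} = \theta_t - \eta \nabla_{\theta} L(\theta_t)}
\end{formula}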