\documentclass[11pt]{article}
\usepackage{fontspec} % loaded by polyglossia, but included here for transparency
\usepackage{polyglossia}
\setmainlanguage{russian}
\setotherlanguage{english}
\newfontfamily{\cyrillicfont}{Times New Roman}
\usepackage{mathtools}
\topmargin -1.5cm
\oddsidemargin -0.04cm
\evensidemargin -0.04cm
\textwidth 16.59cm
\textheight 21.94cm
\begin{document}
\textbf{ICA} \\
$X: n \times d$ (rows are observations) \\
$X = UDV^{\top}$ (SVD) \\
$S = \sqrt{n}\,U$ \\
$A^{\top} = \frac{1}{\sqrt{n}}DV^{\top}$ \\
$X = SA^{\top}$ \\
For any orthogonal $R$: $X = SR^{\top}RA^{\top} = S^{*}A^{*\top}$, \\
so $S$ and $A$ are identifiable only up to a rotation. \\
Assume $x$ is centered and whitened: $\bar{x} = 0$, $\operatorname{cov}(x) = I$. \\
$S$ independent $\rightarrow \operatorname{cov}(S) = I$, hence $A$ is orthogonal. \\
Task: find the rotation $A$ such that the components of $S = XA$ are maximally independent. \\
$H(y) = -\int g(y) \log g(y)\, dy$ (entropy) \\
$I(y) = \sum_{j=1}^{d} H(y_j) - H(x) - \log \lvert \det A \rvert$ (mutual information; the last term vanishes for orthogonal $A$) \\
$\min_{A} I(y)$ \\
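A minimal \texttt{numpy} sketch of the whitening step above; the data generation and variable names are illustrative assumptions, not part of the notes:
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n, d = 1000, 3
X = rng.laplace(size=(n, d)) @ rng.normal(size=(d, d))  # mixed non-Gaussian sources
X = X - X.mean(axis=0)                                  # center: mean(x) = 0

U, D, Vt = np.linalg.svd(X, full_matrices=False)
S = np.sqrt(n) * U                     # whitened sources: cov(S) = I
At = (D[:, None] * Vt) / np.sqrt(n)    # A^T = (1/sqrt(n)) D V^T
assert np.allclose(X, S @ At)          # X = S A^T
# S is determined only up to a rotation R; ICA chooses R
# by minimizing the mutual information I(y).
\end{verbatim}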
\textbf{Linear discriminant analysis} \\
1) Classes $k = 1, \dots, K$ with priors $\pi_k$ and class densities $f_k(x)$. \\
Classify by $\max_{k} \Pr(C=k \mid X=x)$, where $\Pr(C=k \mid X=x) = \frac{\pi_k f_k(x)}{\sum_{l} \pi_l f_l(x)}$ (Bayes rule) \\
$\mu_k$ - mean of class $k$ \\
$\Sigma_k$ - covariance matrix of class $k$ \\
$f_k(x)=\frac{1}{(2\pi)^{\frac{d}{2}} \lvert \Sigma_k \rvert ^{\frac{1}{2}}} e^{-\frac{1}{2}(x - \mu_k)^{\top} \Sigma_k^{-1}(x- \mu_k)}$ \\
LDA assumes a shared covariance $\Sigma_k = \Sigma$; then \\
$\log \frac{\Pr(C=k\mid X=x)}{\Pr(C=l\mid X=x)} = \log \frac{\pi_k}{\pi_l} - \frac{1}{2}(\mu_k + \mu_l)^{\top}\Sigma^{-1}(\mu_k - \mu_l) + x^{\top}\Sigma^{-1}(\mu_k - \mu_l)$ \\
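With a shared $\Sigma$, the quadratic terms $-\frac{1}{2}x^{\top}\Sigma^{-1}x$ from $\log f_k$ and $\log f_l$ cancel in the ratio, which is why the log-odds above are affine in $x$ and the decision boundaries are hyperplanes. \\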
2) Plug-in estimates: \\
$\hat{\pi}_k = \frac{n_k}{n}$, where $n_k = \sum_{i=1}^n I(c_i = k)$ \\
$\hat{\mu}_k = \frac{1}{n_k}\sum_{i=1}^n I(c_i = k)\, x_i$ \\
$\hat{\Sigma}=\frac{1}{n} \sum_{k} \sum_{i=1}^n I(c_i = k)(x_i-\hat{\mu}_k)^{\top}(x_i-\hat{\mu}_k)$ (rows $x_i$) \\
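A \texttt{numpy} sketch of these estimates; the interface (\texttt{X} is the $n \times d$ data matrix, \texttt{y} holds labels $0, \dots, K-1$) is an assumption for illustration:
\begin{verbatim}
import numpy as np

def lda_estimates(X, y, K):
    n, d = X.shape
    pi = np.array([(y == k).mean() for k in range(K)])         # pi_k = n_k / n
    mu = np.array([X[y == k].mean(axis=0) for k in range(K)])  # class means
    Sigma = np.zeros((d, d))
    for k in range(K):                        # pooled within-class covariance
        Xc = X[y == k] - mu[k]
        Sigma += Xc.T @ Xc
    return pi, mu, Sigma / n                  # 1/n normalization, as above
\end{verbatim}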
$W = \sum_{k} \sum_{i=1}^n I(c_i = k)(x_i-\hat{\mu}_k)^{\top}(x_i-\hat{\mu}_k)$ (within-class scatter) \\
$M =
\begin{pmatrix}
\hat{\mu}_1 \\
\vdots \\
\hat{\mu}_K
\end{pmatrix}$ (class means as rows) \\
$\bar{M} = \frac{1}{K}\sum_{k} \hat{\mu}_k$ \\
$B = (M-\bar{M})^{\top}(M-\bar{M})$ (between-class scatter; $\bar{M}$ is subtracted from every row of $M$) \\
$\max_{a} \frac{a^{\top}Ba}{a^{\top}Wa}$ - Fisher's discriminant criterion (a Rayleigh quotient) \\
Classification: assign $x$ to the nearest centroid, $\min_k (x - \hat{\mu}_k)^{\top}(x - \hat{\mu}_k)$, computed in the discriminant (sphered) coordinates \\
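The criterion $\max_a \frac{a^{\top}Ba}{a^{\top}Wa}$ is equivalent to the generalized eigenproblem $Ba = \lambda Wa$. A \texttt{scipy} sketch, assuming $W$ is nonsingular (enough samples per class); function and variable names are illustrative:
\begin{verbatim}
import numpy as np
from scipy.linalg import eigh

def fisher_directions(X, y, K):
    d = X.shape[1]
    mu = np.array([X[y == k].mean(axis=0) for k in range(K)])
    W = np.zeros((d, d))
    for k in range(K):                  # within-class scatter
        Xc = X[y == k] - mu[k]
        W += Xc.T @ Xc
    Mc = mu - mu.mean(axis=0)           # centered class means
    B = Mc.T @ Mc                       # between-class scatter
    vals, vecs = eigh(B, W)             # solves B a = lambda W a
    return vecs[:, ::-1]                # largest Rayleigh quotient first
\end{verbatim}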
\textbf{GSVD} \\
Decompose the factors of $B$ and $W$ with a shared right factor $V$: \\
$M - \bar{M} = U_B D_B V^{\top}$ \\
$X_W = U_W D_W V^{\top}$, where $X_W$ has rows $x_i - \hat{\mu}_{c_i}$ (so $W = X_W^{\top} X_W$) \\
Substituting $b = V^{\top} a$ and using $U_B^{\top} U_B = I$: \\
$a^{\top}Ba = a^{\top}V D_B^{\top} U_B^{\top} U_B D_B V^{\top} a = b^{\top} D_B^{\top} D_B b$ \\
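Likewise $a^{\top}Wa = b^{\top} D_W^{\top} D_W b$, so in the $b$-coordinates the Fisher criterion becomes a ratio of diagonal quadratic forms: \\
$\max_{b} \frac{b^{\top} D_B^{\top} D_B b}{b^{\top} D_W^{\top} D_W b}$ \\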
\textbf{Common Spatial Patterns} \\
$X: n \times c \times t$; $y: n \times 1$ \\
$n$ - experiments (trials); $c$ - channels; $t$ - time samples \\
$K = 2$ \\
$P^{(k)} = \frac{1}{n_k} \sum_{i:\, y_i = k} X(i, \cdot, \cdot)$, \quad $P^{(k)}: c \times t$ \\
$\max_{\omega} \frac{\omega P^{(1)}P^{(1)\top}\omega^{\top}}{\omega\left(P^{(1)}P^{(1)\top} + P^{(2)}P^{(2)\top}\right)\omega^{\top}}$ \\
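A sketch of the CSP filters via the same generalized eigenproblem; it assumes \texttt{X} stacks the trials as an $(n, c, t)$ array and \texttt{y} holds the labels $1$ or $2$, and follows the averaged-pattern formula above:
\begin{verbatim}
import numpy as np
from scipy.linalg import eigh

def csp_filters(X, y):
    P1 = X[y == 1].mean(axis=0)      # c x t average pattern, class 1
    P2 = X[y == 2].mean(axis=0)      # c x t average pattern, class 2
    C1 = P1 @ P1.T                   # c x c scatter of class-1 pattern
    C2 = P2 @ P2.T
    vals, Wf = eigh(C1, C1 + C2)     # max of w C1 w^T / (w (C1+C2) w^T)
    return Wf.T[::-1]                # filters as rows, most class-1 first
\end{verbatim}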
\end{document}