Contents:
1 Intro to similarity functions
  1.1 Data similarities and the dot product
  1.2 Data similarity and kernels
2 $k$ Nearest Neighbors
  Figure 1: kNN classification. For $k=1$ the prediction is the majority of the single nearest point (red); for $k=3$ the majority is still red; for $k=5$ the majority is blue.
  2.1 Using a kernel as the distance function
3 kNN visualization of the decision boundary
  Figure 2: 1-NN classification (left), 7-NN classification (middle), and 7-NN regression (right).
4 kNN in a window/range (not a fixed $k$)
5 kNN for feature selection
6 Kernel density estimation / Parzen windows
7 Local classification based on the heat equation
  Figure 3: A graph view of the kernel/similarity. The nodes are the datapoints (exemplified by digits); the edges are similarity/kernel values $k_{ij}$; missing edges mean $k_{ij}=0$. Labeled (training) points have their label in blue text.