# K Nearest Neighbor

K nearest neighbor (KNN) classifies a sample by finding the k closest training points and taking a majority vote over their labels. The implementation below builds this from scratch with NumPy.
```python
import numpy as np
from collections import Counter
from sklearn import datasets
from sklearn.model_selection import train_test_split


def euclidean_distance(x1, x2):
    return np.sqrt(np.sum((x1 - x2) ** 2))
```
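For reference, `euclidean_distance` computes the standard Euclidean distance between two feature vectors:

$$ d(\mathbf{x}_1, \mathbf{x}_2) = \sqrt{\sum_{j=1}^{n} (x_{1j} - x_{2j})^2} $$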
```python
class KNN:
    def __init__(self, k=3):
        self.k = k

    def fit(self, X, y):
        # KNN is a lazy learner: fitting just stores the training data.
        self.X_train = X
        self.y_train = y

    def predict(self, X):
        predicted_labels = [self._predict(x) for x in X]
        return np.array(predicted_labels)  # Convert the list into a NumPy array

    def _predict(self, x):
        # Compute the distance from x to every training sample
        distances = [euclidean_distance(x, x_train) for x_train in self.X_train]
        # Get the k nearest neighbors by sorting the distances and taking their indices
        k_indices = np.argsort(distances)[:self.k]
        # Look up the labels of those k nearest neighbors
        k_nearest_labels = [self.y_train[i] for i in k_indices]
        # Majority vote: most_common(1) returns a list containing one
        # (label, count) tuple, so [0][0] extracts the label itself.
        most_common = Counter(k_nearest_labels).most_common(1)
        return most_common[0][0]
```
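To make the double indexing in `_predict` concrete, here is a minimal sketch (with made-up neighbor labels, not from the notebook) of what `Counter.most_common(1)` returns:

```python
from collections import Counter

# Hypothetical labels of the k=5 nearest neighbors
k_nearest_labels = [0, 1, 1, 2, 1]
most_common = Counter(k_nearest_labels).most_common(1)
print(most_common)        # [(1, 3)] -> label 1 appears 3 times
print(most_common[0][0])  # 1, the predicted class
```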
```python
iris = datasets.load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)

clf = KNN(k=5)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)

print(type(predictions))
acc = np.sum(predictions == y_test) / len(y_test)
print(acc)
```
```
<class 'numpy.ndarray'>
0.9666666666666667
```
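As a sanity check (an addition, not part of the original notebook), the same split can be scored with scikit-learn's built-in `KNeighborsClassifier`. With `n_neighbors=5` it should produce comparable accuracy, though tie-breaking among equidistant neighbors may differ slightly:

```python
from sklearn.neighbors import KNeighborsClassifier

# Reference implementation with the same k; uses Euclidean distance by default
sk_clf = KNeighborsClassifier(n_neighbors=5)
sk_clf.fit(X_train, y_train)
print(sk_clf.score(X_test, y_test))  # mean accuracy on the test set
```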