Added the files.

Batuhan Berk Başoğlu 2025-11-13 21:27:33 -05:00
commit 1dc07ec52f
Signed by: batuhan-basoglu
SSH key fingerprint: SHA256:kEsnuHX+qbwhxSAXPUQ4ox535wFHu/hIRaa53FzxRpo
18 changed files with 10203 additions and 0 deletions

.gitattributes vendored Normal file (1 line added)

@@ -0,0 +1 @@
FashionMNIST/fashion-mnist_train.csv filter=lfs diff=lfs merge=lfs -text

.idea/workspace.xml generated Normal file (53 lines added)

@@ -0,0 +1,53 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="AutoImportSettings">
    <option name="autoReloadType" value="SELECTIVE" />
  </component>
  <component name="ChangeListManager">
    <list default="true" id="53d2c8fc-09f6-4596-950a-66eac2662d99" name="Changes" comment="" />
    <option name="SHOW_DIALOG" value="false" />
    <option name="HIGHLIGHT_CONFLICTS" value="true" />
    <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
    <option name="LAST_RESOLUTION" value="IGNORE" />
  </component>
  <component name="Git.Settings">
    <option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
  </component>
  <component name="ProjectColorInfo"><![CDATA[{
  "associatedIndex": 7
}]]></component>
  <component name="ProjectId" id="35RSHS8xmtnia7nWZhfabhC6peP" />
  <component name="ProjectViewState">
    <option name="hideEmptyMiddlePackages" value="true" />
    <option name="showLibraryContents" value="true" />
  </component>
  <component name="PropertiesComponent"><![CDATA[{
  "keyToString": {
    "ModuleVcsDetector.initialDetectionPerformed": "true",
    "Python.Unnamed.executor": "Run",
    "Python.multilayer-perceptron.executor": "Run",
    "RunOnceActivity.ShowReadmeOnStart": "true",
    "RunOnceActivity.TerminalTabsStorage.copyFrom.TerminalArrangementManager.252": "true",
    "RunOnceActivity.git.unshallow": "true",
    "git-widget-placeholder": "master",
    "last_opened_file_path": "/home/arctichawk1/Desktop/Projects/Private/Classification-of-Image-Data-with-MLP-and-CNN"
  }
}]]></component>
  <component name="SharedIndexes">
    <attachedChunks>
      <set>
        <option value="bundled-python-sdk-4e2b1448bda8-9a97661f3031-com.jetbrains.pycharm.pro.sharedIndexes.bundled-PY-252.27397.106" />
      </set>
    </attachedChunks>
  </component>
  <component name="TaskManager">
    <task active="true" id="Default" summary="Default task">
      <changelist id="53d2c8fc-09f6-4596-950a-66eac2662d99" name="Changes" comment="" />
      <created>1763071302191</created>
      <option name="number" value="Default" />
      <option name="presentableId" value="Default" />
      <updated>1763071302191</updated>
    </task>
    <servers />
  </component>
</project>

File diff suppressed because one or more lines are too long

FashionMNIST/fashion-mnist_train.csv Normal file (3 lines added, Git LFS pointer)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6e94982a04469e61eb7dc493ce1f606f8ded0a0b8bae7cb4809bf8701fd94b7f
size 133047193
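The three lines above are the Git LFS pointer stored in the repository in place of the ~133 MB training CSV named in .gitattributes; LFS fetches the real file separately, and a correct checkout must match the recorded SHA-256 digest and byte size. As an illustration only (not part of this commit), a minimal Python sketch for checking a local copy against those two values could look like the following; the function name matches_lfs_pointer and the checked path are assumptions for the example.

import hashlib
import os

# Expected values copied from the LFS pointer above.
EXPECTED_OID = "6e94982a04469e61eb7dc493ce1f606f8ded0a0b8bae7cb4809bf8701fd94b7f"
EXPECTED_SIZE = 133047193

def matches_lfs_pointer(path):
    # True only if the file has the byte size and SHA-256 digest recorded in
    # the pointer, i.e. LFS actually replaced the pointer with the real CSV.
    if os.path.getsize(path) != EXPECTED_SIZE:
        return False
    sha = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            sha.update(chunk)
    return sha.hexdigest() == EXPECTED_OID

# Usage (path as named in .gitattributes):
# print(matches_lfs_pointer("FashionMNIST/fashion-mnist_train.csv"))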

Binary files not shown (12 files).

README.md Normal file (12 lines added)

@@ -0,0 +1,12 @@
# Classification-of-Image-Data-with-MLP-and-CNN
This project implements both a multilayer perceptron (MLP) and a convolutional neural network (CNN) in Python.
The MLP comprises an input layer, one or more hidden layers, and an output layer,
while the CNN features two convolutional layers, a fully connected hidden layer,
and a fully connected output layer.
# Authors
- Batuhan Berk Başoğlu, 260768350 - batuhan-basoglu
- Jared Tritt, 260763506 - Jaredtritt
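
The convolutional network itself does not appear in this commit's diff, so the following is purely an illustrative PyTorch-style sketch of the architecture the README describes (two convolutional layers, then a fully connected hidden layer and a fully connected output layer). The class name CNN, channel counts, kernel sizes, and hidden width are assumptions for the example, not the project's actual values.

import torch
import torch.nn as nn

class CNN(nn.Module):
    # Two conv layers followed by a fully connected hidden layer and a fully
    # connected output layer, as described in the README (sizes are assumed).
    def __init__(self, num_classes=10):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3, padding=1),   # first conv layer, 28x28 -> 28x28
            nn.ReLU(),
            nn.MaxPool2d(2),                               # 28x28 -> 14x14
            nn.Conv2d(16, 32, kernel_size=3, padding=1),   # second conv layer
            nn.ReLU(),
            nn.MaxPool2d(2),                               # 14x14 -> 7x7
        )
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(32 * 7 * 7, 128),                    # fully connected hidden layer
            nn.ReLU(),
            nn.Linear(128, num_classes),                   # fully connected output layer
        )

    def forward(self, x):
        return self.classifier(self.features(x))

# Example: a batch of four 28x28 grayscale images yields four 10-way logit vectors.
# logits = CNN()(torch.randn(4, 1, 28, 28))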

multilayer-perceptron.py Normal file (133 lines added)

@@ -0,0 +1,133 @@
import numpy as np
from torchvision import datasets


class MLP:
    """Multilayer perceptron with two hidden layers, trained with mini-batch
    gradient descent on softmax cross-entropy loss."""

    def __init__(self, input_size, hidden_size1, hidden_size2, output_size, weight_scale):
        # Weights start as scaled standard-normal samples; biases start at zero.
        self.W1 = np.random.randn(input_size, hidden_size1) * weight_scale
        self.b1 = np.zeros((1, hidden_size1))
        self.W2 = np.random.randn(hidden_size1, hidden_size2) * weight_scale
        self.b2 = np.zeros((1, hidden_size2))
        self.W3 = np.random.randn(hidden_size2, output_size) * weight_scale
        self.b3 = np.zeros((1, output_size))

    def forward(self, x):
        # Cache inputs and intermediate activations for the backward pass.
        self.x = x
        self.z1 = x @ self.W1 + self.b1
        self.a1 = self.relu(self.z1)
        self.z2 = self.a1 @ self.W2 + self.b2
        self.a2 = self.relu(self.z2)
        self.z3 = self.a2 @ self.W3 + self.b3
        self.a3 = self.softmax(self.z3)
        return self.a3

    def backward(self, y, lr):
        # Backpropagation; for softmax with cross-entropy, the gradient at the
        # output pre-activations is (predicted probabilities - one-hot targets).
        m = y.shape[0]
        y_one_hot = self.one_hot_encode(y, self.W3.shape[1])
        dz3 = self.a3 - y_one_hot
        dw3 = (self.a2.T @ dz3) / m
        db3 = np.sum(dz3, axis=0, keepdims=True) / m

        dz2 = (dz3 @ self.W3.T) * self.relu_deriv(self.z2)
        dw2 = (self.a1.T @ dz2) / m
        db2 = np.sum(dz2, axis=0, keepdims=True) / m

        dz1 = (dz2 @ self.W2.T) * self.relu_deriv(self.z1)
        dw1 = (self.x.T @ dz1) / m
        db1 = np.sum(dz1, axis=0, keepdims=True) / m

        # Plain gradient-descent parameter update.
        self.W3 -= lr * dw3
        self.b3 -= lr * db3
        self.W2 -= lr * dw2
        self.b2 -= lr * db2
        self.W1 -= lr * dw1
        self.b1 -= lr * db1

    @staticmethod
    def relu(x):
        return np.maximum(0, x)

    @staticmethod
    def relu_deriv(x):
        return (x > 0).astype(float)

    @staticmethod
    def softmax(x):
        # Subtract the row-wise max before exponentiating for numerical stability.
        e_x = np.exp(x - np.max(x, axis=1, keepdims=True))
        return e_x / np.sum(e_x, axis=1, keepdims=True)

    @staticmethod
    def one_hot_encode(y, num_classes):
        return np.eye(num_classes)[y]

    @staticmethod
    def cross_entropy_loss(y, y_hat):
        # Mean negative log-likelihood of the true class; clip to avoid log(0).
        m = y.shape[0]
        eps = 1e-12
        y_hat_clipped = np.clip(y_hat, eps, 1. - eps)
        log_probs = -np.log(y_hat_clipped[np.arange(m), y])
        return np.mean(log_probs)

    def train_model(self, x_train, y_train, x_val, y_val, lr, epochs, batch_size):
        for epoch in range(1, epochs + 1):
            # Reshuffle the training set each epoch and iterate over mini-batches.
            perm = np.random.permutation(x_train.shape[0])
            x_train_shuffled, y_train_shuffled = x_train[perm], y_train[perm]
            epoch_loss = 0.0
            num_batches = int(np.ceil(x_train.shape[0] / batch_size))
            for i in range(num_batches):
                start = i * batch_size
                end = start + batch_size
                x_batch = x_train_shuffled[start:end]
                y_batch = y_train_shuffled[start:end]
                self.forward(x_batch)
                self.backward(y_batch, lr)
                epoch_loss += self.cross_entropy_loss(y_batch, self.a3)
            epoch_loss /= num_batches
            val_pred = self.predict(x_val)
            val_acc = np.mean(val_pred == y_val)
            print(f"Epoch {epoch:02d} | Training Loss: {epoch_loss:.4f} | Validation Accuracy: {val_acc:.4f}")
        return val_acc

    def predict(self, x):
        probs = self.forward(x)
        return np.argmax(probs, axis=1)


# Load Fashion-MNIST, flatten each 28x28 image, and scale pixels to [0, 1].
train_set = datasets.FashionMNIST(root='.', train=True, download=True)
test_set = datasets.FashionMNIST(root='.', train=False, download=True)
x_train = train_set.data.numpy().reshape(-1, 28 * 28).astype(np.float32) / 255.0
y_train = train_set.targets.numpy()
x_test = test_set.data.numpy().reshape(-1, 28 * 28).astype(np.float32) / 255.0
y_test = test_set.targets.numpy()

mlp = MLP(
    input_size=28 * 28,
    hidden_size1=128,
    hidden_size2=64,
    output_size=10,
    weight_scale=1e-2
)
mlp.train_model(
    x_train=x_train,
    y_train=y_train,
    x_val=x_test,   # the held-out test split doubles as the validation set here
    y_val=y_test,
    lr=1e-2,
    epochs=10,
    batch_size=128
)
test_pred = mlp.predict(x_test)
test_acc = np.mean(test_pred == y_test)
print(f"\nFinal test accuracy: {test_acc:.4f}")