This commit is contained in:
Joachim Bach
2025-10-04 11:20:37 +02:00
parent a61e837b26
commit aeb97473ea


@@ -18,7 +18,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 37,
"id": "9421a4e1",
"metadata": {},
"outputs": [],
@@ -45,7 +45,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 38,
"id": "ecd4a4cf",
"metadata": {},
"outputs": [],
@@ -63,7 +63,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 39,
"id": "623096a5",
"metadata": {},
"outputs": [],
@@ -81,7 +81,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 40,
"id": "c59a1651",
"metadata": {},
"outputs": [
@@ -210,7 +210,7 @@
"4 3.070920e-08 2.346150e-04 9.748010e-07 1.071610e-06 0.000831 4 "
]
},
"execution_count": 4,
"execution_count": 40,
"metadata": {},
"output_type": "execute_result"
}
@@ -229,7 +229,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 41,
"id": "fd0adce4",
"metadata": {},
"outputs": [],
@@ -256,27 +256,31 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 42,
"id": "3c36b377",
"metadata": {},
"outputs": [],
"source": [
"def bayes_classification(df):\n",
" \"\"\"\n",
" Take classification decisions according to Bayes rule.\n",
" \n",
" Parameters\n",
" ----------\n",
" df : Pandas DataFrame of shape (n_samples, n_features + ground truth)\n",
" Dataset.\n",
" \n",
" Returns\n",
" -------\n",
" preds : Numpy array of shape (n_samples,)\n",
" Class labels for each data sample.\n",
" \"\"\"\n",
" # Your code here\n",
" pass"
" \"\"\"\n",
" Take classification decisions according to Bayes rule.\n",
"\n",
" Parameters\n",
" ----------\n",
" df : Pandas DataFrame of shape (n_samples, n_features + ground truth)\n",
" Dataset.\n",
"\n",
" Returns\n",
" -------\n",
" preds : Numpy array of shape (n_samples,)\n",
" Class labels for each data sample.\n",
" \"\"\"\n",
" y_pred = []\n",
" for i in range(df.shape[0]):\n",
" index = np.argmax(df.iloc[i,:10]) # take all the line except the y value\n",
" y_pred.append(index)\n",
" \n",
" return y_pred\n"
]
},
{
@@ -289,12 +293,29 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 43,
"id": "f3b21bfb",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Error rate = 0.10729999999999995\n"
]
}
],
"source": [
"# Your code here: compute and print the error rate of the system"
"# Your code here: compute and print the error rate of the system\n",
"y_pred_a = bayes_classification(dataset_a)\n",
"\n",
"correct = 0\n",
"for i in range(0, len(y_pred_a)):\n",
" if(dataset_a.iloc[i,10] == y_pred_a[i]):\n",
" correct += 1\n",
"\n",
"success = correct/len(y_pred_a)\n",
"print(f\"Error rate = {1-success}\")"
]
},
{
@@ -307,7 +328,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 44,
"id": "bb106415",
"metadata": {},
"outputs": [],
@@ -330,20 +351,83 @@
" cm : Numpy array of shape (n_classes, n_classes)\n",
" Confusion matrix.\n",
" \"\"\"\n",
" # Your code here\n",
" pass"
" matrix = np.zeros((n_classes, n_classes))\n",
"\n",
" for i in range(0, len(y_pred)):\n",
" matrix[y_true[i], y_pred[i]] += 1 \n",
"\n",
" return matrix"
]
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 45,
"id": "1b38e3a8",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" 0 1 2 3 4 5 6 7 8 9\n",
" 0 | 944 0 11 0 0 2 10 7 5 1\n",
" 1 | 0 1112 2 3 1 4 3 1 9 0\n",
" 2 | 10 6 921 12 15 3 19 15 26 5\n",
"t 3 | 1 1 31 862 2 72 5 14 12 10\n",
"r 4 | 2 3 6 2 910 1 12 6 4 36\n",
"u 5 | 12 3 6 29 19 768 19 9 21 6\n",
"e 6 | 14 3 21 2 22 28 865 0 3 0\n",
" 7 | 0 14 30 9 7 2 1 929 3 33\n",
" 8 | 12 16 18 26 24 46 22 19 772 19\n",
" 9 | 10 4 6 22 53 18 0 48 4 844\n",
" predicted \n"
]
}
],
"source": [
"# Your code here: compute and print the confusion matrix"
"# Your code here: compute and print the confusion matrix\n",
"\n",
"cm_a = confusion_matrix(dataset_a.iloc[:,10], y_pred_a, nb_classes)\n",
"\n",
"#headers\n",
"print(\" \", end=\"\")\n",
"for j in range(nb_classes):\n",
" print(f\"{j:5d}\", end=\"\")\n",
"print()\n",
"\n",
"#rows\n",
"for i in range(nb_classes):\n",
" match i:\n",
" case 3:\n",
" print(\"t\", end=\"\")\n",
" case 4:\n",
" print(\"r\", end=\"\")\n",
" case 5:\n",
" print(\"u\", end=\"\")\n",
" case 6:\n",
" print(\"e\", end=\"\")\n",
" case _:\n",
" print(\" \", end=\"\")\n",
"\n",
" print(f\"{i:3d} |\", end=\"\")\n",
" for j in range(nb_classes):\n",
" print(f\"{int(cm_a[i, j]):5d}\", end=\"\")\n",
"\n",
" print()\n",
"\n",
"\n",
"print(\" predicted \")\n",
"# print(cm.astype(int))"
]
},
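{
"cell_type": "code",
"execution_count": null,
"id": "0cf5380f",
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check (a sketch, assuming scikit-learn is installed in this\n",
"# environment): sklearn's metrics should agree with the hand-rolled\n",
"# confusion_matrix above and with system_accuracy / system_f1_score defined below.\n",
"# Uses dataset_a, y_pred_a and cm_a from the previous cells.\n",
"from sklearn.metrics import confusion_matrix as sk_confusion_matrix, accuracy_score, f1_score\n",
"\n",
"y_true_a = dataset_a.iloc[:, 10].to_numpy()\n",
"assert (sk_confusion_matrix(y_true_a, y_pred_a) == cm_a.astype(int)).all()\n",
"print(\"sklearn accuracy:\", accuracy_score(y_true_a, y_pred_a))\n",
"print(\"sklearn macro F1:\", f1_score(y_true_a, y_pred_a, average=\"macro\"))"
]
},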
{
"cell_type": "markdown",
"id": "ed8db908",
@@ -354,7 +438,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 46,
"id": "0e229ce0",
"metadata": {},
"outputs": [],
@@ -373,13 +457,24 @@
" precisions : Numpy array of shape (n_classes,)\n",
" Precision per class.\n",
" \"\"\"\n",
" # Your code here\n",
" pass"
" rates = []\n",
" for i in range(cm.shape[1]):\n",
" correct = cm[i,i]\n",
" incorrect = 0\n",
" for j in range(cm.shape[0]):\n",
" if i != j:\n",
" incorrect += cm[j,i]\n",
"\n",
" rates.append(correct/(correct+incorrect))\n",
"\n",
" return rates\n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 47,
"id": "95325772",
"metadata": {},
"outputs": [],
@@ -398,28 +493,96 @@
" recalls : Numpy array of shape (n_classes,)\n",
" Recall per class.\n",
" \"\"\"\n",
" # Your code here\n",
" pass"
" rates = []\n",
" for i in range(cm.shape[0]):\n",
" correct = cm[i,i]\n",
" incorrect = 0\n",
" for j in range(cm.shape[1]):\n",
" if i != j:\n",
" incorrect += cm[i,j]\n",
"\n",
" rates.append(correct/(correct+incorrect))\n",
"\n",
" return rates"
]
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 48,
"id": "a0fb19e3",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Class 0, precision = 0.9393034825870646\n",
"Class 1, precision = 0.9569707401032702\n",
"Class 2, precision = 0.8754752851711026\n",
"Class 3, precision = 0.8914167528438469\n",
"Class 4, precision = 0.8641975308641975\n",
"Class 5, precision = 0.8135593220338984\n",
"Class 6, precision = 0.9048117154811716\n",
"Class 7, precision = 0.8864503816793893\n",
"Class 8, precision = 0.8987194412107101\n",
"Class 9, precision = 0.8846960167714885\n",
"\n",
"Best = class 1, 0.9569707401032702\n",
"Worst = class 5, 0.8135593220338984\n"
]
}
],
"source": [
"# Your code here: find and print the worst and best classes in terms of precision"
"# Your code here: find and print the worst and best classes in terms of precision\n",
"precision_a = precision_per_class(cm_a)\n",
"\n",
"for i in range(len(precision_a)):\n",
" print(f\"Class {i}, precision = {precision_a[i]}\")\n",
"\n",
"print(\"\")\n",
"\n",
"print(f\"Best = class {np.argmax(precision_a)}, {precision_a[np.argmax(precision_a)]}\")\n",
"print(f\"Worst = class {np.argmin(precision_a)}, {precision_a[np.argmin(precision_a)]}\")\n"
]
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 49,
"id": "42c3edd8",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Class 0, recall = 0.963265306122449\n",
"Class 1, recall = 0.9797356828193833\n",
"Class 2, recall = 0.8924418604651163\n",
"Class 3, recall = 0.8534653465346534\n",
"Class 4, recall = 0.9266802443991853\n",
"Class 5, recall = 0.8609865470852018\n",
"Class 6, recall = 0.9029227557411273\n",
"Class 7, recall = 0.9036964980544747\n",
"Class 8, recall = 0.7926078028747433\n",
"Class 9, recall = 0.8364717542120912\n",
"\n",
"Best = class 1, 0.9797356828193833\n",
"Worst = class 8, 0.7926078028747433\n"
]
}
],
"source": [
"# Your code here: find and print the worst and best classes in terms of recall"
"# Your code here: find and print the worst and best classes in terms of recall\n",
"\n",
"recall_a = recall_per_class(cm_a)\n",
"\n",
"for i in range(len(recall_a)):\n",
" print(f\"Class {i}, recall = {recall_a[i]}\")\n",
"\n",
"print(\"\")\n",
"\n",
"print(f\"Best = class {np.argmax(recall_a)}, {recall_a[np.argmax(recall_a)]}\")\n",
"print(f\"Worst = class {np.argmin(recall_a)}, {recall_a[np.argmin(recall_a)]}\")\n"
]
},
{
@@ -432,17 +595,19 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 50,
"id": "b98c2545",
"metadata": {},
"outputs": [],
"source": [
"# Your code here: load the data of the system B"
"# Your code here: load the data of the system B\n",
"path = \"ex2-system-b.csv\"\n",
"dataset_b = pd.read_csv(path, sep=\";\", index_col=False, names=[\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"y_true\"])\n"
]
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 51,
"id": "050091b9",
"metadata": {},
"outputs": [],
@@ -461,13 +626,18 @@
" accuracy : Float\n",
" Accuracy of the system.\n",
" \"\"\"\n",
" # Your code here\n",
" pass"
"\n",
" diag = 0\n",
" for i in range(cm.shape[0]):\n",
" diag += cm[i,i]\n",
"\n",
" acc = diag / np.sum(cm)\n",
" return acc"
]
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 52,
"id": "adc0f138",
"metadata": {},
"outputs": [],
@@ -486,34 +656,74 @@
" f1_score : Float\n",
" F1 score of the system.\n",
" \"\"\"\n",
" # Your code here\n",
" pass"
"\n",
" f1 = []\n",
" precision = precision_per_class(cm)\n",
" recall = recall_per_class(cm)\n",
"\n",
" for i in range(0, len(precision)):\n",
" f1.append(2*((precision[i] * recall[i])/(precision[i] + recall[i])))\n",
" return np.sum(f1)/len(f1)\n"
]
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 53,
"id": "f1385c87",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"System A accuracy = 0.8927\n",
"System A f1 = 0.8907308492877297\n"
]
}
],
"source": [
"# Your code here: compute and print the accuracy and the F1 score of the system A"
"# Your code here: compute and print the accuracy and the F1 score of the system A\n",
"\n",
"acc_a = system_accuracy(cm_a)\n",
"print(f\"System A accuracy = {acc_a}\")\n",
"\n",
"f1_a = system_f1_score(cm_a)\n",
"\n",
"print(f\"System A f1 = {f1_a}\")"
]
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 54,
"id": "50c64d08",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"System A accuracy = 0.9613\n",
"System A f1 = 0.9608568150389065\n"
]
}
],
"source": [
"# Your code here: compute and print the accuracy and the F1 score of the system B"
"# Your code here: compute and print the accuracy and the F1 score of the system B\n",
"y_pred_b = bayes_classification(dataset_b)\n",
"cm_b = confusion_matrix(dataset_b.iloc[:,10], y_pred_b, nb_classes)\n",
"\n",
"acc_b = system_accuracy(cm_b)\n",
"print(f\"System A accuracy = {acc_b}\")\n",
"\n",
"f1_b = system_f1_score(cm_b)\n",
"\n",
"print(f\"System A f1 = {f1_b}\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": ".venv",
"language": "python",
"name": "python3"
},
@@ -527,7 +737,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.11"
"version": "3.12.3"
}
},
"nbformat": 4,