diff --git a/notebooks/regression/Decision_tree_regression(Small Dataset).ipynb b/notebooks/regression/Decision_tree_regression(Small Dataset).ipynb
index af4c0558ad90b89aa55bac3da87b4ac402574434..46ba9c63d870b9bde145c5b6f62b1e227367bfcb 100644
--- a/notebooks/regression/Decision_tree_regression(Small Dataset).ipynb
+++ b/notebooks/regression/Decision_tree_regression(Small Dataset).ipynb
@@ -1407,7 +1407,7 @@
    "id": "816b4ae7",
    "metadata": {},
    "source": [
-    "- With these parameters, the model's performance improved significantly. This means the model now explains over 44% of the variance in productivity, more than twice as much as the default model. The improvements can be attributed to regularization, better choice of splitting criteria (poisson), and smarter structural constraints (e.g., limiting depth and number of leaves). The model is now better at generalizing to unseen data, and errors have decreased both in absolute (MAE) and squared (RMSE) terms."
+    "- With these parameters, the model's performance improved significantly: it now explains over 44% of the variance in productivity, more than twice as much as the default model. The improvements can be attributed to regularization, a better choice of splitting criterion (poisson), and smarter structural constraints (e.g., limiting depth and the number of leaves). The model generalizes better to unseen data, and errors have decreased both in absolute (MAE) and squared (MSE) terms."
    ]
   },
   {
@@ -1450,8 +1450,7 @@
     "plt.title('Tuned Decision Tree Regressor Metrics')\n",
     "plt.ylabel('Score')\n",
     "plt.grid(axis='y', linestyle='--', alpha=0.5)\n",
-    "plt.tight_layout()\n",
-    "plt.show()\n"
+    "plt.tight_layout()"
    ]
   },
   {
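
For context on the first hunk: the RMSE → MSE wording fix is correct, since MSE, not RMSE, is the error expressed in squared units. The markdown cell it touches summarizes a tuned `DecisionTreeRegressor`; a minimal sketch of that kind of configuration follows, assuming scikit-learn (>= 0.24, where the `"poisson"` criterion exists). The hyperparameter values and the synthetic data below are illustrative placeholders, not the notebook's actual settings, since those are not part of this diff.

```python
# Sketch of a tuned DecisionTreeRegressor as described in the markdown cell:
# poisson splitting criterion, capped depth and leaf count, regularized leaves.
# All numeric values here are placeholders chosen for illustration.
from sklearn.datasets import make_regression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor

# Synthetic stand-in for the notebook's productivity data.
X, y = make_regression(n_samples=500, n_features=8, noise=10.0, random_state=42)
y = y - y.min() + 1.0  # the "poisson" criterion requires non-negative targets

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

tree = DecisionTreeRegressor(
    criterion="poisson",   # splitting criterion named in the text
    max_depth=6,           # structural constraint: cap tree depth
    max_leaf_nodes=20,     # structural constraint: cap the number of leaves
    min_samples_leaf=5,    # regularization: forbid tiny, noisy leaves
    random_state=42,
)
tree.fit(X_train, y_train)

pred = tree.predict(X_test)
print(f"MAE: {mean_absolute_error(y_test, pred):.3f}")
print(f"MSE: {mean_squared_error(y_test, pred):.3f}")
print(f"R2:  {r2_score(y_test, pred):.3f}")
```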
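On the second hunk: dropping the trailing `plt.show()` is safe in a notebook, because the inline matplotlib backend renders the current figure automatically when the cell finishes executing, so `plt.tight_layout()` can be the cell's last call without losing the plot.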