diff --git a/cnnBase.ipynb b/cnnBase.ipynb index d46ccde1e2381736dc934292c623447f2fad8c17..a2340de81fc6b36473619a4e61c26bdf25eed2c3 100644 --- a/cnnBase.ipynb +++ b/cnnBase.ipynb @@ -456,133 +456,20 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Validation loss decreased (inf --> 2.069341). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.069341246117937, Validation Acc: 0.326, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.081061). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.081061193283568, Validation Acc: 0.3, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.077981). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.077980510731961, Validation Acc: 0.30933333333333335, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.057297). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.057297257666892, Validation Acc: 0.29133333333333333, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.058545). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.05854465352728, Validation Acc: 0.30266666666666664, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.058182). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.05818188444097, Validation Acc: 0.2866666666666667, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.077766). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.07776573110134, Validation Acc: 0.298, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.057639). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.0576388835906982, Validation Acc: 0.29533333333333334, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.081729). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.0817288033505705, Validation Acc: 0.30666666666666664, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.092310). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.0923096575635545, Validation Acc: 0.31733333333333336, Learning Rate: 0.001\n", - "Finished Training\n", - "Generation 1/50\n", - "Validation loss decreased (inf --> 2.078669). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.0786693324433996, Validation Acc: 0.332, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.064316). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.064316229617342, Validation Acc: 0.2926666666666667, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.081282). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.081282280861063, Validation Acc: 0.312, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.080946). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.0809461157372655, Validation Acc: 0.30466666666666664, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.072785). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.072785425693431, Validation Acc: 0.29733333333333334, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.085408). 
Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.085408312209109, Validation Acc: 0.2813333333333333, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.073224). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.0732235249052655, Validation Acc: 0.288, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.054482). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.054482099857736, Validation Acc: 0.32066666666666666, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.090219). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.0902189295342626, Validation Acc: 0.29533333333333334, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.097155). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.0971552590106395, Validation Acc: 0.312, Learning Rate: 0.001\n", - "Finished Training\n", - "Generation 2/50\n", - "Validation loss decreased (inf --> 2.092001). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.0920009359400322, Validation Acc: 0.2926666666666667, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.073072). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.073072228025883, Validation Acc: 0.28933333333333333, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.114132). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.114131823499152, Validation Acc: 0.264, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.084767). Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.0847667886855756, Validation Acc: 0.33666666666666667, Learning Rate: 0.001\n", - "Finished Training\n", - "Validation loss decreased (inf --> 2.077225). 
Saving model ...\n", - "Epoch 1/ 1, Validation Loss: 2.077225426410107, Validation Acc: 0.30533333333333335, Learning Rate: 0.001\n", - "Finished Training\n" - ] - }, - { - "ename": "KeyboardInterrupt", - "evalue": "", - "output_type": "error", - "traceback": [ - "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", - "\u001b[1;32mc:\\Users\\Main\\Documents\\com3013-comp-intelligence\\cnnBase.ipynb Cell 11\u001b[0m line \u001b[0;36m1\n\u001b[0;32m <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=135'>136</a>\u001b[0m \u001b[39m# Print the best individual (weights)\u001b[39;00m\n\u001b[0;32m <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=136'>137</a>\u001b[0m \u001b[39mprint\u001b[39m(\u001b[39m\"\u001b[39m\u001b[39mBest Individual (Weights):\u001b[39m\u001b[39m\"\u001b[39m, best_ind)\n\u001b[1;32m--> <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=138'>139</a>\u001b[0m DEAPStuff()\n", - "\u001b[1;32mc:\\Users\\Main\\Documents\\com3013-comp-intelligence\\cnnBase.ipynb Cell 11\u001b[0m line \u001b[0;36m1\n\u001b[0;32m <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=120'>121</a>\u001b[0m \u001b[39mdel\u001b[39;00m mutant\u001b[39m.\u001b[39mfitness\u001b[39m.\u001b[39mvalues\n\u001b[0;32m <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=122'>123</a>\u001b[0m \u001b[39m# Evaluate the offspring\u001b[39;00m\n\u001b[1;32m--> <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=123'>124</a>\u001b[0m fitnesses \u001b[39m=\u001b[39m \u001b[39mlist\u001b[39;49m(\u001b[39mmap\u001b[39;49m(toolbox\u001b[39m.\u001b[39;49mevaluate, offspring))\n\u001b[0;32m <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=124'>125</a>\u001b[0m \u001b[39mfor\u001b[39;00m ind, fit \u001b[39min\u001b[39;00m \u001b[39mzip\u001b[39m(offspring, fitnesses):\n\u001b[0;32m <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=125'>126</a>\u001b[0m ind\u001b[39m.\u001b[39mfitness\u001b[39m.\u001b[39mvalues \u001b[39m=\u001b[39m fit\n", - "\u001b[1;32mc:\\Users\\Main\\Documents\\com3013-comp-intelligence\\cnnBase.ipynb Cell 11\u001b[0m line \u001b[0;36m7\n\u001b[0;32m <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=66'>67</a>\u001b[0m scheduler \u001b[39m=\u001b[39m optim\u001b[39m.\u001b[39mlr_scheduler\u001b[39m.\u001b[39mReduceLROnPlateau(optimizer, mode\u001b[39m=\u001b[39m\u001b[39m'\u001b[39m\u001b[39mmin\u001b[39m\u001b[39m'\u001b[39m, patience\u001b[39m=\u001b[39m\u001b[39m2\u001b[39m, factor\u001b[39m=\u001b[39m\u001b[39m0.1\u001b[39m, verbose\u001b[39m=\u001b[39m\u001b[39mTrue\u001b[39;00m)\n\u001b[0;32m <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=68'>69</a>\u001b[0m \u001b[39m# Train the model and return the validation loss\u001b[39;00m\n\u001b[1;32m---> <a 
href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=69'>70</a>\u001b[0m fitness \u001b[39m=\u001b[39m train_model_for_fitness(eval_model, train_loader, val_loader, criterion, optimizer, scheduler, num_epochs\u001b[39m=\u001b[39;49m\u001b[39m1\u001b[39;49m)\n\u001b[0;32m <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=71'>72</a>\u001b[0m \u001b[39mreturn\u001b[39;00m (fitness,)\n", - "\u001b[1;32mc:\\Users\\Main\\Documents\\com3013-comp-intelligence\\cnnBase.ipynb Cell 11\u001b[0m line \u001b[0;36m1\n\u001b[0;32m <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=9'>10</a>\u001b[0m model\u001b[39m.\u001b[39mtrain()\n\u001b[0;32m <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=10'>11</a>\u001b[0m running_loss \u001b[39m=\u001b[39m \u001b[39m0.0\u001b[39m\n\u001b[1;32m---> <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=12'>13</a>\u001b[0m \u001b[39mfor\u001b[39;00m inputs, labels \u001b[39min\u001b[39;00m (train_loader):\n\u001b[0;32m <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=13'>14</a>\u001b[0m inputs, labels \u001b[39m=\u001b[39m inputs\u001b[39m.\u001b[39mto(device), labels\u001b[39m.\u001b[39mto(device)\n\u001b[0;32m <a href='vscode-notebook-cell:/c%3A/Users/Main/Documents/com3013-comp-intelligence/cnnBase.ipynb#X13sZmlsZQ%3D%3D?line=14'>15</a>\u001b[0m optimizer\u001b[39m.\u001b[39mzero_grad()\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\torch\\utils\\data\\dataloader.py:630\u001b[0m, in \u001b[0;36m_BaseDataLoaderIter.__next__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 627\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_sampler_iter \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m 628\u001b[0m \u001b[39m# TODO(https://github.com/pytorch/pytorch/issues/76750)\u001b[39;00m\n\u001b[0;32m 629\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_reset() \u001b[39m# type: ignore[call-arg]\u001b[39;00m\n\u001b[1;32m--> 630\u001b[0m data \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_next_data()\n\u001b[0;32m 631\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_num_yielded \u001b[39m+\u001b[39m\u001b[39m=\u001b[39m \u001b[39m1\u001b[39m\n\u001b[0;32m 632\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_dataset_kind \u001b[39m==\u001b[39m _DatasetKind\u001b[39m.\u001b[39mIterable \u001b[39mand\u001b[39;00m \\\n\u001b[0;32m 633\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_IterableDataset_len_called \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m \u001b[39mand\u001b[39;00m \\\n\u001b[0;32m 634\u001b[0m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_num_yielded \u001b[39m>\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_IterableDataset_len_called:\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\torch\\utils\\data\\dataloader.py:674\u001b[0m, in \u001b[0;36m_SingleProcessDataLoaderIter._next_data\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 672\u001b[0m \u001b[39mdef\u001b[39;00m 
\u001b[39m_next_data\u001b[39m(\u001b[39mself\u001b[39m):\n\u001b[0;32m 673\u001b[0m index \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_next_index() \u001b[39m# may raise StopIteration\u001b[39;00m\n\u001b[1;32m--> 674\u001b[0m data \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_dataset_fetcher\u001b[39m.\u001b[39;49mfetch(index) \u001b[39m# may raise StopIteration\u001b[39;00m\n\u001b[0;32m 675\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_pin_memory:\n\u001b[0;32m 676\u001b[0m data \u001b[39m=\u001b[39m _utils\u001b[39m.\u001b[39mpin_memory\u001b[39m.\u001b[39mpin_memory(data, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_pin_memory_device)\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\torch\\utils\\data\\_utils\\fetch.py:49\u001b[0m, in \u001b[0;36m_MapDatasetFetcher.fetch\u001b[1;34m(self, possibly_batched_index)\u001b[0m\n\u001b[0;32m 47\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mauto_collation:\n\u001b[0;32m 48\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mhasattr\u001b[39m(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdataset, \u001b[39m\"\u001b[39m\u001b[39m__getitems__\u001b[39m\u001b[39m\"\u001b[39m) \u001b[39mand\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdataset\u001b[39m.\u001b[39m__getitems__:\n\u001b[1;32m---> 49\u001b[0m data \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mdataset\u001b[39m.\u001b[39;49m__getitems__(possibly_batched_index)\n\u001b[0;32m 50\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m 51\u001b[0m data \u001b[39m=\u001b[39m [\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdataset[idx] \u001b[39mfor\u001b[39;00m idx \u001b[39min\u001b[39;00m possibly_batched_index]\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\torch\\utils\\data\\dataset.py:362\u001b[0m, in \u001b[0;36mSubset.__getitems__\u001b[1;34m(self, indices)\u001b[0m\n\u001b[0;32m 358\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39m__getitems__\u001b[39m(\u001b[39mself\u001b[39m, indices: List[\u001b[39mint\u001b[39m]) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m List[T_co]:\n\u001b[0;32m 359\u001b[0m \u001b[39m# add batched sampling support when parent dataset supports it.\u001b[39;00m\n\u001b[0;32m 360\u001b[0m \u001b[39m# see torch.utils.data._utils.fetch._MapDatasetFetcher\u001b[39;00m\n\u001b[0;32m 361\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mcallable\u001b[39m(\u001b[39mgetattr\u001b[39m(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdataset, \u001b[39m\"\u001b[39m\u001b[39m__getitems__\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39mNone\u001b[39;00m)):\n\u001b[1;32m--> 362\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mdataset\u001b[39m.\u001b[39;49m__getitems__([\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mindices[idx] \u001b[39mfor\u001b[39;49;00m idx \u001b[39min\u001b[39;49;00m indices]) \u001b[39m# type: ignore[attr-defined]\u001b[39;00m\n\u001b[0;32m 363\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m 364\u001b[0m \u001b[39mreturn\u001b[39;00m [\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdataset[\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mindices[idx]] \u001b[39mfor\u001b[39;00m idx \u001b[39min\u001b[39;00m indices]\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\torch\\utils\\data\\dataset.py:364\u001b[0m, in \u001b[0;36mSubset.__getitems__\u001b[1;34m(self, 
indices)\u001b[0m\n\u001b[0;32m 362\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdataset\u001b[39m.\u001b[39m__getitems__([\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mindices[idx] \u001b[39mfor\u001b[39;00m idx \u001b[39min\u001b[39;00m indices]) \u001b[39m# type: ignore[attr-defined]\u001b[39;00m\n\u001b[0;32m 363\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m--> 364\u001b[0m \u001b[39mreturn\u001b[39;00m [\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mdataset[\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mindices[idx]] \u001b[39mfor\u001b[39;49;00m idx \u001b[39min\u001b[39;49;00m indices]\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\torch\\utils\\data\\dataset.py:364\u001b[0m, in \u001b[0;36m<listcomp>\u001b[1;34m(.0)\u001b[0m\n\u001b[0;32m 362\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mdataset\u001b[39m.\u001b[39m__getitems__([\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mindices[idx] \u001b[39mfor\u001b[39;00m idx \u001b[39min\u001b[39;00m indices]) \u001b[39m# type: ignore[attr-defined]\u001b[39;00m\n\u001b[0;32m 363\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m--> 364\u001b[0m \u001b[39mreturn\u001b[39;00m [\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mdataset[\u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mindices[idx]] \u001b[39mfor\u001b[39;00m idx \u001b[39min\u001b[39;00m indices]\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\torchvision\\datasets\\cifar.py:118\u001b[0m, in \u001b[0;36mCIFAR10.__getitem__\u001b[1;34m(self, index)\u001b[0m\n\u001b[0;32m 115\u001b[0m img \u001b[39m=\u001b[39m Image\u001b[39m.\u001b[39mfromarray(img)\n\u001b[0;32m 117\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtransform \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m--> 118\u001b[0m img \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mtransform(img)\n\u001b[0;32m 120\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtarget_transform \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m 121\u001b[0m target \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtarget_transform(target)\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1518\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1516\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_compiled_call_impl(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs) \u001b[39m# type: ignore[misc]\u001b[39;00m\n\u001b[0;32m 1517\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m-> 1518\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_call_impl(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1527\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1522\u001b[0m \u001b[39m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[0;32m 1523\u001b[0m \u001b[39m# this function, and just call 
forward.\u001b[39;00m\n\u001b[0;32m 1524\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m (\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_pre_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_pre_hooks\n\u001b[0;32m 1525\u001b[0m \u001b[39mor\u001b[39;00m _global_backward_pre_hooks \u001b[39mor\u001b[39;00m _global_backward_hooks\n\u001b[0;32m 1526\u001b[0m \u001b[39mor\u001b[39;00m _global_forward_hooks \u001b[39mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[1;32m-> 1527\u001b[0m \u001b[39mreturn\u001b[39;00m forward_call(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[0;32m 1529\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m 1530\u001b[0m result \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\torchvision\\transforms\\v2\\_container.py:53\u001b[0m, in \u001b[0;36mCompose.forward\u001b[1;34m(self, *inputs)\u001b[0m\n\u001b[0;32m 51\u001b[0m needs_unpacking \u001b[39m=\u001b[39m \u001b[39mlen\u001b[39m(inputs) \u001b[39m>\u001b[39m \u001b[39m1\u001b[39m\n\u001b[0;32m 52\u001b[0m \u001b[39mfor\u001b[39;00m transform \u001b[39min\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtransforms:\n\u001b[1;32m---> 53\u001b[0m outputs \u001b[39m=\u001b[39m transform(\u001b[39m*\u001b[39;49minputs)\n\u001b[0;32m 54\u001b[0m inputs \u001b[39m=\u001b[39m outputs \u001b[39mif\u001b[39;00m needs_unpacking \u001b[39melse\u001b[39;00m (outputs,)\n\u001b[0;32m 55\u001b[0m \u001b[39mreturn\u001b[39;00m outputs\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1518\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1516\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_compiled_call_impl(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs) \u001b[39m# type: ignore[misc]\u001b[39;00m\n\u001b[0;32m 1517\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m-> 1518\u001b[0m \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_call_impl(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1527\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1522\u001b[0m \u001b[39m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[0;32m 1523\u001b[0m \u001b[39m# this function, and just call forward.\u001b[39;00m\n\u001b[0;32m 1524\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m (\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_pre_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_pre_hooks\n\u001b[0;32m 1525\u001b[0m \u001b[39mor\u001b[39;00m _global_backward_pre_hooks \u001b[39mor\u001b[39;00m _global_backward_hooks\n\u001b[0;32m 1526\u001b[0m \u001b[39mor\u001b[39;00m 
_global_forward_hooks \u001b[39mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[1;32m-> 1527\u001b[0m \u001b[39mreturn\u001b[39;00m forward_call(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[0;32m 1529\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m 1530\u001b[0m result \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\torchvision\\transforms\\transforms.py:676\u001b[0m, in \u001b[0;36mRandomCrop.forward\u001b[1;34m(self, img)\u001b[0m\n\u001b[0;32m 668\u001b[0m \u001b[39m\u001b[39m\u001b[39m\"\"\"\u001b[39;00m\n\u001b[0;32m 669\u001b[0m \u001b[39mArgs:\u001b[39;00m\n\u001b[0;32m 670\u001b[0m \u001b[39m img (PIL Image or Tensor): Image to be cropped.\u001b[39;00m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 673\u001b[0m \u001b[39m PIL Image or Tensor: Cropped image.\u001b[39;00m\n\u001b[0;32m 674\u001b[0m \u001b[39m\"\"\"\u001b[39;00m\n\u001b[0;32m 675\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mpadding \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[1;32m--> 676\u001b[0m img \u001b[39m=\u001b[39m F\u001b[39m.\u001b[39;49mpad(img, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mpadding, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mfill, \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mpadding_mode)\n\u001b[0;32m 678\u001b[0m _, height, width \u001b[39m=\u001b[39m F\u001b[39m.\u001b[39mget_dimensions(img)\n\u001b[0;32m 679\u001b[0m \u001b[39m# pad the width if needed\u001b[39;00m\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\torchvision\\transforms\\functional.py:539\u001b[0m, in \u001b[0;36mpad\u001b[1;34m(img, padding, fill, padding_mode)\u001b[0m\n\u001b[0;32m 537\u001b[0m _log_api_usage_once(pad)\n\u001b[0;32m 538\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39misinstance\u001b[39m(img, torch\u001b[39m.\u001b[39mTensor):\n\u001b[1;32m--> 539\u001b[0m \u001b[39mreturn\u001b[39;00m F_pil\u001b[39m.\u001b[39;49mpad(img, padding\u001b[39m=\u001b[39;49mpadding, fill\u001b[39m=\u001b[39;49mfill, padding_mode\u001b[39m=\u001b[39;49mpadding_mode)\n\u001b[0;32m 541\u001b[0m \u001b[39mreturn\u001b[39;00m F_t\u001b[39m.\u001b[39mpad(img, padding\u001b[39m=\u001b[39mpadding, fill\u001b[39m=\u001b[39mfill, padding_mode\u001b[39m=\u001b[39mpadding_mode)\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\torchvision\\transforms\\_functional_pil.py:215\u001b[0m, in \u001b[0;36mpad\u001b[1;34m(img, padding, fill, padding_mode)\u001b[0m\n\u001b[0;32m 213\u001b[0m \u001b[39m# RGB image\u001b[39;00m\n\u001b[0;32m 214\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mlen\u001b[39m(img\u001b[39m.\u001b[39mshape) \u001b[39m==\u001b[39m \u001b[39m3\u001b[39m:\n\u001b[1;32m--> 215\u001b[0m img \u001b[39m=\u001b[39m np\u001b[39m.\u001b[39;49mpad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (\u001b[39m0\u001b[39;49m, \u001b[39m0\u001b[39;49m)), padding_mode)\n\u001b[0;32m 216\u001b[0m \u001b[39m# Grayscale image\u001b[39;00m\n\u001b[0;32m 217\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mlen\u001b[39m(img\u001b[39m.\u001b[39mshape) \u001b[39m==\u001b[39m \u001b[39m2\u001b[39m:\n", - "File \u001b[1;32m<__array_function__ internals>:200\u001b[0m, in \u001b[0;36mpad\u001b[1;34m(*args, **kwargs)\u001b[0m\n", - "File 
\u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\numpy\\lib\\arraypad.py:862\u001b[0m, in \u001b[0;36mpad\u001b[1;34m(array, pad_width, mode, **kwargs)\u001b[0m\n\u001b[0;32m 857\u001b[0m roi \u001b[39m=\u001b[39m _view_roi(padded, original_area_slice, axis)\n\u001b[0;32m 858\u001b[0m \u001b[39mwhile\u001b[39;00m left_index \u001b[39m>\u001b[39m \u001b[39m0\u001b[39m \u001b[39mor\u001b[39;00m right_index \u001b[39m>\u001b[39m \u001b[39m0\u001b[39m:\n\u001b[0;32m 859\u001b[0m \u001b[39m# Iteratively pad until dimension is filled with reflected\u001b[39;00m\n\u001b[0;32m 860\u001b[0m \u001b[39m# values. This is necessary if the pad area is larger than\u001b[39;00m\n\u001b[0;32m 861\u001b[0m \u001b[39m# the length of the original values in the current dimension.\u001b[39;00m\n\u001b[1;32m--> 862\u001b[0m left_index, right_index \u001b[39m=\u001b[39m _set_reflect_both(\n\u001b[0;32m 863\u001b[0m roi, axis, (left_index, right_index),\n\u001b[0;32m 864\u001b[0m method, include_edge\n\u001b[0;32m 865\u001b[0m )\n\u001b[0;32m 867\u001b[0m \u001b[39melif\u001b[39;00m mode \u001b[39m==\u001b[39m \u001b[39m\"\u001b[39m\u001b[39mwrap\u001b[39m\u001b[39m\"\u001b[39m:\n\u001b[0;32m 868\u001b[0m \u001b[39mfor\u001b[39;00m axis, (left_index, right_index) \u001b[39min\u001b[39;00m \u001b[39mzip\u001b[39m(axes, pad_width):\n", - "File \u001b[1;32mc:\\Users\\Main\\anaconda3\\envs\\PyTorch\\Lib\\site-packages\\numpy\\lib\\arraypad.py:357\u001b[0m, in \u001b[0;36m_set_reflect_both\u001b[1;34m(padded, axis, width_pair, method, include_edge)\u001b[0m\n\u001b[0;32m 352\u001b[0m left_pad \u001b[39m-\u001b[39m\u001b[39m=\u001b[39m chunk_length\n\u001b[0;32m 354\u001b[0m \u001b[39mif\u001b[39;00m right_pad \u001b[39m>\u001b[39m \u001b[39m0\u001b[39m:\n\u001b[0;32m 355\u001b[0m \u001b[39m# Pad with reflected values on right side:\u001b[39;00m\n\u001b[0;32m 356\u001b[0m \u001b[39m# First limit chunk size which can't be larger than pad area\u001b[39;00m\n\u001b[1;32m--> 357\u001b[0m chunk_length \u001b[39m=\u001b[39m \u001b[39mmin\u001b[39;49m(old_length, right_pad)\n\u001b[0;32m 358\u001b[0m \u001b[39m# Slice right to left, start on or next to edge, stop relative to start\u001b[39;00m\n\u001b[0;32m 359\u001b[0m start \u001b[39m=\u001b[39m \u001b[39m-\u001b[39mright_pad \u001b[39m+\u001b[39m edge_offset \u001b[39m-\u001b[39m \u001b[39m2\u001b[39m\n", - "\u001b[1;31mKeyboardInterrupt\u001b[0m: " - ] - } - ], + "outputs": [], "source": [ "from deap import base, creator, tools, algorithms\n", "import random\n", + "import logging\n", + "\n", + "\n", "\n", "def train_model_for_fitness(model, train_loader, val_loader, criterion, optimizer, scheduler, num_epochs=50):\n", " val_losses = []\n", "\n", - " early_stopping = EarlyStopping(patience=5, delta=0, verbose=True)\n", + " early_stopping = EarlyStopping(patience=5, delta=0, verbose=False)\n", "\n", " for epoch in range(num_epochs):\n", " model.train()\n", @@ -612,7 +499,7 @@ "\n", " #print(f'Epoch {epoch + 1}/ {num_epochs}, Validation Loss: {val_loss}, Validation Acc: {val_acc}, Learning Rate: {optimizer.param_groups[0][\"lr\"]}')\n", "\n", - " print('Finished Training')\n", + " #print('Finished Training')\n", "\n", " return val_loss # Return only the validation loss for the genetic algorithm fitness\n", "\n", @@ -706,6 +593,7 @@ " # Replace the old population by the offspring\n", " population[:] = offspring\n", " print(\"best: {best}\".format(best=tools.selBest(population, 1)[0]))\n", + " print(\"best (Accuracy):\", 
evaluate(tools.selBest(population, 1)[0]))\n",
     "    # Get the best individual\n",
     "    best_ind = tools.selBest(population, 1)[0]\n",
     "\n",
@@ -713,22 +601,11 @@
     "\n",
     "    # Print the best individual (weights)\n",
     "    print(\"Best Individual (Weights):\", best_ind)\n",
+    "    print(\"Best Individual (Loss):\", evaluate(best_ind))\n",
     "\n",
     "DEAPStuff()"
    ]
   },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
   {
    "cell_type": "code",
    "execution_count": null,