You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
DroneDetector/train_scripts/Training_models_2.4-Copy3.i...

466 lines
32 KiB
Plaintext

{
"cells": [
{
"cell_type": "markdown",
"id": "5a13ad6b-56c9-4381-b376-1765f6dd7553",
"metadata": {
"slideshow": {
"slide_type": ""
},
"tags": []
},
"source": [
"# Импортирование библиотек"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "7311cb4a-5bf3-4268-b431-43eea10e9ed6",
"metadata": {
"slideshow": {
"slide_type": ""
},
"tags": []
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"cuda\n"
]
},
{
"data": {
"text/plain": [
"0"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Imports: all in one cell so the notebook survives Restart & Run All.\n",
"from sklearn.model_selection import train_test_split\n",
"from torch.utils.data import Dataset, DataLoader\n",
"from torch import default_generator, randperm\n",
"from torch.utils.data.dataset import Subset\n",
"import torchvision.transforms as transforms\n",
"from torchvision.io import read_image\n",
"from importlib import import_module\n",
"import matplotlib.pyplot as plt\n",
"from torchvision import models\n",
"import torch, torchvision\n",
"from pathlib import Path\n",
"from PIL import Image\n",
"import torch.nn as nn\n",
"from tqdm import tqdm\n",
"import pandas as pd\n",
"import numpy as np\n",
"import matplotlib\n",
"import os, shutil\n",
"import mlconfig\n",
"import random\n",
"import timeit\n",
"import copy\n",
"import time\n",
"import cv2\n",
"import csv\n",
"import sys\n",
"import io\n",
"import gc\n",
"\n",
"plt.rcParams[\"savefig.bbox\"] = 'tight'\n",
"torch.manual_seed(1)  # fixed seed for reproducibility\n",
"device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
"print(device)\n",
"torch.cuda.empty_cache()\n",
"cv2.destroyAllWindows()\n",
"gc.collect()"
]
},
{
"cell_type": "markdown",
"id": "384de097-82c6-41f5-bda9-b2f54bc99593",
"metadata": {
"slideshow": {
"slide_type": ""
},
"tags": []
},
"source": [
"# Подготовка данных и обучение детектированию"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "46e4dc99-6994-4fee-a32e-f3983bd991bd",
"metadata": {},
"outputs": [],
"source": [
"def prepare_and_learning_detection(num_classes, num_samples, path_dataset, model_name, config_name, model):\n",
"    \"\"\"Build a balanced file-list CSV from class subfolders, train `model` on it,\n",
"    and save the best weights plus the loss/accuracy curves.\n",
"\n",
"    NOTE(review): relies on module-level `optimizer`, `criterion` and `device`\n",
"    being defined before the call -- consider passing them as arguments.\n",
"\n",
"    Returns (path_res, model_name): the results directory and the model name.\n",
"    \"\"\"\n",
"    num_samples_per_class = num_samples // num_classes\n",
"\n",
"    #----------Create a folder for the training results--------------\n",
"\n",
"    ind = 1\n",
"    while True:\n",
"        if os.path.exists(\"models/\" + model_name + str(ind)):\n",
"            ind += 1\n",
"        else:\n",
"            os.mkdir(\"models/\" + model_name + str(ind))\n",
"            path_res = \"models/\" + model_name + str(ind) + '/'\n",
"            break\n",
"\n",
"    #----------Create the dataset.csv file used for training--------------\n",
"\n",
"    pd_columns = ['file_name']\n",
"\n",
"    subdirs = os.listdir(path_dataset)\n",
"    # Balance the classes: cap the per-class count at the smallest folder size.\n",
"    for subdir in subdirs:\n",
"        files = os.listdir(path_dataset + subdir + '/')\n",
"        num_samples_per_class = min(num_samples_per_class, len(files))\n",
"    # Collect the rows first and build the frame once (the previous pd.concat\n",
"    # inside the loop was quadratic in the number of files).\n",
"    rows = []\n",
"    for subdir in subdirs:\n",
"        files = os.listdir(path_dataset + subdir + '/')\n",
"        random.shuffle(files)\n",
"        for file in files[:num_samples_per_class]:\n",
"            rows.append({pd_columns[0]: str(path_dataset + subdir + '/' + file)})\n",
"    df = pd.DataFrame(rows, columns=pd_columns)\n",
"\n",
"    df.to_csv(path_res + 'dataset.csv', index=False)\n",
"\n",
"    #----------Load the training parameters--------------\n",
"\n",
"    config = mlconfig.load('config_' + config_name + '.yaml')\n",
"\n",
"    #----------Dataset class--------------\n",
"\n",
"    class MyDataset(Dataset):\n",
"        def __init__(self, path_dataset, csv_file):\n",
"            data = []\n",
"            with open(path_dataset + csv_file, newline='') as csvfile:\n",
"                reader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n",
"                for row in list(reader)[1:]:\n",
"                    # Strip the list repr decoration: \"['path']\" -> path\n",
"                    row = str(row)\n",
"                    data.append(row[2: len(row)-2])\n",
"            self.sig_filenames = data\n",
"            self.path_dataset = path_dataset\n",
"\n",
"        def __len__(self):\n",
"            return len(self.sig_filenames)\n",
"\n",
"        def __getitem__(self, idx):\n",
"            # NOTE(review): cv2.imread returns None for a missing/corrupt file,\n",
"            # which yields an empty batch downstream -- consider validating here.\n",
"            image_real = np.asarray(cv2.split(cv2.imread(self.sig_filenames[idx][:-8]+'real.jpg')), dtype=np.float32)\n",
"            parts = self.sig_filenames[idx].split('/')\n",
"            if 'drone' in parts:\n",
"                label = torch.tensor(0)\n",
"            elif 'noise' in parts:\n",
"                label = torch.tensor(1)\n",
"            else:\n",
"                # Previously `label` stayed unbound here and raised a confusing\n",
"                # NameError; fail with an explicit message instead.\n",
"                raise ValueError('Unknown class for file: ' + self.sig_filenames[idx])\n",
"            return image_real, label\n",
"\n",
"    #----------Build the dataset and the dataloaders--------------\n",
"\n",
"    dataset = MyDataset(path_dataset=path_res, csv_file='dataset.csv')\n",
"    train_set, valid_set = torch.utils.data.random_split(dataset, [0.7, 0.3], generator=torch.Generator().manual_seed(42))\n",
"    batch_size = config.batch_size\n",
"    train_dataloader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last=True)\n",
"    valid_dataloader = torch.utils.data.DataLoader(valid_set, batch_size=batch_size, shuffle=True, drop_last=True)\n",
"\n",
"    dataloaders = {'train': train_dataloader, 'val': valid_dataloader}\n",
"    dataset_sizes = {'train': len(train_set), 'val': len(valid_set)}\n",
"\n",
"    #----------Train the model--------------\n",
"\n",
"    val_loss = []\n",
"    val_acc = []\n",
"    train_loss = []\n",
"    train_acc = []\n",
"    epochs = config.epoch\n",
"\n",
"    best_acc = 0.0\n",
"    best_model = copy.deepcopy(model.state_dict())\n",
"    limit = config.limit\n",
"    epoch_limit = epochs\n",
"    # Epochs since the last validation improvement (early stopping). Must be\n",
"    # initialized here: it was previously unbound until the first improvement,\n",
"    # so a non-improving first epoch raised NameError.\n",
"    ind_limit = 0\n",
"\n",
"    start = timeit.default_timer()\n",
"    for epoch in range(1, epochs+1):\n",
"        print(f\"Epoch : {epoch}\\n\")\n",
"\n",
"        for phase in ['train', 'val']:\n",
"            running_loss = 0.0\n",
"            running_corrects = 0\n",
"\n",
"            for (img, label) in tqdm(dataloaders[phase]):\n",
"                img, label = img.to(device), label.to(device)\n",
"                optimizer.zero_grad()\n",
"\n",
"                # Gradients are only needed during the training phase.\n",
"                with torch.set_grad_enabled(phase == 'train'):\n",
"                    output = model(img)\n",
"                    _, pred = torch.max(output.data, 1)\n",
"                    loss = criterion(output, label)\n",
"                    if phase == 'train':\n",
"                        loss.backward()\n",
"                        optimizer.step()\n",
"\n",
"                running_loss += loss.item() * img.size(0)\n",
"                running_corrects += torch.sum(pred == label.data)\n",
"\n",
"            epoch_loss = running_loss / dataset_sizes[phase]\n",
"            epoch_acc = running_corrects.double() / dataset_sizes[phase]\n",
"\n",
"            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))\n",
"\n",
"            if phase == 'train':\n",
"                train_loss.append(epoch_loss)\n",
"                train_acc.append(epoch_acc)\n",
"            else:\n",
"                val_loss.append(epoch_loss)\n",
"                val_acc.append(epoch_acc)\n",
"                if val_acc[-1] > best_acc:\n",
"                    ind_limit = 0\n",
"                    best_acc = val_acc[-1]\n",
"                    best_model = copy.deepcopy(model.state_dict())\n",
"                    torch.save(best_model, path_res + model_name + '.pth')\n",
"                else:\n",
"                    ind_limit += 1\n",
"\n",
"            if ind_limit >= limit:\n",
"                break\n",
"\n",
"        if ind_limit >= limit:\n",
"            epoch_limit = epoch\n",
"            break\n",
"\n",
"        print()\n",
"\n",
"    end = timeit.default_timer()\n",
"    print(f\"Total time elapsed = {end - start} seconds\")\n",
"    epoch_limit += 1\n",
"\n",
"    #----------Plot and save the training results--------------\n",
"\n",
"    train_acc = np.asarray(list(map(lambda x: x.item(), train_acc)))\n",
"    val_acc = np.asarray(list(map(lambda x: x.item(), val_acc)))\n",
"\n",
"    np.save(path_res+'train_acc.npy', train_acc)\n",
"    np.save(path_res+'val_acc.npy', val_acc)\n",
"    np.save(path_res+'train_loss.npy', train_loss)\n",
"    np.save(path_res+'val_loss.npy', val_loss)\n",
"\n",
"    plt.figure()\n",
"    plt.plot(range(1, epoch_limit), train_loss, color='blue')\n",
"    plt.plot(range(1, epoch_limit), val_loss, color='red')\n",
"    plt.xlabel('Epoch')\n",
"    plt.ylabel('Loss')\n",
"    plt.title('Loss Curve')\n",
"    plt.legend(['Train Loss', 'Validation Loss'])\n",
"    plt.show()\n",
"    plt.clf()\n",
"    plt.cla()\n",
"    plt.close()\n",
"\n",
"    plt.figure()\n",
"    plt.plot(range(1, epoch_limit), train_acc, color='blue')\n",
"    plt.plot(range(1, epoch_limit), val_acc, color='red')\n",
"    plt.xlabel('Epoch')\n",
"    plt.ylabel('Accuracy')\n",
"    plt.title('Accuracy Curve')\n",
"    plt.legend(['Train Accuracy', 'Validation Accuracy'])\n",
"    plt.show()\n",
"    plt.clf()\n",
"    plt.cla()\n",
"    plt.close()\n",
"\n",
"    # Free GPU memory before returning.\n",
"    torch.cuda.empty_cache()\n",
"    cv2.destroyAllWindows()\n",
"    del model\n",
"    gc.collect()\n",
"\n",
"    return path_res, model_name"
]
},
{
"cell_type": "markdown",
"id": "93c136ee",
"metadata": {},
"source": [
"### Ensemble"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "52e8d4c5",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"C:\\Users\\snytk\\miniconda3\\envs\\python311\\Lib\\site-packages\\torchvision\\models\\_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.\n",
" warnings.warn(\n",
"C:\\Users\\snytk\\miniconda3\\envs\\python311\\Lib\\site-packages\\torchvision\\models\\_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=ResNet18_Weights.IMAGENET1K_V1`. You can also use `weights=ResNet18_Weights.DEFAULT` to get the most up-to-date weights.\n",
" warnings.warn(msg)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch : 1\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 0%| | 0/337 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([], device='cuda:0', size=(4, 0))\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n"
]
},
{
"ename": "RuntimeError",
"evalue": "Expected 3D (unbatched) or 4D (batched) input to conv2d, but got input of size: [4, 0]",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[1;32mIn[10], line 42\u001b[0m\n\u001b[0;32m 38\u001b[0m model \u001b[38;5;241m=\u001b[39m model\u001b[38;5;241m.\u001b[39mto(device)\n\u001b[0;32m 40\u001b[0m \u001b[38;5;66;03m#----------Создания датасета и обучение модели--------------\u001b[39;00m\n\u001b[1;32m---> 42\u001b[0m path_res, model_name \u001b[38;5;241m=\u001b[39m \u001b[43mprepare_and_learning_detection\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnum_classes\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mnum_classes\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mnum_samples\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;241;43m20000\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mpath_dataset\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m//192.168.11.63/data/DATASETS/Energomash/2400_learning/\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[0;32m 43\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodel_name\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mconfig_name\u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m_2.4_jpg_\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig_name\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mconfig_name\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 46\u001b[0m torch\u001b[38;5;241m.\u001b[39mcuda\u001b[38;5;241m.\u001b[39mempty_cache()\n\u001b[0;32m 47\u001b[0m cv2\u001b[38;5;241m.\u001b[39mdestroyAllWindows()\n",
"Cell \u001b[1;32mIn[2], line 108\u001b[0m, in \u001b[0;36mprepare_and_learning_detection\u001b[1;34m(num_classes, num_samples, path_dataset, model_name, config_name, model)\u001b[0m\n\u001b[0;32m 105\u001b[0m optimizer\u001b[38;5;241m.\u001b[39mzero_grad()\n\u001b[0;32m 107\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m torch\u001b[38;5;241m.\u001b[39mset_grad_enabled(phase \u001b[38;5;241m==\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtrain\u001b[39m\u001b[38;5;124m'\u001b[39m):\n\u001b[1;32m--> 108\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[43mimg\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 109\u001b[0m _, pred \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mmax(output\u001b[38;5;241m.\u001b[39mdata, \u001b[38;5;241m1\u001b[39m)\n\u001b[0;32m 110\u001b[0m loss \u001b[38;5;241m=\u001b[39m criterion(output, label)\n",
"File \u001b[1;32m~\\miniconda3\\envs\\python311\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1553\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1551\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[0;32m 1552\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m-> 1553\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[1;32m~\\miniconda3\\envs\\python311\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1562\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1557\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[0;32m 1558\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[0;32m 1559\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[0;32m 1560\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[0;32m 1561\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[1;32m-> 1562\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 1564\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m 1565\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
"Cell \u001b[1;32mIn[10], line 28\u001b[0m, in \u001b[0;36mModel.forward\u001b[1;34m(self, x)\u001b[0m\n\u001b[0;32m 26\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, x):\n\u001b[0;32m 27\u001b[0m \u001b[38;5;28mprint\u001b[39m(x)\n\u001b[1;32m---> 28\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 29\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m x\n",
"File \u001b[1;32m~\\miniconda3\\envs\\python311\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1553\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1551\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[0;32m 1552\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m-> 1553\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[1;32m~\\miniconda3\\envs\\python311\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1562\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1557\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[0;32m 1558\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[0;32m 1559\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[0;32m 1560\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[0;32m 1561\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[1;32m-> 1562\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 1564\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m 1565\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
"File \u001b[1;32m~\\miniconda3\\envs\\python311\\Lib\\site-packages\\torchvision\\models\\resnet.py:285\u001b[0m, in \u001b[0;36mResNet.forward\u001b[1;34m(self, x)\u001b[0m\n\u001b[0;32m 284\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, x: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[1;32m--> 285\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_forward_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[1;32m~\\miniconda3\\envs\\python311\\Lib\\site-packages\\torchvision\\models\\resnet.py:268\u001b[0m, in \u001b[0;36mResNet._forward_impl\u001b[1;34m(self, x)\u001b[0m\n\u001b[0;32m 266\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_forward_impl\u001b[39m(\u001b[38;5;28mself\u001b[39m, x: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m 267\u001b[0m \u001b[38;5;66;03m# See note [TorchScript super()]\u001b[39;00m\n\u001b[1;32m--> 268\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconv1\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 269\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbn1(x)\n\u001b[0;32m 270\u001b[0m x \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mrelu(x)\n",
"File \u001b[1;32m~\\miniconda3\\envs\\python311\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1553\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1551\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[0;32m 1552\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m-> 1553\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[1;32m~\\miniconda3\\envs\\python311\\Lib\\site-packages\\torch\\nn\\modules\\module.py:1562\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1557\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[0;32m 1558\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[0;32m 1559\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[0;32m 1560\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[0;32m 1561\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[1;32m-> 1562\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 1564\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m 1565\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
"File \u001b[1;32m~\\miniconda3\\envs\\python311\\Lib\\site-packages\\torch\\nn\\modules\\conv.py:458\u001b[0m, in \u001b[0;36mConv2d.forward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m 457\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[1;32m--> 458\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_conv_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[1;32m~\\miniconda3\\envs\\python311\\Lib\\site-packages\\torch\\nn\\modules\\conv.py:454\u001b[0m, in \u001b[0;36mConv2d._conv_forward\u001b[1;34m(self, input, weight, bias)\u001b[0m\n\u001b[0;32m 450\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding_mode \u001b[38;5;241m!=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mzeros\u001b[39m\u001b[38;5;124m'\u001b[39m:\n\u001b[0;32m 451\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m F\u001b[38;5;241m.\u001b[39mconv2d(F\u001b[38;5;241m.\u001b[39mpad(\u001b[38;5;28minput\u001b[39m, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_reversed_padding_repeated_twice, mode\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding_mode),\n\u001b[0;32m 452\u001b[0m weight, bias, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride,\n\u001b[0;32m 453\u001b[0m _pair(\u001b[38;5;241m0\u001b[39m), \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdilation, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mgroups)\n\u001b[1;32m--> 454\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconv2d\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbias\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstride\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 455\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpadding\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdilation\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgroups\u001b[49m\u001b[43m)\u001b[49m\n",
"\u001b[1;31mRuntimeError\u001b[0m: Expected 3D (unbatched) or 4D (batched) input to conv2d, but got input of size: [4, 0]"
]
}
],
"source": [
"#----------Initialize the model and the training parameters--------------\n",
"\n",
"torch.cuda.empty_cache()\n",
"cv2.destroyAllWindows()\n",
"gc.collect()\n",
"\n",
"config_name = \"ensemble\"\n",
"\n",
"def load_function(attr):\n",
"    \"\"\"Resolve a dotted path such as 'torch.optim.Adam' to the attribute itself.\"\"\"\n",
"    module_, func = attr.rsplit('.', maxsplit=1)\n",
"    return getattr(import_module(module_), func)\n",
"\n",
"config = mlconfig.load('config_' + config_name + '.yaml')\n",
"\n",
"# 'pretrained=True' has been deprecated since torchvision 0.13; the weights\n",
"# enum below loads the same IMAGENET1K_V1 checkpoint without the warning.\n",
"model = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)\n",
"\n",
"num_classes = 2\n",
"\n",
"# Replace the ImageNet head with a `num_classes`-way classifier head.\n",
"model.fc = nn.Linear(model.fc.in_features, num_classes)\n",
"\n",
"class Model(nn.Module):\n",
"    \"\"\"Thin wrapper around the backbone (placeholder for a future ensemble).\"\"\"\n",
"    def __init__(self, model):\n",
"        super(Model, self).__init__()\n",
"        self.model = model\n",
"\n",
"    def forward(self, x):\n",
"        # Leftover debug print of the input batch removed: it flooded the\n",
"        # output on every forward pass and slowed training.\n",
"        return self.model(x)\n",
"\n",
"model = Model(model)\n",
"\n",
"optimizer = load_function(config.optimizer.name)(model.parameters(), lr=config.optimizer.lr)\n",
"criterion = load_function(config.loss_function.name)()\n",
"scheduler = load_function(config.scheduler.name)(optimizer, step_size=config.scheduler.step_size, gamma=config.scheduler.gamma)\n",
"\n",
"model = model.to(device)  # .to('cpu') is a no-op, so no device guard is needed\n",
"\n",
"#----------Build the dataset and train the model--------------\n",
"\n",
"path_res, model_name = prepare_and_learning_detection(num_classes = num_classes, num_samples = 20000, path_dataset = \"//192.168.11.63/data/DATASETS/Energomash/2400_learning/\", \n",
"                                                      model_name = config_name+\"_2.4_jpg_\", config_name = config_name, model=model)\n",
"\n",
"torch.cuda.empty_cache()\n",
"cv2.destroyAllWindows()\n",
"del model\n",
"gc.collect()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "57d18676",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "c10afb29",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"celltoolbar": "Отсутствует",
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}