0


基于VGG16的猫狗分类实战

1 使用卷积神经网络识别猫和狗数据集

1.1 理论基础

1.1.1 VGG架构

VGG16是由Karen Simonyan和Andrew Zisserman于2014年在论文“VERY DEEP CONVOLUTIONAL NETWORKS FOR LARGE-SCALE IMAGE RECOGNITION”中提出的一种处理多分类、大范围图像识别问题的卷积神经网络架构,成功对ImageNet数据集的约1400万张图片进行了1000个类别的归类并有92.7%的准确率。

本项目即对Pytorch官方预训练的VGG16网络进行微调并完成了猫狗分类任务。实际的微调方法也十分简单,仅需将分类层的最后一层修改为(1x1x2)即可将分类结果从1000类修改为二分类。

图1 VGG16架构图

1.1.2 卷积神经网络

卷积神经网络是在深度神经网络的基础上改进的专用于图像识别的神经网络模型,其拥有四个主要特征层:卷积层、池化层、激活层、全连接层(深度神经网络)。其中卷积层通过采用卷积核对输入数据进行处理,提取输入数据的特定特征;池化层通过压缩图像数据大小实现神经网络运算的加速;激活层的作用与其在深度神经网络中一致,用于模拟人脑神经元的刺激结果;全连接层实际上即是深度神经网络,即图像数据经卷积层、池化层、激活层处理后输入深度神经网络进行分类运算。

1.1.3 深度神经网络

  1. 深度神经网络是深度学习的基础算法,下图为深度神经网络的部分介绍。

图2 深度神经网络的结构

图3 深度神经网络节点内部算法

2 代码

2.1 main.py

  1. import torch
  2. from torchvision import datasets, transforms
  3. from torch.utils.data import DataLoader
  4. from training_settings import train, val
  5. from vgg16 import vgg16
  6. import os
  7. # BATCH大小
  8. BATCH_SIZE = 20
  9. # 迭代次数
  10. EPOCHS = 40
  11. # 采用cpu还是gpu进行计算
  12. DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
  13. # 学习率
  14. modellr = 1e-4
  15. # 数据预处理
  16. path = "D:\\Storage\\ProgramData\\Python\\DistingushCD\\net\\data\\dc_2000"
  17. transform = transforms.Compose([transforms.CenterCrop(224), transforms.ToTensor(),transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
  18. ])
  19. # 数据加载
  20. dataset_train = datasets.ImageFolder(path + '\\' + 'train', transform)
  21. print('trainset:{}'.format(dataset_train.class_to_idx))
  22. dataset_test = datasets.ImageFolder(path + '\\' + 'test', transform)
  23. print('testset:{}'.format(dataset_test.class_to_idx))
  24. train_loader = DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
  25. test_loader = DataLoader(dataset_test, batch_size=BATCH_SIZE, shuffle=False)
  26. # 设置模型
  27. model = vgg16(pretrained=True, progress=True, num_classes=2)
  28. model = model.to(DEVICE)
  29. # 设置优化器
  30. optimizer = torch.optim.Adam(model.parameters(), lr=modellr)
  31. # sculer = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
  32. # 训练
  33. for epoch in range(1, EPOCHS + 1):
  34. train(model, DEVICE, train_loader, optimizer, epoch) # 训练过程函数
  35. val(model, DEVICE, test_loader, optimizer) # 测试过程函数
  36. # 储存模型
  37. torch.save(model, 'D:\\Storage\\ProgramData\\Python\\DistingushCD\\netmodel.pth')

2.2 vgg16.py

  1. import torch
  2. import torch.nn as nn
  3. from torch.hub import load_state_dict_from_url
# Download URL for torchvision's official ImageNet-pretrained VGG16 weights.
model_urls = {
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
}  # pretrained model download source
  7. class VGG(nn.Module):
  8. def __init__(
  9. self,
  10. features: nn.Module,
  11. num_classes: int = 1000,
  12. init_weights: bool = True
  13. ):
  14. super(VGG, self).__init__()
  15. self.features = features
  16. self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
  17. self.classifier = nn.Sequential(
  18. nn.Linear(512 * 7 * 7, 4096),
  19. nn.ReLU(True),
  20. nn.Dropout(),
  21. nn.Linear(4096, 4096),
  22. nn.ReLU(True),
  23. nn.Dropout(),
  24. nn.Linear(4096, num_classes),
  25. )
  26. if init_weights:
  27. self._initialize_weights()
  28. def forward(self, x):
  29. x = self.features(x)
  30. x = self.avgpool(x)
  31. x = torch.flatten(x, 1)
  32. x = self.classifier(x)
  33. return x
  34. def _initialize_weights(self):
  35. for m in self.modules():
  36. if isinstance(m, nn.Conv2d):
  37. nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
  38. if m.bias is not None:
  39. nn.init.constant_(m.bias, 0)
  40. elif isinstance(m, nn.BatchNorm2d):
  41. nn.init.constant_(m.weight, 1)
  42. nn.init.constant_(m.bias, 0)
  43. elif isinstance(m, nn.Linear):
  44. nn.init.normal_(m.weight, 0, 0.01)
  45. nn.init.constant_(m.bias, 0)
  46. def make_layers(cfg, batch_norm=False):
  47. layers = []
  48. in_channels = 3
  49. for v in cfg:
  50. if v == 'M':
  51. layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
  52. else:
  53. conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
  54. if batch_norm:
  55. layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
  56. else:
  57. layers += [conv2d, nn.ReLU(inplace=True)]
  58. in_channels = v
  59. return nn.Sequential(*layers)
# Layer configuration keyed by torchvision's variant letter: 'D' is the
# 16-layer VGG16 layout — integers are conv output channels, 'M' marks a
# 2x2 max-pool (consumed by make_layers()).
cfgs = {
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
}
  63. def vgg16(pretrained=True, progress=True, num_classes=2):
  64. model = VGG(make_layers(cfgs['D']))
  65. if pretrained:
  66. state_dict = load_state_dict_from_url(model_urls['vgg16'], model_dir='./model',
  67. progress=progress)
  68. model.load_state_dict(state_dict)
  69. if num_classes != 1000:
  70. model.classifier = nn.Sequential(
  71. nn.Linear(512 * 7 * 7, 4096),
  72. nn.ReLU(True),
  73. nn.Dropout(p=0.5),
  74. nn.Linear(4096, 4096),
  75. nn.ReLU(True),
  76. nn.Dropout(p=0.5),
  77. nn.Linear(4096, num_classes),
  78. )
  79. return model

2.3 training_settings.py

  1. import torch
  2. import torch.nn.functional as F
  3. # cost函数设置
  4. criterion = torch.nn.CrossEntropyLoss()
  5. def train(model, device, train_loader, optimizer, epoch):
  6. total_train = 0
  7. for data in train_loader:
  8. img, label = data
  9. with torch.no_grad():
  10. img = img.to(device)
  11. label = label.to(device)
  12. optimizer.zero_grad()
  13. output = model(img)
  14. train_loss = criterion(output, label).to(device)
  15. train_loss.backward()
  16. optimizer.step()
  17. total_train += train_loss
  18. print("Epoch:{}, Loss of training set:{:.5f}".format(epoch, total_train))
  19. def val(model, device, test_lodaer, optimizer):
  20. total_test = 0
  21. total_accuracy = 0
  22. total_num = len(test_lodaer.dataset)
  23. for data in test_lodaer:
  24. img, label = data
  25. with torch.no_grad():
  26. img = img.to(device)
  27. label = label.to(device)
  28. optimizer.zero_grad()
  29. output = model(img)
  30. test_loss = criterion(output, label).to(device)
  31. total_test += test_loss
  32. accuracy = (output.argmax(1) == label).sum()
  33. total_accuracy += accuracy
  34. print("Loss of testing set:{:.5f}, Accuracy of testing set:{:.1%}\n".format(total_test, total_accuracy/total_num))

2.4 prediction.py

  1. import torch.utils.data.distributed
  2. import torchvision.transforms as transforms
  3. from torch.autograd import Variable
  4. import os
  5. from PIL import Image
  6. classes = ('cat', 'dog')
  7. transform_test = transforms.Compose([
  8. transforms.CenterCrop(224),
  9. transforms.ToTensor(),
  10. transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
  11. ])
  12. DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
  13. model = torch.load('D:\\Storage\\ProgramData\\Python\\DistingushCD\\netmodel.pth')
  14. model.eval()
  15. model.to(DEVICE)
  16. path = 'D:\\Storage\\ProgramData\\Python\\DistingushCD\\net\\data\\dc_2000\\to_test\\test1\\'
  17. file = '1002.jpg'
  18. img = Image.open(path + file)
  19. img.show()
  20. img = transform_test(img)
  21. img.unsqueeze_(0)
  22. img = Variable(img).to(DEVICE)
  23. out = model(img)
  24. # Predict
  25. _, pred = torch.max(out.data, 1)
  26. print('Image Name: {},\nprediction: It\'s a {}.'.format(file, classes[pred.data.item()]))

3 结果

数据集预处理介绍

图4 测试集路径

图5 测试集图片命名

测试集和训练集路径地址和图片命名规则符合图4、图5,数据集和测试集通过DataLoader函数自动导入训练程序。

图6 训练集和测试集标签展示

​​​​​​​定量实验结果展示

图7 训练结果展示

训练结果采用测试集总损失值以及精确度衡量,通过图7可以看出模型训练后有较好的预测效果。

​​​​​​​定性实验结果展示

图8 在测试集中抽取一张“狗”照片进行预测

  1. 训练完毕后,通过编写新的脚本程序读取已储存的模型,并通过迁移学习预测输入的照片。

图9 图8所示照片的预测结果

  1. 通过已储存模型对图8所展示照片进行预测,可以成功预测出这是一张“狗”的照片。

​​​​​​​分析实验结果

  1. 通过多次实验检测,通过微调VGG16训练的模型对猫狗分类有良好的预测结果,其测试集精确度达95%,能够基本完成项目目标。

本文转载自: https://blog.csdn.net/joniy0/article/details/129693738
版权归原作者 芝芝士Clim 所有, 如有侵权,请联系我们删除。

“基于VGG16的猫狗分类实战”的评论:

还没有评论