由于mikel-brostrom在github上发布的Yolov5_DeepSort_Pytorch更新,使整个代码封装性更好,进而允许采用多种REID特征识别模型,完善了deepsort在检测跟踪方面的性能。本博文记录如何使用此版本Yolov5_DeepSort_Pytorch的过程,同时给出ZQPei REID模型的修改方法,以适应mikel-brostrom更新版本。
使用Yolov5_DeepSort_Pytorch默认的osnet REID实现跟踪track.py
Yolov5_DeepSort_Pytorch中包含了两个链接目录yolov5和reid,不能一次性把github中的代码克隆下来,因此,需分别将三个github代码克隆到本地。
Yolov5_DeepSort_Pytorch:
git clone https://github.com/mikel-brostrom/Yolov5_DeepSort_Pytorch
Yolov5:
git clone https://github.com/ultralytics/yolov5
REID:
git clone https://github.com/KaiyangZhou/deep-person-reid
假定你的deepsort目录为your_dir,是第一个克隆下来的目录。第二个克隆目录是yolov5,将yolov5目录放在your_dir目录下,即your_dir/yolov5。第三个克隆目录是reid,放到your_dir/deep_sort/deep目录下,your_dir/deep_sort/deep/reid。
假定已经安装了conda和虚拟环境,且安装好运行Yolov5_DeepSort_Pytorch所需的模块。进入reid目录,运行
python setup.py develop
如此,即安装好KaiyangZhou的REID环境。
下载yolov5模型权重,放入目录your_dir/yolov5/weights
从KaiyangZhou的github中,Model zoo里下载权重文件,例如osnet_x1_0.pth,放到checkpoint目录:your_dir/deep_sort/deep/checkpoint。
(1)修改deep_sort/configs/deep_sort.yaml
DEEPSORT:
  MODEL_TYPE: "osnet_x1_0"
  # Path to the local ReID checkpoint (added so weights are not downloaded)
  REID_CKPT: '~/your_dir/deep_sort/deep/checkpoint/osnet_x1_0_imagenet.pth'
  # NOTE: YAML requires whitespace before '#'; without it the comment becomes
  # part of the value (e.g. "0.1# 0.2 ..."), so a space is mandatory here.
  MAX_DIST: 0.1  # default 0.2; matching threshold — samples with larger distance are an invalid match
  MAX_IOU_DISTANCE: 0.7  # gating threshold; associations with cost larger than this are disregarded
  MAX_AGE: 90  # default 30; maximum number of missed frames before a track is deleted
  N_INIT: 3  # number of frames a track remains in the initialization phase
  NN_BUDGET: 100  # maximum size of the appearance-descriptor gallery
  MIN_CONFIDENCE: 0.75
  NMS_MAX_OVERLAP: 1.0
这里,增加REID_CKPT,把某些参数设置放到yaml文件中,尽可能减少track.py命令行中的输入参数。
(2)修改track.py中DeepSort类实例的参数定义
# Instantiate the DeepSort tracker; all tuning values come from deep_sort.yaml (cfg).
deepsort = DeepSort( cfg.DEEPSORT.MODEL_TYPE,
cfg.DEEPSORT.REID_CKPT,  # added: path to the local ReID checkpoint file
device,
max_dist=cfg.DEEPSORT.MAX_DIST,
max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
max_age=cfg.DEEPSORT.MAX_AGE,
n_init=cfg.DEEPSORT.N_INIT,
nn_budget=cfg.DEEPSORT.NN_BUDGET,)
此处增加了一个reid权重文件路径参数,故也需在DeepSort类定义中增加该参数model_path,修改deep_sort/deep_sort.py,
__init__()
:
class DeepSort(object):
    """DeepSORT tracker: a torchreid feature extractor plus a cosine-metric tracker.

    ``model_type``/``model_path`` select the torchreid model and its local
    checkpoint, so weights are loaded from disk instead of being downloaded.
    Remaining parameters mirror the DEEPSORT section of deep_sort.yaml.
    """

    def __init__(self, model_type, model_path, device, max_dist=0.2,
                 min_confidence=0.3, nms_max_overlap=1.0, max_iou_distance=0.7,
                 max_age=70, n_init=3, nn_budget=100, use_cuda=True):
        self.min_confidence = min_confidence
        self.nms_max_overlap = nms_max_overlap
        # torchreid feature extractor; model_path points at the local checkpoint
        self.extractor = FeatureExtractor(
            model_name=model_type,
            model_path=model_path,
            device=str(device))
        max_cosine_distance = max_dist
        metric = NearestNeighborDistanceMetric(
            "cosine", max_cosine_distance, nn_budget)
        self.tracker = Tracker(
            metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)
注:mikel好像又改了有关model_path的引入方法,我感觉太复杂,故还是用以上的修改办法,其目的就是从deep_sort/deep/checkpoint中找到权重文件路径,避免从网上下载权重文件,或者从本地缓存.torch中去找权重。
(3)运行deepsort跟踪程序,命令行选项中给出一种比较全的选项
# --yolo_model: yolov5权重文件
# --source:     输入视频文件
# --show-vid:   显示跟踪视频
# --classes 0 2: 0=行人类别, 2=小汽车类别
# --save-txt:   输出兼容MOT16格式文件
# --save-vid:   保存跟踪视频
python track.py --yolo_model ~/your_dir/yolov5/weights/yolov5s.pt \
    --source ~/your_dir/video_demo.mp4 \
    --show-vid \
    --classes 0 2 \
    --save-txt \
    --save-vid
其中,classes 0 表示yolov5检测对象为行人,类型号0。
更改ZQPei REID模型文件
此模型文件命名为model_ZQP.py,放入目录 deep_sort/deep/reid/torchreid/models
模型更改只需添加一个定义函数 def ZQP()
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Residual basic block (two 3x3 convs) with an optional stride-2 downsample."""

    def __init__(self, c_in, c_out, is_downsample=False):
        super(BasicBlock, self).__init__()
        self.is_downsample = is_downsample
        if is_downsample:
            self.conv1 = nn.Conv2d(c_in, c_out, 3, stride=2, padding=1, bias=False)
        else:
            self.conv1 = nn.Conv2d(c_in, c_out, 3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(c_out)
        self.relu = nn.ReLU(True)
        self.conv2 = nn.Conv2d(c_out, c_out, 3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(c_out)
        if is_downsample:
            self.downsample = nn.Sequential(
                nn.Conv2d(c_in, c_out, 1, stride=2, bias=False),
                nn.BatchNorm2d(c_out))
        elif c_in != c_out:
            # Channel change without spatial downsampling: 1x1 projection shortcut.
            self.downsample = nn.Sequential(
                nn.Conv2d(c_in, c_out, 1, stride=1, bias=False),
                nn.BatchNorm2d(c_out))
            self.is_downsample = True

    def forward(self, x):
        y = self.conv1(x)
        y = self.bn1(y)
        y = self.relu(y)
        y = self.conv2(y)
        y = self.bn2(y)
        if self.is_downsample:
            x = self.downsample(x)
        return F.relu(x.add(y), True)


def make_layers(c_in, c_out, repeat_times, is_downsample=False):
    """Stack ``repeat_times`` BasicBlocks; only the first block may downsample."""
    blocks = []
    for i in range(repeat_times):
        if i == 0:
            blocks += [BasicBlock(c_in, c_out, is_downsample=is_downsample)]
        else:
            blocks += [BasicBlock(c_out, c_out)]
    return nn.Sequential(*blocks)


class Net(nn.Module):
    """ZQPei DeepSORT ReID network.

    In training mode ``forward`` returns class logits; in eval mode it returns
    the L2-normalized 512-d appearance feature.  num_classes: market1501=751,
    dukemtmcreid=702.  ``pretrained`` and ``loss`` are accepted only for
    torchreid model-factory compatibility and are not used inside the module.
    """

    def __init__(self, num_classes=751, pretrained=True, loss='softmax', **kwargs):
        super(Net, self).__init__()
        # Input: 3 x H x W (the ReID pipeline resizes to 128 x 64, h x w).
        self.conv = nn.Sequential(
            nn.Conv2d(3, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ELU(inplace=True),
        )
        self.layer1 = make_layers(64, 64, 2, False)
        self.layer2 = make_layers(64, 128, 2, True)
        self.layer3 = make_layers(128, 256, 2, True)
        self.layer4 = make_layers(256, 512, 2, True)
        # AdaptiveAvgPool2d(1) replaces the original fixed AvgPool2d((8, 4)),
        # so any input size collapses to a 512-d vector.
        self.adaptiveavgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Sequential(
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(256, num_classes),
        )

    def forward(self, x):
        x = self.conv(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.adaptiveavgpool(x)
        x = x.view(x.size(0), -1)  # B x 512
        if not self.training:
            # Inference: return the unit-norm appearance feature.
            x = x.div(x.norm(p=2, dim=1, keepdim=True))
            return x
        x = self.classifier(x)
        return x


def ZQP(num_classes=751, pretrained=True, loss='softmax', **kwargs):
    """torchreid model-factory entry point for the ZQPei ReID network."""
    # Fix: forward the caller's ``loss`` instead of hard-coding 'softmax'
    # (the default is unchanged, so existing callers behave identically).
    model = Net(num_classes=num_classes, pretrained=pretrained, loss=loss, **kwargs)
    return model
if __name__ == '__main__':
    # Smoke test: push one random batch through the network.
    net = Net(pretrained=True)
    # NOTE(review): the ReID pipeline resizes crops to 128x64 (h, w); 256x256
    # works here only because of the adaptive pooling — confirm intended size.
    x = torch.randn(4, 3, 256, 256)
    y = net(x)
下面将模型ZQP添加到REID的定义文件中
deep_sort/deep/reid/torchreid/models/__init__.py
中引入ZQP模型文件,添加:
from .model_ZQP import *
在字典__model_factory中添加模型名称ZQP:
__model_factory = {
    # image classification models
    'resnet18': resnet18,
    'resnet34': resnet34,
    'resnet50': resnet50,
    # ...（其余模型省略）
    'ZQP': ZQP,
现在,只要修改deep_sort/configs/deep_sort.yaml中的MODEL_TYPE和REID_CKPT路径,就可运行ZQPei的reid模型ckpt.t7。
MODEL_TYPE: "ZQP"
REID_CKPT: '~/your_dir/deep_sort/deep/checkpoint/ckpt.t7'
另外,由于ZQPei reid模型训练中resize为128x64(hxw),故需修改deep_sort/deep/reid/torchreid/utils/feature_extractor.py中的image_size
def __init__(
        self,
        model_name='',
        model_path='',
        image_size=(128, 64),  # 原为 (256, 128)，顺序为 (h, w)
        pixel_mean=[0.485, 0.456, 0.406],
        pixel_std=[0.229, 0.224, 0.225],
        pixel_norm=True,
        device='cuda',
        verbose=True):
至此,我们完成了添加ZQP reid模型到KaiyangZhou REID模型中的整个过程,并可以用同样的命令行参数,运行track.py
reid模型训练
deepsort中有两个神经网络模型,一个是目标检测模型yolov5,另一个是特征识别模型reid。yolov5模型训练有很多文章可参考,省略,此处侧重谈谈reid模型的训练。KaiyangZhou给出识别行人特征的reid模型训练方法,训练程序deep_sort/deep/reid/scripts/main.py。
训练可分别采用两个数据集:Market-1501和DukeMTMC-reID。
Market-1501数据集:
训练数据集”bounding_box_train“有751个行人ID,包含 12,936 张图像,平均每人有17.2张训练数据;
测试集“bounding_box_test”有750个行人ID,包含19,732张图像,平均每人有26.3张测试数据。
查询集query从测试集中挑选出750个行人在6个摄像头下的图片，共3368张查询图像。
dukemtmc-reid数据集:
“bounding_box_test”——用于测试集的 702 人,包含 17,661 张图像(随机采样,702 ID + 408 distractor ID)
“bounding_box_train”——用于训练集的 702 人,包含 16,522 张图像(随机采样)
“query”——为测试集中的 702 人在每个摄像头中随机选择一张图像作为 query,共有 2,228 张图像
只要从网上下载这两个数据集,使用其安排好的目录结构无需更改,只要指出数据集的根目录。
以下给出ZQPei模型在dukemtmc-reid数据集的训练方法
构造配置文件:deep_sort/deep/reid/configs/ZQP_128x64.yaml
# torchreid training configuration for the ZQP ReID model (128x64 inputs).
model:
  name: 'ZQP'
  pretrained: True

data:
  type: 'image'
  sources: ['dukemtmcreid']
  targets: ['dukemtmcreid']  # market1501, dukemtmcreid
  height: 128
  width: 64
  combineall: False
  transforms: ['random_flip']  # random_flip random_erase color_jitter
  save_dir: 'deep_sort/deep/reid/log/ZQP'

loss:
  name: 'softmax'
  softmax:
    label_smooth: True

train:
  optim: 'amsgrad'
  lr: 0.0015
  max_epoch: 40  # 150
  batch_size: 64
  fixbase_epoch: 10
  open_layers: ['classifier']
  lr_scheduler: 'cosine'  # stepsize: [60]

test:
  batch_size: 300
  dist_metric: 'euclidean'
  normalize_feature: False
  evaluate: False  # test only
  eval_freq: -1
  rerank: False
运行训练程序main.py
python main.py \
--config-file deep_sort/deep/reid/configs/ZQP_128x64.yaml \
--root ~/your_datasets/dukemtmc_reid \
model.load_weights ~/your_dir/deep_sort/deep/checkpoint/ckpt.t7
这里,在配置文件ZQP_128x64.yaml中pretrained = True,表示需加载预先训练的权重ckpt.t7。如果从头开始训练,则pretrained = False,并在命令行中删除model.load_weights项。命令行指定配置文件ZQP_128x64.yaml,数据集根目录dukemtmc_reid。
此外,采用dukemtmc训练数据集行人ID数为702,所以,需更改特征类型为702, 即模型文件model_ZQP.py中num_classes=702。
数据集目录组成应为:
~/your_datasets/dukemtmc_reid/dukemtmc-reid/DukeMTMC-reID/bounding_box_test, bounding_box_train, query
用逗号分开的项目表示在DukeMTMC-reID目录下有三个子目录bounding_box_test、bounding_box_train和query。
用自己的数据集训练reid
要利用KaiyangZhou reid训练程序，需要将数据集构造成market-1501的结构形式，即bounding_box_train, bounding_box_test, query。
下面是一个例子,用veri-wild提供的小汽车数据集来构造符合market-1501构成形式的数据集。
veri-wild提供了包含40多万张4万辆汽车的id图片,每个汽车ID目录下有多张不同摄像机和不同时刻获取的汽车图片。假定选择800个汽车ID组成训练集,另外800个汽车ID组成测试集,从测试集中取出每个汽车ID在每个摄像头下的图片,组成query。
(1)从veri-wild中分别抽取800个ID放入train_800和test_800,每个ID的图片在20-30张之间。
(2)对train_800和test_800图片进行resize,压缩数据集容量。
(3)按照market-1501文件命名规则更改数据集图片名称,并从test数据集挑选图片放入query。
由此,仿照market-1501构成训练reid的小汽车数据集,进行汽车特征的reid特征识别模型。
参考程序如下:
import os
from shutil import copyfile, copytree, rmtree
from torch.functional import broadcast_shapes
from PIL import Image
import matplotlib.pyplot as plt
import random
import shutil
def get_cam_n(f_list):
    """Return, for each camera id 1-6, the first [file_prefix, cam_id] entry of f_list."""
    A = []
    for cam_n in range(1, 7):
        for entry in f_list:
            if entry[1] == cam_n:
                A.append(entry)
                break
    return A


def make_fileprefix(src_dir, tar_dir, gallary_dir):
    """Rename/copy per-ID images into Market-1501-style flat folders.

    src_dir contains one subdirectory per vehicle/person ID; every image is
    copied to tar_dir as "<ID>_c<cam>_f<prefix>.jpg" with a random camera id,
    and one image per camera id is additionally copied to gallary_dir (query).
    """
    for subdir in os.listdir(src_dir):  # one subdirectory per ID
        src = src_dir + "/" + subdir
        f_list = []
        for file_name in os.listdir(src):
            fileprefix = os.path.splitext(file_name)[0]
            cam_n = random.randint(1, 6)  # fake camera id, Market-1501 naming
            f_list.append([fileprefix, cam_n])
            file_name1 = subdir + "_c" + str(cam_n) + "_f" + fileprefix + ".jpg"
            copyfile(src + "/" + file_name, tar_dir + "/" + file_name1)
            print("copyfile: ", file_name1)
        # One image per camera id goes to the gallery/query folder.
        f_cam = get_cam_n(f_list)
        for file_prefix, cam_n in f_cam:
            file_name1 = subdir + "_c" + str(cam_n) + "_f" + file_prefix + ".jpg"
            # NOTE(review): assumes all source images have a .jpg extension — confirm.
            file_name0 = src + "/" + file_prefix + '.jpg'
            copyfile(file_name0, gallary_dir + "/" + file_name1)
    return


def image_resize(src_dir, tar_dir):
    """Shrink every per-ID image so its longest side is at most 280 px."""
    for subdir in os.listdir(src_dir):  # one subdirectory per ID
        src = src_dir + "/" + subdir
        tar_ID_dir = tar_dir + "/" + subdir
        if os.path.isdir(tar_ID_dir):
            rmtree(tar_ID_dir)
        os.mkdir(tar_ID_dir)
        for file_name in os.listdir(src):
            img = Image.open(src + "/" + file_name)
            w, h = img.size
            imax = max(h, w)
            if imax < 280:
                # Already small enough: copy unchanged.
                copyfile(src + "/" + file_name, tar_ID_dir + "/" + file_name)
            else:
                rate_hw = 280 / imax
                w, h = int(rate_hw * w), int(rate_hw * h)
                new_img = img.resize((w, h), Image.BILINEAR)
                new_img.save(tar_ID_dir + "/" + file_name)
            print(tar_ID_dir + "/" + file_name)
    return


if __name__ == '__main__':
    # Fix: '~' is not expanded by os.path/shutil, so expand it explicitly.
    src_dir = os.path.expanduser("~/your_datasets/resize/train_800")
    tar_dir = os.path.expanduser("~/your_datasets/train_box")
    if os.path.isdir(tar_dir):
        rmtree(tar_dir)
    os.mkdir(tar_dir)
    # Fix: original lacked the leading '~/' used by the sibling paths.
    gallary_dir = os.path.expanduser("~/your_datasets/resize/gallary")
    if os.path.isdir(gallary_dir):
        rmtree(gallary_dir)
    os.mkdir(gallary_dir)
    make_fileprefix(src_dir, tar_dir, gallary_dir)
版权归原作者 王定邦 所有, 如有侵权,请联系我们删除。