Notes on the ILSVRC2015_VID dataset

In the dataset I downloaded, the contents of Data and Annotations do not match, i.e. some labels are missing. For example, Data/train contains ILSVRC2015_train_00001006, but Annotations/train has no ILSVRC2015_train_00001006 xml folder for it. Because of this, the officially provided train_seq.npy and val_seq.npy files have to be rebuilt.

This code can also be used to turn your own dataset into an .npy file for training with the YOLOV model, but a custom dataset should follow the same directory structure as ILSVRC2015.
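To see the mismatch for yourself, you can compare the video folders under Data with those under Annotations. The following is a minimal sketch that assumes the same local paths as the code below (the subset folders ILSVRC2015_VID_train_0000 to _0003 follow the standard ILSVRC2015 layout):

import os

data_root = r"D:\Aware_model\ILSVRC2015\Data\VID\train"
anno_root = r"D:\Aware_model\ILSVRC2015\Annotations\VID\train"

for subset in os.listdir(data_root):  # ILSVRC2015_VID_train_0000 ... ILSVRC2015_VID_train_0003
    data_videos = set(os.listdir(os.path.join(data_root, subset)))
    anno_dir = os.path.join(anno_root, subset)
    anno_videos = set(os.listdir(anno_dir)) if os.path.isdir(anno_dir) else set()
    # videos that have frames under Data but no xml folder under Annotations
    print(subset, 'videos without labels:', sorted(data_videos - anno_videos))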

First, load an .npy file to inspect its structure. It is simply a list of lists: each inner list represents one video and contains that video's frames (image paths).

import numpy as np

file = np.load(r'D:\Aware_model\YOLOV-master\data\val_seq.npy', allow_pickle=True)
print(file)
np.savetxt(r'D:\Aware_model\YOLOV-master\data\val_seq.txt', file, delimiter=',', fmt='%s')
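For reference, the loaded array looks roughly like this (illustrative only, paths abbreviated): each element is one video, listed frame by frame.

# [
#   ['ILSVRC2015/Data/VID/val/ILSVRC2015_val_00000000/000000.JPEG',
#    'ILSVRC2015/Data/VID/val/ILSVRC2015_val_00000000/000001.JPEG',
#    ...],
#   ['ILSVRC2015/Data/VID/val/ILSVRC2015_val_00000001/000000.JPEG',
#    ...],
#   ...
# ]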

Once we know the format of the .npy file, we can build the dataset configuration file (the .npy file) from the data we actually have. First, scan the Annotations folders to see which videos have annotation files and collect their names into a list; then walk the video folders under Data and check whether each name is in the Annotations list. If it is, that video has labels.

import os
import numpy as np

# Collect the names of the annotated videos; some videos exist under Data but have no labels
list_train = []
list_val = []

path1 = r"D:\Aware_model\ILSVRC2015\Annotations\VID\train\ILSVRC2015_VID_train_0000"
path2 = r"D:\Aware_model\ILSVRC2015\Annotations\VID\train\ILSVRC2015_VID_train_0001"
path3 = r"D:\Aware_model\ILSVRC2015\Annotations\VID\train\ILSVRC2015_VID_train_0002"
path4 = r"D:\Aware_model\ILSVRC2015\Annotations\VID\train\ILSVRC2015_VID_train_0003"
path_val = r'D:\Aware_model\ILSVRC2015\Annotations\VID\val'

f1 = os.listdir(path1)  # names of all video folders under ILSVRC2015_VID_train_0000
f2 = os.listdir(path2)
f3 = os.listdir(path3)
f4 = os.listdir(path4)
f_val = os.listdir(path_val)

list_train.append(f1)
list_train.append(f2)
list_train.append(f3)
list_train.append(f4)
list_val.append(f_val)

# flatten into a single list of annotated video names,
# e.g. ['ILSVRC2015_train_00084001', 'ILSVRC2015_train_00085000', 'ILSVRC2015_train_00085001', ...]
list_train = [item for sub in list_train for item in sub]
list_val = [item for sub in list_val for item in sub]
print('train', list_train)
print('val', list_val)

# def list_txt(path, list=None):
#     '''
#     :param path: where to store the list
#     :param list: the list data
#     :return: None / rdlist. With only `path` given it works in read mode and loads the txt back into a list;
#              with both `path` and `list` given it works in save mode and writes the list to the txt file.
#     '''
#     if list != None:
#         file = open(path, 'a')
#         file.write(str(list))
#         file.close()
#         return None
#     else:
#         file = open(path, 'r')
#         rdlist = eval(file.read())
#         file.close()
#         return rdlist

# Collect the image paths of every labelled video and save them as an .npy file
def get_path(id, path, sava_path):
    paths2 = []
    if id == 'val':  # path: 'D:\Aware_model\ILSVRC2015\Data\VID\val'
        for dirname, _, filenames in os.walk(path):
            # dirname:   D:\Aware_model\ILSVRC2015\Data\VID\val\ILSVRC2015_val_00000000
            # filenames: ['000000.JPEG', '000001.JPEG', '000002.JPEG', '000003.JPEG', '000004.JPEG', ...]
            name = dirname.split('\\')[-1]  # e.g. ILSVRC2015_val_00000000
            # In my download the JPEGs and xml files do not match one to one (e.g. Data/train has
            # ILSVRC2015_train_00084001 while Annotations/train does not), so the annotation names
            # are used as a list and only videos whose name appears in that list are kept.
            if name in list_val:
                paths = []
                for filename in filenames:
                    # image path, made relative to the project root
                    paths.append(os.path.join(dirname, filename).replace('\\', '/').replace('D:/Aware_model/', ''))
                if len(paths) > 0:
                    paths2.append(paths)
        np.save(sava_path, paths2, allow_pickle=True, fix_imports=True)
    else:
        for dirname, _, filenames in os.walk(path):
            name = dirname.split('\\')[-1]
            if name in list_train:
                print(name)
                paths = []
                for filename in filenames:
                    paths.append(os.path.join(dirname, filename).replace('\\', '/').replace('D:/Aware_model/', ''))
                if len(paths) > 0:
                    paths2.append(paths)
        np.save(sava_path, paths2, allow_pickle=True, fix_imports=True)

get_path('val', r'D:\Aware_model\ILSVRC2015\Data\VID\val', r'D:\Aware_model\YOLOV-master\data\val_seq.npy')
get_path('train', r'D:\Aware_model\ILSVRC2015\Data\VID\train', r'D:\Aware_model\YOLOV-master\data\train_seq.npy')
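After running this, a quick sanity check (continuing the script above, with the same paths) confirms how many annotated videos ended up in each split:

train_seq = np.load(r'D:\Aware_model\YOLOV-master\data\train_seq.npy', allow_pickle=True)
val_seq = np.load(r'D:\Aware_model\YOLOV-master\data\val_seq.npy', allow_pickle=True)
print('train videos:', len(train_seq), 'val videos:', len(val_seq))
print('frames in the first val video:', len(val_seq[0]))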

Now let me briefly explain how YOLOV reads the .npy file, taking the loading of val_seq.npy in vid_train as an example. Following the call jumps into vid.py. The main operation is to read the frames video by video and cut each video into segments of a fixed length. For example, if the first video has 300 frames and they are grouped 10 at a time, that gives 30 groups of 10; combining all videos, say 1,000 of them, gives 30,000 groups of 10. The groups are then shuffled, and the first 200 are taken as the test set.

 

def photo_to_sequence(self, dataset_path, lframe, gframe):
    '''
    Args:
        dataset_path: list, every element is a list containing all frames in a video dir
    Returns:
        split result
    '''
    res = []
    dataset = np.load(dataset_path, allow_pickle=True).tolist()
    for element in dataset:
        # id = element[0].split('/')[-2].split('_')[-1]
        ele_len = len(element)
        if ele_len < lframe + gframe:
            # TODO fix the unsolved part
            continue
        else:
            if self.mode == 'random':  # shuffle the frames, then cut them into chunks of gframe
                split_num = int(ele_len / (gframe))
                random.shuffle(element)
                for i in range(split_num):
                    res.append(element[i * gframe:(i + 1) * gframe])
            elif self.mode == 'uniform':
                split_num = int(ele_len / (gframe))
                all_uniform_frame = element[:split_num * gframe]
                for i in range(split_num):
                    res.append(all_uniform_frame[i::split_num])
            elif self.mode == 'gl':
                split_num = int(ele_len / (lframe))
                all_local_frame = element[:split_num * lframe]
                for i in range(split_num):
                    g_frame = random.sample(element[:i * lframe] + element[(i + 1) * lframe:], gframe)
                    res.append(all_local_frame[i * lframe:(i + 1) * lframe] + g_frame)
            else:
                print('unsupport mode, exit')
                exit(0)

    # test = []
    # for ele in res:
    #     test.extend(ele)
    # random.shuffle(test)
    # i = 0
    # for ele in res:
    #     for j in range(gframe):
    #         ele[j] = test[i]
    #         i += 1

    if self.val:
        random.seed(42)
        random.shuffle(res)
        if self.tnum == -1:
            return res
        else:
            return res[:200]  # [1000:1250] # [2852:2865] :self.tnums
    else:
        random.shuffle(res)
        return res[:3000]  # 15000
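To make the grouping concrete, here is a small standalone sketch of the 'random' mode on synthetic frame names (not the class method itself, just the arithmetic described above):

import random

element = ['%06d.JPEG' % i for i in range(300)]   # one fake video with 300 frames
gframe = 10
split_num = len(element) // gframe                # 30 groups
random.shuffle(element)
groups = [element[i * gframe:(i + 1) * gframe] for i in range(split_num)]
print(len(groups), len(groups[0]))                # 30 10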

Finally, how does it match each image with its xml file? In short, for an image path such as ILSVRC2015/Data/VID/val/ILSVRC2015_val_00000000/000000.JPEG, it replaces Data with Annotations and JPEG with xml, which yields the path of the corresponding label. This code also lives in vid.py; it is reached when vid_train.py runs self.prefetcher = DataPrefetcher(self.train_loader), the iterator that fetches the label for each picture.
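For example, the mapping for the path above is just two string replacements:

img_path = 'ILSVRC2015/Data/VID/val/ILSVRC2015_val_00000000/000000.JPEG'
xml_path = img_path.replace("Data", "Annotations").replace("JPEG", "xml")
print(xml_path)  # ILSVRC2015/Annotations/VID/val/ILSVRC2015_val_00000000/000000.xml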

def get_annotation(self, path, test_size):
    # map the image path to the corresponding annotation path
    path = path.replace("Data", "Annotations").replace("JPEG", "xml")
    if os.path.isdir(path):  # if the path is a directory, collect every xml file inside it
        files = get_xml_list(path)
    else:
        files = [path]
    files.sort()
    anno_res = []
    for xmls in files:
        # if os.path.exists(path) is False:
        #     continue
        photoname = xmls.replace("Annotations", "Data").replace("xml", "JPEG")
        file = minidom.parse(xmls)
        root = file.documentElement
        objs = root.getElementsByTagName("object")
        width = int(root.getElementsByTagName('width')[0].firstChild.data)
        height = int(root.getElementsByTagName('height')[0].firstChild.data)
        tempnode = []
        for obj in objs:
            nameNode = obj.getElementsByTagName("name")[0].firstChild.data
            xmax = int(obj.getElementsByTagName("xmax")[0].firstChild.data)
            xmin = int(obj.getElementsByTagName("xmin")[0].firstChild.data)
            ymax = int(obj.getElementsByTagName("ymax")[0].firstChild.data)
            ymin = int(obj.getElementsByTagName("ymin")[0].firstChild.data)
            x1 = np.max((0, xmin))
            y1 = np.max((0, ymin))
            x2 = np.min((width, xmax))
            y2 = np.min((height, ymax))
            if x2 >= x1 and y2 >= y1:
                # tempnode.append((name_num[nameNode], x1, y1, x2, y2,))
                tempnode.append((x1, y1, x2, y2, name_num[nameNode],))
        num_objs = len(tempnode)
        res = np.zeros((num_objs, 5))
        r = min(test_size[0] / height, test_size[1] / width)
        for ix, obj in enumerate(tempnode):
            res[ix, 0:5] = obj[0:5]
        res[:, :-1] *= r  # scale the box coordinates to the network input size
        anno_res.append(res)
    return anno_res
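Note that name_num, which is defined elsewhere in vid.py, maps the <name> string of each xml object (a WordNet synset ID in ILSVRC2015 VID) to an integer class index. A hypothetical minimal stand-in, just to make the snippet above self-contained, could look like this:

# Hypothetical stand-in for name_num (the real mapping is defined in vid.py)
name_num = {
    'n02084071': 0,  # dog (example WordNet ID)
    'n02121808': 1,  # domestic cat (example WordNet ID)
    # ... the remaining ILSVRC2015 VID classes
}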

def pull_item(self, path):
    """
    One image / label pair for the given index is picked up and pre-processed.
    Args:
        index (int): data index
    Returns:
        img (numpy.ndarray): pre-processed image
        padded_labels (torch.Tensor): pre-processed label data.
            The shape is :math:`[max_labels, 5]`.
            each label consists of [class, xc, yc, w, h]:
                class (float): class index.
                xc, yc (float) : center of bbox whose values range from 0 to 1.
                w, h (float) : size of bbox whose values range from 0 to 1.
        info_img : tuple of h, w.
            h, w (int): original shape of the image
        img_id (int): same as the input index. Used for evaluation.
    """
    path = os.path.join(self.dataset_pth, path)
    annos = self.get_annotation(path, self.img_size)[0]  # labels of the image (top-left corner, bottom-right corner, class), 1x5
    img = cv2.imread(path)
    height, width = img.shape[:2]
    img_info = (height, width)
    r = min(self.img_size[0] / img.shape[0], self.img_size[1] / img.shape[1])
    img = cv2.resize(  # resize the image while keeping the aspect ratio
        img,
        (int(img.shape[1] * r), int(img.shape[0] * r)),
        interpolation=cv2.INTER_LINEAR,
    ).astype(np.uint8)
    return img, annos, img_info, path

def __getitem__(self, path):
    img, target, img_info, path = self.pull_item(path)
    if self.preproc is not None:
        img, target = self.preproc(img, target, self.input_dim)  # convert boxes to (center, width, height) form
    return img, target, img_info, path

An explanation of the attention-mechanism part of the YOLOV code:

YOLOV: based on YOLOX, making still-image object detectors excel at video object detection, and the appeal of the attention mechanism (一勺汤的博客, CSDN)

Article link
