Background: I have recently been studying BundleFusion. After getting the official datasets to run, I wanted to build my own dataset and run BundleFusion on it. The Kinect v2 camera provides images at three resolutions: HD 1920x1080, QHD 960x540, and SD 512x424. I chose the middle one, QHD.
After recording for a while, I checked the color and depth images and found that the two had different frame counts. At first I was puzzled and thought about how to synchronize them, but then I looked at the TUM dataset and saw that its depth and color images also have different frame counts. Then it clicked: when running the TUM dataset with ORB-SLAM2, you use the associate.py script provided with the TUM dataset to match depth and color images by the proximity of their timestamps, producing an associate.txt file. Each line of that file contains a color image's timestamp, the image file name (named after that timestamp), and the timestamp and file name of the matching depth image. When the code runs, it reads the corresponding images from the folders according to this matching result.
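For reference, each line of associate.txt has the form "rgb_stamp rgb_file depth_stamp depth_file"; the timestamps below are purely illustrative:

1305031102.175304 rgb/1305031102.175304.png 1305031102.160407 depth/1305031102.160407.png
1305031102.211214 rgb/1305031102.211214.png 1305031102.194330 depth/1305031102.194330.png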
You can download the complete code from my GitHub, put it into your catkin_ws/src, and build it with catkin_make:
https://github.com/Serena2018/save_color_depth_from_kinect2_with_ros
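Assuming a standard catkin workspace at ~/catkin_ws (the path is just an example), the build steps are roughly:

cd ~/catkin_ws/src
git clone https://github.com/Serena2018/save_color_depth_from_kinect2_with_ros
cd ~/catkin_ws
catkin_make
source devel/setup.bash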
Of course, you need to have libfreenect2 and iai_kinect2 set up beforehand; then run roslaunch kinect2_bridge kinect2_bridge.launch.
Then use rostopic list to see which topics are currently being published on the system.
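kinect2_bridge publishes image topics under the /kinect2/hd, /kinect2/qhd and /kinect2/sd namespaces; for the QHD resolution chosen above, the rectified streams of interest look like this (exact names may vary slightly with your iai_kinect2 version and launch configuration):

/kinect2/hd/image_color_rect
/kinect2/qhd/image_color_rect
/kinect2/qhd/image_depth_rect
/kinect2/sd/image_depth_rect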
With these topics available, running the code downloaded above subscribes to the published color and depth image data and saves the frames. I then used the associate.py tool provided with the TUM dataset to match the saved images and obtain an associate.txt file. Finally, I read this file, look up the corresponding images in the directory, rename them according to the file naming format BundleFusion expects, and save the renamed images to a new path.
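Conceptually, the saving step does something like the following minimal sketch. This is not the code from the repository above; the topic names, encodings, and output folder are assumptions, so adapt them to your setup. It saves every frame as a PNG named by its ROS timestamp, TUM-style, and appends "timestamp filename" lines to rgb.txt / depth.txt so they can later be fed to associate.py.

#!/usr/bin/env python
# Minimal sketch of a color/depth saver node (illustrative, not the repo's code).
import os
import cv2
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image

bridge = CvBridge()
OUT = os.path.expanduser("~/kinect2_dataset")   # assumed output root


def save(msg, kind, encoding):
    # name the image after its ROS timestamp, TUM-style
    stamp = "%.6f" % msg.header.stamp.to_sec()
    name = "%s/%s.png" % (kind, stamp)
    img = bridge.imgmsg_to_cv2(msg, desired_encoding=encoding)
    cv2.imwrite(os.path.join(OUT, name), img)
    # keep a "timestamp filename" list for associate.py
    with open(os.path.join(OUT, kind + ".txt"), "a") as f:
        f.write("%s %s\n" % (stamp, name))


if __name__ == "__main__":
    for sub in ("rgb", "depth"):
        d = os.path.join(OUT, sub)
        if not os.path.isdir(d):
            os.makedirs(d)
    rospy.init_node("save_color_depth")
    # color is 8-bit BGR; depth on the *_rect topic is 16-bit (millimeters)
    rospy.Subscriber("/kinect2/qhd/image_color_rect", Image,
                     lambda m: save(m, "rgb", "bgr8"), queue_size=10)
    rospy.Subscriber("/kinect2/qhd/image_depth_rect", Image,
                     lambda m: save(m, "depth", "passthrough"), queue_size=10)
    rospy.spin()

The association and renaming script itself is shown below.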
## In another script we have found the association of color and depth images; now,
## in this script, we pick the corresponding color and depth images according to
## their timestamps, rename them according to the BundleFusion dataset convention,
## and store them in separate folders.

## step one: read the assoc.txt into this project.

import sys
import os
import shutil


def read_file_list(filename):
    """Read a trajectory from a text file.

    File format:
    Each line is "stamp d1 d2 d3 ...", where stamp denotes the timestamp (to be
    matched) and "d1 d2 d3 ..." is arbitrary data (e.g., a 3D position and 3D
    orientation) associated with this timestamp.

    Input:
    filename -- file name

    Output:
    dict -- dictionary of (stamp, data) tuples
    """
    with open(filename) as f:
        data = f.read()
    lines = data.replace(",", " ").replace("\t", " ").split("\n")
    # strip() removes whitespace at the head and tail of a string
    parsed = []
    result = []
    for line in lines:
        tmp = []
        if len(line) > 0 and line[0] != "#":
            for v in line.split(" "):
                if v.strip() != "":
                    tmp.append(v.strip())
            parsed.append(tmp)
    for l in parsed:
        if len(l) > 1:
            result.append((float(l[0]), l[1:]))
    return dict(result)


def associate(first_list, second_list, offset, max_difference):
    """Associate two dictionaries of (stamp, data).

    As the timestamps never match exactly, we aim to find the closest match for
    every input tuple.

    Input:
    first_list -- first dictionary of (stamp, data) tuples
    second_list -- second dictionary of (stamp, data) tuples
    offset -- time offset between both dictionaries (e.g., to model the delay between the sensors)
    max_difference -- search radius for candidate generation

    Output:
    matches -- list of matched tuples ((stamp1, data1), (stamp2, data2))
    """
    # obtain all keys
    first_keys = list(first_list)
    second_keys = list(second_list)
    potential_matches = [(abs(a - (b + offset)), a, b)
                         for a in first_keys
                         for b in second_keys
                         if abs(a - (b + offset)) < max_difference]
    potential_matches.sort()
    matches = []
    for diff, a, b in potential_matches:
        if a in first_keys and b in second_keys:
            first_keys.remove(a)
            second_keys.remove(b)
            matches.append((a, b))
    matches.sort()
    return matches


def read_data_from_file(file_path):
    """Read assoc.txt and return one list of fields per non-comment line."""
    with open(file_path) as f:
        data = f.read()
    lines = data.replace(",", " ").replace("\t", " ").split("\n")
    print(f"read {len(lines)} lines from {file_path}.")
    # Each line of assoc.txt is now split into its parts; we mainly use the
    # second and the fourth field (the color and depth file names) to find the
    # corresponding images, then rename and store them.
    return [[v.strip() for v in line.split(" ") if v.strip() != ""]
            for line in lines if len(line) > 0 and line[0] != "#"]


def load_images_rename_store(file_data, image_folder, output_folder):
    """Copy matched color/depth pairs to output_folder with BundleFusion-style names."""
    print(f"The length of the file is {len(file_data)}.")
    print(f"the path of image_folder is {image_folder}.")
    print(f"the path of output_folder is {output_folder}.")
    rgb_path = image_folder + "/rgb/"
    depth_path = image_folder + "/depth/"
    rgbs = os.listdir(rgb_path)
    depths = os.listdir(depth_path)
    couples = []
    for data in file_data:
        # search the folders by the exact file names taken from assoc.txt
        rgb_name = data[1].split("/")[1]
        depth_name = data[3].split("/")[1]
        couples.append([rgb_name, depth_name])
    print(f"length of couples is {len(couples)}.")
    frame_id = 0
    for couple in sorted(couples):
        if couple[0] in rgbs and couple[1] in depths:
            shutil.copyfile(rgb_path + "/" + couple[0],
                            output_folder + "/rgb/" + "frame-" + str(frame_id).zfill(6) + ".color.png")
            shutil.copyfile(depth_path + "/" + couple[1],
                            output_folder + "/depth/" + "frame-" + str(frame_id).zfill(6) + ".depth.png")
            frame_id = frame_id + 1


## argvs: path_rgb_file, path_depth_file, associate_file_name, offset, max_diff, image_path, output_path
if __name__ == "__main__":
    if len(sys.argv) == 8:
        path_rgb_file = sys.argv[1]
        path_depth_file = sys.argv[2]
        associate_file_name = sys.argv[3]
        offset = sys.argv[4]
        max_diff = sys.argv[5]
        image_path = sys.argv[6]
        output_path = sys.argv[7]

        first_list = read_file_list(path_rgb_file)
        second_list = read_file_list(path_depth_file)
        matches = associate(first_list, second_list, float(offset), float(max_diff))

        filename = image_path + associate_file_name
        # when you use "with open" you don't need to call close()
        with open(filename, "w") as fp:
            for a, b in matches:
                fp.write("%f %s %f %s" % (a, " ".join(first_list[a]),
                                          b - float(offset), " ".join(second_list[b])) + "\n")

        file_data = read_data_from_file(filename)
        load_images_rename_store(file_data, image_path, output_path)
    else:
        print("usage: path_rgb.txt path_depth.txt associate_file_name offset(0 default) "
              "max_diff(0.02 default) images_path output_path")
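As an example invocation (the script name and all paths here are illustrative, the image path needs a trailing slash because associate_file_name is appended to it directly, and the output rgb/ and depth/ folders must already exist since shutil.copyfile does not create directories):

python pick_rename_store.py ~/kinect2_dataset/rgb.txt ~/kinect2_dataset/depth.txt assoc.txt 0.0 0.02 ~/kinect2_dataset/ ~/bundlefusion_dataset

Afterwards the output rgb/ and depth/ folders contain frame-000000.color.png, frame-000000.depth.png, frame-000001.color.png, and so on, which is the per-frame naming that BundleFusion expects.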