Add camera parameters

parent 42599ee465
commit 56f2aa7d5d
@@ -118,16 +118,16 @@ def create_data(out_root, idx, n_samples, imsize, patterns, K, baseline, blend_i
    cam_x = cam_x_ + rng.uniform(-0.1,0.1)
    cam_y = cam_y_ + rng.uniform(-0.1,0.1)
    cam_z = cam_z_ + rng.uniform(-0.1,0.1)

    tcam = np.array([cam_x, cam_y, cam_z], dtype=np.float32)

    if np.linalg.norm(tcam[0:2])<1e-9:
      Rcam = np.eye(3, dtype=np.float32)
    else:
      Rcam = get_rotation_matrix(center, center-tcam)

    tproj = tcam + basevec
    Rproj = Rcam

    ret['R'].append(Rcam)
    ret['t'].append(tcam)
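In this hunk the camera is re-oriented to look at the scene center, and the projector pose is simply the camera pose shifted by the baseline vector with the same orientation. The helper get_rotation_matrix is not part of this diff; the following is only a minimal look-at sketch of what such a helper could compute (the up vector, the argument order, and the row convention are assumptions, not taken from the repository):

import numpy as np

def look_at_rotation(eye, target, up=np.array([0.0, 1.0, 0.0])):
  # hypothetical stand-in for get_rotation_matrix: world-to-camera rotation whose
  # rows are the camera's right / up / viewing-direction axes
  forward = np.asarray(target, dtype=np.float64) - np.asarray(eye, dtype=np.float64)
  forward /= np.linalg.norm(forward)
  right = np.cross(up, forward)
  right /= np.linalg.norm(right)
  true_up = np.cross(forward, right)
  return np.stack([right, true_up, forward]).astype(np.float32)

# usage in the spirit of the hunk above: the camera looks at `center`,
# the projector sits one baseline vector away with the same orientation
# Rcam = look_at_rotation(tcam, center)
# Rproj, tproj = Rcam, tcam + basevec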
@@ -167,7 +167,7 @@ def create_data(out_root, idx, n_samples, imsize, patterns, K, baseline, blend_i
    ambient = pyrenderer.normal().copy()
    ambient = np.mean(ambient, axis=2)

    # get the noise free IR image $J$
    im = blend_im_rnd * im + (1 - blend_im_rnd) * ambient
    ret[f'ambient{s}'].append( ambient[None].astype(np.float32) )
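The noise-free IR image J is a convex blend of the rendered dot-pattern image and the gray ambient image, weighted by blend_im_rnd. A toy check of that property with stand-in arrays (the 4x4 random images below are placeholders, not renderer output):

import numpy as np

rng = np.random.RandomState(0)
speckle = rng.rand(4, 4).astype(np.float32)   # placeholder for the rendered dot-pattern image
ambient = rng.rand(4, 4).astype(np.float32)   # placeholder for the averaged normal rendering
w = 0.6                                       # the blend_im default set further down

J = w * speckle + (1 - w) * ambient
# a convex combination stays within the per-pixel range of its inputs
assert np.all(J >= np.minimum(speckle, ambient) - 1e-6)
assert np.all(J <= np.maximum(speckle, ambient) + 1e-6)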
@@ -207,13 +207,13 @@ def create_data(out_root, idx, n_samples, imsize, patterns, K, baseline, blend_i
if __name__=='__main__':

  np.random.seed(42)

  # output directory
  with open('../config.json') as fp:
    config = json.load(fp)
  data_root = Path(config['DATA_ROOT'])
  shapenet_root = config['SHAPENET_ROOT']

  data_type = 'syn'
  out_root = data_root / f'{data_type}'
  out_root.mkdir(parents=True, exist_ok=True)
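The script expects a ../config.json providing at least the two keys read here. One minimal way to create it (the paths are placeholders, not taken from the repository):

import json

config = {
  'DATA_ROOT': '/path/to/output/data',          # where the synthetic data will be written
  'SHAPENET_ROOT': '/path/to/shapenet/models',  # root directory of the ShapeNet models
}
with open('../config.json', 'w') as fp:
  json.dump(config, fp, indent=2)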
@@ -228,27 +228,28 @@ if __name__=='__main__':
  except:
    pass

  # load shapenet models
  obj_classes = ['chair']
  objs = get_objs(shapenet_root, obj_classes)

  # camera parameters
- imsize = (480, 640)
+ imsize = (488, 648)
  imsizes = [(imsize[0]//(2**s), imsize[1]//(2**s)) for s in range(4)]
- K = np.array([[567.6, 0, 324.7], [0, 570.2, 250.1], [0 ,0, 1]], dtype=np.float32)
+ # K = np.array([[567.6, 0, 324.7], [0, 570.2, 250.1], [0 ,0, 1]], dtype=np.float32)
+ K = np.array([[1929.5936336276382, 0, 113.66561071478046], [0, 1911.2517985448746, 473.70108079885887], [0 ,0, 1]], dtype=np.float32)
  focal_lengths = [K[0,0]/(2**s) for s in range(4)]
  baseline=0.075
  blend_im = 0.6
  noise = 0

  # capture the same static scene from different view points as a track
  track_length = 4

  # load pattern image
  pattern_path = './kinect_pattern.png'
  pattern_crop = True
  patterns = get_patterns(pattern_path, imsizes, pattern_crop)

  # write settings to file
  settings = {
    'imsizes': imsizes,
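These are the parameters the commit actually changes: the image size moves from (480, 640) to (488, 648), and the old Kinect-style intrinsics are replaced (the previous K is kept as a comment). The multi-scale quantities follow directly from them. The sketch below re-derives that pyramid and, purely as an illustration, also scales the full intrinsic matrix per level; the script itself only stores imsizes and focal_lengths:

import numpy as np

imsize = (488, 648)
K = np.array([[1929.5936336276382, 0, 113.66561071478046],
              [0, 1911.2517985448746, 473.70108079885887],
              [0, 0, 1]], dtype=np.float32)

# per-scale image sizes and focal lengths, exactly as in the settings above
imsizes = [(imsize[0]//(2**s), imsize[1]//(2**s)) for s in range(4)]
focal_lengths = [K[0,0]/(2**s) for s in range(4)]

# hypothetical full intrinsics per level (my own addition): focal lengths and
# principal point shrink together when the image is downsampled by 2**s
Ks = [np.diag([1/(2**s), 1/(2**s), 1.0]).astype(np.float32) @ K for s in range(4)]

for s in range(4):
  print(s, imsizes[s], focal_lengths[s])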
@@ -261,7 +262,7 @@ if __name__=='__main__':
  print(f'write settings to {out_path}')
  with open(str(out_path), 'wb') as f:
    pickle.dump(settings, f, pickle.HIGHEST_PROTOCOL)

  # start the job
  n_samples = 2**10 + 2**13
  for idx in range(start, n_samples):
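For completeness, the pickled settings can be read back as sketched below; the actual file name behind out_path is not visible in this diff, so 'settings.pkl' and the './syn' directory are guesses:

import pickle
from pathlib import Path

out_path = Path('./syn') / 'settings.pkl'   # assumed name/location, not shown in the diff
with open(str(out_path), 'rb') as f:
  settings = pickle.load(f)
print(settings['imsizes'])                  # e.g. the image size pyramid stored above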