Remove useless stuff
commit 28ee9003a6
parent 3f947c9dd2
@@ -106,57 +106,6 @@ class Worker(torchext.Worker):
        out = net(self.data['im0'])
        return out

    @staticmethod
    def find_corr_points_and_F(left, right):
        sift = cv2.SIFT_create()
        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(cv2.normalize(left, None, 0, 255, cv2.NORM_MINMAX).astype('uint8'), None)
        kp2, des2 = sift.detectAndCompute(cv2.normalize(right, None, 0, 255, cv2.NORM_MINMAX).astype('uint8'), None)
        # FLANN parameters
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)
        pts1 = []
        pts2 = []
        # ratio test as per Lowe's paper
        for i, (m, n) in enumerate(matches):
            if m.distance < 0.8 * n.distance:
                pts2.append(kp2[m.trainIdx].pt)
                pts1.append(kp1[m.queryIdx].pt)

        pts1 = np.int32(pts1)
        pts2 = np.int32(pts2)
        F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_LMEDS)
        # We select only inlier points
        pts1 = pts1[mask.ravel() == 1]
        pts2 = pts2[mask.ravel() == 1]
        return pts1, pts2, F

    def calc_sgbm_gt(self):
        sgbm_matcher = cv2.StereoSGBM_create()
        disp_gt = []
        # cam_view = np.array(np.array_split(self.data['im0'].detach().to('cpu').numpy(), 4)[2:])
        # for i in range(self.data['im0'].shape[0]):
        for i in range(1):
            cam_view = self.data['im0'].detach().to('cpu').numpy()[i, 0]
            pattern = self.pattern_proj.to('cpu').numpy()[i, 0]
            pts_l, pts_r, F = self.find_corr_points_and_F(cam_view, pattern)
            H_l, _ = cv2.findHomography(pts_l, pts_r)
            H_r, _ = cv2.findHomography(pts_r, pts_l)

            left_rect = cv2.warpPerspective(cam_view, H_l, cam_view.shape)
            right_rect = cv2.warpPerspective(pattern, H_r, pattern.shape)

            transform = transforms.ToTensor()
            disparity_gt = transform(cv2.normalize(
                sgbm_matcher.compute(cv2.normalize(left_rect, None, 0, 255, cv2.NORM_MINMAX).astype('uint8'),
                                     cv2.normalize(right_rect, None, 0, 255, cv2.NORM_MINMAX).astype('uint8')), None,
                alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F).T)
            disp_gt.append(disparity_gt)
        return disp_gt

    def loss_forward(self, out, train):
        out, edge = out
        if not (isinstance(out, tuple) or isinstance(out, list)):
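Side note on the removed calc_sgbm_gt: it passes cam_view.shape directly as the dsize of cv2.warpPerspective, but OpenCV expects dsize as (width, height) while a NumPy .shape tuple is (height, width), and it rescales the raw SGBM output with cv2.normalize instead of undoing StereoSGBM's fixed-point scaling (the matcher returns int16 disparities multiplied by 16). Below is a minimal, self-contained sketch of just the SGBM step on an already-rectified 8-bit pair; the helper name, matcher parameters, and synthetic data are illustrative assumptions, not the project's settings.

import cv2
import numpy as np

def sgbm_disparity(left_rect, right_rect):
    # Hypothetical helper: float32 disparity map from a rectified, 8-bit pair.
    # StereoSGBM.compute returns disparity as int16 scaled by 16.
    matcher = cv2.StereoSGBM_create(
        minDisparity=0,
        numDisparities=64,   # must be a multiple of 16
        blockSize=5,
    )
    raw = matcher.compute(left_rect, right_rect)
    return raw.astype(np.float32) / 16.0

# Usage on synthetic data: shift a random texture by a known horizontal offset.
rng = np.random.default_rng(0)
left = (rng.random((128, 256)) * 255).astype(np.uint8)
right = np.roll(left, -8, axis=1)          # roughly 8 px disparity everywhere
disp = sgbm_disparity(left, right)
print(disp.shape, float(np.median(disp[disp > 0])))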
@@ -181,24 +130,8 @@ class Worker(torchext.Worker):
        edge0 = 1 - torch.sigmoid(edge[0])
        val = 0
        if isinstance(out[0], tuple):
            # val = self.disparity_loss(out[0][1], edge0)
            # FIXME disparity_loss is unsupervised, we want supervised(?)
            # why not just use the GT we already have anyway?
            # gt = self.data[f'disp0'].type('torch.LongTensor')

            # NOTE use supervised disparity loss
            val += self.sup_disp_loss(out[0][1], self.data['disp0'])
            # disp_gt = self.calc_sgbm_gt()
            # if len(disp_gt) > 1:
            # disparity_gt = torch.stack(disp_gt).to('cuda')
            # # val += self.sup_disp_loss(out[0][1], disparity_gt)
            # else:
            # disparity_gt = disp_gt[0].to('cuda')
            # val += self.sup_disp_loss(out[0][1][0], disparity_gt)
            # print(disparity_gt)
            # print(disparity_gt.shape)
            # print(out[0][1])
            # print(out[0][1].shape)
        if isinstance(out[0], tuple):
            val += self.disparity_loss(out[0][0], edge0)
        else:
            val += self.disparity_loss(out[0], edge0)
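For reference, sup_disp_loss used above is defined elsewhere in the repository. A plausible form of such a supervised disparity loss is an L1 penalty against the ground-truth disparity, masked to valid pixels, roughly as sketched below; the function name, the validity convention (GT <= 0 means missing), and the dummy shapes are assumptions for illustration, not the repo's actual implementation.

import torch

def masked_l1_disparity_loss(pred, gt):
    # L1 loss between predicted and ground-truth disparity,
    # ignoring pixels where the GT is missing (encoded here as <= 0).
    valid = gt > 0
    if valid.sum() == 0:
        return pred.new_zeros(())   # nothing to supervise in this batch
    return (pred[valid] - gt[valid]).abs().mean()

# Dummy usage on a batch of single-channel disparity maps.
pred = torch.rand(2, 1, 64, 64) * 32
gt = torch.rand(2, 1, 64, 64) * 32
gt[:, :, :8] = 0                    # pretend the top rows have no ground truth
print(masked_l1_disparity_loss(pred, gt).item())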