Traceback (most recent call last):
File "/root/.pycharm_helpers/pydev/pydevd.py", line 1483, in _exec
pydev_imports.execfile(file, globals, locals) # execute the script
File "/root/.pycharm_helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "/root/autodl-tmp/rxh/mmrotate/tools/train.py", line 192, in
main()
File "/root/autodl-tmp/rxh/mmrotate/tools/train.py", line 181, in main
train_detector(
File "/root/autodl-tmp/rxh/mmrotate/mmrotate/apis/train.py", line 140, in train_detector
runner.run(data_loaders, cfg.workflow)
File "/root/miniconda3/envs/mmrotate/lib/python3.8/site-packages/mmcv/runner/epoch_based_runner.py", line 136, in run
epoch_runner(data_loaders[i], **kwargs)
File "/root/autodl-tmp/rxh/mmrotate/mmrotate/utils/my_epoch_based_runner.py", line 23, in train
self.run_iter(data_batch, train_mode=True, **kwargs)
File "/root/miniconda3/envs/mmrotate/lib/python3.8/site-packages/mmcv/runner/epoch_based_runner.py", line 31, in run_iter
outputs = self.model.train_step(data_batch, self.optimizer,
File "/root/miniconda3/envs/mmrotate/lib/python3.8/site-packages/mmcv/parallel/data_parallel.py", line 77, in train_step
return self.module.train_step(*inputs[0], **kwargs[0])
File "/root/miniconda3/envs/mmrotate/lib/python3.8/site-packages/mmdet/models/detectors/base.py", line 248, in train_step
losses = self(**data)
File "/root/miniconda3/envs/mmrotate/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/root/miniconda3/envs/mmrotate/lib/python3.8/site-packages/mmcv/runner/fp16_utils.py", line 116, in new_func
return old_func(*args, **kwargs)
File "/root/miniconda3/envs/mmrotate/lib/python3.8/site-packages/mmdet/models/detectors/base.py", line 172, in forward
return self.forward_train(img, img_metas, **kwargs)
File "/root/autodl-tmp/rxh/mmrotate/mmrotate/models/detectors/single_stage.py", line 87, in forward_train
losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
File "/root/miniconda3/envs/mmrotate/lib/python3.8/site-packages/mmdet/models/dense_heads/base_dense_head.py", line 335, in forward_train
losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
File "/root/miniconda3/envs/mmrotate/lib/python3.8/site-packages/mmcv/runner/fp16_utils.py", line 205, in new_func
return old_func(*args, **kwargs)
File "/root/autodl-tmp/rxh/mmrotate/mmrotate/models/dense_heads/rotated_match_head.py", line 331, in loss
losses_cls, losses_bbox = multi_apply(
File "/root/miniconda3/envs/mmrotate/lib/python3.8/site-packages/mmdet/core/utils/misc.py", line 30, in multi_apply
return tuple(map(list, zip(*map_results)))
File "/root/autodl-tmp/rxh/mmrotate/mmrotate/models/dense_heads/rotated_match_head.py", line 249, in loss_single
mask_pos, mask_neg, bbox_indexes = match(anchors_xyxy, bboxes_xyxy, bbox_target, anchors[:, :4], self.iou_thresh, process=globalvar.get_value('process'), batch=batch)
File "/root/autodl-tmp/rxh/mmrotate/mmrotate/models/dense_heads/rotated_match_head.py", line 381, in match
sampled_bg_indices = np.random.choice(bg_indices.cpu(), size=num_bg)
File "mtrand.pyx", line 915, in numpy.random.mtrand.RandomState.choice
ValueError: 'a' cannot be empty unless no samples are taken
The relevant lines in match() (rotated_match_head.py, around line 381) are:
hard_indices = ((max_ious > 0.1) & (max_ious < iou_thresh[0])).nonzero().squeeze()  # anchors with moderate overlap ("hard" candidates)
bg_indices = (max_ious < 1e-2).nonzero().squeeze()  # anchors with almost no overlap (background)
sampled_hard_indices = np.random.choice(hard_indices.cpu(), size=num_hard)
sampled_bg_indices = np.random.choice(bg_indices.cpu(), size=num_bg)  # this is the line that crashes
After debugging, I found that bg_indices is empty: none of the values in max_ious is below 1e-2:
tensor([0.1603, 0.1990, 0.2419, 0.1708, 0.2114, 0.2560, 0.1604, 0.1991, 0.2420,
0.2273, 0.2722, 0.3195, 0.2264, 0.2711, 0.3182, 0.1992, 0.2778, 0.3442,
0.2928, 0.4537, 0.5659, 0.2928, 0.4075, 0.4993, 0.2671, 0.3646, 0.4870,
0.2928, 0.4537, 0.5900, 0.2928, 0.4222, 0.4993, 0.2671, 0.3646, 0.4870,
......
0.0886, 0.1406, 0.2088, 0.0732, 0.1130, 0.1426, 0.0998, 0.1542, 0.2280,
0.0447, 0.0686, 0.0991, 0.0367, 0.0592, 0.0886, 0.0512, 0.0761, 0.1076,
0.0230, 0.0414, 0.0669, 0.0239, 0.0424, 0.0680, 0.0169, 0.0344, 0.0591],
device='cuda:0')
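For reference, np.random.choice raises exactly this error whenever the candidate array is empty but size > 0, which matches the empty bg_indices above. A minimal standalone illustration (size=4 here just stands in for num_bg):

import numpy as np
import torch

# a few max_ious values mirroring the tensor printed above: nothing below 1e-2
max_ious = torch.tensor([0.1603, 0.1990, 0.2419, 0.0169, 0.0344, 0.0591])
bg_indices = (max_ious < 1e-2).nonzero().squeeze()   # empty LongTensor
print(bg_indices.numel())                            # 0
np.random.choice(bg_indices.cpu().numpy(), size=4)
# ValueError: 'a' cannot be empty unless no samples are taken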
How was this 1e-2 threshold chosen? If I raise it, won't the sampled anchors no longer really be background? Also, hard_indices sometimes ends up empty in the same way. What is the right way to handle these cases?