Preface
For the PointPillars (PP) reference algorithm, the exported model includes preprocessing by default. If you need to export a PP model without preprocessing, modify the configuration as described in the steps below. The walkthrough uses J5 OE 1.1.62 as an example; other versions may differ slightly.
Config
horizon_model_train_sample/scripts/configs/detection/pointpillars/pointpillars_kitti_car.py
- Comment out the pre_process section of deploy_model:
# model settings
deploy_model = dict(
    type="PointPillarsDetector",
    feature_map_shape=get_feature_map_size(pc_range, voxel_size),
    is_deploy=True,
    # pre_process=dict(
    #     type="PointPillarsPreProcess",
    #     pc_range=pc_range,
    #     voxel_size=voxel_size,
    #     max_voxels_num=max_voxels_num,
    #     max_points_in_voxel=max_points_in_voxel,
    # ),
    reader=dict(
- Change deploy_inputs to the PFN inputs:
deploy_inputs = dict(
    voxels=torch.randn((1, 4, 12000, 100), dtype=torch.float32),
    coordinates=torch.zeros([12000, 4]).int(),
)
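The literal shapes above come from the voxelization parameters defined earlier in the config. The sketch below is an optional restatement assuming max_voxels_num = 12000, max_points_in_voxel = 100, and 4 point features (x, y, z, reflectance), and assuming the voxels layout is (batch, point_features, max_voxels, max_points) as inferred from those literals; deriving deploy_inputs from the named constants keeps the dummy inputs in sync if you retune the voxelizer:

import torch

# Assumed to match pointpillars_kitti_car.py; adjust if your voxelization
# settings differ.
max_voxels_num = 12000
max_points_in_voxel = 100
num_point_features = 4  # x, y, z, reflectance (assumption for KITTI)

# Same shapes as the literal deploy_inputs above; the layout
# (batch, point_features, max_voxels, max_points) is an assumption
# inferred from those literals.
deploy_inputs = dict(
    voxels=torch.randn(
        (1, num_point_features, max_voxels_num, max_points_in_voxel),
        dtype=torch.float32,
    ),
    coordinates=torch.zeros([max_voxels_num, 4]).int(),
)

assert deploy_inputs["voxels"].shape == (1, 4, 12000, 100)
assert deploy_inputs["coordinates"].shape == (12000, 4)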
structure
/usr/local/lib/python3.8/dist-packages/hat/models/structures/detectors/pointpillar.py
- Keep features and coors (num_points_in_voxel is commented out):
data = dict(  # noqa C408
    features=example["voxels"],
    # num_points_in_voxel=example["num_points"],
    coors=example["coordinates"],
    batch_size=1,
    input_shape=self.feature_map_shape,
)
input_features = self.reader(
    features=data["features"],
    # num_voxels=data["num_points_in_voxel"],
    coors=data["coors"],
    horizon_preprocess=self.use_horizon_preprocess,
)
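With pre_process removed from the deploy model, voxelization has to happen outside the model, so example["voxels"] and example["coordinates"] are expected to arrive already in the padded layout defined by deploy_inputs. The sketch below only illustrates that layout; it is not the HAT PointPillarsPreProcess implementation. voxelize_points is a hypothetical helper, and the (batch, z, y, x) coordinate layout and point-dropping policy are assumptions. For deployment, the quantization-aware preprocessing shipped with the OE should still be used.

import torch

def voxelize_points(points, pc_range, voxel_size,
                    max_voxels=12000, max_points=100):
    # Hypothetical helper, not the HAT PointPillarsPreProcess implementation.
    # points: (N, 4) tensor of x, y, z, reflectance already cropped to
    # pc_range (range filtering is omitted here for brevity).
    # Returns voxels (1, 4, max_voxels, max_points) and coordinates
    # (max_voxels, 4); the (batch, z, y, x) coordinate layout is an assumption.
    xmin, ymin = pc_range[0], pc_range[1]
    vx, vy = voxel_size[0], voxel_size[1]

    ix = ((points[:, 0] - xmin) / vx).long()  # BEV grid index along x
    iy = ((points[:, 1] - ymin) / vy).long()  # BEV grid index along y

    voxels = torch.zeros(1, points.shape[1], max_voxels, max_points)
    coords = torch.zeros(max_voxels, 4, dtype=torch.int32)
    lookup = {}  # (gx, gy) -> [pillar index, points stored so far]

    for point, gx, gy in zip(points, ix.tolist(), iy.tolist()):
        key = (gx, gy)
        if key not in lookup:
            if len(lookup) >= max_voxels:
                continue  # pillar budget exhausted, drop the point
            lookup[key] = [len(lookup), 0]
            coords[lookup[key][0]] = torch.tensor(
                [0, 0, gy, gx], dtype=torch.int32
            )
        vid, n = lookup[key]
        if n < max_points:
            voxels[0, :, vid, n] = point
            lookup[key][1] = n + 1

    return voxels, coords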
PFNLayer
/usr/local/lib/python3.8/dist-packages/hat/models/task_modules/lidar/pillar_encoder.py
- Make the following changes:
class PillarFeatureNet(nn.Module):
    ...
    def forward(
        self,
        features: torch.Tensor,
        # num_voxels: torch.Tensor,
        coors: torch.Tensor,
        horizon_preprocess: bool = False,
    ):
        if horizon_preprocess:
            # use horizon preprocess (which supports quantization),
            # skip the default preprocess here.
            features = self._extract_feature(features)
        else:
            # default preprocess
            # assert num_voxels is not None, "`num_voxels` can not be None."
            # features = self._extend_dim(features, num_voxels, coors)
            features = self._extract_feature(features)
        return features
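With the default _extend_dim path disabled, both branches reduce to _extract_feature, and num_voxels is no longer part of the contract between the detector and the reader. The standalone smoke test below only exercises that signature change; MiniPFN is a hypothetical stub whose _extract_feature is an identity, not the real PillarFeatureNet:

import torch
import torch.nn as nn

class MiniPFN(nn.Module):
    # Hypothetical stand-in that only mirrors the modified forward signature;
    # the real PillarFeatureNet does learned feature extraction here.
    def _extract_feature(self, features):
        return features

    def forward(self, features, coors, horizon_preprocess=False):
        # Both branches now go through _extract_feature; num_voxels is gone.
        return self._extract_feature(features)

pfn = MiniPFN()
voxels = torch.randn(1, 4, 12000, 100)
coords = torch.zeros(12000, 4).int()

# Mirrors the call made in pointpillar.py after the edits above.
out = pfn(features=voxels, coors=coords, horizon_preprocess=False)
assert out.shape == voxels.shape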
compile
Run the following command to compile the PP model without preprocessing:
python3 tools/compile_perf.py --config configs/detection/pointpillars/pointpillars_kitti_car.py --out-dir ./ --opt 3
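If compilation fails on the inputs, a quick way to confirm that the edits above took effect is to load the modified config as a plain Python module and inspect deploy_model and deploy_inputs. This is an optional check, assuming the config file is importable from the scripts directory (with hat on PYTHONPATH); it is not part of the official OE workflow:

import importlib.util

cfg_path = "configs/detection/pointpillars/pointpillars_kitti_car.py"
spec = importlib.util.spec_from_file_location("pp_cfg", cfg_path)
cfg = importlib.util.module_from_spec(spec)
spec.loader.exec_module(cfg)

# pre_process should be gone and the dummy inputs should match the PFN shapes.
assert "pre_process" not in cfg.deploy_model
assert cfg.deploy_inputs["voxels"].shape == (1, 4, 12000, 100)
assert cfg.deploy_inputs["coordinates"].shape == (12000, 4)
print("config looks good for compile_perf.py")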