This repository was archived by the owner on Nov 17, 2023. It is now read-only.

Commit eaa0a3b

Clang-format-10: skip comments added around code it formats badly
1 parent 16667e3 commit eaa0a3b


14 files changed: 39 additions, 3 deletions

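Every hunk below applies the same fix: a hand-aligned multi-line expression that clang-format-10 would restyle badly is wrapped in // clang-format off and // clang-format on marker comments, which make clang-format leave the enclosed region exactly as written. A minimal sketch of the pattern, using illustrative names that are not taken from the MXNet sources:

// Sketch only: clang-format skips everything between the two markers,
// so the manually aligned second branch of the ternary keeps its alignment.
int PickStream(bool is_copy, int io_stream, int compute_stream) {
  // clang-format off
  int stream = is_copy ? io_stream :
                         compute_stream;
  // clang-format on
  return stream;
}

Without the markers, clang-format-10 would typically re-wrap or re-indent the continuation line, which is the bad format style the commit message refers to.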

src/engine/threaded_engine_pooled.cc

Lines changed: 2 additions & 0 deletions
@@ -152,10 +152,12 @@ class ThreadedEnginePooled : public ThreadedEngine {
       LOG(FATAL) << "Please compile with CUDA enabled";
 #endif  // MXNET_USE_CUDA
     }
+    // clang-format off
     bool is_copy = (opr_block->opr->prop == FnProperty::kCopyFromGPU ||
                     opr_block->opr->prop == FnProperty::kCopyToGPU);
     auto&& rctx = is_copy ? streams_->GetIORunContext(opr_block->ctx) :
                             streams_->GetRunContext(opr_block->ctx);
+    // clang-format on
 #if MXNET_USE_CUDA
     CallbackOnStart on_start;
     CallbackOnComplete callback;

src/kvstore/kvstore_dist.h

Lines changed: 3 additions & 1 deletion
@@ -507,10 +507,12 @@ class KVStoreDist : public KVStoreLocal {
     size_t size = recv_buf.shape().Size();
     const int dtype = recv_buf.dtype();
     const int num_bytes = mshadow::mshadow_sizeof(dtype);
+    // clang-format off
     PSKV& pskv = (gradient_compression_->get_type() == CompressionType::kNone) ?
                      EncodeDefaultKey(key, size, num_bytes) :
                      EncodeCompressedKey(key, size, false, num_bytes);
-    char* data = static_cast<char*>(recv_buf.data().dptr_);
+    // clang-format on
+    char* data = static_cast<char*>(recv_buf.data().dptr_);
     // false means not to delete data when SArray is deleted
     auto vals = new ps::SArray<char>(data, size * num_bytes, false);
     // issue pull

src/operator/contrib/bilinear_resize-inl.h

Lines changed: 2 additions & 0 deletions
@@ -270,12 +270,14 @@ static bool BilinearSampleOpInferShape(const nnvm::NodeAttrs& attrs,
       break;
     }
     case bilinear_resize::odd_scale: {
+      // clang-format off
       new_height = ((dshape[2] % 2) == 0) ?
                        (int16_t)(dshape[2] * param.scale_height.value()) :
                        (int16_t)((dshape[2] - 1) * param.scale_height.value()) + 1;
       new_width = ((dshape[3] % 2) == 0) ?
                       (int16_t)(dshape[3] * param.scale_width.value()) :
                       (int16_t)((dshape[3] - 1) * param.scale_width.value()) + 1;
+      // clang-format on
       break;
     }
     case bilinear_resize::like: {

src/operator/contrib/bounding_box.cu

Lines changed: 2 additions & 0 deletions
@@ -489,9 +489,11 @@ __launch_bounds__(NMS<DType>::THRESHOLD) __global__
 #pragma unroll
   for (int i = 0; i < n_threads / warp_size; ++i) {
     uint32_t my_mask = my_next_mask;
+    // clang-format off
     my_next_mask = (((i + 1) < n_threads / warp_size) && (my_element_in_batch < topk)) ?
                        nms_results[(i + 1) * topk * num_batches + my_element] :
                        full_mask;
+    // clang-format on
     if (my_warp == i && !__all_sync(full_mask, my_mask == full_mask)) {
       my_mask = my_mask | earlier_threads_mask;
       // Loop over warp_size - 1 because the last

src/operator/contrib/multi_lamb.cc

Lines changed: 2 additions & 0 deletions
@@ -44,8 +44,10 @@ struct MultiLAMBKernelStep1 {
     using namespace mshadow_op;
     for (size_t index = 0; index < kernel_params.ntensors; ++index) {
       if ((size_t)i < kernel_params.sizes[index]) {
+        // clang-format off
         MPDType w = has_mixed_precision ? kernel_params.weights32[index][i] :
                                           MPDType(kernel_params.weights[index][i]);
+        // clang-format on
         MPDType scaled_grad = static_cast<MPDType>(kernel_params.grads[index][i]) * rescale_grad;
         if (clip_gradient >= 0.0f)
           scaled_grad = mshadow_op::clip::Map(scaled_grad, static_cast<MPDType>(clip_gradient));

src/operator/contrib/multi_lans.cc

Lines changed: 4 additions & 0 deletions
@@ -45,8 +45,10 @@ struct MultiLANSKernelStep1 {
     using namespace mshadow_op;
     for (size_t index = 0; index < kernel_params.ntensors; ++index) {
       if ((size_t)i < kernel_params.sizes[index]) {
+        // clang-format off
         MPDType w = has_mixed_precision ? kernel_params.weights32[index][i] :
                                           MPDType(kernel_params.weights[index][i]);
+        // clang-format on
         float g_norm = sqrt(g_sq_norm[index]);
         MPDType scaled_grad = static_cast<MPDType>(kernel_params.grads[index][i]) * rescale_grad;
         scaled_grad /= g_norm;
@@ -95,8 +97,10 @@ struct MultiLANSKernelStep2 {
                     const OpReqType req) {
     for (size_t index = 0; index < kernel_params.ntensors; ++index) {
       if ((size_t)i < kernel_params.sizes[index]) {
+        // clang-format off
         MPDType w = has_mixed_precision ? kernel_params.weights32[index][i] :
                                           MPDType(kernel_params.weights[index][i]);
+        // clang-format on
         float r1 = sqrt(sum_sq_weigths[index]);
         float r2_m = sqrt(sum_sq_temp_m[index]);
         float r2_g = sqrt(sum_sq_temp_g[index]);

src/operator/nn/batch_norm.cu

Lines changed: 2 additions & 0 deletions
@@ -346,11 +346,13 @@ __global__ void BatchNormalizationUpdateOutputKernel(DeviceTensor input,
   }

   // Write normalized and update the output
+  // clang-format off
   const AccReal gamma = ((flags & FIX_GAMMA_FLAG) == 0 && weight.numElements() > 0) ?
                             ScalarConvert<DType, AccReal>::to(weight[plane]) :
                             ScalarConvert<int, AccReal>::to(1);
   const AccReal beta = bias.numElements() > 0 ? ScalarConvert<DType, AccReal>::to(bias[plane]) :
                                                 ScalarConvert<int, AccReal>::to(0);
+  // clang-format on
   for (int batch = 0, nbatch = input.OuterSize(); batch < nbatch; ++batch) {
     for (int x = threadIdx.x, nx = input.InnerSize(); x < nx; x += blockDim.x) {
       const DType inp = input.get_ref(batch, plane, x);

src/operator/nn/dnnl/dnnl_base.cc

Lines changed: 2 additions & 0 deletions
@@ -238,6 +238,7 @@ const dnnl::memory* GetWeights(const NDArray& arr, int num_groups) {
     H = 3;
     W = 4;
   }
+  // clang-format off
   if (ndim == 2) {
     tz = dnnl::memory::dims{arr.shape()[O], arr.shape()[I]};
     format_tag = dnnl::memory::format_tag::oi;
@@ -271,6 +272,7 @@ const dnnl::memory* GetWeights(const NDArray& arr, int num_groups) {
   } else {
     LOG(FATAL) << "The weight array has an unsupported number of dimensions";
   }
+  // clang-format on
   const auto md = dnnl::memory::desc{tz, type, format_tag};
   return arr.GetDNNLData(md);
 }

src/operator/nn/dnnl/dnnl_rnn.cc

Lines changed: 8 additions & 0 deletions
@@ -196,6 +196,7 @@ RnnPrimitive GetRnnFwdPrim(const DNNLRnnLayerParam& layer_param,
   auto src_state_desc = memory::desc(layer_param.state_dims, data_type, tag::ldnc);
   auto src_cell_desc = memory::desc(layer_param.cell_dims, data_type, tag::ldnc);
   auto weight_peep_desc = memory::desc();
+  // clang-format off
   auto weight_proj_desc = layer_param.proj_size > 0 ?
                               memory::desc(layer_param.weight_proj_dims, weight_type, tag::any) :
                               memory::desc();
@@ -205,6 +206,7 @@
   auto dst_cell_desc = layer_param.state_outputs ?
                            memory::desc(layer_param.cell_dims, data_type, tag::ldnc) :
                            memory::desc();
+  // clang-format on

   auto fwd = RnnPrimitive();
   switch (mode) {
@@ -265,19 +267,23 @@ RnnBwdPrimitive GetRnnBwdPrim(const DNNLRnnForwardTraining& fwd,
   memory::data_type data_type = get_dnnl_type(data.dtype());
   memory::data_type weight_type = get_dnnl_type(params.dtype());
   const prop_kind prop = prop_kind::backward;
+  // clang-format off
   rnn_direction dnnl_rnn_direction = layer_param.bidirectional ?
                                          rnn_direction::bidirectional_concat :
                                          rnn_direction::unidirectional;
+  // clang-format on

   auto src_layer_desc = memory::desc(layer_param.src_dims, data_type, tag::tnc);
   auto weight_layer_desc = memory::desc(layer_param.weight_layer_dims, weight_type, tag::any);
   auto weight_iter_desc = memory::desc(layer_param.weight_iter_dims, weight_type, tag::any);
   auto bias_desc = memory::desc(layer_param.bias_dims, data_type, tag::ldgo);
   auto dst_layer_desc = memory::desc(layer_param.dst_dims, data_type, tag::tnc);
   auto src_state_desc = memory::desc(layer_param.state_dims, data_type, tag::ldnc);
+  // clang-format off
   auto dst_state_desc = layer_param.state_outputs ?
                             memory::desc(layer_param.state_dims, data_type, tag::ldnc) :
                             memory::desc();
+  // clang-format on

   const void* fwd_pd = fwd.GetPrimDesc();
   auto bwd = RnnBwdPrimitive();
@@ -1126,9 +1132,11 @@ void DNNLRnnOp::Forward(const OpContext& ctx,
   const int seq_length = default_param.seq_length_;
   const int batch_size = default_param.batch_size_;
   const int state_size = default_param.state_size;
+  // clang-format off
   const int iter_size = default_param.projection_size.has_value() ?
                             default_param.projection_size.value() :
                             default_param.state_size;
+  // clang-format on
   const int directions = default_param.bidirectional ? 2 : 1;
   dnnl::memory::desc dst_desc({seq_length, batch_size, directions * iter_size},
                               get_dnnl_type(data_dtype),

src/operator/nn/softmax-inl.h

Lines changed: 2 additions & 0 deletions
@@ -851,10 +851,12 @@ __global__ void masked_softmax_grad_kernel(OType* out,

   DType final_result;
   for (index_t i = x; i < M; i += x_size) {
+    // clang-format off
     bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask];
     final_result = negate ? -OP2::Map(ograd[base + i * sa], out[base + i * sa], ssum) :
                             OP2::Map(ograd[base + i * sa], out[base + i * sa], ssum);
     final_result = mask_value ? final_result / static_cast<DType>(temperature) : DType(0.0f);
+    // clang-format on
     KERNEL_ASSIGN(igrad[base + i * sa], Req, final_result);
   }
 }

0 commit comments