Skip to content
This repository was archived by the owner on Nov 17, 2023. It is now read-only.

Commit af8723d

Browse files
committed
* Fix Windows oversized-source-file build issue by splitting code into additional files
1 parent cb4a9f2 commit af8723d

File tree

8 files changed

+232
-146
lines changed

8 files changed

+232
-146
lines changed

benchmark/python/ffi/benchmark_ffi.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,9 @@ def prepare_workloads():
6262
OpArgMngr.add_workload("random.uniform", low=0, high=1, size=1)
6363
OpArgMngr.add_workload("where", pool['2x3'], pool['2x3'], pool['2x1'])
6464
OpArgMngr.add_workload("may_share_memory", pool['2x3'][:0], pool['2x3'][:1])
65+
OpArgMngr.add_workload("fmax", pool['2x2'], pool['2x2'])
66+
OpArgMngr.add_workload("fmin", pool['2x2'], pool['2x2'])
67+
OpArgMngr.add_workload("fmod", pool['2x2'], pool['2x2'])
6568

6669

6770
def benchmark_helper(f, *args, **kwargs):

python/mxnet/ndarray/numpy/_op.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1193,6 +1193,7 @@ def fmod(x1, x2, out=None, **kwargs):
11931193
_np.fmod(x1, x2, out=out)
11941194
return _api_internal.fmod(x1, x2, out)
11951195

1196+
11961197
@set_module('mxnet.ndarray.numpy')
11971198
def delete(arr, obj, axis=None):
11981199
"""
@@ -4404,6 +4405,7 @@ def fmax(x1, x2, out=None, **kwargs):
44044405
_np.fmax(x1, x2, out=out)
44054406
return _api_internal.fmax(x1, x2, out)
44064407

4408+
44074409
@set_module('mxnet.ndarray.numpy')
44084410
@wrap_np_binary_func
44094411
def minimum(x1, x2, out=None, **kwargs):

python/mxnet/symbol/numpy/_symbol.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -4148,7 +4148,12 @@ def minimum(x1, x2, out=None, **kwargs):
41484148

41494149

41504150
@set_module('mxnet.symbol.numpy')
4151-
<<<<<<< HEAD
4151+
@wrap_np_binary_func
4152+
def fmin(x1, x2, out=None, **kwargs):
4153+
return _ufunc_helper(x1, x2, _npi.fmin, _np.fmin, _npi.fmin_scalar, None, out)
4154+
4155+
4156+
@set_module('mxnet.symbol.numpy')
41524157
def all(a, axis=None, out=None, keepdims=False):
41534158
"""
41544159
Test whether all array elements along a given axis evaluate to True.
@@ -4207,11 +4212,6 @@ def any(a, axis=None, out=None, keepdims=False):
42074212
in which case a reference to out is returned.
42084213
"""
42094214
return _npi.any(a, axis=axis, keepdims=keepdims, out=out)
4210-
=======
4211-
@wrap_np_binary_func
4212-
def fmin(x1, x2, out=None, **kwargs):
4213-
return _ufunc_helper(x1, x2, _npi.fmin, _np.fmin, _npi.fmin_scalar, None, out)
4214-
>>>>>>> [Numpy] Add op fmax, fmin
42154215

42164216

42174217
@set_module('mxnet.symbol.numpy')

src/api/operator/numpy/np_elemwise_broadcast_op_extended.cc renamed to src/api/operator/numpy/np_elemwise_broadcast_op_extended_sec.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,8 @@
1818
*/
1919

2020
/*!
21-
* \file np_elemwise_broadcast_op_extended.cc
22-
* \brief Implementation of the API of functions in src/operator/numpy/np_elemwise_broadcast_op_extended.cc
21+
* \file np_elemwise_broadcast_op_extended_sec.cc
22+
* \brief Implementation of the API of functions in src/operator/numpy/np_elemwise_broadcast_op_extended_sec.cc
2323
*/
2424
#include <mxnet/api_registry.h>
2525
#include <mxnet/runtime/packed_func.h>

src/operator/numpy/np_elemwise_broadcast_op_extended.cc

Lines changed: 0 additions & 93 deletions
Original file line numberDiff line numberDiff line change
@@ -371,98 +371,5 @@ MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_rldexp_scalar)
371371
.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
372372
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::rldexp_grad>);
373373

374-
MXNET_OPERATOR_REGISTER_BINARY_BROADCAST(_npi_fmax)
375-
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastCompute<cpu, mshadow_op::fmax>)
376-
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_fmax"});
377-
378-
NNVM_REGISTER_OP(_backward_npi_fmax)
379-
.set_num_inputs(3)
380-
.set_num_outputs(2)
381-
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
382-
.set_attr<nnvm::FInplaceOption>("FInplaceOption",
383-
[](const NodeAttrs& attrs){
384-
return std::vector<std::pair<int, int> >{{0, 1}};
385-
})
386-
.set_attr<FResourceRequest>("FResourceRequest",
387-
[](const NodeAttrs& attrs) {
388-
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
389-
})
390-
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastBackwardUseIn<cpu, mshadow_op::ge,
391-
mshadow_op::lt>);
392-
393-
MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_fmax_scalar)
394-
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::fmax>)
395-
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_fmax_scalar"});
396-
397-
MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_fmax_scalar)
398-
.add_argument("scalar", "float", "scalar value")
399-
.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
400-
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::ge>);
401-
402-
MXNET_OPERATOR_REGISTER_BINARY_BROADCAST(_npi_fmin)
403-
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastCompute<cpu, mshadow_op::fmin>)
404-
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_fmin"});
405-
406-
NNVM_REGISTER_OP(_backward_npi_fmin)
407-
.set_num_inputs(3)
408-
.set_num_outputs(2)
409-
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
410-
.set_attr<nnvm::FInplaceOption>("FInplaceOption",
411-
[](const NodeAttrs& attrs){
412-
return std::vector<std::pair<int, int> >{{0, 1}};
413-
})
414-
.set_attr<FResourceRequest>("FResourceRequest",
415-
[](const NodeAttrs& attrs) {
416-
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
417-
})
418-
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastBackwardUseIn<cpu, mshadow_op::le,
419-
mshadow_op::gt>);
420-
421-
MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_fmin_scalar)
422-
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::fmin>)
423-
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_fmin_scalar"});
424-
425-
MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_fmin_scalar)
426-
.add_argument("scalar", "float", "scalar value")
427-
.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
428-
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::le>);
429-
430-
MXNET_OPERATOR_REGISTER_BINARY_BROADCAST(_npi_fmod)
431-
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastCompute<cpu, mshadow_op::fmod>)
432-
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_fmod"});
433-
434-
NNVM_REGISTER_OP(_backward_npi_fmod)
435-
.set_num_inputs(3)
436-
.set_num_outputs(2)
437-
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
438-
.set_attr<nnvm::FInplaceOption>("FInplaceOption",
439-
[](const NodeAttrs& attrs){
440-
return std::vector<std::pair<int, int> >{{0, 1}};
441-
})
442-
.set_attr<FResourceRequest>("FResourceRequest",
443-
[](const NodeAttrs& attrs) {
444-
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
445-
})
446-
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastBackwardUseIn<cpu, mshadow_op::mod_grad,
447-
mshadow_op::mod_rgrad>);
448-
449-
MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_fmod_scalar)
450-
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::fmod>)
451-
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_fmod_scalar"});
452-
453-
MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_fmod_scalar)
454-
.add_argument("scalar", "float", "scalar value")
455-
.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
456-
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::mod_grad>);
457-
458-
MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_rfmod_scalar)
459-
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::rfmod>)
460-
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_rfmod_scalar"});
461-
462-
MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_rfmod_scalar)
463-
.add_argument("scalar", "float", "scalar value")
464-
.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
465-
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::rmod_grad>);
466-
467374
} // namespace op
468375
} // namespace mxnet

src/operator/numpy/np_elemwise_broadcast_op_extended.cu

Lines changed: 0 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -116,50 +116,5 @@ NNVM_REGISTER_OP(_backward_npi_ldexp_scalar)
116116
NNVM_REGISTER_OP(_backward_npi_rldexp_scalar)
117117
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Backward<gpu, mshadow_op::rldexp_grad>);
118118

119-
NNVM_REGISTER_OP(_npi_fmax)
120-
.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastCompute<gpu, mshadow_op::fmax>);
121-
122-
NNVM_REGISTER_OP(_backward_npi_fmax)
123-
.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastBackwardUseIn<gpu, mshadow_op::ge,
124-
mshadow_op::lt>);
125-
126-
NNVM_REGISTER_OP(_npi_fmax_scalar)
127-
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Compute<gpu, mshadow_op::fmax>);
128-
129-
NNVM_REGISTER_OP(_backward_npi_fmax_scalar)
130-
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Backward<gpu, mshadow_op::ge>);
131-
132-
NNVM_REGISTER_OP(_npi_fmin)
133-
.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastCompute<gpu, mshadow_op::fmin>);
134-
135-
NNVM_REGISTER_OP(_backward_npi_fmin)
136-
.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastBackwardUseIn<gpu, mshadow_op::le,
137-
mshadow_op::gt>);
138-
139-
NNVM_REGISTER_OP(_npi_fmin_scalar)
140-
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Compute<gpu, mshadow_op::fmin>);
141-
142-
NNVM_REGISTER_OP(_backward_npi_fmin_scalar)
143-
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Backward<gpu, mshadow_op::le>);
144-
145-
NNVM_REGISTER_OP(_npi_fmod)
146-
.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastCompute<gpu, mshadow_op::fmod>);
147-
148-
NNVM_REGISTER_OP(_backward_npi_fmod)
149-
.set_attr<FCompute>("FCompute<gpu>", BinaryBroadcastBackwardUseIn<gpu, mshadow_op::mod_grad,
150-
mshadow_op::mod_rgrad>);
151-
152-
NNVM_REGISTER_OP(_npi_fmod_scalar)
153-
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Compute<gpu, mshadow_op::fmod>);
154-
155-
NNVM_REGISTER_OP(_backward_npi_fmod_scalar)
156-
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Backward<gpu, mshadow_op::mod_grad>);
157-
158-
NNVM_REGISTER_OP(_npi_rfmod_scalar)
159-
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Compute<gpu, mshadow_op::rfmod>);
160-
161-
NNVM_REGISTER_OP(_backward_npi_rfmod_scalar)
162-
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarOp::Backward<gpu, mshadow_op::rmod_grad>);
163-
164119
} // namespace op
165120
} // namespace mxnet
Lines changed: 142 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,142 @@
1+
/*
2+
* Licensed to the Apache Software Foundation (ASF) under one
3+
* or more contributor license agreements. See the NOTICE file
4+
* distributed with this work for additional information
5+
* regarding copyright ownership. The ASF licenses this file
6+
* to you under the Apache License, Version 2.0 (the
7+
* "License"); you may not use this file except in compliance
8+
* with the License. You may obtain a copy of the License at
9+
*
10+
* http://www.apache.org/licenses/LICENSE-2.0
11+
*
12+
* Unless required by applicable law or agreed to in writing,
13+
* software distributed under the License is distributed on an
14+
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15+
* KIND, either express or implied. See the License for the
16+
* specific language governing permissions and limitations
17+
* under the License.
18+
*/
19+
20+
/*!
21+
* Copyright (c) 2019 by Contributors
22+
* \file np_elemwise_broadcast_op_extended_sec.cc
23+
 * \brief CPU implementation of extended functions for the elementwise numpy binary broadcast operator. (Second extended file)
24+
*/
25+
26+
#include "../../common/utils.h"
27+
#include "./np_elemwise_broadcast_op.h"
28+
29+
namespace mxnet {
30+
namespace op {
31+
32+
#define MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(name) \
33+
NNVM_REGISTER_OP(name) \
34+
.set_num_inputs(1) \
35+
.set_num_outputs(1) \
36+
.set_attr_parser([](NodeAttrs* attrs) { \
37+
attrs->parsed = std::stod(attrs->dict["scalar"]); \
38+
}) \
39+
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
40+
.set_attr<nnvm::FInferType>("FInferType", NumpyBinaryScalarType) \
41+
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
42+
[](const NodeAttrs& attrs){ \
43+
return std::vector<std::pair<int, int> >{{0, 0}}; \
44+
}) \
45+
.add_argument("data", "NDArray-or-Symbol", "source input") \
46+
.add_argument("scalar", "float", "scalar input")
47+
48+
MXNET_OPERATOR_REGISTER_BINARY_BROADCAST(_npi_fmax)
49+
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastCompute<cpu, mshadow_op::fmax>)
50+
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_fmax"});
51+
52+
NNVM_REGISTER_OP(_backward_npi_fmax)
53+
.set_num_inputs(3)
54+
.set_num_outputs(2)
55+
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
56+
.set_attr<nnvm::FInplaceOption>("FInplaceOption",
57+
[](const NodeAttrs& attrs){
58+
return std::vector<std::pair<int, int> >{{0, 1}};
59+
})
60+
.set_attr<FResourceRequest>("FResourceRequest",
61+
[](const NodeAttrs& attrs) {
62+
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
63+
})
64+
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastBackwardUseIn<cpu, mshadow_op::ge,
65+
mshadow_op::lt>);
66+
67+
MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_fmax_scalar)
68+
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::fmax>)
69+
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_fmax_scalar"});
70+
71+
MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_fmax_scalar)
72+
.add_argument("scalar", "float", "scalar value")
73+
.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
74+
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::ge>);
75+
76+
MXNET_OPERATOR_REGISTER_BINARY_BROADCAST(_npi_fmin)
77+
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastCompute<cpu, mshadow_op::fmin>)
78+
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_fmin"});
79+
80+
NNVM_REGISTER_OP(_backward_npi_fmin)
81+
.set_num_inputs(3)
82+
.set_num_outputs(2)
83+
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
84+
.set_attr<nnvm::FInplaceOption>("FInplaceOption",
85+
[](const NodeAttrs& attrs){
86+
return std::vector<std::pair<int, int> >{{0, 1}};
87+
})
88+
.set_attr<FResourceRequest>("FResourceRequest",
89+
[](const NodeAttrs& attrs) {
90+
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
91+
})
92+
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastBackwardUseIn<cpu, mshadow_op::le,
93+
mshadow_op::gt>);
94+
95+
MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_fmin_scalar)
96+
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::fmin>)
97+
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_fmin_scalar"});
98+
99+
MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_fmin_scalar)
100+
.add_argument("scalar", "float", "scalar value")
101+
.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
102+
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::le>);
103+
104+
MXNET_OPERATOR_REGISTER_BINARY_BROADCAST(_npi_fmod)
105+
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastCompute<cpu, mshadow_op::fmod>)
106+
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_fmod"});
107+
108+
NNVM_REGISTER_OP(_backward_npi_fmod)
109+
.set_num_inputs(3)
110+
.set_num_outputs(2)
111+
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
112+
.set_attr<nnvm::FInplaceOption>("FInplaceOption",
113+
[](const NodeAttrs& attrs){
114+
return std::vector<std::pair<int, int> >{{0, 1}};
115+
})
116+
.set_attr<FResourceRequest>("FResourceRequest",
117+
[](const NodeAttrs& attrs) {
118+
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
119+
})
120+
.set_attr<FCompute>("FCompute<cpu>", BinaryBroadcastBackwardUseIn<cpu, mshadow_op::mod_grad,
121+
mshadow_op::mod_rgrad>);
122+
123+
MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_fmod_scalar)
124+
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::fmod>)
125+
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_fmod_scalar"});
126+
127+
MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_fmod_scalar)
128+
.add_argument("scalar", "float", "scalar value")
129+
.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
130+
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::mod_grad>);
131+
132+
MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_rfmod_scalar)
133+
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::rfmod>)
134+
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_rfmod_scalar"});
135+
136+
MXNET_OPERATOR_REGISTER_BINARY(_backward_npi_rfmod_scalar)
137+
.add_argument("scalar", "float", "scalar value")
138+
.set_attr_parser([](NodeAttrs *attrs) { attrs->parsed = std::stod(attrs->dict["scalar"]); })
139+
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Backward<cpu, mshadow_op::rmod_grad>);
140+
141+
} // namespace op
142+
} // namespace mxnet

0 commit comments

Comments
 (0)