Skip to content

Commit e6eb63a

Browse files
Authored commit: Merge pull request tqchen#7 from ZihengJiang/refactor — "reorganize"
2 parents 2e048a5 + 14fc77a commit e6eb63a

File tree

11 files changed

+553
-355
lines changed

11 files changed

+553
-355
lines changed

.gitignore

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,5 +34,5 @@ lib
3434
dmlc-core
3535
cli_test
3636
*.pyc
37-
nnvm
3837
test.*
38+
log

Makefile

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2,14 +2,17 @@ TORCH_PATH=${TORCH_HOME}
22

33
ROOTDIR = $(CURDIR)
44

5+
ifndef CUDA_PATH
6+
CUDA_PATH = /usr/local/cuda
7+
endif
58

69
ifndef NNVM_PATH
710
NNVM_PATH = $(ROOTDIR)/nnvm
811
endif
912

1013
export LDFLAGS = -pthread -lm
1114
export CFLAGS = -std=c++11 -Wall -O2 -msse2 -Wno-unknown-pragmas -funroll-loops\
12-
-fPIC -I${NNVM_PATH}/include -Iinclude -Idmlc-core/include
15+
-fPIC -Iinclude -Idmlc-core/include -I$(NNVM_PATH)/include
1316

1417
.PHONY: clean all test lint doc
1518

@@ -18,12 +21,13 @@ UNAME_S := $(shell uname -s)
1821
ifeq ($(UNAME_S), Darwin)
1922
WHOLE_ARCH= -all_load
2023
NO_WHOLE_ARCH= -noall_load
21-
CFLAGS += -I$(TORCH_PATH)/install/include -I$(TORCH_PATH)/install/include/TH
24+
CFLAGS += -I$(TORCH_PATH)/install/include -I$(TORCH_PATH)/install/include/TH
2225
LDFLAGS += -L$(TORCH_PATH)/install/lib -llua -lluaT -lTH
2326
else
2427
WHOLE_ARCH= --whole-archive
2528
NO_WHOLE_ARCH= --no-whole-archive
26-
CFLAGS += -I$(TORCH_PATH)/install/include -I$(TORCH_PATH)/install/include/TH -I$(TORCH_PATH)/install/include/THC/
29+
CFLAGS += -I$(TORCH_PATH)/install/include -I$(TORCH_PATH)/install/include/TH \
30+
-I$(TORCH_PATH)/install/include/THC/
2731
LDFLAGS += -L$(TORCH_PATH)/install/lib -lluajit -lluaT -lTH -lTHC
2832
endif
2933

@@ -49,8 +53,8 @@ build/src/%_gpu.o: src/%.cu
4953

5054
lib/libtinyflow.so: $(ALL_DEP)
5155
@mkdir -p $(@D)
52-
$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o, $^) $(LDFLAGS) \
53-
-Wl,${WHOLE_ARCH} $(filter %.a, $^) -Wl,${NO_WHOLE_ARCH}
56+
$(CXX) $(CFLAGS) -shared -o $@ $(filter %.o, $^) \
57+
-Wl,${WHOLE_ARCH} $(filter %.a, $^) -Wl,${NO_WHOLE_ARCH} $(LDFLAGS)
5458

5559
$(NNVM_PATH)/lib/libnnvm.a:
5660
+ cd $(NNVM_PATH); make lib/libnnvm.a; cd $(ROOTDIR)

include/tinyflow/base.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ struct TBlob {
4141
/*! \brief device mask of the corresponding device type */
4242
int dev_mask{kCPU};
4343
/*! \brief type of the tensor */
44-
int dtype{0};
44+
int dtype{kFloat32};
4545
};
4646

4747
/*!

src/op_nn.cc

Lines changed: 2 additions & 99 deletions
Original file line numberDiff line numberDiff line change
@@ -109,25 +109,13 @@ NNVM_REGISTER_OP(softmax)
109109
.describe("Softmax operation")
110110
.set_num_inputs(1)
111111
.include("nn_module")
112-
.set_attr<FLuaCreateNNModule>(
113-
"FLuaCreateNNModule", R"(
114-
function(ishape, kwarg)
115-
return nn.SoftMax()
116-
end
117-
)")
118112
.set_attr<FInferShape>("FInferShape", SameShape);
119113

120114

121115
NNVM_REGISTER_OP(relu)
122116
.describe("Relu operation")
123117
.set_num_inputs(1)
124118
.include("nn_module")
125-
.set_attr<FLuaCreateNNModule>(
126-
"FLuaCreateNNModule", R"(
127-
function(ishape, kwarg)
128-
return nn.ReLU()
129-
end
130-
)")
131119
.set_attr<FInferShape>("FInferShape", SameShape)
132120
.set_attr<bool>("TBackwardNeedOutputs", true);
133121

@@ -136,12 +124,6 @@ NNVM_REGISTER_OP(tanh)
136124
.describe("Tanh operation")
137125
.set_num_inputs(1)
138126
.include("nn_module")
139-
.set_attr<FLuaCreateNNModule>(
140-
"FLuaCreateNNModule", R"(
141-
function(ishape, kwarg)
142-
return nn.Tanh()
143-
end
144-
)")
145127
.set_attr<FInferShape>("FInferShape", SameShape);
146128

147129

@@ -193,17 +175,6 @@ NNVM_REGISTER_OP(linear)
193175
}
194176
})
195177
.include("nn_module")
196-
.set_attr<FLuaCreateNNModule>(
197-
"FLuaCreateNNModule", R"(
198-
function(ishape, kwarg)
199-
local wshape = ishape[2]
200-
local m = nn.Linear(wshape[2], wshape[1])
201-
if #ishape == 2 then
202-
m = m:noBias()
203-
end
204-
return m
205-
end
206-
)")
207178
.set_attr<FInferShape>("FInferShape", LinearShape);
208179

209180

@@ -273,37 +244,6 @@ NNVM_REGISTER_OP(conv2d)
273244
})
274245
.set_attr_parser(ParamParser<ConvPoolParam>)
275246
.include("nn_module")
276-
.set_attr<FLuaCreateNNModule>(
277-
"FLuaCreateNNModule", R"(
278-
function(ishape, kwarg)
279-
local dshape = ishape[2]
280-
local fshape = ishape[2]
281-
local outPlane = fshape[1]
282-
local inPlane = fshape[2]
283-
local kH = fshape[3]
284-
local kW = fshape[4]
285-
local inH = dshape[3]
286-
local inW = dshape[4]
287-
local stride = nn_parse_tuple(kwarg.strides, {1,1,1,1})
288-
local dH = stride[2]
289-
local dW = stride[3]
290-
local padH = 0
291-
local padW = 0
292-
293-
assert(kwarg.data_format == 'NCHW')
294-
if kwarg.padding == 'SAME' then
295-
padW = math.floor((kW - 1) / 2)
296-
padH = math.floor((kH - 1) / 2)
297-
end
298-
local m = nn.SpatialConvolution(
299-
inPlane, outPlane,
300-
kW, kH, dW, dH, padW, padH)
301-
if #ishape == 2 then
302-
m = m:noBias()
303-
end
304-
return m
305-
end
306-
)")
307247
.set_attr<FListInputNames>("FListInputNames", [](const NodeAttrs& attrs) {
308248
if (dmlc::get<ConvPoolParam>(attrs.parsed).no_bias) {
309249
return std::vector<std::string>{"data", "weight"};
@@ -320,54 +260,18 @@ NNVM_REGISTER_OP(max_pool)
320260
.set_num_inputs(1)
321261
.set_attr_parser(ParamParser<ConvPoolParam>)
322262
.include("nn_module")
323-
.set_attr<FLuaCreateNNModule>(
324-
"FLuaCreateNNModule", R"(
325-
function(ishape, kwarg)
326-
local ksize = nn_parse_tuple(kwarg.ksize)
327-
local stride = nn_parse_tuple(kwarg.strides, {1,1,1,1})
328-
local kH = ksize[2]
329-
local kW = ksize[3]
330-
local dH = stride[2]
331-
local dW = stride[3]
332-
local padH = 0
333-
local padW = 0
334-
assert(kwarg.data_format == 'NCHW')
335-
if kwarg.padding == 'SAME' then
336-
padW = math.floor((kW - 1) / 2)
337-
padH = math.floor((kH - 1) / 2)
338-
end
339-
return nn.SpatialMaxPooling(kW, kH, dW, dH, padW, padH)
340-
end
341-
)")
342263
.set_attr<FInferShape>("FInferShape", ConvPoolShape);
343264

344265

345266
NNVM_REGISTER_OP(mean_sparse_softmax_cross_entropy_with_logits)
346267
.describe("Softmax cross entropy given logit and label")
347268
.set_num_inputs(2)
348-
.include("nn_criterion")
349-
.set_attr<FLuaCreateNNModule>(
350-
"FLuaCreateNNModule", R"(
351-
function(ishape, kwarg)
352-
return nn_zero_index_target_criterion(
353-
nn.CrossEntropyCriterion())
354-
end
355-
)");
356-
357-
const char* LuaReshape = R"(
358-
function(x, y, kwarg)
359-
if x[1]:storage() == y[1]:storage() then
360-
return function() end
361-
else
362-
return function() y[1]:copy(x[1]:resizeAs(y[1])) end
363-
end
364-
end
365-
)";
269+
.include("nn_criterion");
270+
366271

367272
NNVM_REGISTER_OP(flatten_layer)
368273
.describe("Flatten to 2D")
369274
.set_num_inputs(1)
370-
.set_attr<FLuaCompute>("FLuaCompute", LuaReshape)
371275
.set_attr<FInplaceOption>("FInplaceOption", InplaceIn0Out0)
372276
.set_attr<FInferShape>(
373277
"FInferShape", [](const NodeAttrs& attrs,
@@ -388,7 +292,6 @@ NNVM_REGISTER_OP(flatten_layer)
388292

389293
NNVM_REGISTER_OP(_flatten_backward)
390294
.set_num_inputs(1)
391-
.set_attr<FLuaCompute>("FLuaCompute", LuaReshape)
392295
.set_attr<FInplaceOption>("FInplaceOption", InplaceIn0Out0)
393296
.set_attr<FBackwardOutToInIndex>(
394297
"FBackwardOutToInIndex", [](const NodeAttrs& attrs) {

src/op_special.cc

Lines changed: 2 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,7 @@ const FLuaCompute kLuaNOP = "function(x, y, kwarg) return function() end end";
1313

1414
NNVM_REGISTER_OP(placeholder)
1515
.describe("placeholder op")
16-
.set_num_inputs(0)
17-
.set_attr<FLuaCompute>("FLuaCompute", kLuaNOP);
16+
.set_num_inputs(0);
1817

1918
template<typename Attr>
2019
inline bool EmptyAttr(const NodeAttrs& attrs,
@@ -27,7 +26,6 @@ NNVM_REGISTER_OP(_nop)
2726
.describe("no operation")
2827
.set_num_inputs(0)
2928
.set_num_outputs(1)
30-
.set_attr<FLuaCompute>("FLuaCompute", kLuaNOP)
3129
.set_attr<FInferShape>("FInferShape", EmptyAttr<TShape>)
3230
.set_attr<FInferType>("FInferType", EmptyAttr<int>);
3331

@@ -39,19 +37,7 @@ NNVM_REGISTER_OP(assign)
3937
return std::vector<uint32_t>{0};
4038
})
4139
.set_attr<FInferShape>("FInferShape", SameShape)
42-
.set_attr<FInplaceOption>("FInplaceOption", InplaceIn1Out0)
43-
.set_attr<FLuaCompute>(
44-
"FLuaCompute", R"(
45-
function(x, y, kwarg)
46-
return function()
47-
x[1]:copy(x[2])
48-
-- normally inplace optimization prevent this
49-
if y[1]:storage() ~= x[2]:storage() then
50-
y[1]:copy(x[2])
51-
end
52-
end
53-
end
54-
)");
40+
.set_attr<FInplaceOption>("FInplaceOption", InplaceIn1Out0);
5541

5642
// special no gradient op to report error when take
5743
// gradient wrt non-differentiable inputs

Commit comments: 0