
Commit 05bc877

killeent authored and soumith committed
make THPPointer have explicit constructors (pytorch#1636)
1 parent 6a7c564 commit 05bc877

22 files changed: +117 −117 lines
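Every hunk in this commit is the same mechanical change: once THPPointer's converting constructor is explicit, copy-initialization from a raw pointer (THPObjectPtr p = expr;) no longer compiles, and each site switches to direct-initialization (THPObjectPtr p(expr);). The sketch below illustrates the idea with a hypothetical ObjectPtr; it is a minimal stand-in, not the actual THPPointer definition.

#include <Python.h>

// Hypothetical owning PyObject* wrapper with an explicit converting
// constructor; a minimal stand-in for THPPointer, not its real definition.
template <typename T>
struct ObjectPtr {
  ObjectPtr() : ptr(nullptr) {}
  explicit ObjectPtr(T* p) : ptr(p) {}  // blocks implicit T* -> ObjectPtr
  ObjectPtr(const ObjectPtr&) = delete;             // owning: no copies
  ObjectPtr& operator=(const ObjectPtr&) = delete;
  ObjectPtr(ObjectPtr&& other) : ptr(other.release()) {}
  ~ObjectPtr() { Py_XDECREF((PyObject*)ptr); }

  T* get() const { return ptr; }
  T* release() { T* tmp = ptr; ptr = nullptr; return tmp; }
  operator T*() const { return ptr; }  // still converts back to a raw pointer

 private:
  T* ptr;
};

// With the constructor explicit:
//   ObjectPtr<PyObject> t = PyTuple_New(2);  // error: copy-initialization
//   ObjectPtr<PyObject> t(PyTuple_New(2));   // OK: direct-initialization

Making the constructor explicit is what forces every assignment-style initialization in the diffs below into a parenthesized constructor call, so an implicit conversion from a raw, possibly-unowned pointer can no longer compile silently.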

tools/cwrap/plugins/THPPlugin.py

Lines changed: 1 addition & 1 deletion
@@ -127,7 +127,7 @@ class THPPlugin(CWrapPlugin):
     """)

     ALLOCATE_TMPL = Template("""\
-      THP${type}TensorPtr _${name}_guard = (THP${type}Tensor*) THP${type}Tensor_NewEmpty();
+      THP${type}TensorPtr _${name}_guard((THP${type}Tensor*) THP${type}Tensor_NewEmpty());
       if (!_${name}_guard.get()) return NULL;
       THP${type}Tensor* $name = _${name}_guard.get();

torch/csrc/Generator.cpp

Lines changed: 2 additions & 2 deletions
@@ -33,7 +33,7 @@ static PyObject * THPGenerator_pynew(PyTypeObject *type, PyObject *args, PyObjec
   THPUtils_setError("torch.Generator constructor doesn't accept any arguments");
   return NULL;
 }
-  THPGeneratorPtr self = (THPGenerator *)type->tp_alloc(type, 0);
+  THPGeneratorPtr self((THPGenerator *)type->tp_alloc(type, 0));
   self->cdata = THGenerator_new();

   return (PyObject*)self.release();
@@ -44,7 +44,7 @@ static PyObject * THPGenerator_getState(THPGenerator *self)
 {
   HANDLE_TH_ERRORS
   THGenerator *generator = self->cdata;
-  THPByteTensorPtr res = (THPByteTensor *)THPByteTensor_NewEmpty();
+  THPByteTensorPtr res((THPByteTensor *)THPByteTensor_NewEmpty());
   if (!res) return NULL;
   THByteTensor_getRNGState(generator, res->cdata);
   return (PyObject *)res.release();

torch/csrc/Module.cpp

Lines changed: 2 additions & 2 deletions
@@ -63,7 +63,7 @@ static PyObject * THPModule_initNames(PyObject *self, PyObject *arg)
 {
   static std::vector<std::string> names;

-  THPObjectPtr types = PySequence_Fast(arg, "expected a sequence");
+  THPObjectPtr types(PySequence_Fast(arg, "expected a sequence"));
   if (!types) return NULL;

   int num_classes = PySequence_Fast_GET_SIZE(types.get());
@@ -73,7 +73,7 @@ static PyObject * THPModule_initNames(PyObject *self, PyObject *arg)
     THPUtils_assert(PyType_Check(obj), "expected a PyTypeObject");
     PyTypeObject* type = (PyTypeObject*)obj;

-    THPObjectPtr module_name = PyObject_GetAttrString(obj, "__module__");
+    THPObjectPtr module_name(PyObject_GetAttrString(obj, "__module__"));
     if (!module_name) return NULL;
     THPUtils_assert(THPUtils_checkString(module_name.get()),
       "expected __module__ to be a string");

torch/csrc/Size.cpp

Lines changed: 1 addition & 1 deletion
@@ -25,7 +25,7 @@ PyObject * THPSize_New(int dim, long *sizes)

 static PyObject * THPSize_pynew(PyTypeObject *type, PyObject *args, PyObject *kwargs)
 {
-  THPObjectPtr self = PyTuple_Type.tp_new(type, args, kwargs);
+  THPObjectPtr self(PyTuple_Type.tp_new(type, args, kwargs));
   if (self) {
     for (Py_ssize_t i = 0; i < PyTuple_Size(self); ++i) {
       PyObject *item = PyTuple_GET_ITEM(self.get(), i);

torch/csrc/autograd/functions/init.cpp

Lines changed: 3 additions & 3 deletions
@@ -81,7 +81,7 @@ PyObject* getTupleAttr(PyObject* obj, void* _unused)
   THPCppFunction* self = (THPCppFunction*)obj;
   auto& arr = ((T*)(self->cdata.get()))->*ptr;
   auto num_elems = arr.size();
-  THPObjectPtr py_tuple = PyTuple_New(num_elems);
+  THPObjectPtr py_tuple(PyTuple_New(num_elems));
   if (!py_tuple) return NULL;
   for (size_t i = 0; i < num_elems; ++i) {
     PyTuple_SET_ITEM(py_tuple.get(), i, Convert(arr[i]));
@@ -203,7 +203,7 @@ static struct PyGetSetDef accumulate_grad_properties[] = {

 bool THPAutograd_initFunctions(PyObject* _unused)
 {
-  THPObjectPtr module = PyModule_New("torch._C._functions");
+  THPObjectPtr module(PyModule_New("torch._C._functions"));
   if (!module) return false;

   static PyTypeObject BatchNormClass, BatchNormBackwardClass;
@@ -233,7 +233,7 @@ bool THPAutograd_initFunctions(PyObject* _unused)
   static PyTypeObject IdentityClass;
   addClass<Identity, NoCtor>(module, IdentityClass, "Identity");

-  THPObjectPtr parent = PyImport_ImportModule("torch._C");
+  THPObjectPtr parent(PyImport_ImportModule("torch._C"));
   if (!parent) return false;
   PyModule_AddObject(parent.get(), "_functions", module.release());
   return true;

torch/csrc/autograd/python_cpp_function.cpp

Lines changed: 6 additions & 6 deletions
@@ -53,7 +53,7 @@ PyObject* THPCppFunction_call(PyObject* self, PyObject* args, PyObject *kwargs)
     return THPVariable_Wrap(output[0]);
   }

-  THPObjectPtr tuple = PyTuple_New(num_outputs);
+  THPObjectPtr tuple(PyTuple_New(num_outputs));
   for (int i = 0; i != num_outputs; ++i) {
     PyTuple_SET_ITEM(tuple.get(), i, THPVariable_Wrap(output[i]));
   }
@@ -94,11 +94,11 @@ PyObject* THPCppFunction_next_functions(THPCppFunction* self, PyObject* hook)
 {
   auto& next_functions = self->cdata->next_functions;
   auto num_next = next_functions.size();
-  THPObjectPtr py_functions = PyTuple_New(num_next);
+  THPObjectPtr py_functions(PyTuple_New(num_next));
   if (!py_functions) return NULL;
   for (size_t i = 0; i < num_next; ++i) {
     auto& c_tuple = next_functions[i];
-    THPObjectPtr tuple = PyTuple_New(2);
+    THPObjectPtr tuple(PyTuple_New(2));
     if (!tuple) return NULL;
     PyObject *py_fn = functionToPyObject(c_tuple.first);
     if (!py_fn) return NULL;
@@ -181,7 +181,7 @@ PyObject* functionToPyObject(std::shared_ptr<Function> cdata)
   }

   PyTypeObject* type = (PyTypeObject*)it->second.get();
-  THPObjectPtr obj = type->tp_alloc(type, 0);
+  THPObjectPtr obj(type->tp_alloc(type, 0));
   if (!obj) return NULL;
   THPCppFunction* f = (THPCppFunction*)obj.get();
   new (&f->cdata) std::shared_ptr<Function>(cdata);
@@ -207,9 +207,9 @@ PyObject* registerFunctionHook(Function& fn, PyObject* hook)
     }
   }

-  THPObjectPtr register_fn = PyObject_GetAttrString(THPFunctionClass, "_register_hook");
+  THPObjectPtr register_fn(PyObject_GetAttrString(THPFunctionClass, "_register_hook"));
   if (!register_fn) return NULL;
-  THPObjectPtr res = PyObject_CallFunctionObjArgs(register_fn.get(), dict, hook, NULL);
+  THPObjectPtr res(PyObject_CallFunctionObjArgs(register_fn.get(), dict, hook, NULL));
   if (!res) return NULL;

   if (dict == Py_None) {

torch/csrc/autograd/python_cpp_function.h

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@ struct THPCppFunction {
 template<typename Ctor>
 PyObject* CppFunction_pynew(PyTypeObject *type, PyObject *args, PyObject *kwds)
 {
-  THPObjectPtr obj = type->tp_alloc(type, 0);
+  THPObjectPtr obj(type->tp_alloc(type, 0));
   if (!obj) return NULL;
   THPCppFunction* f = (THPCppFunction*)obj.get();
   HANDLE_TH_ERRORS

torch/csrc/autograd/python_function.cpp

Lines changed: 27 additions & 27 deletions
@@ -59,11 +59,11 @@ static PyObject* _allocate_grad_output(output_info_type& info, AutoGPU& gpu_guar
   gpu_guard.setDevice(std::get<1>(info));
   std::vector<long> &sizes = std::get<2>(info);

-  THPObjectPtr grad_size = THPSize_New(sizes.size(), sizes.data());
+  THPObjectPtr grad_size(THPSize_New(sizes.size(), sizes.data()));
   if (!grad_size) throw python_error();
-  THPObjectPtr new_grad = PyObject_CallFunctionObjArgs(tensor_cls, grad_size.get(), NULL);
+  THPObjectPtr new_grad(PyObject_CallFunctionObjArgs(tensor_cls, grad_size.get(), NULL));
   if (!new_grad) throw python_error();
-  THPObjectPtr result = PyObject_CallMethod(new_grad.get(), "zero_", "");
+  THPObjectPtr result(PyObject_CallMethod(new_grad.get(), "zero_", ""));
   if (!result) throw python_error();
   return new_grad.release();
 }
@@ -73,7 +73,7 @@ namespace torch { namespace autograd {
 auto PyFunction::legacy_apply(const variable_list& inputs) -> variable_list {
   AutoGIL gil;

-  THPObjectPtr pyInputs = PyTuple_New(inputs.size());
+  THPObjectPtr pyInputs(PyTuple_New(inputs.size()));
   if (!pyInputs) throw python_error();

   for (size_t i = 0; i != inputs.size(); ++i) {
@@ -88,8 +88,8 @@ auto PyFunction::legacy_apply(const variable_list& inputs) -> variable_list {
     PyTuple_SET_ITEM(pyInputs.get(), i, input);
   }

-  THPObjectPtr r = PyObject_CallMethod(
-      obj, "_do_backward", "OO", pyInputs.get(), Py_True);
+  THPObjectPtr r(PyObject_CallMethod(
+      obj, "_do_backward", "OO", pyInputs.get(), Py_True));
   if (!r) throw python_error();

   auto num_outputs = PyTuple_GET_SIZE(r.get());
@@ -126,32 +126,32 @@ auto PyFunction::apply(const variable_list& inputs) -> variable_list {
   AutoGPU _gpu_guard(-1);
   THPFunction* py_fn = (THPFunction*)obj;

-  THPObjectPtr _legacy = PyObject_GetAttrString(obj, "_is_legacy");
+  THPObjectPtr _legacy(PyObject_GetAttrString(obj, "_is_legacy"));
   if (_legacy == Py_True) {
     return legacy_apply(inputs);
   }

   // Massage a C++ variable_list into a Python arguments tuple
   auto num_inputs = inputs.size();
-  THPObjectPtr pyInputs = PyTuple_New(num_inputs);
+  THPObjectPtr pyInputs(PyTuple_New(num_inputs));
   if (!pyInputs) throw python_error();
   auto& output_info = *py_fn->output_info;
   for (size_t i = 0; i < num_inputs; ++i) {
     PyObject* input;
     if (inputs[i]) {
       input = THPVariable_Wrap(inputs[i]);
     } else {
-      THPObjectPtr tensor = _allocate_grad_output(output_info[i], _gpu_guard);
+      THPObjectPtr tensor(_allocate_grad_output(output_info[i], _gpu_guard));
       input = THPVariable_NewLeaf(tensor);
     }
     if (!input) throw python_error();
     PyTuple_SET_ITEM(pyInputs.get(), i, input);
   }

   // TODO: theoretically we could take a shortcut here and call apply directly
-  THPObjectPtr apply_fn = PyObject_GetAttrString(obj, "apply");
+  THPObjectPtr apply_fn(PyObject_GetAttrString(obj, "apply"));
   if (!apply_fn) throw python_error();
-  THPObjectPtr r = PyObject_CallObject(apply_fn, pyInputs.get());
+  THPObjectPtr r(PyObject_CallObject(apply_fn, pyInputs.get()));
   if (!r) throw python_error();
   _ensure_tuple(r);

@@ -625,12 +625,12 @@ std::pair<UnpackedInput, InputFlags> unpack_input(PyObject *args) {
   return std::make_pair(std::move(unpacked), std::move(flags));
 }

-PyObject* process_outputs(THPFunction* grad_fn, const UnpackedInput& unpacked, THPObjectPtr raw_output, bool is_volatile) {
+PyObject* process_outputs(THPFunction* grad_fn, const UnpackedInput& unpacked, THPObjectPtr&& raw_output, bool is_volatile) {
   bool unpack_output = _ensure_tuple(raw_output);

   auto num_outputs = PyTuple_GET_SIZE(raw_output.get());

-  THPObjectPtr outputs = PyTuple_New(num_outputs);
+  THPObjectPtr outputs(PyTuple_New(num_outputs));
   if (!outputs) throw python_error();

   grad_fn->cdata.num_inputs = num_outputs;
@@ -678,9 +678,9 @@ PyObject *THPFunction_do_forward(THPFunction *self, PyObject *_inputs)
   self->needs_input_grad = input_info.needs_input_grad.release();

   // Now we're ready to call a forward (implemented in Python)
-  THPObjectPtr forward_fn = PyObject_GetAttrString((PyObject*)self, "forward");
+  THPObjectPtr forward_fn(PyObject_GetAttrString((PyObject*)self, "forward"));
   if (!forward_fn) return NULL;
-  THPObjectPtr raw_output = PyObject_CallObject(forward_fn, unpacked_input.tensor_input);
+  THPObjectPtr raw_output(PyObject_CallObject(forward_fn, unpacked_input.tensor_input));
   if (!raw_output) return NULL;

   return process_outputs(self, unpacked_input, std::move(raw_output), is_volatile);
@@ -691,9 +691,9 @@ PyObject *THPFunction_apply(PyObject *cls, PyObject *_inputs)
 {
   HANDLE_TH_ERRORS

-  THPObjectPtr backward_cls = PyObject_GetAttrString(cls, "_backward_cls");
+  THPObjectPtr backward_cls(PyObject_GetAttrString(cls, "_backward_cls"));
   if (!backward_cls) return NULL;
-  THPObjectPtr ctx_obj = PyObject_CallFunctionObjArgs(backward_cls, NULL);
+  THPObjectPtr ctx_obj(PyObject_CallFunctionObjArgs(backward_cls, NULL));
   if (!ctx_obj) return NULL;
   THPFunction* ctx = (THPFunction*)ctx_obj.get();

@@ -708,7 +708,7 @@ PyObject *THPFunction_apply(PyObject *cls, PyObject *_inputs)

   // Prepend ctx to tensor_input, in preparation for static method call
   auto num_args = PyTuple_GET_SIZE(_inputs);
-  THPObjectPtr ctx_tensor_input = PyTuple_New(num_args + 1);
+  THPObjectPtr ctx_tensor_input(PyTuple_New(num_args + 1));
   PyTuple_SET_ITEM(ctx_tensor_input.get(), 0, ctx_obj.release());
   for (int i = 0; i < num_args; ++i) {
     PyObject *arg = PyTuple_GET_ITEM(unpacked_input.tensor_input.get(), i);
@@ -717,9 +717,9 @@ PyObject *THPFunction_apply(PyObject *cls, PyObject *_inputs)
   }

   // Call forward
-  THPObjectPtr forward_fn = PyObject_GetAttrString(cls, "forward");
+  THPObjectPtr forward_fn(PyObject_GetAttrString(cls, "forward"));
   if (!forward_fn) return NULL;
-  THPObjectPtr tensor_outputs = PyObject_CallObject(forward_fn, ctx_tensor_input);
+  THPObjectPtr tensor_outputs(PyObject_CallObject(forward_fn, ctx_tensor_input));
   if (!tensor_outputs) return NULL;

   return process_outputs(ctx, unpacked_input, std::move(tensor_outputs), is_volatile);
@@ -800,14 +800,14 @@ PyObject * THPFunction_do_backward(THPFunction *self, PyObject *args)
   // Some of the output might have been unused, so we have to allocate
   // zero-filled buffers instead
   Py_INCREF(raw_grad_output);
-  THPObjectPtr grad_output = raw_grad_output;
+  THPObjectPtr grad_output(raw_grad_output);
   _prepare_grad_output(self, grad_output);

   // self.backward(*grad_output)
-  THPObjectPtr backward_fn = PyObject_GetAttrString((PyObject*)self, "backward");
+  THPObjectPtr backward_fn(PyObject_GetAttrString((PyObject*)self, "backward"));
   THPUtils_assert(backward_fn.get(), "function %s doesn't implement a required "
     "'backward' method", THPUtils_typename((PyObject*)self));
-  THPObjectPtr grad_input = PyObject_CallObject(backward_fn, grad_output.get());
+  THPObjectPtr grad_input(PyObject_CallObject(backward_fn, grad_output.get()));
   if (!grad_input) return NULL;
   _ensure_tuple(grad_input);

@@ -854,7 +854,7 @@ PyObject *THPFunction_saved_tensors(THPFunction *self, void *_unused)
     return PyTuple_New(0);

   int num_saved = self->saved_variables->size();
-  THPObjectPtr saved_tensors = PyTuple_New(num_saved);
+  THPObjectPtr saved_tensors(PyTuple_New(num_saved));
   if (!saved_tensors)
     return NULL;
   auto& saved_variables = *self->saved_variables;
@@ -879,7 +879,7 @@ PyObject *THPFunction_saved_variables(THPFunction *self, void *_unused)
     return PyTuple_New(0);

   int num_saved = self->saved_variables->size();
-  THPObjectPtr py_saved_variables = PyTuple_New(num_saved);
+  THPObjectPtr py_saved_variables(PyTuple_New(num_saved));
   if (!py_saved_variables) return NULL;
   auto& saved_variables = *self->saved_variables;
   for (int i = 0; i < num_saved; i++) {
@@ -901,11 +901,11 @@ PyObject *THPFunction_next_functions(THPFunction *self, void *_unused)
 {
   auto& next_fns = self->cdata.next_functions;
   int size = next_fns.size();
-  THPObjectPtr result = PyTuple_New(size);
+  THPObjectPtr result(PyTuple_New(size));
   if (!result)
     return NULL;
   for (int i = 0; i < size; i++) {
-    THPObjectPtr fn_tuple = PyTuple_New(2);
+    THPObjectPtr fn_tuple(PyTuple_New(2));
     if (!fn_tuple) return NULL;
     PyObject* fn = functionToPyObject(next_fns[i].first);
     if (!fn) return NULL;
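One change in this file goes beyond the initialization syntax: process_outputs now takes its THPObjectPtr argument by rvalue reference (THPObjectPtr&&), and both callers already pass std::move(...). A hedged sketch of that convention, reusing the hypothetical ObjectPtr from the commit header above (the names here are illustrative, not the real signatures):

#include <utility>

// Taking the owning pointer as an rvalue reference makes the ownership
// transfer explicit at the call site and binds directly to the caller's
// std::move without constructing a temporary.
PyObject* consume_output(ObjectPtr<PyObject>&& raw_output) {
  // ... postprocess raw_output.get() here ...
  return raw_output.release();  // the caller's pointer is left empty
}

// Call site:
//   ObjectPtr<PyObject> raw(PyObject_CallObject(fn, args));
//   if (!raw) return NULL;
//   return consume_output(std::move(raw));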

torch/csrc/autograd/python_hook.cpp

Lines changed: 12 additions & 12 deletions
@@ -13,7 +13,7 @@
 using thpp::Tensor;
 using torch::autograd::variable_list;

-static THPObjectPtr wrap_variables(const variable_list& c_variables);
+static PyObject* wrap_variables(const variable_list& c_variables);
 static variable_list unwrap_variables(PyObject* py_variables);
 static std::string hook_name(PyObject* hook);
 static void check_result(PyObject* original, PyObject* result, PyObject* hook);
@@ -38,13 +38,13 @@ auto PyFunctionPreHook::operator()(const variable_list& values) -> variable_list
 {
   AutoGIL gil;

-  THPObjectPtr value = THPVariable_Wrap(values.at(value_idx));
+  THPObjectPtr value(THPVariable_Wrap(values.at(value_idx)));
   if (!value) throw python_error();

   PyObject *key, *hook;
   Py_ssize_t pos = 0;
   while (PyDict_Next(dict, &pos, &key, &hook)) {
-    THPObjectPtr res = PyObject_CallFunctionObjArgs(hook, value.get(), nullptr);
+    THPObjectPtr res(PyObject_CallFunctionObjArgs(hook, value.get(), nullptr));
     if (!res) throw python_error();
     if (res == Py_None) continue;
     check_single_result(value.get(), res.get(), hook);
@@ -71,14 +71,14 @@ auto PyFunctionPostHook::operator()(
 {
   AutoGIL gil;

-  THPObjectPtr outputs = wrap_variables(_outputs);
-  THPObjectPtr inputs = wrap_variables(_inputs);
+  THPObjectPtr outputs(wrap_variables(_outputs));
+  THPObjectPtr inputs(wrap_variables(_inputs));

   PyObject *key, *hook;
   Py_ssize_t pos = 0;
   while (PyDict_Next(dict, &pos, &key, &hook)) {
-    THPObjectPtr res = PyObject_CallFunctionObjArgs(
-        hook, outputs.get(), inputs.get(), nullptr);
+    THPObjectPtr res(PyObject_CallFunctionObjArgs(
+        hook, outputs.get(), inputs.get(), nullptr));
     if (!res) throw python_error();
     if (res == Py_None) continue;
     check_result(outputs, res, hook);
@@ -91,17 +91,17 @@ auto PyFunctionPostHook::operator()(
 }} // namespace torch::autograd


-static THPObjectPtr wrap_variables(const variable_list& c_variables)
+static PyObject *wrap_variables(const variable_list& c_variables)
 {
   size_t num_vars = c_variables.size();
-  THPObjectPtr tuple = PyTuple_New(num_vars);
+  THPObjectPtr tuple(PyTuple_New(num_vars));
   if (!tuple) throw python_error();
   for (size_t i = 0; i < num_vars; ++i) {
-    THPObjectPtr var = THPVariable_Wrap(c_variables[i]);
+    THPObjectPtr var(THPVariable_Wrap(c_variables[i]));
     if (!var) throw python_error();
     PyTuple_SET_ITEM(tuple.get(), i, var.release());
   }
-  return tuple;
+  return tuple.release();
 }

 static variable_list unwrap_variables(PyObject* py_variables) {
@@ -185,7 +185,7 @@ static void check_single_result(PyObject* _original, PyObject* _result, PyObject
 }

 static std::string hook_name(PyObject* hook) {
-  THPObjectPtr name = PyObject_GetAttrString(hook, "__name__");
+  THPObjectPtr name(PyObject_GetAttrString(hook, "__name__"));
   if (name && THPUtils_checkString(name.get())) {
     return THPUtils_unpackString(name.get());
   }
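The other change in this file is the wrap_variables signature: it now returns a raw PyObject* via release() instead of a THPObjectPtr, and each caller immediately re-wraps the result with the explicit constructor. A short illustration of that hand-off, again with the hypothetical ObjectPtr rather than the real types:

// Ownership crosses the function boundary as a raw pointer and is
// re-captured by direct-initialization on the caller's side.
static PyObject* make_none_tuple(Py_ssize_t n) {
  ObjectPtr<PyObject> tuple(PyTuple_New(n));
  if (!tuple) return nullptr;
  for (Py_ssize_t i = 0; i < n; ++i) {
    Py_INCREF(Py_None);
    PyTuple_SET_ITEM(tuple.get(), i, Py_None);  // steals the new reference
  }
  return tuple.release();  // relinquish ownership to the caller
}

// Caller:
//   ObjectPtr<PyObject> t(make_none_tuple(3));
//   if (!t) { /* propagate the Python error */ }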
