@@ -285,6 +285,7 @@ void cudnn_convolution_forward(
     THVoidTensor* input, THVoidTensor* weight, THVoidTensor* output,
     Convolution* info, bool benchmark)
 {
+  assertSameGPU(dataType, input, weight, output);
   int groups = info->groups;
 
   cudnnConvolutionFwdAlgo_t fwdAlg;
@@ -309,6 +310,7 @@ void cudnn_convolution_add_bias(
     THVoidTensor* bias, THVoidTensor* output,
     Convolution* info)
 {
+  assertSameGPU(dataType, bias, output);
   CHECK_ARG(output->nDimension <= 5);
   TensorDescriptor& bdesc = info->bdesc;
 
@@ -329,6 +331,7 @@ void cudnn_convolution_backward_data(
     THVoidTensor* gradOutput, THVoidTensor* gradInput, THVoidTensor* weight,
     Convolution* info, bool benchmark)
 {
+  assertSameGPU(dataType, gradOutput, gradInput, weight);
   int groups = info->params.groups;
 
   cudnnConvolutionBwdDataAlgo_t bwdDataAlg;
@@ -353,6 +356,7 @@ void cudnn_convolution_backward_filter(
     THVoidTensor* gradOutput, THVoidTensor* input, THVoidTensor* gradWeight,
     Convolution* info, bool benchmark)
 {
+  assertSameGPU(dataType, gradOutput, input, gradWeight);
   int groups = info->params.groups;
 
   cudnnConvolutionBwdFilterAlgo_t bwdFilterAlg;
@@ -380,6 +384,7 @@ void cudnn_convolution_backward_bias(
     THCState* state, cudnnHandle_t handle, cudnnDataType_t dataType,
     THVoidTensor* gradOutput, THVoidTensor* gradBias, Convolution* info)
 {
+  assertSameGPU(dataType, gradOutput, gradBias);
   Constant one(dataType, 1);
   Constant zero(dataType, 0);
   void* gradOutput_ptr = tensorPointer(dataType, gradOutput, 0, 1, 0);
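The added calls all follow one pattern: before any cuDNN work, each entry point now checks that every tensor it was handed lives on the same GPU. The helper itself is not part of these hunks, so what follows is only a minimal sketch of the idea under stated assumptions, not the repository's implementation. FakeTensor, checkSameDevice, and assertSameGPUSketch are hypothetical names, and the real assertSameGPU additionally receives the cudnnDataType_t argument shown in the diff.

// Illustrative sketch only: a variadic same-device check over tensors that
// each carry a CUDA device index. The actual assertSameGPU in the repository
// may differ in name, signature, and error handling.
#include <initializer_list>
#include <stdexcept>
#include <string>

struct FakeTensor {   // hypothetical stand-in for THVoidTensor
  int device;         // CUDA device index the tensor's storage lives on
};

inline void checkSameDevice(int expected, const FakeTensor* t) {
  if (t->device != expected) {
    throw std::runtime_error(
        "tensors are on different GPUs (expected device " +
        std::to_string(expected) + ", got " + std::to_string(t->device) + ")");
  }
}

template <typename... Tensors>
void assertSameGPUSketch(const FakeTensor* first, const Tensors*... rest) {
  // Compare every remaining tensor's device against the first tensor's.
  int expected = first->device;
  (void)std::initializer_list<int>{(checkSameDevice(expected, rest), 0)...};
}

With such a helper, a call like assertSameGPUSketch(&input, &weight, &output) fails loudly before any cuDNN descriptor or workspace is set up with tensors from mismatched devices, which is the intent of the inserted assertSameGPU lines above.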