@@ -47,11 +47,11 @@ using namespace cv::gpu;

 #if !defined (HAVE_CUDA) || defined (CUDA_DISABLER)

-void cv::gpu::merge(const GpuMat* /*src*/, size_t /*count*/, GpuMat& /*dst*/, Stream& /*stream*/) { throw_no_cuda(); }
-void cv::gpu::merge(const std::vector<GpuMat>& /*src*/, GpuMat& /*dst*/, Stream& /*stream*/) { throw_no_cuda(); }
+void cv::gpu::merge(const GpuMat*, size_t, OutputArray, Stream&) { throw_no_cuda(); }
+void cv::gpu::merge(const std::vector<GpuMat>&, OutputArray, Stream&) { throw_no_cuda(); }

-void cv::gpu::split(const GpuMat& /*src*/, GpuMat* /*dst*/, Stream& /*stream*/) { throw_no_cuda(); }
-void cv::gpu::split(const GpuMat& /*src*/, std::vector<GpuMat>& /*dst*/, Stream& /*stream*/) { throw_no_cuda(); }
+void cv::gpu::split(InputArray, GpuMat*, Stream&) { throw_no_cuda(); }
+void cv::gpu::split(InputArray, std::vector<GpuMat>&, Stream&) { throw_no_cuda(); }

 void cv::gpu::transpose(const GpuMat&, GpuMat&, Stream&) { throw_no_cuda(); }

@@ -70,66 +70,60 @@ namespace cv { namespace gpu { namespace cudev
 {
     namespace split_merge
     {
-        void merge_caller(const PtrStepSzb* src, PtrStepSzb& dst, int total_channels, size_t elem_size, const cudaStream_t& stream);
-        void split_caller(const PtrStepSzb& src, PtrStepSzb* dst, int num_channels, size_t elem_size1, const cudaStream_t& stream);
+        void merge(const PtrStepSzb* src, PtrStepSzb& dst, int total_channels, size_t elem_size, const cudaStream_t& stream);
+        void split(const PtrStepSzb& src, PtrStepSzb* dst, int num_channels, size_t elem_size1, const cudaStream_t& stream);
     }
 }}}

 namespace
 {
-    void merge(const GpuMat* src, size_t n, GpuMat& dst, const cudaStream_t& stream)
+    void merge_caller(const GpuMat* src, size_t n, OutputArray _dst, Stream& stream)
     {
-        using namespace ::cv::gpu::cudev::split_merge;
+        CV_Assert( src != 0 );
+        CV_Assert( n > 0 && n <= 4 );

-        CV_Assert(src);
-        CV_Assert(n > 0);
+        const int depth = src[0].depth();
+        const Size size = src[0].size();

-        int depth = src[0].depth();
-        Size size = src[0].size();
+        for (size_t i = 0; i < n; ++i)
+        {
+            CV_Assert( src[i].size() == size );
+            CV_Assert( src[i].depth() == depth );
+            CV_Assert( src[i].channels() == 1 );
+        }

         if (depth == CV_64F)
         {
             if (!deviceSupports(NATIVE_DOUBLE))
                 CV_Error(cv::Error::StsUnsupportedFormat, "The device doesn't support double");
         }

-        bool single_channel_only = true;
-        int total_channels = 0;
-
-        for (size_t i = 0; i < n; ++i)
+        if (n == 1)
         {
-            CV_Assert(src[i].size() == size);
-            CV_Assert(src[i].depth() == depth);
-            single_channel_only = single_channel_only && src[i].channels() == 1;
-            total_channels += src[i].channels();
+            src[0].copyTo(_dst, stream);
         }
-
-        CV_Assert(single_channel_only);
-        CV_Assert(total_channels <= 4);
-
-        if (total_channels == 1)
-            src[0].copyTo(dst);
         else
         {
-            dst.create(size, CV_MAKETYPE(depth, total_channels));
+            _dst.create(size, CV_MAKE_TYPE(depth, (int)n));
+            GpuMat dst = _dst.getGpuMat();

             PtrStepSzb src_as_devmem[4];
             for (size_t i = 0; i < n; ++i)
                 src_as_devmem[i] = src[i];

             PtrStepSzb dst_as_devmem(dst);
-            merge_caller(src_as_devmem, dst_as_devmem, total_channels, CV_ELEM_SIZE(depth), stream);
+            cv::gpu::cudev::split_merge::merge(src_as_devmem, dst_as_devmem, (int)n, CV_ELEM_SIZE(depth), StreamAccessor::getStream(stream));
         }
     }

-    void split(const GpuMat& src, GpuMat* dst, const cudaStream_t& stream)
+    void split_caller(const GpuMat& src, GpuMat* dst, Stream& stream)
     {
-        using namespace ::cv::gpu::cudev::split_merge;
+        CV_Assert( dst != 0 );

-        CV_Assert(dst);
+        const int depth = src.depth();
+        const int num_channels = src.channels();

-        int depth = src.depth();
-        int num_channels = src.channels();
+        CV_Assert( num_channels <= 4 );

         if (depth == CV_64F)
         {
@@ -139,45 +133,45 @@ namespace

         if (num_channels == 1)
         {
-            src.copyTo(dst[0]);
+            src.copyTo(dst[0], stream);
             return;
         }

         for (int i = 0; i < num_channels; ++i)
             dst[i].create(src.size(), depth);

-        CV_Assert(num_channels <= 4);
-
         PtrStepSzb dst_as_devmem[4];
         for (int i = 0; i < num_channels; ++i)
             dst_as_devmem[i] = dst[i];

         PtrStepSzb src_as_devmem(src);
-        split_caller(src_as_devmem, dst_as_devmem, num_channels, src.elemSize1(), stream);
+        cv::gpu::cudev::split_merge::split(src_as_devmem, dst_as_devmem, num_channels, src.elemSize1(), StreamAccessor::getStream(stream));
     }
 }

-void cv::gpu::merge(const GpuMat* src, size_t n, GpuMat& dst, Stream& stream)
+void cv::gpu::merge(const GpuMat* src, size_t n, OutputArray dst, Stream& stream)
 {
-    ::merge(src, n, dst, StreamAccessor::getStream(stream));
+    merge_caller(src, n, dst, stream);
 }

-void cv::gpu::merge(const std::vector<GpuMat>& src, GpuMat& dst, Stream& stream)
+void cv::gpu::merge(const std::vector<GpuMat>& src, OutputArray dst, Stream& stream)
 {
-    ::merge(&src[0], src.size(), dst, StreamAccessor::getStream(stream));
+    merge_caller(&src[0], src.size(), dst, stream);
 }

-void cv::gpu::split(const GpuMat& src, GpuMat* dst, Stream& stream)
+void cv::gpu::split(InputArray _src, GpuMat* dst, Stream& stream)
 {
-    ::split(src, dst, StreamAccessor::getStream(stream));
+    GpuMat src = _src.getGpuMat();
+    split_caller(src, dst, stream);
 }

-void cv::gpu::split(const GpuMat& src, std::vector<GpuMat>& dst, Stream& stream)
+void cv::gpu::split(InputArray _src, std::vector<GpuMat>& dst, Stream& stream)
 {
+    GpuMat src = _src.getGpuMat();
     dst.resize(src.channels());
     if (src.channels() > 0)
-        ::split(src, &dst[0], StreamAccessor::getStream(stream));
+        split_caller(src, &dst[0], stream);
 }

 ////////////////////////////////////////////////////////////////////////
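For context, here is a minimal caller-side sketch (not part of this commit) of how the reworked merge/split overloads would be used. The include paths and the GpuMat/Stream setup below are assumptions about the surrounding module layout on this branch, not something the diff itself shows.

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/gpu.hpp>   // assumed header path for the cv::gpu module on this branch

int main()
{
    // Three single-channel 8-bit planes on the GPU (size and fill values are arbitrary).
    std::vector<cv::gpu::GpuMat> planes(3);
    for (size_t i = 0; i < planes.size(); ++i)
        planes[i] = cv::gpu::GpuMat(480, 640, CV_8UC1, cv::Scalar((double)i * 50));

    cv::gpu::Stream stream;

    // merge() now writes through an OutputArray, so the destination is created on demand.
    cv::gpu::GpuMat merged;
    cv::gpu::merge(planes, merged, stream);      // produces a CV_8UC3 GpuMat

    // split() now reads its source through an InputArray.
    std::vector<cv::gpu::GpuMat> channels;
    cv::gpu::split(merged, channels, stream);    // three CV_8UC1 planes again

    stream.waitForCompletion();
    return 0;
}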