
[SPARK-10341] [SQL] fix memory starving in unsafe SMJ #8511

Closed
wants to merge 5 commits into from

@@ -17,6 +17,7 @@

package org.apache.spark.rdd

import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag

import org.apache.spark.{Partition, Partitioner, TaskContext}
@@ -38,12 +39,28 @@ private[spark] class MapPartitionsWithPreparationRDD[U: ClassTag, T: ClassTag, M

override def getPartitions: Array[Partition] = firstParent[T].partitions

// In certain join operations, prepare can be called on the same partition multiple times.
// In this case, we need to ensure that each call to compute gets a separate prepare argument.
private[this] var preparedArguments: ArrayBuffer[M] = new ArrayBuffer[M]
Contributor

I just noticed this can be a val. We can fix this in a follow-up patch.

Contributor Author

Will fix this in a follow-up PR.
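For readers skimming the thread: the `var` is unnecessary because `ArrayBuffer` is itself mutable, so only the reference needs to stay fixed. A tiny standalone illustration of that point (not part of the patch; the object name is made up):

```scala
import scala.collection.mutable.ArrayBuffer

object ValBufferDemo extends App {
  // A val reference to a mutable ArrayBuffer can still be appended to and drained;
  // `var` would only be needed if the field were reassigned to a different buffer.
  val preparedArguments = new ArrayBuffer[Int]
  preparedArguments += 1
  preparedArguments += 2
  println(preparedArguments.remove(0)) // 1
  println(preparedArguments)           // ArrayBuffer(2)
}
```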


/**
* Prepare a partition for a single call to compute.
*/
def prepare(): Unit = {
preparedArguments += preparePartition()
}

/**
* Prepare a partition before computing it from its parent.
*/
override def compute(partition: Partition, context: TaskContext): Iterator[U] = {
- val preparedArgument = preparePartition()
+ val prepared =
+   if (preparedArguments.isEmpty) {
+     preparePartition()
+   } else {
+     preparedArguments.remove(0)
+   }
val parentIterator = firstParent[T].iterator(partition, context)
- executePartition(context, partition.index, preparedArgument, parentIterator)
+ executePartition(context, partition.index, prepared, parentIterator)
}
}
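To make the queueing behaviour above easier to follow, here is a minimal standalone sketch of the same FIFO pattern outside Spark. The names below (PreparedQueue, PreparedQueueDemo) are hypothetical stand-ins, not the real MapPartitionsWithPreparationRDD: each prepare() enqueues one argument, and each consumer takes the oldest one, falling back to preparing on demand when nothing was queued.

```scala
import scala.collection.mutable.ArrayBuffer

// Hypothetical, simplified model of the queueing in MapPartitionsWithPreparationRDD.
class PreparedQueue[M](preparePartition: () => M) {
  private val preparedArguments = new ArrayBuffer[M]

  // Queue one prepared argument (analogous to prepare() above).
  def prepare(): Unit = preparedArguments += preparePartition()

  // Take the oldest queued argument, or prepare one on demand if none was queued.
  def next(): M =
    if (preparedArguments.isEmpty) preparePartition() else preparedArguments.remove(0)
}

object PreparedQueueDemo extends App {
  var counter = 0
  val queue = new PreparedQueue(() => { counter += 1; counter })

  queue.prepare()       // e.g. called once per parent by a zipped RDD
  queue.prepare()
  println(queue.next()) // 1 -> first queued argument
  println(queue.next()) // 2 -> second queued argument
  println(queue.next()) // 3 -> nothing queued, prepared on demand
}
```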
13 changes: 13 additions & 0 deletions core/src/main/scala/org/apache/spark/rdd/ZippedPartitionsRDD.scala
@@ -73,6 +73,16 @@ private[spark] abstract class ZippedPartitionsBaseRDD[V: ClassTag](
super.clearDependencies()
rdds = null
}

/**
* Call the prepare method of every parent that has one.
* This is needed for reserving execution memory in advance.
*/
protected def tryPrepareParents(): Unit = {
rdds.collect {
case rdd: MapPartitionsWithPreparationRDD[_, _, _] => rdd.prepare()
}
Contributor

this could just be

rdds.collect { case rdd: MapPartitionsWithPreparationRDD[_, _, _] => rdd.prepare() }

}
}
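The reviewer's one-liner works because Scala's `collect` takes a partial function and simply skips elements it is not defined for, so only the parents that are MapPartitionsWithPreparationRDDs get prepared. A small standalone sketch of that pattern, using hypothetical stand-in types rather than real RDDs:

```scala
// Hypothetical stand-ins for parents with and without a prepare() step.
trait Rdd
class PlainRdd extends Rdd
class PreparableRdd extends Rdd {
  def prepare(): Unit = println("prepared")
}

object CollectDemo extends App {
  val rdds: Seq[Rdd] = Seq(new PlainRdd, new PreparableRdd, new PreparableRdd)

  // collect applies the partial function only where it matches, so only the
  // preparable parents are prepared -- the same shape as tryPrepareParents().
  rdds.collect { case r: PreparableRdd => r.prepare() }
}
```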

private[spark] class ZippedPartitionsRDD2[A: ClassTag, B: ClassTag, V: ClassTag](
@@ -84,6 +94,7 @@ private[spark] class ZippedPartitionsRDD2[A: ClassTag, B: ClassTag, V: ClassTag]
extends ZippedPartitionsBaseRDD[V](sc, List(rdd1, rdd2), preservesPartitioning) {

override def compute(s: Partition, context: TaskContext): Iterator[V] = {
tryPrepareParents()
val partitions = s.asInstanceOf[ZippedPartitionsPartition].partitions
f(rdd1.iterator(partitions(0), context), rdd2.iterator(partitions(1), context))
}
@@ -107,6 +118,7 @@ private[spark] class ZippedPartitionsRDD3
extends ZippedPartitionsBaseRDD[V](sc, List(rdd1, rdd2, rdd3), preservesPartitioning) {

override def compute(s: Partition, context: TaskContext): Iterator[V] = {
tryPrepareParents()
val partitions = s.asInstanceOf[ZippedPartitionsPartition].partitions
f(rdd1.iterator(partitions(0), context),
rdd2.iterator(partitions(1), context),
@@ -134,6 +146,7 @@ private[spark] class ZippedPartitionsRDD4
extends ZippedPartitionsBaseRDD[V](sc, List(rdd1, rdd2, rdd3, rdd4), preservesPartitioning) {

override def compute(s: Partition, context: TaskContext): Iterator[V] = {
tryPrepareParents()
val partitions = s.asInstanceOf[ZippedPartitionsPartition].partitions
f(rdd1.iterator(partitions(0), context),
rdd2.iterator(partitions(1), context),
@@ -46,11 +46,17 @@ class MapPartitionsWithPreparationRDDSuite extends SparkFunSuite with LocalSpark
}

// Verify that the numbers are pushed in the order expected
- val result = {
-   new MapPartitionsWithPreparationRDD[Int, Int, Unit](
-     parent, preparePartition, executePartition).collect()
- }
+ val rdd = new MapPartitionsWithPreparationRDD[Int, Int, Unit](
+   parent, preparePartition, executePartition)
+ val result = rdd.collect()
assert(result === Array(10, 20, 30))

TestObject.things.clear()
Contributor

Can you add a comment before this line:

// Zip two of these RDDs, both should be prepared before the parent is executed

// Zip two of these RDDs, both should be prepared before the parent is executed
val rdd2 = new MapPartitionsWithPreparationRDD[Int, Int, Unit](
parent, preparePartition, executePartition)
val result2 = rdd.zipPartitions(rdd2)((a, b) => a).collect()
assert(result2 === Array(10, 10, 20, 30, 20, 30))
}

}
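For context on the new assertion: `zipPartitions` runs a single task over each pair of co-located partitions and hands both parent iterators to the user function, which is why every parent must be prepared before either iterator is drained. A rough usage sketch with made-up data (assumes a local SparkContext; not part of the test suite):

```scala
import org.apache.spark.{SparkConf, SparkContext}

object ZipPartitionsDemo extends App {
  val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("zip-demo"))

  val left  = sc.parallelize(1 to 6, 2)
  val right = sc.parallelize(Seq("a", "b", "c", "d", "e", "f"), 2)

  // One task runs per pair of partitions; both iterators live in the same task,
  // so any per-partition preparation for either parent has to happen before the
  // function below starts draining them.
  val zipped = left.zipPartitions(right) { (nums, strs) =>
    nums.zip(strs).map { case (n, s) => s"$n$s" }
  }

  zipped.collect().foreach(println)
  sc.stop()
}
```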