Skip to content

Fix #233 Add limit pushdown to attached queries #313

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 10 commits into the base branch from the contributor's branch
May 13, 2025
1 change: 1 addition & 0 deletions src/include/postgres_scanner.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ struct PostgresBindData : public FunctionData {
string schema_name;
string table_name;
string sql;
string limit;
idx_t pages_approx = 0;

vector<PostgresType> postgres_types;
Expand Down
13 changes: 7 additions & 6 deletions src/postgres_scanner.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -251,14 +251,15 @@ static void PostgresInitInternal(ClientContext &context, const PostgresBindData
}
if (bind_data->table_name.empty()) {
D_ASSERT(!bind_data->sql.empty());
lstate.sql =
StringUtil::Format(R"(COPY (SELECT %s FROM (%s) AS __unnamed_subquery %s) TO STDOUT (FORMAT "binary");)",
col_names, bind_data->sql, filter);
lstate.sql = StringUtil::Format(
R"(COPY (SELECT %s FROM (%s) AS __unnamed_subquery %s%s) TO STDOUT (FORMAT "binary");)",
col_names, bind_data->sql, filter, bind_data->limit);

} else {
lstate.sql = StringUtil::Format(R"(COPY (SELECT %s FROM %s.%s %s) TO STDOUT (FORMAT "binary");)", col_names,
KeywordHelper::WriteQuoted(bind_data->schema_name, '"'),
KeywordHelper::WriteQuoted(bind_data->table_name, '"'), filter);
lstate.sql = StringUtil::Format(
R"(COPY (SELECT %s FROM %s.%s %s%s) TO STDOUT (FORMAT "binary");)",
col_names, KeywordHelper::WriteQuoted(bind_data->schema_name, '"'),
KeywordHelper::WriteQuoted(bind_data->table_name, '"'), filter, bind_data->limit);
}
lstate.exec = false;
lstate.done = false;
Expand Down
66 changes: 66 additions & 0 deletions src/storage/postgres_optimizer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,79 @@
#include "storage/postgres_transaction.hpp"
#include "storage/postgres_optimizer.hpp"
#include "duckdb/planner/operator/logical_get.hpp"
#include "duckdb/planner/operator/logical_limit.hpp"
#include "storage/postgres_catalog.hpp"
#include "postgres_scanner.hpp"


namespace duckdb {

// Collects all Postgres table scans found in a logical plan, grouped by the
// catalog (attached database) they belong to, so the optimizer can process
// each attached Postgres database's scans together.
struct PostgresOperators {
	reference_map_t<PostgresCatalog, vector<reference<LogicalGet>>> scans;
};

// Push constant LIMIT/OFFSET clauses down into Postgres scans.
// When a LOGICAL_LIMIT sits on top of a Postgres LOGICAL_GET (possibly with
// projections in between - projections do not change the row count), the
// clause is rendered into the scan's bind data as SQL text and the limit
// operator is removed from the DuckDB plan. In every other case the plan is
// left untouched and the walk continues into the children.
static void OptimizePostgresScanLimitPushdown(unique_ptr<LogicalOperator> &op) {
	if (op->type != LogicalOperatorType::LOGICAL_LIMIT) {
		// not a limit - keep searching for limits further down the tree
		for (auto &child_op : op->children) {
			OptimizePostgresScanLimitPushdown(child_op);
		}
		return;
	}
	auto &limit = op->Cast<LogicalLimit>();

	// skip over any projections to find the operator the limit ultimately applies to
	reference<LogicalOperator> target = *op->children[0];
	while (target.get().type == LogicalOperatorType::LOGICAL_PROJECTION) {
		target = *target.get().children[0];
	}

	// only constant (or absent) limit/offset values can be rendered into SQL text;
	// expression-valued limits must stay in the DuckDB plan
	auto renderable = [](const auto &node) {
		switch (node.Type()) {
		case LimitNodeType::CONSTANT_VALUE:
		case LimitNodeType::UNSET:
			return true;
		default:
			return false;
		}
	};

	if (target.get().type == LogicalOperatorType::LOGICAL_GET && renderable(limit.limit_val) &&
	    renderable(limit.offset_val)) {
		auto &get = target.get().Cast<LogicalGet>();
		if (PostgresCatalog::IsPostgresScan(get.function.name)) {
			string generated_limit_clause;
			if (limit.limit_val.Type() == LimitNodeType::CONSTANT_VALUE) {
				generated_limit_clause += " LIMIT " + to_string(limit.limit_val.GetConstantValue());
			}
			if (limit.offset_val.Type() == LimitNodeType::CONSTANT_VALUE) {
				generated_limit_clause += " OFFSET " + to_string(limit.offset_val.GetConstantValue());
			}
			if (!generated_limit_clause.empty()) {
				// the Postgres scan now applies the limit itself - drop the
				// limit operator and splice its child into its place
				get.bind_data->Cast<PostgresBindData>().limit = generated_limit_clause;
				op = std::move(op->children[0]);
				return;
			}
		}
	}
	// pushdown not possible - keep the limit node and continue below it
	OptimizePostgresScanLimitPushdown(op->children[0]);
}

void GatherPostgresScans(LogicalOperator &op, PostgresOperators &result) {
if (op.type == LogicalOperatorType::LOGICAL_GET) {
auto &get = op.Cast<LogicalGet>();
Expand All @@ -35,6 +99,8 @@ void GatherPostgresScans(LogicalOperator &op, PostgresOperators &result) {
}

void PostgresOptimizer::Optimize(OptimizerExtensionInput &input, unique_ptr<LogicalOperator> &plan) {
// look at query plan and check if we can find LIMIT/OFFSET to pushdown
OptimizePostgresScanLimitPushdown(plan);
// look at the query plan and check if we can enable streaming query scans
PostgresOperators operators;
GatherPostgresScans(*plan, operators);
Expand Down
41 changes: 41 additions & 0 deletions test/sql/storage/limit.test
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# name: test/sql/storage/limit.test
# description: Test limit on an attached table
# group: [storage]

require postgres_scanner

require-env POSTGRES_TEST_DATABASE_AVAILABLE

# attach a Postgres database and create a table with enough rows that the
# LIMIT pushdown is meaningful
statement ok
ATTACH 'dbname=postgresscanner' AS s (TYPE POSTGRES)

statement ok
CREATE OR REPLACE TABLE s.large_tbl AS FROM range(100000) t(i)

# LIMIT on an attached Postgres table returns the first rows
query I
FROM s.large_tbl LIMIT 5
----
0
1
2
3
4

# LIMIT combined with OFFSET skips the leading rows
query I
FROM s.large_tbl LIMIT 5 OFFSET 5
----
5
6
7
8
9

statement ok
set explain_output='optimized_only'

# The LIMIT must no longer appear in DuckDB's optimized plan: the optimizer
# pushes it down into the Postgres scan instead.

query II
EXPLAIN FROM s.large_tbl LIMIT 5;
----
logical_opt <!REGEX>:.*LIMIT.*
Loading