Skip to content

Commit aa60965

Browse files
author
Maheedhar PV
committed
Merge branch 'mysql-8.0' into mysql-trunk
2 parents 1405d79 + 6e54c4d commit aa60965

File tree

3 files changed

+52
-6
lines changed

3 files changed

+52
-6
lines changed

mysql-test/r/multi_update_innodb.result

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,3 +113,17 @@ Note 1003 update `test`.`t1` join `test`.`t2` set `test`.`t2`.`b` = 0
113113
UPDATE t1,t2 SET t2.b=0;
114114
SET sql_buffer_result=@old_buf_result;
115115
DROP TABLE t1,t2;
116+
#
117+
# Bug#28716103 - MULTI-TABLE UPDATE OF PRIMARY KEY RESULTS IN ERROR 1032
118+
# "CAN'T FIND RECORD"
119+
#
120+
CREATE TABLE t1 (c1 CHAR(255) NOT NULL);
121+
CREATE TABLE t2 (c1 CHAR(255) NOT NULL, c2 CHAR(255) NOT NULL,
122+
c3 CHAR(255) NOT NULL, PRIMARY KEY (c1, c2, c3));
123+
INSERT INTO t1 VALUES ('x'), ('x');
124+
INSERT INTO t2 VALUES ('x', '', '');
125+
UPDATE t1, t2 SET t2.c2 = 'y', t2.c3 = 'y' WHERE t2.c1 = t1.c1;
126+
SELECT * FROM t2;
127+
c1 c2 c3
128+
x y y
129+
DROP TABLE t1, t2;

mysql-test/t/multi_update_innodb.test

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,3 +113,21 @@ eval $query;
113113

114114
SET sql_buffer_result=@old_buf_result;
115115
DROP TABLE t1,t2;
116+
117+
--echo #
118+
--echo # Bug#28716103 - MULTI-TABLE UPDATE OF PRIMARY KEY RESULTS IN ERROR 1032
119+
--echo # "CAN'T FIND RECORD"
120+
--echo #
121+
122+
# Make sure the size of the primary key (accounting for bytes per code point)
123+
# is bigger than 512 bytes. This will force the use of a hash field
124+
# and a unique constraint on the hash field in the temporary table.
125+
126+
CREATE TABLE t1 (c1 CHAR(255) NOT NULL);
127+
CREATE TABLE t2 (c1 CHAR(255) NOT NULL, c2 CHAR(255) NOT NULL,
128+
c3 CHAR(255) NOT NULL, PRIMARY KEY (c1, c2, c3));
129+
INSERT INTO t1 VALUES ('x'), ('x');
130+
INSERT INTO t2 VALUES ('x', '', '');
131+
UPDATE t1, t2 SET t2.c2 = 'y', t2.c3 = 'y' WHERE t2.c1 = t1.c1;
132+
SELECT * FROM t2;
133+
DROP TABLE t1, t2;

sql/sql_update.cc

Lines changed: 20 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1891,7 +1891,7 @@ bool Query_result_update::optimize() {
18911891
TABLE *table = table_ref->table;
18921892
uint cnt = table_ref->shared;
18931893
List<Item> temp_fields;
1894-
ORDER group;
1894+
ORDER *group = nullptr;
18951895
Temp_table_param *tmp_param;
18961896

18971897
if (thd->lex->is_ignore()) table->file->ha_extra(HA_EXTRA_IGNORE_DUP_KEY);
@@ -2038,17 +2038,18 @@ bool Query_result_update::optimize() {
20382038

20392039
temp_fields.concat(fields_for_table[cnt]);
20402040

2041+
group = new (thd->mem_root) ORDER;
20412042
/* Make an unique key over the first field to avoid duplicated updates */
2042-
memset(&group, 0, sizeof(group));
2043-
group.direction = ORDER_ASC;
2044-
group.item = temp_fields.head_ref();
2043+
memset(group, 0, sizeof(*group));
2044+
group->direction = ORDER_ASC;
2045+
group->item = temp_fields.head_ref();
20452046

20462047
tmp_param->allow_group_via_temp_table = true;
20472048
tmp_param->field_count = temp_fields.elements;
20482049
tmp_param->group_parts = 1;
20492050
tmp_param->group_length = table->file->ref_length;
20502051
tmp_tables[cnt] =
2051-
create_tmp_table(thd, tmp_param, temp_fields, &group, false, false,
2052+
create_tmp_table(thd, tmp_param, temp_fields, group, false, false,
20522053
TMP_TABLE_ALL_COLUMNS, HA_POS_ERROR, "");
20532054
if (!tmp_tables[cnt]) return true;
20542055

@@ -2060,6 +2061,7 @@ bool Query_result_update::optimize() {
20602061
calling fill_record() to assign values to the temporary table's fields.
20612062
*/
20622063
tmp_tables[cnt]->triggers = table->triggers;
2064+
tmp_tables[cnt]->file->ha_index_init(0, false /*sorted*/);
20632065
}
20642066
return false;
20652067
}
@@ -2073,6 +2075,11 @@ void Query_result_update::cleanup(THD *thd) {
20732075
if (tmp_tables) {
20742076
for (uint cnt = 0; cnt < update_table_count; cnt++) {
20752077
if (tmp_tables[cnt]) {
2078+
/*
2079+
Cleanup can get called without the send_eof() call; close
2080+
the index if open.
2081+
*/
2082+
tmp_tables[cnt]->file->ha_index_or_rnd_end();
20762083
free_tmp_table(thd, tmp_tables[cnt]);
20772084
tmp_table_param[cnt].cleanup();
20782085
}
@@ -2230,11 +2237,16 @@ bool Query_result_update::send_data(THD *thd, List<Item> &) {
22302237
unupdated_check_opt_tables.elements,
22312238
*values_for_table[offset], nullptr, nullptr, false);
22322239

2240+
// check if a record exists with the same hash value
2241+
if (!check_unique_constraint(tmp_table))
2242+
return false; // skip adding duplicate record to the temp table
2243+
22332244
/* Write row, ignoring duplicated updates to a row */
22342245
error = tmp_table->file->ha_write_row(tmp_table->record[0]);
22352246
if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE) {
22362247
if (error &&
2237-
create_ondisk_from_heap(thd, tmp_table, error, true, nullptr)) {
2248+
(create_ondisk_from_heap(thd, tmp_table, error, true, nullptr) ||
2249+
tmp_table->file->ha_index_init(0, false /*sorted*/))) {
22382250
update_completed = true;
22392251
return true; // Not a table_is_full error
22402252
}
@@ -2381,6 +2393,8 @@ bool Query_result_update::do_updates(THD *thd) {
23812393
}
23822394
copy_field_end = copy_field_ptr;
23832395

2396+
// Before initializing for random scan, close the index opened for insert.
2397+
tmp_table->file->ha_index_or_rnd_end();
23842398
if ((local_error = tmp_table->file->ha_rnd_init(true))) {
23852399
if (table->file->is_fatal_error(local_error))
23862400
error_flags |= ME_FATALERROR;

0 commit comments

Comments
 (0)