Skip to content

Commit e4f6f2f

Browse files
committed
WL#10691 Ndb : ndb_restore slices of a backup file
This WL modifies ndb_restore so that it can be used to restore a subset, known as a slice, of the table fragments in a backup fileset as part of the --restore-data step. This allows a single backup fileset to be restored by multiple ndb_restore instances in parallel, each restoring a different slice of the table fragments, potentially reducing the time taken to restore a backup. ndb_restore is augmented with extra syntax : ndb_restore [--num-slices=#] [--slice-id=#] --num-slices (1..n) This is used to specify how many slices the backup fileset should be partitioned into. Defaults to 1. --slice-id (0..num-slices-1) This is used to specify which slice this ndb_restore instance should restore. Defaults to 0. A normal ndb_restore invocation ndb_restore --restore-data ... is equivalent to : ndb_restore --restore-data --num-slices=1 --slice-id=0 ... To get two way parallelism this could instead be replaced with : ndb_restore --restore-data --num-slices=2 --slice-id=0 ... & ndb_restore --restore-data --num-slices=2 --slice-id=1 ... & To get four way parallelism we could instead use: ndb_restore --restore-data --num-slices=4 --slice-id=0 ... & ndb_restore --restore-data --num-slices=4 --slice-id=1 ... & ndb_restore --restore-data --num-slices=4 --slice-id=2 ... & ndb_restore --restore-data --num-slices=4 --slice-id=3 ... & Notes : - Only the --restore-data step of ndb_restore is affected by restore by slice. --restore-meta, --restore-epoch, --disable-indexes, --rebuild-indexes et al remain serialised by design. - Each concurrently running ndb_restore instance needs its own API nodeid to be able to restore data. - The existing potential parallelism between node restores is unaffected. - All ndb_restore instances operating on the same backup fileset must read [and decompress] the whole fileset, but only restore a subset. MT Backup + Restore notes The MT Backup feature in 8.0 results in each data node writing multiple backup filesets, one per LDM. 
ndb_restore restores each fileset in a separate thread in a single ndb_restore process instance, giving LDM-level parallelism from a single ndb_restore process, without any redundant reading of the same fileset. Restore by slice can be used with ndb_restore of an MT backup. In this case, each ndb_restore will restore a subset of the table fragments across all filesets, and multiple ndb_restore instances can be used to get process level parallelism if required. Restore parallelism summary Release Degrees of parallelism 7.5 Data node 7.6 Data node x Slice 8.0 Data node x Slice x Fileset(LDM) Approved by : Priyanka Sangam <[email protected]>
1 parent 4a87ad6 commit e4f6f2f

File tree

8 files changed

+520
-63
lines changed

8 files changed

+520
-63
lines changed
Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,96 @@
1+
use test;
2+
create table test.t1 (a varchar(20) not null,
3+
b varchar(20) not null,
4+
c varchar(20),
5+
d varchar(20),
6+
e text,
7+
primary key(a,b)) engine=ndb;
8+
insert into test.t1 values
9+
("1a","1b",NULL,"1d", repeat('01WIFUA',1000)),
10+
("2a","2b",NULL,"2d", repeat('02WIFUA',1000)),
11+
("3a","3b",NULL,"3d", repeat('03WIFUA',1000)),
12+
("4a","4b",NULL,"4d", repeat('04WIFUA',1000)),
13+
("5a","5b",NULL,"5d", repeat('05WIFUA',1000)),
14+
("6a","6b",NULL,"6d", repeat('06WIFUA',1000)),
15+
("7a","7b",NULL,"7d", repeat('07WIFUA',1000)),
16+
("8a","8b",NULL,"8d", repeat('08WIFUA',1000)),
17+
("9a","9b",NULL,"9d", repeat('09WIFUA',1000)),
18+
("10a","10b",NULL,"10d", repeat('10WIFUA',1000)),
19+
("11a","11b",NULL,"11d", repeat('11WIFUA',1000)),
20+
("12a","12b",NULL,"12d", repeat('12WIFUA',1000)),
21+
("13a","13b",NULL,"13d", repeat('13WIFUA',1000)),
22+
("14a","14b",NULL,"14d", repeat('14WIFUA',1000)),
23+
("15a","15b",NULL,"15d", repeat('15WIFUA',1000)),
24+
("16a","16b",NULL,"16d", repeat('16WIFUA',1000));
25+
Stall backup completion
26+
Run backup
27+
Starting backup
28+
Backup started
29+
Make some changes which will be logged...
30+
insert into test.t1 values
31+
("17a","17b",NULL,"17d", repeat('17WIFUA',1000)),
32+
("18a","18b",NULL,"18d", repeat('18WIFUA',1000)),
33+
("19a","19b",NULL,"19d", repeat('19WIFUA',1000));
34+
update test.t1 set d=concat(d, "-modified");
35+
delete from test.t1 where a in ("5a","18a");
36+
Allow backup to complete...
37+
Waiting for backup to complete
38+
Backup completed
39+
delete from test.t1;
40+
# Normal restore using slices
41+
select a,b,c,d,length(e), right(e, 7) from test.t1 order by a,b;
42+
a b c d length(e) right(e, 7)
43+
10a 10b NULL 10d-modified 7000 10WIFUA
44+
11a 11b NULL 11d-modified 7000 11WIFUA
45+
12a 12b NULL 12d-modified 7000 12WIFUA
46+
13a 13b NULL 13d-modified 7000 13WIFUA
47+
14a 14b NULL 14d-modified 7000 14WIFUA
48+
15a 15b NULL 15d-modified 7000 15WIFUA
49+
16a 16b NULL 16d-modified 7000 16WIFUA
50+
17a 17b NULL 17d-modified 7000 17WIFUA
51+
19a 19b NULL 19d-modified 7000 19WIFUA
52+
1a 1b NULL 1d-modified 7000 01WIFUA
53+
2a 2b NULL 2d-modified 7000 02WIFUA
54+
3a 3b NULL 3d-modified 7000 03WIFUA
55+
4a 4b NULL 4d-modified 7000 04WIFUA
56+
6a 6b NULL 6d-modified 7000 06WIFUA
57+
7a 7b NULL 7d-modified 7000 07WIFUA
58+
8a 8b NULL 8d-modified 7000 08WIFUA
59+
9a 9b NULL 9d-modified 7000 09WIFUA
60+
drop table test.t1;
61+
# Restore with staging conversions + slices
62+
create table test.t1 (a varchar(20) not null,
63+
b varchar(20) not null,
64+
c text,
65+
d text,
66+
e text,
67+
primary key(a,b)) engine=ndb;
68+
select a,b,c,d,length(e), right(e, 7) from test.t1 order by a,b;
69+
a b c d length(e) right(e, 7)
70+
10a 10b NULL 10d-modified 7000 10WIFUA
71+
11a 11b NULL 11d-modified 7000 11WIFUA
72+
12a 12b NULL 12d-modified 7000 12WIFUA
73+
13a 13b NULL 13d-modified 7000 13WIFUA
74+
14a 14b NULL 14d-modified 7000 14WIFUA
75+
15a 15b NULL 15d-modified 7000 15WIFUA
76+
16a 16b NULL 16d-modified 7000 16WIFUA
77+
17a 17b NULL 17d-modified 7000 17WIFUA
78+
19a 19b NULL 19d-modified 7000 19WIFUA
79+
1a 1b NULL 1d-modified 7000 01WIFUA
80+
2a 2b NULL 2d-modified 7000 02WIFUA
81+
3a 3b NULL 3d-modified 7000 03WIFUA
82+
4a 4b NULL 4d-modified 7000 04WIFUA
83+
6a 6b NULL 6d-modified 7000 06WIFUA
84+
7a 7b NULL 7d-modified 7000 07WIFUA
85+
8a 8b NULL 8d-modified 7000 08WIFUA
86+
9a 9b NULL 9d-modified 7000 09WIFUA
87+
drop table test.t1;
88+
# Error out during restore to examine staging table names
89+
create table test.t1 (a varchar(20) not null,
90+
b varchar(20) not null,
91+
c text not null,
92+
d text not null,
93+
e text,
94+
primary key(a,b)) engine=ndb;
95+
# Dropping staging tables
96+
drop table test.t1;

mysql-test/suite/ndb/t/ndb_restore_conv_more.test

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -525,11 +525,11 @@ create table text2text (pk int primary key,
525525
--error 1
526526
--exec $NDB_RESTORE --no-defaults --core-file=0 -b $the_backup_id -n 1 -r --promote-attributes --lossy-conversions $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
527527
--echo # ndb_restore fails with error 1
528-
--exec $NDB_DROP_TABLE --no-defaults -d test text2text\$ST1 >> $NDB_TOOLS_OUTPUT
528+
--exec $NDB_DROP_TABLE --no-defaults -d test text2text\$ST1-0 >> $NDB_TOOLS_OUTPUT
529529
--error 1
530530
--exec $NDB_RESTORE --no-defaults --core-file=0 -b $the_backup_id -n 2 -r --promote-attributes --lossy-conversions $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
531531
--echo # ndb_restore fails with error 1
532-
--exec $NDB_DROP_TABLE --no-defaults -d test text2text\$ST2 >> $NDB_TOOLS_OUTPUT
532+
--exec $NDB_DROP_TABLE --no-defaults -d test text2text\$ST2-0 >> $NDB_TOOLS_OUTPUT
533533

534534
--echo # restore to text column with different charset
535535
drop table text2text;
Lines changed: 110 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,110 @@
1+
-- source include/have_ndb.inc
2+
-- source include/not_embedded.inc
3+
-- source include/have_ndb_debug.inc
4+
5+
use test;
6+
7+
create table test.t1 (a varchar(20) not null,
8+
b varchar(20) not null,
9+
c varchar(20),
10+
d varchar(20),
11+
e text,
12+
primary key(a,b)) engine=ndb;
13+
14+
insert into test.t1 values
15+
("1a","1b",NULL,"1d", repeat('01WIFUA',1000)),
16+
("2a","2b",NULL,"2d", repeat('02WIFUA',1000)),
17+
("3a","3b",NULL,"3d", repeat('03WIFUA',1000)),
18+
("4a","4b",NULL,"4d", repeat('04WIFUA',1000)),
19+
("5a","5b",NULL,"5d", repeat('05WIFUA',1000)),
20+
("6a","6b",NULL,"6d", repeat('06WIFUA',1000)),
21+
("7a","7b",NULL,"7d", repeat('07WIFUA',1000)),
22+
("8a","8b",NULL,"8d", repeat('08WIFUA',1000)),
23+
("9a","9b",NULL,"9d", repeat('09WIFUA',1000)),
24+
("10a","10b",NULL,"10d", repeat('10WIFUA',1000)),
25+
("11a","11b",NULL,"11d", repeat('11WIFUA',1000)),
26+
("12a","12b",NULL,"12d", repeat('12WIFUA',1000)),
27+
("13a","13b",NULL,"13d", repeat('13WIFUA',1000)),
28+
("14a","14b",NULL,"14d", repeat('14WIFUA',1000)),
29+
("15a","15b",NULL,"15d", repeat('15WIFUA',1000)),
30+
("16a","16b",NULL,"16d", repeat('16WIFUA',1000));
31+
32+
--echo Stall backup completion
33+
--exec $NDB_MGM --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -e "all error 10039" >> $NDB_TOOLS_OUTPUT 2>&1
34+
--echo Run backup
35+
--source suite/ndb/t/ndb_backup_nowait_start.inc
36+
37+
--echo Make some changes which will be logged...
38+
insert into test.t1 values
39+
("17a","17b",NULL,"17d", repeat('17WIFUA',1000)),
40+
("18a","18b",NULL,"18d", repeat('18WIFUA',1000)),
41+
("19a","19b",NULL,"19d", repeat('19WIFUA',1000));
42+
43+
update test.t1 set d=concat(d, "-modified");
44+
45+
delete from test.t1 where a in ("5a","18a");
46+
47+
--echo Allow backup to complete...
48+
--exec $NDB_MGM --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -e "all error 0" >> $NDB_TOOLS_OUTPUT 2>&1
49+
50+
--source suite/ndb/t/ndb_backup_nowait_wait.inc
51+
52+
delete from test.t1;
53+
54+
--echo # Normal restore using slices
55+
56+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 1 --num-slices=3 --slice-id=0 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
57+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 1 --num-slices=3 --slice-id=1 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
58+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 1 --num-slices=3 --slice-id=2 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
59+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 2 --num-slices=3 --slice-id=0 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
60+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 2 --num-slices=3 --slice-id=1 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
61+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 2 --num-slices=3 --slice-id=2 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
62+
63+
select a,b,c,d,length(e), right(e, 7) from test.t1 order by a,b;
64+
65+
drop table test.t1;
66+
67+
--echo # Restore with staging conversions + slices
68+
create table test.t1 (a varchar(20) not null,
69+
b varchar(20) not null,
70+
c text,
71+
d text,
72+
e text,
73+
primary key(a,b)) engine=ndb;
74+
75+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 1 --promote-attributes --lossy-conversions --num-slices=5 --slice-id=0 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
76+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 1 --promote-attributes --lossy-conversions --num-slices=5 --slice-id=1 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
77+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 1 --promote-attributes --lossy-conversions --num-slices=5 --slice-id=2 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
78+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 1 --promote-attributes --lossy-conversions --num-slices=5 --slice-id=3 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
79+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 1 --promote-attributes --lossy-conversions --num-slices=5 --slice-id=4 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
80+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 2 --promote-attributes --lossy-conversions --num-slices=7 --slice-id=0 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
81+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 2 --promote-attributes --lossy-conversions --num-slices=7 --slice-id=1 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
82+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 2 --promote-attributes --lossy-conversions --num-slices=7 --slice-id=2 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
83+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 2 --promote-attributes --lossy-conversions --num-slices=7 --slice-id=3 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
84+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 2 --promote-attributes --lossy-conversions --num-slices=7 --slice-id=4 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
85+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 2 --promote-attributes --lossy-conversions --num-slices=7 --slice-id=5 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
86+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 2 --promote-attributes --lossy-conversions --num-slices=7 --slice-id=6 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
87+
88+
select a,b,c,d,length(e), right(e, 7) from test.t1 order by a,b;
89+
90+
drop table test.t1;
91+
92+
--echo # Error out during restore to examine staging table names
93+
94+
create table test.t1 (a varchar(20) not null,
95+
b varchar(20) not null,
96+
c text not null,
97+
d text not null,
98+
e text,
99+
primary key(a,b)) engine=ndb;
100+
101+
--error 134
102+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 1 --lossy-conversions --promote-attributes --num-slices=3 --slice-id=1 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
103+
--error 134
104+
--exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 2 --lossy-conversions --promote-attributes --num-slices=3 --slice-id=1 -r $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT 2>&1
105+
106+
--echo # Dropping staging tables
107+
--exec $NDB_DROP_TABLE --no-defaults -d test t1\$ST1-1 >> $NDB_TOOLS_OUTPUT 2>&1
108+
--exec $NDB_DROP_TABLE --no-defaults -d test t1\$ST2-1 >> $NDB_TOOLS_OUTPUT 2>&1
109+
110+
drop table test.t1;

storage/ndb/tools/restore/Restore.cpp

Lines changed: 68 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -743,6 +743,7 @@ RestoreMetaData::readFragmentInfo()
743743
(((Uint64)ntohl(fragInfo.NoOfRecordsHigh)) << 32);
744744
tmp->filePosLow = ntohl(fragInfo.FilePosLow);
745745
tmp->filePosHigh = ntohl(fragInfo.FilePosHigh);
746+
tmp->sliceSkip = false; /* Init, set later */
746747

747748
table->m_fragmentInfo.push_back(tmp);
748749
table->m_noOfRecords += tmp->noOfRecords;
@@ -764,6 +765,7 @@ TableS::TableS(Uint32 version, NdbTableImpl* tableImpl)
764765
m_broken = false;
765766
m_main_table = NULL;
766767
m_main_column_id = ~(Uint32)0;
768+
m_has_blobs = false;
767769

768770
for (int i = 0; i < tableImpl->getNoOfColumns(); i++)
769771
createAttr(tableImpl->getColumn(i));
@@ -986,7 +988,7 @@ pad(Uint8* src, Uint32 align, Uint32 bitPos)
986988
}
987989

988990
const TupleS *
989-
RestoreDataIterator::getNextTuple(int & res)
991+
RestoreDataIterator::getNextTuple(int & res, const bool skipFragment)
990992
{
991993
if (m_currentTable->backupVersion >= NDBD_RAW_LCP)
992994
{
@@ -1002,53 +1004,66 @@ RestoreDataIterator::getNextTuple(int & res)
10021004
reset_bitfield_storage();
10031005
}
10041006
}
1005-
1006-
Uint32 dataLength = 0;
1007-
// Read record length
1008-
if (buffer_read(&dataLength, sizeof(dataLength), 1) != 1){
1009-
err << "getNextTuple:Error reading length of data part" << endl;
1010-
res = -1;
1011-
return NULL;
1012-
} // if
1013-
1014-
// Convert length from network byte order
1015-
dataLength = ntohl(dataLength);
1016-
const Uint32 dataLenBytes = 4 * dataLength;
1017-
1018-
if (dataLength == 0) {
1019-
// Zero length for last tuple
1020-
// End of this data fragment
1021-
debug << "End of fragment" << endl;
1022-
res = 0;
1023-
return NULL;
1024-
} // if
1025-
1026-
// Read tuple data
1027-
void *_buf_ptr;
1028-
if (buffer_get_ptr(&_buf_ptr, 1, dataLenBytes) != dataLenBytes) {
1029-
err << "getNextTuple:Read error: " << endl;
1030-
res = -1;
1031-
return NULL;
1032-
}
10331007

1034-
Uint32 *buf_ptr = (Uint32*)_buf_ptr;
1035-
if (m_currentTable->backupVersion >= NDBD_RAW_LCP)
1036-
{
1037-
res = readTupleData_packed(buf_ptr, dataLength);
1038-
}
1039-
else
1040-
{
1041-
res = readTupleData_old(buf_ptr, dataLength);
1042-
}
1043-
1044-
if (res)
1008+
while (true)
10451009
{
1046-
return NULL;
1047-
}
1010+
Uint32 dataLength = 0;
1011+
// Read record length
1012+
if (buffer_read(&dataLength, sizeof(dataLength), 1) != 1){
1013+
err << "getNextTuple:Error reading length of data part" << endl;
1014+
res = -1;
1015+
return NULL;
1016+
} // if
10481017

1049-
m_count ++;
1050-
res = 0;
1051-
return &m_tuple;
1018+
// Convert length from network byte order
1019+
dataLength = ntohl(dataLength);
1020+
const Uint32 dataLenBytes = 4 * dataLength;
1021+
1022+
if (dataLength == 0) {
1023+
// Zero length for last tuple
1024+
// End of this data fragment
1025+
debug << "End of fragment" << endl;
1026+
res = 0;
1027+
return NULL;
1028+
} // if
1029+
1030+
// Read tuple data
1031+
void *_buf_ptr;
1032+
if (buffer_get_ptr(&_buf_ptr, 1, dataLenBytes) != dataLenBytes) {
1033+
err << "getNextTuple:Read error: " << endl;
1034+
res = -1;
1035+
return NULL;
1036+
}
1037+
1038+
m_count++;
1039+
1040+
if (skipFragment)
1041+
{
1042+
/**
1043+
* Skip unpacking work, we just want to read all the tuples up
1044+
* to the end of this fragment
1045+
*/
1046+
continue;
1047+
}
1048+
1049+
Uint32 *buf_ptr = (Uint32*)_buf_ptr;
1050+
if (m_currentTable->backupVersion >= NDBD_RAW_LCP)
1051+
{
1052+
res = readTupleData_packed(buf_ptr, dataLength);
1053+
}
1054+
else
1055+
{
1056+
res = readTupleData_old(buf_ptr, dataLength);
1057+
}
1058+
1059+
if (res)
1060+
{
1061+
return NULL;
1062+
}
1063+
1064+
res = 0;
1065+
return &m_tuple;
1066+
}
10521067
} // RestoreDataIterator::getNextTuple
10531068

10541069
TableS *
@@ -1891,6 +1906,14 @@ void TableS::createAttr(NdbDictionary::Column *column)
18911906
m_noOfNullable++;
18921907
m_nullBitmaskSize = (m_noOfNullable + 31) / 32;
18931908
}
1909+
if ((d->m_column->getType() == NdbDictionary::Column::Blob) ||
1910+
(d->m_column->getType() == NdbDictionary::Column::Text))
1911+
{
1912+
if (d->m_column->getPartSize() > 0)
1913+
{
1914+
m_has_blobs = true;
1915+
}
1916+
}
18941917
m_variableAttribs.push_back(d);
18951918
} // TableS::createAttr
18961919

0 commit comments

Comments
 (0)