pgoutput: Fix memory leak due to RelationSyncEntry.map. · postgrespro/postgres@eb89cb4 · GitHub
  • Commit eb89cb4

    Amit Kapila committed

    pgoutput: Fix memory leak due to RelationSyncEntry.map.

    Release memory allocated when creating the tuple-conversion map and its
    component TupleDescs when its owning sync entry is invalidated. TupleDescs
    must also be freed when no map is deemed necessary to begin with.

    Reported-by: Andres Freund
    Author: Amit Langote
    Reviewed-by: Takamichi Osumi, Amit Kapila
    Backpatch-through: 13, where it was introduced
    Discussion: https://postgr.es/m/MEYP282MB166933B1AB02B4FE56E82453B64D9@MEYP282MB1669.AUSP282.PROD.OUTLOOK.COM
    1 parent a40646e commit eb89cb4
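
    The gist of the change, shown here as a minimal stand-alone sketch rather than the committed code: build the
    conversion map in CacheMemoryContext from copies of the TupleDescs, free the copies right away if no map turns
    out to be needed, and have the invalidation callback free both the map and the copies it still references.
    DemoSyncEntry and the demo_* functions are hypothetical stand-ins for RelationSyncEntry, maybe_send_schema()
    and rel_sync_cache_relation_cb(); the sketch assumes the usual PostgreSQL backend headers.

#include "postgres.h"
#include "access/tupconvert.h"
#include "access/tupdesc.h"
#include "utils/memutils.h"

/* Hypothetical stand-in for the relevant part of RelationSyncEntry. */
typedef struct DemoSyncEntry
{
    TupleConversionMap *map;    /* NULL if no conversion is needed */
} DemoSyncEntry;

/* Build (or skip) the conversion map; mirrors the maybe_send_schema() change. */
static void
demo_build_map(DemoSyncEntry *entry, TupleDesc indesc, TupleDesc outdesc)
{
    /* Map must live as long as the session does. */
    MemoryContext oldctx = MemoryContextSwitchTo(CacheMemoryContext);

    /* The copies must live as long as the map they are stored in. */
    indesc = CreateTupleDescCopy(indesc);
    outdesc = CreateTupleDescCopy(outdesc);
    entry->map = convert_tuples_by_name(indesc, outdesc);
    if (entry->map == NULL)
    {
        /* No conversion needed, so don't leak the copies either. */
        FreeTupleDesc(indesc);
        FreeTupleDesc(outdesc);
    }
    MemoryContextSwitchTo(oldctx);
}

/* Invalidation-time cleanup; mirrors the rel_sync_cache_relation_cb() change. */
static void
demo_reset_entry(DemoSyncEntry *entry)
{
    if (entry->map)
    {
        /* free_conversion_map() does not free the contained TupleDescs. */
        FreeTupleDesc(entry->map->indesc);
        FreeTupleDesc(entry->map->outdesc);
        free_conversion_map(entry->map);
    }
    entry->map = NULL;
}

    Because CacheMemoryContext lives for the whole backend and is never reset, anything allocated there has to be
    released explicitly; that is what the invalidation callback in the diff below now does.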

    File tree

    2 files changed (+65, -9 lines)


    src/backend/replication/pgoutput/pgoutput.c

    Lines changed: 38 additions & 8 deletions
@@ -74,7 +74,8 @@ static void send_relation_and_attrs(Relation relation, TransactionId xid,
 /*
  * Entry in the map used to remember which relation schemas we sent.
  *
- * The schema_sent flag determines if the current schema record was already
+ * The schema_sent flag determines if the current schema record for the
+ * relation (and for its ancestor if publish_as_relid is set) was already
  * sent to the subscriber (in which case we don't need to send it again).
  *
  * The schema cache on downstream is however updated only at commit time,
@@ -92,10 +93,6 @@ typedef struct RelationSyncEntry
 {
     Oid         relid;          /* relation oid */
 
-    /*
-     * Did we send the schema? If ancestor relid is set, its schema must also
-     * have been sent for this to be true.
-     */
     bool        schema_sent;
     List       *streamed_txns;  /* streamed toplevel transactions with this
                                  * schema */
@@ -437,10 +434,17 @@ maybe_send_schema(LogicalDecodingContext *ctx,
     else
         schema_sent = relentry->schema_sent;
 
+    /* Nothing to do if we already sent the schema. */
     if (schema_sent)
         return;
 
-    /* If needed, send the ancestor's schema first. */
+    /*
+     * Nope, so send the schema. If the changes will be published using an
+     * ancestor's schema, not the relation's own, send that ancestor's schema
+     * before sending relation's own (XXX - maybe sending only the former
+     * suffices?). This is also a good place to set the map that will be used
+     * to convert the relation's tuples into the ancestor's format, if needed.
+     */
     if (relentry->publish_as_relid != RelationGetRelid(relation))
     {
         Relation    ancestor = RelationIdGetRelation(relentry->publish_as_relid);
@@ -450,8 +454,21 @@ maybe_send_schema(LogicalDecodingContext *ctx,
 
         /* Map must live as long as the session does. */
         oldctx = MemoryContextSwitchTo(CacheMemoryContext);
-        relentry->map = convert_tuples_by_name(CreateTupleDescCopy(indesc),
-                                               CreateTupleDescCopy(outdesc));
+
+        /*
+         * Make copies of the TupleDescs that will live as long as the map
+         * does before putting into the map.
+         */
+        indesc = CreateTupleDescCopy(indesc);
+        outdesc = CreateTupleDescCopy(outdesc);
+        relentry->map = convert_tuples_by_name(indesc, outdesc);
+        if (relentry->map == NULL)
+        {
+            /* Map not necessary, so free the TupleDescs too. */
+            FreeTupleDesc(indesc);
+            FreeTupleDesc(outdesc);
+        }
+
         MemoryContextSwitchTo(oldctx);
         send_relation_and_attrs(ancestor, xid, ctx);
         RelationClose(ancestor);
@@ -1011,6 +1028,7 @@ get_rel_sync_entry(PGOutputData *data, Oid relid)
         entry->pubactions.pubinsert = entry->pubactions.pubupdate =
             entry->pubactions.pubdelete = entry->pubactions.pubtruncate = false;
         entry->publish_as_relid = InvalidOid;
+        entry->map = NULL;      /* will be set by maybe_send_schema() if needed */
     }
 
     /* Validate the entry */
@@ -1191,12 +1209,24 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
 
     /*
      * Reset schema sent status as the relation definition may have changed.
+     * Also free any objects that depended on the earlier definition.
      */
     if (entry != NULL)
     {
         entry->schema_sent = false;
         list_free(entry->streamed_txns);
         entry->streamed_txns = NIL;
+        if (entry->map)
+        {
+            /*
+             * Must free the TupleDescs contained in the map explicitly,
+             * because free_conversion_map() doesn't.
+             */
+            FreeTupleDesc(entry->map->indesc);
+            FreeTupleDesc(entry->map->outdesc);
+            free_conversion_map(entry->map);
+        }
+        entry->map = NULL;
     }
 }
 
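
    As for why the map is cached in CacheMemoryContext in the first place: pgoutput runs decoded tuples through it
    whenever a change is published via an ancestor, so the map and the TupleDescs it points to must outlive any
    single call and are only released by the invalidation callback above. A small, hedged sketch of that consumer
    side using the existing execute_attr_map_tuple() API (demo_convert_if_needed() is a hypothetical helper, not
    code from this commit):

#include "postgres.h"
#include "access/tupconvert.h"

/*
 * Convert a decoded tuple into the ancestor's row format if the cached map
 * says a conversion is needed; a NULL map means the descriptors already
 * matched, so the tuple is passed through untouched.
 */
static HeapTuple
demo_convert_if_needed(HeapTuple tuple, TupleConversionMap *map)
{
    return map ? execute_attr_map_tuple(tuple, map) : tuple;
}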

    src/test/subscription/t/013_partition.pl

    Lines changed: 27 additions & 1 deletion
@@ -6,7 +6,7 @@
 use warnings;
 use PostgresNode;
 use TestLib;
-use Test::More tests => 54;
+use Test::More tests => 56;
 
 # setup
 
@@ -624,3 +624,29 @@ BEGIN
 
 $result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab3_1");
 is($result, qq(), 'truncate of tab3_1 replicated');
+
+# check that the map to convert tuples from leaf partition to the root
+# table is correctly rebuilt when a new column is added
+$node_publisher->safe_psql('postgres',
+    "ALTER TABLE tab2 DROP b, ADD COLUMN c text DEFAULT 'pub_tab2', ADD b text");
+$node_publisher->safe_psql('postgres',
+    "INSERT INTO tab2 (a, b) VALUES (1, 'xxx'), (3, 'yyy'), (5, 'zzz')");
+$node_publisher->safe_psql('postgres',
+    "INSERT INTO tab2 (a, b, c) VALUES (6, 'aaa', 'xxx_c')");
+
+$node_publisher->wait_for_catchup('sub_viaroot');
+$node_publisher->wait_for_catchup('sub2');
+
+$result = $node_subscriber1->safe_psql('postgres',
+    "SELECT c, a, b FROM tab2 ORDER BY 1, 2");
+is( $result, qq(pub_tab2|1|xxx
+pub_tab2|3|yyy
+pub_tab2|5|zzz
+xxx_c|6|aaa), 'inserts into tab2 replicated');
+
+$result = $node_subscriber2->safe_psql('postgres',
+    "SELECT c, a, b FROM tab2 ORDER BY 1, 2");
+is( $result, qq(pub_tab2|1|xxx
+pub_tab2|3|yyy
+pub_tab2|5|zzz
+xxx_c|6|aaa), 'inserts into tab2 replicated');

    0 commit comments
