8000 pgoutput: Fix memory leak due to RelationSyncEntry.map. · postgrespro/postgres@d250568 · GitHub
[go: up one dir, main page]

Skip to content
  • Commit d250568

    author
    Amit Kapila
    committed
    pgoutput: Fix memory leak due to RelationSyncEntry.map.
    Release memory allocated when creating the tuple-conversion map and its component TupleDescs when its owning sync entry is invalidated. TupleDescs must also be freed when no map is deemed necessary to begin with.

    Reported-by: Andres Freund
    Author: Amit Langote
    Reviewed-by: Takamichi Osumi, Amit Kapila
    Backpatch-through: 13, where it was introduced
    Discussion: https://postgr.es/m/MEYP282MB166933B1AB02B4FE56E82453B64D9@MEYP282MB1669.AUSP282.PROD.OUTLOOK.COM
    1 parent e2f21ff commit d250568

    File tree

    2 files changed

    +69
    -8
    lines changed

    2 files changed

    +69
    -8
    lines changed

    src/backend/replication/pgoutput/pgoutput.c

    Lines changed: 42 additions & 7 deletions
    Original file line numberDiff line numberDiff line change
    @@ -57,17 +57,17 @@ static void send_relation_and_attrs(Relation relation, LogicalDecodingContext *c
    5757
    /*
    5858
    * Entry in the map used to remember which relation schemas we sent.
    5959
    *
    60+
    * The schema_sent flag determines if the current schema record for the
    61+
    * relation (and for its ancestor if publish_as_relid is set) was already sent
    62+
    * to the subscriber (in which case we don't need to send it again).
    63+
    *
    6064
    * For partitions, 'pubactions' considers not only the table's own
    6165
    * publications, but also those of all of its ancestors.
    6266
    */
    6367
    typedef struct RelationSyncEntry
    6468
    {
    6569
    Oid relid; /* relation oid */
    6670

    67-
    /*
    68-
    * Did we send the schema? If ancestor relid is set, its schema must also
    69-
    * have been sent for this to be true.
    70-
    */
    7171
    bool schema_sent;
    7272

    7373
    bool replicate_valid;
    @@ -292,10 +292,17 @@ static void
    292292
    maybe_send_schema(LogicalDecodingContext *ctx,
    293293
    Relation relation, RelationSyncEntry *relentry)
    294294
    {
    295+
    /* Nothing to do if we already sent the schema. */
    295296
    if (relentry->schema_sent)
    296297
    return;
    297298

    298-
    /* If needed, send the ancestor's schema first. */
    299+
    /*
    300+
    * Nope, so send the schema. If the changes will be published using an
    301+
    * ancestor's schema, not the relation's own, send that ancestor's schema
    302+
    * before sending relation's own (XXX - maybe sending only the former
    303+
    * suffices?). This is also a good place to set the map that will be used
    304+
    * to convert the relation's tuples into the ancestor's format, if needed.
    305+
    */
    299306
    if (relentry->publish_as_relid != RelationGetRelid(relation))
    300307
    {
    301308
    Relation ancestor = RelationIdGetRelation(relentry->publish_as_relid);
    @@ -305,8 +312,21 @@ maybe_send_schema(LogicalDecodingContext *ctx,
    305312

    306313
    /* Map must live as long as the session does. */
    307314
    oldctx = MemoryContextSwitchTo(CacheMemoryContext);
    308-
    relentry->map = convert_tuples_by_name(CreateTupleDescCopy(indesc),
    309-
    CreateTupleDescCopy(outdesc));
    315+
    316+
    /*
    317+
    * Make copies of the TupleDescs that will live as long as the map
    318+
    * does before putting into the map.
    319+
    */
    320+
    indesc = CreateTupleDescCopy(indesc);
    321+
    outdesc = CreateTupleDescCopy(outdesc);
    322+
    relentry->map = convert_tuples_by_name(indesc, outdesc);
    323+
    if (relentry->map == NULL)
    324+
    {
    325+
    /* Map not necessary, so free the TupleDescs too. */
    326+
    FreeTupleDesc(indesc);
    327+
    FreeTupleDesc(outdesc);
    328+
    }
    329+
    310330
    MemoryContextSwitchTo(oldctx);
    311331
    send_relation_and_attrs(ancestor, ctx);
    312332
    RelationClose(ancestor);
    @@ -759,6 +779,7 @@ get_rel_sync_entry(PGOutputData *data, Oid relid)
    759779
    list_free(pubids);
    760780

    761781
    entry->publish_as_relid = publish_as_relid;
    782+
    entry->map = NULL; /* will be set by maybe_send_schema() if needed */
    762783
    entry->replicate_valid = true;
    763784
    }
    764785

    @@ -801,9 +822,23 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid)
    801822

    802823
    /*
    803824
    * Reset schema sent status as the relation definition may have changed.
    825+
    * Also, free any objects that depended on the earlier definition.
    804826
    */
    805827
    if (entry != NULL)
    828+
    {
    806829
    entry->schema_sent = false;
    830+
    if (entry->map)
    831+
    {
    832+
    /*
    833+
    * Must free the TupleDescs contained in the map explicitly,
    834+
    * because free_conversion_map() doesn't.
    835+
    */
    836+
    FreeTupleDesc(entry->map->indesc);
    837+
    FreeTupleDesc(entry->map->outdesc);
    838+
    free_conversion_map(entry->map);
    839+
    }
    840+
    entry->map = NULL;
    841+
    }
    807842
    }
    808843

    809844
    /*

    src/test/subscription/t/013_partition.pl

    Lines changed: 27 additions & 1 deletion
    Original file line numberDiff line numberDiff line change
    @@ -3,7 +3,7 @@
    33
    use warnings;
    44
    use PostgresNode;
    55
    use TestLib;
    6-
    use Test::More tests => 54;
    6+
    use Test::More tests => 56;
    77

    88
    # setup
    99

    @@ -621,3 +621,29 @@ BEGIN
    621621

    622622
    $result = $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab3_1");
    623623
    is($result, qq(), 'truncate of tab3_1 replicated');
    624+
    625+
    # check that the map to convert tuples from leaf partition to the root
    626+
    # table is correctly rebuilt when a new column is added
    627+
    $node_publisher->safe_psql('postgres',
    628+
    "ALTER TABLE tab2 DROP b, ADD COLUMN c text DEFAULT 'pub_tab2', ADD b text");
    629+
    $node_publisher->safe_psql('postgres',
    630+
    "INSERT INTO tab2 (a, b) VALUES (1, 'xxx'), (3, 'yyy'), (5, 'zzz')");
    631+
    $node_publisher->safe_psql('postgres',
    632+
    "INSERT INTO tab2 (a, b, c) VALUES (6, 'aaa', 'xxx_c')");
    633+
    634+
    $node_publisher->wait_for_catchup('sub_viaroot');
    635+
    $node_publisher->wait_for_catchup('sub2');
    636+
    637+
    $result = $node_subscriber1->safe_psql('postgres',
    638+
    "SELECT c, a, b FROM tab2 ORDER BY 1, 2");
    639+
    is( $result, qq(pub_tab2|1|xxx
    640+
    pub_tab2|3|yyy
    641+
    pub_tab2|5|zzz
    642+
    xxx_c|6|aaa), 'inserts into tab2 replicated');
    643+
    644+
    $result = $node_subscriber2->safe_psql('postgres',
    645+
    "SELECT c, a, b FROM tab2 ORDER BY 1, 2");
    646+
    is( $result, qq(pub_tab2|1|xxx
    647+
    pub_tab2|3|yyy
    648+
    pub_tab2|5|zzz
    649+
    xxx_c|6|aaa), 'inserts into tab2 replicated');

    0 commit comments

    Comments
     (0)
    0