diff --git a/.gitmodules b/.gitmodules
index d1330bf28c60..7cffd15daaea 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,16 +1,16 @@
 [submodule "vendor/postgres-v14"]
     path = vendor/postgres-v14
     url = https://github.com/neondatabase/postgres.git
-    branch = REL_14_STABLE_neon
+    branch = thesuhas/move_lwlsn_neon_ext_v14
 [submodule "vendor/postgres-v15"]
     path = vendor/postgres-v15
     url = https://github.com/neondatabase/postgres.git
-    branch = REL_15_STABLE_neon
+    branch = thesuhas/move_lwlsn_neon_ext_v15
 [submodule "vendor/postgres-v16"]
     path = vendor/postgres-v16
    url = https://github.com/neondatabase/postgres.git
-    branch = REL_16_STABLE_neon
+    branch = thesuhas/move_lwlsn_neon_ext_v16
 [submodule "vendor/postgres-v17"]
     path = vendor/postgres-v17
     url = https://github.com/neondatabase/postgres.git
-    branch = REL_17_STABLE_neon
+    branch = move_lwlsn_neon_ext
diff --git a/pgxn/neon/Makefile b/pgxn/neon/Makefile
index c87ae59fd6af..44c897d46b3e 100644
--- a/pgxn/neon/Makefile
+++ b/pgxn/neon/Makefile
@@ -10,6 +10,7 @@ OBJS = \
     libpagestore.o \
     logical_replication_monitor.o \
     neon.o \
+    neon_lwlc.o \
     neon_pgversioncompat.o \
     neon_perf_counters.o \
     neon_utils.o \
diff --git a/pgxn/neon/file_cache.c b/pgxn/neon/file_cache.c
index f13522e55b9f..02bdd08ac6f7 100644
--- a/pgxn/neon/file_cache.c
+++ b/pgxn/neon/file_cache.c
@@ -48,6 +48,7 @@
 #include "hll.h"
 #include "bitmap.h"
 #include "neon.h"
+#include "neon_lwlc.h"
 #include "neon_perf_counters.h"
 
 #define CriticalAssert(cond) do if (!(cond)) elog(PANIC, "LFC: assertion %s failed at %s:%d: ", #cond, __FILE__, __LINE__); while (0)
@@ -999,7 +1000,9 @@ lfc_prefetch(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno,
         LWLockRelease(lfc_lock);
         return false;
     }
-    lwlsn = GetLastWrittenLSN(rinfo, forknum, blkno);
+
+    lwlsn = neon_get_lwlsn(rinfo, forknum, blkno);
+
     if (lwlsn > lsn)
     {
         elog(DEBUG1, "Skip LFC write for %d because LwLSN=%X/%X is greater than not_modified_since LSN %X/%X",
diff --git a/pgxn/neon/neon.c b/pgxn/neon/neon.c
index 0f226cc9e241..d72a94344986 100644
--- a/pgxn/neon/neon.c
+++ b/pgxn/neon/neon.c
@@ -33,6 +33,7 @@
 
 #include "extension_server.h"
 #include "neon.h"
+#include "neon_lwlc.h"
 #include "control_plane_connector.h"
 #include "logical_replication_monitor.h"
 #include "unstable_extensions.h"
@@ -437,6 +438,8 @@ _PG_init(void)
     pg_init_libpagestore();
     pg_init_walproposer();
 
+    init_lwlc();
+
     pagestore_smgr_init();
 
     Custom_XLogReaderRoutines = NeonOnDemandXLogReaderRoutines;
diff --git a/pgxn/neon/neon_lwlc.c b/pgxn/neon/neon_lwlc.c
new file mode 100644
index 000000000000..16fc78c8b89c
--- /dev/null
+++ b/pgxn/neon/neon_lwlc.c
@@ -0,0 +1,497 @@
+#include "postgres.h"
+
+#include "neon_lwlc.h"
+
+#include "miscadmin.h"
+#include "access/xlog.h"
+#include "lib/ilist.h"
+#include "storage/ipc.h"
+#include "storage/shmem.h"
+#include "storage/buf_internals.h"
+#include "utils/guc.h"
+#include "utils/hsearch.h"
+
+typedef struct LastWrittenLsnCacheEntry
+{
+    BufferTag   key;
+    XLogRecPtr  lsn;
+    /* doubly linked list for LRU replacement algorithm */
+    dlist_node  lru_node;
+} LastWrittenLsnCacheEntry;
+
+typedef struct LwLsnCacheCtl
+{
+    int         lastWrittenLsnCacheSize;
+
+    /*
+     * Maximal last written LSN for pages not present in lastWrittenLsnCache
+     */
+    XLogRecPtr  maxLastWrittenLsn;
+
+    /*
+     * Doubly linked list to implement the LRU replacement policy for the
+     * last written LSN cache. Access to this list, as well as to the cache
+     * itself, is protected by 'LastWrittenLsnLock'.
+     */
+    dlist_head  lastWrittenLsnLRU;
+} LwLsnCacheCtl;
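+
+/*
+ * Both the control struct above and the hash table below live in shared
+ * memory, so every backend observes the same last-written LSNs;
+ * shmemrequest() reserves the space and shmeminit() initializes it below.
+ */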
+
+/*
+ * Cache of last written LSN for each relation page.
+ * Also, to provide a request LSN for smgrnblocks and smgrexists, there is a
+ * pseudokey=InvalidBlockId which stores the LSN of the last relation
+ * metadata update.
+ * The size of the cache is limited by the GUC "neon.last_written_lsn_cache_size";
+ * pages are replaced using an LRU algorithm based on a doubly linked list.
+ * Access to this cache is protected by 'LastWrittenLsnLock'.
+ */
+static HTAB *lastWrittenLsnCache;
+
+LwLsnCacheCtl *LwLsnCache;
+
+static int  lwlsn_cache_size = (128 * 1024);
+
+static void
+lwlc_register_gucs(void)
+{
+    DefineCustomIntVariable("neon.last_written_lsn_cache_size",
+                            "Size of last written LSN cache used by Neon",
+                            NULL,
+                            &lwlsn_cache_size,
+                            (128 * 1024), 1024, INT_MAX,
+                            PGC_POSTMASTER,
+                            0,  /* plain units */
+                            NULL, NULL, NULL);
+}
+
+static XLogRecPtr SetLastWrittenLSNForBlockRangeInternal(XLogRecPtr lsn,
+                                                         NRelFileInfo rlocator,
+                                                         ForkNumber forknum,
+                                                         BlockNumber from,
+                                                         BlockNumber n_blocks);
+
+/* All the necessary hooks are defined here */
+
+/* Note: these are the previous hooks */
+static set_lwlsn_block_range_hook_type prev_set_lwlsn_block_range_hook = NULL;
+static set_lwlsn_block_v_hook_type prev_set_lwlsn_block_v_hook = NULL;
+static set_lwlsn_block_hook_type prev_set_lwlsn_block_hook = NULL;
+static update_max_lwlsn_hook_type prev_update_max_lwlsn_hook = NULL;
+
+static shmem_startup_hook_type prev_shmem_startup_hook;
+
+#if PG_VERSION_NUM >= 150000
+static shmem_request_hook_type prev_shmem_request_hook;
+#endif
+
+static void shmemrequest(void);
+static void shmeminit(void);
+static void neon_update_max_lwlsn(XLogRecPtr lsn);
+
+void
+init_lwlc(void)
+{
+    if (!process_shared_preload_libraries_in_progress)
+        return;
+
+    lwlc_register_gucs();
+
+    /* Chain onto the shared-memory hooks, preserving any previous hooks */
+    prev_shmem_startup_hook = shmem_startup_hook;
+    shmem_startup_hook = shmeminit;
+
+#if PG_VERSION_NUM >= 150000
+    prev_shmem_request_hook = shmem_request_hook;
+    shmem_request_hook = shmemrequest;
+#else
+    shmemrequest();
+#endif
+
+    prev_set_lwlsn_block_range_hook = set_lwlsn_block_range_hook;
+    set_lwlsn_block_range_hook = neon_set_lwlsn_block_range;
+    prev_set_lwlsn_block_v_hook = set_lwlsn_block_v_hook;
+    set_lwlsn_block_v_hook = neon_set_lwlsn_block_v;
+    prev_set_lwlsn_block_hook = set_lwlsn_block_hook;
+    set_lwlsn_block_hook = neon_set_lwlsn_block;
+    prev_update_max_lwlsn_hook = update_max_lwlsn_hook;
+    update_max_lwlsn_hook = neon_update_max_lwlsn;
+}
+
+static void
+shmemrequest(void)
+{
+    Size        requested_size = sizeof(LwLsnCacheCtl);
+
+#if PG_VERSION_NUM >= 150000
+    if (prev_shmem_request_hook)
+        prev_shmem_request_hook();
+#endif
+
+    requested_size += hash_estimate_size(lwlsn_cache_size, sizeof(LastWrittenLsnCacheEntry));
+
+    RequestAddinShmemSpace(requested_size);
+}
+
+static void
+shmeminit(void)
+{
+    static HASHCTL info;
+    bool        found;
+
+    if (prev_shmem_startup_hook)
+        prev_shmem_startup_hook();
+
+    if (lwlsn_cache_size > 0)
+    {
+        info.keysize = sizeof(BufferTag);
+        info.entrysize = sizeof(LastWrittenLsnCacheEntry);
+        lastWrittenLsnCache = ShmemInitHash("last_written_lsn_cache",
+                                            lwlsn_cache_size, lwlsn_cache_size,
+                                            &info,
+                                            HASH_ELEM | HASH_BLOBS);
+        LwLsnCache = ShmemInitStruct("neon/LwLsnCacheCtl", sizeof(LwLsnCacheCtl), &found);
+        /* Now set the size in the struct */
+        LwLsnCache->lastWrittenLsnCacheSize = lwlsn_cache_size;
+        if (found)
+            return;
+        dlist_init(&LwLsnCache->lastWrittenLsnLRU);
+        LwLsnCache->maxLastWrittenLsn = GetRedoRecPtr();
+    }
+}
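+
+/*
+ * Usage sketch (illustrative, not part of the patch): a reader asks for an
+ * upper bound on a page's last-written LSN before requesting the page from
+ * the pageserver, while a writer records the LSN of the WAL record that
+ * modified the page:
+ *
+ *     XLogRecPtr lwlsn = neon_get_lwlsn(rinfo, forknum, blkno);
+ *     ...
+ *     neon_set_lwlsn_block(lsn, rinfo, forknum, blkno);
+ */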
+
+/*
+ * neon_get_lwlsn -- Returns maximal LSN of written page.
+ * It returns an upper bound for the last written LSN of a given page,
+ * either from a cached last written LSN or from the global maximum last written LSN.
+ * If rlocator is InvalidOid then we calculate the maximum among all cached LSNs and maxLastWrittenLsn.
+ * If the cache is large enough, iterating through all hash items may be rather expensive.
+ * But neon_get_lwlsn(InvalidOid) is used only by neon_dbsize, which is not performance critical.
+ */
+XLogRecPtr
+neon_get_lwlsn(NRelFileInfo rlocator, ForkNumber forknum, BlockNumber blkno)
+{
+    XLogRecPtr  lsn;
+    LastWrittenLsnCacheEntry *entry;
+
+    Assert(LwLsnCache->lastWrittenLsnCacheSize != 0);
+
+    LWLockAcquire(LastWrittenLsnLock, LW_SHARED);
+
+    /* Maximal last written LSN among all non-cached pages */
+    lsn = LwLsnCache->maxLastWrittenLsn;
+
+    if (NInfoGetRelNumber(rlocator) != InvalidOid)
+    {
+        BufferTag   key;
+        Oid         spcOid = NInfoGetSpcOid(rlocator);
+        Oid         dbOid = NInfoGetDbOid(rlocator);
+        Oid         relNumber = NInfoGetRelNumber(rlocator);
+
+        BufTagInit(key, relNumber, forknum, blkno, spcOid, dbOid);
+
+        entry = hash_search(lastWrittenLsnCache, &key, HASH_FIND, NULL);
+        if (entry != NULL)
+            lsn = entry->lsn;
+        else
+        {
+            LWLockRelease(LastWrittenLsnLock);
+            LWLockAcquire(LastWrittenLsnLock, LW_EXCLUSIVE);
+
+            /*
+             * In the case of statements like CREATE TABLE AS SELECT ... or
+             * INSERT FROM SELECT ... we fetch data from a source table and
+             * store it in a destination table. This causes problems for
+             * prefetch when no last-written LSN is known for the pages of
+             * the source table (which, for example, happens after a compute
+             * restart): we then fall back to the global last-written LSN,
+             * which changes frequently while we are writing pages of the
+             * destination table. As a result, the request LSN used for the
+             * prefetch and the request LSN used when the page is actually
+             * needed differ, the prefetch response is discarded as expired,
+             * and prefetch is effectively disarmed.
+             * To prevent that, we re-insert the page with the latest LSN, so
+             * that it's less likely the LSN for this page will get evicted
+             * from the cache before the page is read.
+             */
+            lsn = SetLastWrittenLSNForBlockRangeInternal(lsn, rlocator, forknum, blkno, 1);
+        }
+    }
+    else
+    {
+        HASH_SEQ_STATUS seq;
+
+        /* Find maximum of all cached LSNs */
+        hash_seq_init(&seq, lastWrittenLsnCache);
+        while ((entry = (LastWrittenLsnCacheEntry *) hash_seq_search(&seq)) != NULL)
+        {
+            if (entry->lsn > lsn)
+                lsn = entry->lsn;
+        }
+    }
+    LWLockRelease(LastWrittenLsnLock);
+
+    return lsn;
+}
+
+static void
+neon_update_max_lwlsn(XLogRecPtr lsn)
+{
+    LwLsnCache->maxLastWrittenLsn = lsn;
+}
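+
+/*
+ * Note on locking: neon_get_lwlsn above and neon_get_lwlsn_v below take
+ * LastWrittenLsnLock in shared mode and trade it for exclusive mode only on
+ * a cache miss, when the entry has to be inserted. The lock is released and
+ * re-acquired rather than upgraded in place, so another backend may insert
+ * the entry in between; SetLastWrittenLSNForBlockRangeInternal tolerates
+ * this by keeping the maximum of the existing and the new LSN.
+ */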
+
+/*
+ * neon_get_lwlsn_v -- Returns the maximal written LSN for each page in a range.
+ * This is the vectorized variant of neon_get_lwlsn: for each of the nblocks
+ * pages starting at blkno it stores into lsns[] an upper bound for the last
+ * written LSN, either from a cached last written LSN or from the global
+ * maximum last written LSN.
+ */
+void
+neon_get_lwlsn_v(NRelFileInfo relfilenode, ForkNumber forknum,
+                 BlockNumber blkno, int nblocks, XLogRecPtr *lsns)
+{
+    LastWrittenLsnCacheEntry *entry;
+    XLogRecPtr  lsn;
+
+    Assert(LwLsnCache->lastWrittenLsnCacheSize != 0);
+    Assert(nblocks > 0);
+    Assert(PointerIsValid(lsns));
+
+    LWLockAcquire(LastWrittenLsnLock, LW_SHARED);
+
+    if (NInfoGetRelNumber(relfilenode) != InvalidOid)
+    {
+        BufferTag   key;
+        bool        missed_keys = false;
+        Oid         spcOid = NInfoGetSpcOid(relfilenode);
+        Oid         dbOid = NInfoGetDbOid(relfilenode);
+        Oid         relNumber = NInfoGetRelNumber(relfilenode);
+
+        BufTagInit(key, relNumber, forknum, blkno, spcOid, dbOid);
+
+        for (int i = 0; i < nblocks; i++)
+        {
+            key.blockNum = blkno + i;
+
+            entry = hash_search(lastWrittenLsnCache, &key, HASH_FIND, NULL);
+            if (entry != NULL)
+                lsns[i] = entry->lsn;
+            else
+            {
+                /*
+                 * Mark this block's LSN as missing - we'll update the LwLSN
+                 * for missing blocks in bulk later
+                 */
+                lsns[i] = InvalidXLogRecPtr;
+                missed_keys = true;
+            }
+        }
+
+        /*
+         * If we had any missing LwLSN entries, we add the missing ones now.
+         * By doing the insertions in one batch, we decrease lock contention.
+         */
+        if (missed_keys)
+        {
+            LWLockRelease(LastWrittenLsnLock);
+            LWLockAcquire(LastWrittenLsnLock, LW_EXCLUSIVE);
+
+            lsn = LwLsnCache->maxLastWrittenLsn;
+
+            for (int i = 0; i < nblocks; i++)
+            {
+                if (lsns[i] == InvalidXLogRecPtr)
+                    lsns[i] = SetLastWrittenLSNForBlockRangeInternal(lsn, relfilenode, forknum, blkno + i, 1);
+            }
+        }
+    }
+    else
+    {
+        HASH_SEQ_STATUS seq;
+
+        lsn = LwLsnCache->maxLastWrittenLsn;
+        /* Find maximum of all cached LSNs */
+        hash_seq_init(&seq, lastWrittenLsnCache);
+        while ((entry = (LastWrittenLsnCacheEntry *) hash_seq_search(&seq)) != NULL)
+        {
+            if (entry->lsn > lsn)
+                lsn = entry->lsn;
+        }
+
+        for (int i = 0; i < nblocks; i++)
+            lsns[i] = lsn;
+    }
+    LWLockRelease(LastWrittenLsnLock);
+}
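+
+/*
+ * Example (illustrative): stamping an 8-block read with request LSNs.
+ *
+ *     XLogRecPtr lsns[8];
+ *
+ *     neon_get_lwlsn_v(rinfo, MAIN_FORKNUM, blkno, 8, lsns);
+ *
+ * neon_get_request_lsns() in pagestore_smgr.c uses this to obtain a
+ * "not modified since" horizon for every page in a pageserver request.
+ */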
+
+/*
+ * Guts for neon_set_lwlsn_block_range.
+ * Caller must ensure LastWrittenLsnLock is held in exclusive mode.
+ */
+static XLogRecPtr
+SetLastWrittenLSNForBlockRangeInternal(XLogRecPtr lsn,
+                                       NRelFileInfo rlocator,
+                                       ForkNumber forknum,
+                                       BlockNumber from,
+                                       BlockNumber n_blocks)
+{
+    if (NInfoGetRelNumber(rlocator) == InvalidOid)
+    {
+        if (lsn > LwLsnCache->maxLastWrittenLsn)
+            LwLsnCache->maxLastWrittenLsn = lsn;
+        else
+            lsn = LwLsnCache->maxLastWrittenLsn;
+    }
+    else
+    {
+        LastWrittenLsnCacheEntry *entry;
+        BufferTag   key;
+        bool        found;
+        BlockNumber i;
+        Oid         spcOid = NInfoGetSpcOid(rlocator);
+        Oid         dbOid = NInfoGetDbOid(rlocator);
+        Oid         relNumber = NInfoGetRelNumber(rlocator);
+
+        BufTagInit(key, relNumber, forknum, from, spcOid, dbOid);
+        for (i = 0; i < n_blocks; i++)
+        {
+            key.blockNum = from + i;
+            entry = hash_search(lastWrittenLsnCache, &key, HASH_ENTER, &found);
+            if (found)
+            {
+                if (lsn > entry->lsn)
+                    entry->lsn = lsn;
+                else
+                    lsn = entry->lsn;
+                /* Unlink from LRU list */
+                dlist_delete(&entry->lru_node);
+            }
+            else
+            {
+                entry->lsn = lsn;
+                if (hash_get_num_entries(lastWrittenLsnCache) > LwLsnCache->lastWrittenLsnCacheSize)
+                {
+                    /* Replace least recently used entry */
+                    LastWrittenLsnCacheEntry *victim = dlist_container(LastWrittenLsnCacheEntry, lru_node, dlist_pop_head_node(&LwLsnCache->lastWrittenLsnLRU));
+
+                    /* Adjust max LSN for not cached relations/chunks if needed */
+                    if (victim->lsn > LwLsnCache->maxLastWrittenLsn)
+                        LwLsnCache->maxLastWrittenLsn = victim->lsn;
+
+                    hash_search(lastWrittenLsnCache, victim, HASH_REMOVE, NULL);
+                }
+            }
+            /* Link to the end of LRU list */
+            dlist_push_tail(&LwLsnCache->lastWrittenLsnLRU, &entry->lru_node);
+        }
+    }
+    return lsn;
+}
+
+/*
+ * neon_set_lwlsn_block_range -- Set maximal LSN of written page range.
+ * We maintain a cache of last written LSNs with limited size and an LRU
+ * replacement policy. Keeping the last written LSN for each page allows us
+ * to use an older LSN when requesting pages of unchanged or append-only
+ * relations. It is also critical for efficient prefetch during massive
+ * update operations (like vacuum or bulk deletes).
+ *
+ * rlocator.relNumber can be InvalidOid, in which case only maxLastWrittenLsn
+ * is updated. Setting the last written LSN with a dummy rlocator is used by
+ * createdb and dbase_redo.
+ */
+XLogRecPtr
+neon_set_lwlsn_block_range(XLogRecPtr lsn, NRelFileInfo rlocator, ForkNumber forknum, BlockNumber from, BlockNumber n_blocks)
+{
+    if (lsn == InvalidXLogRecPtr || n_blocks == 0 || LwLsnCache->lastWrittenLsnCacheSize == 0)
+        return lsn;
+
+    LWLockAcquire(LastWrittenLsnLock, LW_EXCLUSIVE);
+    lsn = SetLastWrittenLSNForBlockRangeInternal(lsn, rlocator, forknum, from, n_blocks);
+    LWLockRelease(LastWrittenLsnLock);
+
+    return lsn;
+}
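+
+/*
+ * Eviction invariant (applies to the range setter above and the vectorized
+ * setter below): when an entry is pushed out of the cache, its LSN is folded
+ * into maxLastWrittenLsn. Dropping a key can therefore only raise the global
+ * fallback, never lower it, so the get functions keep returning a valid
+ * upper bound even for pages whose entries have been evicted.
+ */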
+
+/*
+ * neon_set_lwlsn_block_v -- Set maximal LSN of pages to their respective
+ * LSNs.
+ *
+ * We maintain a cache of last written LSNs with limited size and an LRU
+ * replacement policy. Keeping the last written LSN for each page allows us
+ * to use an older LSN when requesting pages of unchanged or append-only
+ * relations. It is also critical for efficient prefetch during massive
+ * update operations (like vacuum or bulk deletes).
+ */
+XLogRecPtr
+neon_set_lwlsn_block_v(const XLogRecPtr *lsns, NRelFileInfo relfilenode,
+                       ForkNumber forknum, BlockNumber blockno,
+                       int nblocks)
+{
+    LastWrittenLsnCacheEntry *entry;
+    BufferTag   key;
+    bool        found;
+    XLogRecPtr  max = InvalidXLogRecPtr;
+    Oid         spcOid = NInfoGetSpcOid(relfilenode);
+    Oid         dbOid = NInfoGetDbOid(relfilenode);
+    Oid         relNumber = NInfoGetRelNumber(relfilenode);
+
+    if (lsns == NULL || nblocks == 0 || LwLsnCache->lastWrittenLsnCacheSize == 0 ||
+        NInfoGetRelNumber(relfilenode) == InvalidOid)
+        return InvalidXLogRecPtr;
+
+    BufTagInit(key, relNumber, forknum, blockno, spcOid, dbOid);
+
+    LWLockAcquire(LastWrittenLsnLock, LW_EXCLUSIVE);
+
+    for (int i = 0; i < nblocks; i++)
+    {
+        XLogRecPtr  lsn = lsns[i];
+
+        key.blockNum = blockno + i;
+        entry = hash_search(lastWrittenLsnCache, &key, HASH_ENTER, &found);
+        if (found)
+        {
+            if (lsn > entry->lsn)
+                entry->lsn = lsn;
+            else
+                lsn = entry->lsn;
+            /* Unlink from LRU list */
+            dlist_delete(&entry->lru_node);
+        }
+        else
+        {
+            entry->lsn = lsn;
+            if (hash_get_num_entries(lastWrittenLsnCache) > LwLsnCache->lastWrittenLsnCacheSize)
+            {
+                /* Replace least recently used entry */
+                LastWrittenLsnCacheEntry *victim = dlist_container(LastWrittenLsnCacheEntry, lru_node, dlist_pop_head_node(&LwLsnCache->lastWrittenLsnLRU));
+
+                /* Adjust max LSN for not cached relations/chunks if needed */
+                if (victim->lsn > LwLsnCache->maxLastWrittenLsn)
+                    LwLsnCache->maxLastWrittenLsn = victim->lsn;
+
+                hash_search(lastWrittenLsnCache, victim, HASH_REMOVE, NULL);
+            }
+        }
+        /* Link to the end of LRU list */
+        dlist_push_tail(&LwLsnCache->lastWrittenLsnLRU, &entry->lru_node);
+        max = Max(max, lsn);
+    }
+
+    LWLockRelease(LastWrittenLsnLock);
+
+    return max;
+}
+
+/*
+ * neon_set_lwlsn_block -- Set maximal LSN for one block
+ */
+XLogRecPtr
+neon_set_lwlsn_block(XLogRecPtr lsn, NRelFileInfo rlocator, ForkNumber forknum, BlockNumber blkno)
+{
+    return neon_set_lwlsn_block_range(lsn, rlocator, forknum, blkno, 1);
+}
+
+/*
+ * neon_set_lwlsn_relation -- Set maximal LSN for relation metadata
+ */
+XLogRecPtr
+neon_set_lwlsn_relation(XLogRecPtr lsn, NRelFileInfo rlocator, ForkNumber forknum)
+{
+    return neon_set_lwlsn_block(lsn, rlocator, forknum, REL_METADATA_PSEUDO_BLOCKNO);
+}
+
+/*
+ * neon_set_lwlsn_db -- Set maximal LSN for the whole database
+ */
+XLogRecPtr
+neon_set_lwlsn_db(XLogRecPtr lsn)
+{
+    NRelFileInfo dummyNode = {InvalidOid, InvalidOid, InvalidOid};
+
+    return neon_set_lwlsn_block(lsn, dummyNode, MAIN_FORKNUM, 0);
+}
\ No newline at end of file
diff --git a/pgxn/neon/neon_lwlc.h b/pgxn/neon/neon_lwlc.h
new file mode 100644
index 000000000000..041d824b1578
--- /dev/null
+++ b/pgxn/neon/neon_lwlc.h
@@ -0,0 +1,17 @@
+#ifndef NEON_LWLC_H
+#define NEON_LWLC_H
+
+#include "neon_pgversioncompat.h"
+
+void        init_lwlc(void);
+
+/* Hooks */
+XLogRecPtr  neon_get_lwlsn(NRelFileInfo rlocator, ForkNumber forknum, BlockNumber blkno);
+void        neon_get_lwlsn_v(NRelFileInfo relfilenode, ForkNumber forknum, BlockNumber blkno, int nblocks, XLogRecPtr *lsns);
+XLogRecPtr  neon_set_lwlsn_block_range(XLogRecPtr lsn, NRelFileInfo rlocator, ForkNumber forknum, BlockNumber from, BlockNumber n_blocks);
+XLogRecPtr  neon_set_lwlsn_block_v(const XLogRecPtr *lsns, NRelFileInfo relfilenode, ForkNumber forknum, BlockNumber blockno, int nblocks);
+XLogRecPtr  neon_set_lwlsn_block(XLogRecPtr lsn, NRelFileInfo rlocator, ForkNumber forknum, BlockNumber blkno);
+XLogRecPtr  neon_set_lwlsn_relation(XLogRecPtr lsn, NRelFileInfo rlocator, ForkNumber forknum);
+XLogRecPtr  neon_set_lwlsn_db(XLogRecPtr lsn);
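+
+/*
+ * The neon_set_lwlsn_* functions return the LSN that ended up recorded for
+ * the (last) page, which may be higher than the LSN passed in if the cache
+ * already held a newer one.
+ */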
+
+#endif
\ No newline at end of file
diff --git a/pgxn/neon/neon_pgversioncompat.h b/pgxn/neon/neon_pgversioncompat.h
index 6b4b355672a8..cdfb0c0b1221 100644
--- a/pgxn/neon/neon_pgversioncompat.h
+++ b/pgxn/neon/neon_pgversioncompat.h
@@ -76,6 +76,14 @@ InitBufferTag(BufferTag *tag, const RelFileNode *rnode,
 
 #define BufTagGetRelNumber(tagp) ((tagp)->rnode.relNode)
 
+#define BufTagInit(tag, relNumber, forknum, blkno, spcOid, dbOid) \
+    do { \
+        RelFileNode rnode = { .spcNode = spcOid, .dbNode = dbOid, .relNode = relNumber }; \
+        (tag).forkNum = forknum; \
+        (tag).blockNum = blkno; \
+        (tag).rnode = rnode; \
+    } while (false)
+
 #define InvalidRelFileNumber InvalidOid
 
 #define SMgrRelGetRelInfo(reln) \
@@ -125,6 +133,15 @@ InitBufferTag(BufferTag *tag, const RelFileNode *rnode,
         .relNumber = (tag).relNumber, \
     })
 
+#define BufTagInit(tag, relNumber, forknum, blkno, spcOid, dbOid) \
+    do { \
+        (tag).forkNum = forknum; \
+        (tag).blockNum = blkno; \
+        (tag).spcOid = spcOid; \
+        (tag).dbOid = dbOid; \
+        (tag).relNumber = relNumber; \
+    } while (false)
+
 #define SMgrRelGetRelInfo(reln) \
     ((reln)->smgr_rlocator)
diff --git a/pgxn/neon/pagestore_smgr.c b/pgxn/neon/pagestore_smgr.c
index 1135212e221c..f481c70b0dd7 100644
--- a/pgxn/neon/pagestore_smgr.c
+++ b/pgxn/neon/pagestore_smgr.c
@@ -69,6 +69,7 @@
 
 #include "bitmap.h"
 #include "neon.h"
+#include "neon_lwlc.h"
 #include "neon_perf_counters.h"
 #include "pagestore_client.h"
 
@@ -340,11 +341,6 @@ static void prefetch_do_request(PrefetchRequest *slot, neon_request_lsns *force_
 static bool prefetch_wait_for(uint64 ring_index);
 static void prefetch_cleanup_trailing_unused(void);
 static inline void prefetch_set_unused(uint64 ring_index);
-#if PG_MAJORVERSION_NUM < 17
-static void
-GetLastWrittenLSNv(NRelFileInfo relfilenode, ForkNumber forknum,
-                   BlockNumber blkno, int nblocks, XLogRecPtr *lsns);
-#endif
 
 static void neon_get_request_lsns(NRelFileInfo rinfo, ForkNumber forknum,
@@ -864,7 +860,7 @@ prefetch_on_ps_disconnect(void)
 
     /*
      * We can have gone into retry due to network error, so update stats with
-     * the latest available 
+     * the latest available
      */
     MyNeonCounters->pageserver_open_requests =
         MyPState->n_requests_inflight;
@@ -1105,7 +1101,7 @@ prefetch_register_bufferv(BufferTag tag, neon_request_lsns *frlsns,
 Retry:
     /*
      * We can have gone into retry due to network error, so update stats with
-     * the latest available 
+     * the latest available
      */
     MyNeonCounters->pageserver_open_requests =
         MyPState->ring_unused - MyPState->ring_receive;
@@ -2217,19 +2213,6 @@ nm_adjust_lsn(XLogRecPtr lsn)
 }
 
 
-/*
- * Since PG17 we use vetorized version,
- * so add compatibility function for older versions
- */
-#if PG_MAJORVERSION_NUM < 17
-static void
-GetLastWrittenLSNv(NRelFileInfo relfilenode, ForkNumber forknum,
-                   BlockNumber blkno, int nblocks, XLogRecPtr *lsns)
-{
-    lsns[0] = GetLastWrittenLSN(relfilenode, forknum, blkno);
-}
-#endif
-
 /*
  * Return LSN for requesting pages and number of blocks from page server
  */
@@ -2241,7 +2224,7 @@ neon_get_request_lsns(NRelFileInfo rinfo, ForkNumber forknum, BlockNumber blkno,
 
     Assert(nblocks <= PG_IOV_MAX);
 
-    GetLastWrittenLSNv(rinfo, forknum, blkno, (int) nblocks, last_written_lsns);
+    neon_get_lwlsn_v(rinfo, forknum, blkno, (int) nblocks, last_written_lsns);
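+
+    /*
+     * last_written_lsns[] now holds, for each block in the request, an upper
+     * bound on the LSN at which that block was last modified; the following
+     * loop derives each page's request LSNs from these values.
+     */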
 
     for (int i = 0; i < nblocks; i++)
     {
diff --git a/pgxn/neon_walredo/walredoproc.c b/pgxn/neon_walredo/walredoproc.c
index 4673de778cb3..a1be49857318 100644
--- a/pgxn/neon_walredo/walredoproc.c
+++ b/pgxn/neon_walredo/walredoproc.c
@@ -286,9 +286,6 @@ WalRedoMain(int argc, char *argv[])
     max_wal_senders = 0;
     InitializeMaxBackends();
 
-    /* Disable lastWrittenLsnCache */
-    lastWrittenLsnCacheSize = 0;
-
 #if PG_VERSION_NUM >= 150000
     process_shmem_requests();
     InitializeShmemGUCs();
diff --git a/vendor/postgres-v14 b/vendor/postgres-v14
index 7b7592e74059..d252e9d6177e 160000
--- a/vendor/postgres-v14
+++ b/vendor/postgres-v14
@@ -1 +1 @@
-Subproject commit 7b7592e74059f795b64f06860cea97673418f35e
+Subproject commit d252e9d6177ec01fd9730f9a131c38ed1c149b7a
diff --git a/vendor/postgres-v15 b/vendor/postgres-v15
index ee794ba767ee..df1727017d76 160000
--- a/vendor/postgres-v15
+++ b/vendor/postgres-v15
@@ -1 +1 @@
-Subproject commit ee794ba767eef9b10260ef67d3a58084f1dabd6f
+Subproject commit df1727017d761e9cd677d335f324af6b565bcc4c
diff --git a/vendor/postgres-v16 b/vendor/postgres-v16
index 512856aaa8be..85287d32713d 160000
--- a/vendor/postgres-v16
+++ b/vendor/postgres-v16
@@ -1 +1 @@
-Subproject commit 512856aaa8bedbaa8f06811449518dcb0c2e5d8f
+Subproject commit 85287d32713d3d36af9f0b82bee3fa90ebb75b36
diff --git a/vendor/postgres-v17 b/vendor/postgres-v17
index e5e87b9f52d0..faff7e3b4572 160000
--- a/vendor/postgres-v17
+++ b/vendor/postgres-v17
@@ -1 +1 @@
-Subproject commit e5e87b9f52d0eaeb83f3e2517bb9727aac37729b
+Subproject commit faff7e3b457265c672fd04cb71d364f5ec36ea74
diff --git a/vendor/revisions.json b/vendor/revisions.json
index 1d76e1da0143..5ffc8d9092a8 100644
--- a/vendor/revisions.json
+++ b/vendor/revisions.json
@@ -1,18 +1,18 @@
 {
     "v17": [
         "17.4",
-        "e5e87b9f52d0eaeb83f3e2517bb9727aac37729b"
+        "faff7e3b457265c672fd04cb71d364f5ec36ea74"
     ],
     "v16": [
         "16.8",
-        "512856aaa8bedbaa8f06811449518dcb0c2e5d8f"
+        "85287d32713d3d36af9f0b82bee3fa90ebb75b36"
     ],
     "v15": [
         "15.12",
-        "ee794ba767eef9b10260ef67d3a58084f1dabd6f"
+        "df1727017d761e9cd677d335f324af6b565bcc4c"
     ],
     "v14": [
         "14.17",
-        "7b7592e74059f795b64f06860cea97673418f35e"
+        "d252e9d6177ec01fd9730f9a131c38ed1c149b7a"
     ]
 }