@@ -483,6 +483,67 @@ heapgetpage(TableScanDesc sscan, BlockNumber block)
     scan->rs_ntuples = ntup;
 }
 
+/*
+ * heapgettup_initial_block - return the first BlockNumber to scan
+ *
+ * Returns InvalidBlockNumber when there are no blocks to scan. This can
+ * occur with empty tables and in parallel scans when parallel workers get all
+ * of the pages before we can get a chance to get our first page.
+ */
+static BlockNumber
+heapgettup_initial_block(HeapScanDesc scan, ScanDirection dir)
+{
+    Assert(!scan->rs_inited);
+
+    /* When there are no pages to scan, return InvalidBlockNumber */
+    if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
+        return InvalidBlockNumber;
+
+    if (ScanDirectionIsForward(dir))
+    {
+        /* serial scan */
+        if (scan->rs_base.rs_parallel == NULL)
+            return scan->rs_startblock;
+        else
+        {
+            /* parallel scan */
+            table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
+                                                     scan->rs_parallelworkerdata,
+                                                     (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
+
+            /* may return InvalidBlockNumber if there are no more blocks */
+            return table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
+                                                     scan->rs_parallelworkerdata,
+                                                     (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel);
+        }
+    }
+    else
+    {
+        /* backward parallel scan not supported */
+        Assert(scan->rs_base.rs_parallel == NULL);
+
+        /*
+         * Disable reporting to syncscan logic in a backwards scan; it's not
+         * very likely anyone else is doing the same thing at the same time,
+         * and much more likely that we'll just bollix things for forward
+         * scanners.
+         */
+        scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
+
+        /*
+         * Start from last page of the scan. Ensure we take into account
+         * rs_numblocks if it's been adjusted by heap_setscanlimits().
+         */
+        if (scan->rs_numblocks != InvalidBlockNumber)
+            return (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
+
+        if (scan->rs_startblock > 0)
+            return scan->rs_startblock - 1;
+
+        return scan->rs_nblocks - 1;
+    }
+}
+
 /* ----------------
  *      heapgettup - fetch next heap tuple
  *
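
The backward-scan arithmetic factored into heapgettup_initial_block() is compact enough to check by hand. Below is a minimal standalone sketch (plain C, not heapam.c code; backward_initial_block and its parameters are hypothetical stand-ins for the corresponding rs_* fields) of that branch:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

typedef uint32_t BlockNumber;
#define InvalidBlockNumber ((BlockNumber) 0xFFFFFFFF)

/*
 * Hypothetical mirror of the backward branch above: given the scan's start
 * block, an optional limit on the number of blocks (InvalidBlockNumber when
 * unlimited), and the relation size in blocks, return the last block a
 * backward scan should visit, or InvalidBlockNumber if nothing is to be
 * scanned.
 */
static BlockNumber
backward_initial_block(BlockNumber startblock, BlockNumber numblocks,
                       BlockNumber nblocks)
{
    if (nblocks == 0 || numblocks == 0)
        return InvalidBlockNumber;

    if (numblocks != InvalidBlockNumber)
        return (startblock + numblocks - 1) % nblocks;

    if (startblock > 0)
        return startblock - 1;

    return nblocks - 1;         /* wrap around to the relation's last block */
}

int
main(void)
{
    /*
     * Start block 7 of a 10-block table, limited to 5 blocks: the scan
     * covers blocks 7,8,9,0,1, so a backward scan begins at block 1.
     */
    printf("%" PRIu32 "\n", backward_initial_block(7, 5, 10));

    /* An unlimited scan starting at block 0 wraps to the last block, 9. */
    printf("%" PRIu32 "\n", backward_initial_block(0, InvalidBlockNumber, 10));

    return 0;
}

Compiled as C99, this prints 1 and then 9, matching the three return statements of the backward branch above.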
@@ -527,38 +588,19 @@ heapgettup(HeapScanDesc scan,
     {
         if (!scan->rs_inited)
         {
+            block = heapgettup_initial_block(scan, dir);
+
             /*
-             * return null immediately if relation is empty
+             * Check if we have reached the end of the scan already. This
+             * could happen if the table is empty or if the parallel workers
+             * have already finished the scan before we did anything ourselves
              */
-            if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
+            if (block == InvalidBlockNumber)
             {
                 Assert(!BufferIsValid(scan->rs_cbuf));
                 tuple->t_data = NULL;
                 return;
             }
-            if (scan->rs_base.rs_parallel != NULL)
-            {
-                ParallelBlockTableScanDesc pbscan =
-                (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
-                ParallelBlockTableScanWorker pbscanwork =
-                scan->rs_parallelworkerdata;
-
-                table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
-                                                         pbscanwork, pbscan);
-
-                block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
-                                                          pbscanwork, pbscan);
-
-                /* Other processes might have already finished the scan. */
-                if (block == InvalidBlockNumber)
-                {
-                    Assert(!BufferIsValid(scan->rs_cbuf));
-                    tuple->t_data = NULL;
-                    return;
-                }
-            }
-            else
-                block = scan->rs_startblock;    /* first page */
             heapgetpage((TableScanDesc) scan, block);
             lineoff = FirstOffsetNumber;    /* first offnum */
             scan->rs_inited = true;
@@ -582,60 +624,40 @@ heapgettup(HeapScanDesc scan,
     }
     else
     {
-        /* backward parallel scan not supported */
-        Assert(scan->rs_base.rs_parallel == NULL);
-
         if (!scan->rs_inited)
         {
+            block = heapgettup_initial_block(scan, dir);
+
             /*
-             * return null immediately if relation is empty
+             * Check if we have reached the end of the scan already. This
+             * could happen if the table is empty.
              */
-            if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
+            if (block == InvalidBlockNumber)
             {
                 Assert(!BufferIsValid(scan->rs_cbuf));
                 tuple->t_data = NULL;
                 return;
             }
 
-            /*
-             * Disable reporting to syncscan logic in a backwards scan; it's
-             * not very likely anyone else is doing the same thing at the same
-             * time, and much more likely that we'll just bollix things for
-             * forward scanners.
-             */
-            scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
-
-            /*
-             * Start from last page of the scan. Ensure we take into account
-             * rs_numblocks if it's been adjusted by heap_setscanlimits().
-             */
-            if (scan->rs_numblocks != InvalidBlockNumber)
-                block = (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
-            else if (scan->rs_startblock > 0)
-                block = scan->rs_startblock - 1;
-            else
-                block = scan->rs_nblocks - 1;
             heapgetpage((TableScanDesc) scan, block);
+            LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+
+            page = BufferGetPage(scan->rs_cbuf);
+            TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
+            lines = PageGetMaxOffsetNumber(page);
+            lineoff = lines;    /* final offnum */
+            scan->rs_inited = true;
         }
         else
        {
             /* continue from previously returned page/tuple */
             block = scan->rs_cblock;    /* current page */
-        }
+            LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
 
-        LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
+            page = BufferGetPage(scan->rs_cbuf);
+            TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
+            lines = PageGetMaxOffsetNumber(page);
 
-        page = BufferGetPage(scan->rs_cbuf);
-        TestForOldSnapshot(snapshot, scan->rs_base.rs_rd, page);
-        lines = PageGetMaxOffsetNumber(page);
-
-        if (!scan->rs_inited)
-        {
-            lineoff = lines;    /* final offnum */
-            scan->rs_inited = true;
-        }
-        else
-        {
             /*
              * The previous returned tuple may have been vacuumed since the
              * previous scan when we use a non-MVCC snapshot, so we must
@@ -837,38 +859,19 @@ heapgettup_pagemode(HeapScanDesc scan,
     {
         if (!scan->rs_inited)
         {
+            block = heapgettup_initial_block(scan, dir);
+
             /*
-             * return null immediately if relation is empty
+             * Check if we have reached the end of the scan already. This
+             * could happen if the table is empty or if the parallel workers
+             * have already finished the scan before we did anything ourselves
              */
-            if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
+            if (block == InvalidBlockNumber)
             {
                 Assert(!BufferIsValid(scan->rs_cbuf));
                 tuple->t_data = NULL;
                 return;
             }
-            if (scan->rs_base.rs_parallel != NULL)
-            {
-                ParallelBlockTableScanDesc pbscan =
-                (ParallelBlockTableScanDesc) scan->rs_base.rs_parallel;
-                ParallelBlockTableScanWorker pbscanwork =
-                scan->rs_parallelworkerdata;
-
-                table_block_parallelscan_startblock_init(scan->rs_base.rs_rd,
-                                                         pbscanwork, pbscan);
-
-                block = table_block_parallelscan_nextpage(scan->rs_base.rs_rd,
-                                                          pbscanwork, pbscan);
-
-                /* Other processes might have already finished the scan. */
-                if (block == InvalidBlockNumber)
-                {
-                    Assert(!BufferIsValid(scan->rs_cbuf));
-                    tuple->t_data = NULL;
-                    return;
-                }
-            }
-            else
-                block = scan->rs_startblock;    /* first page */
             heapgetpage((TableScanDesc) scan, block);
             lineindex = 0;
             scan->rs_inited = true;
@@ -889,58 +892,36 @@ heapgettup_pagemode(HeapScanDesc scan,
     }
     else
     {
-        /* backward parallel scan not supported */
-        Assert(scan->rs_base.rs_parallel == NULL);
-
         if (!scan->rs_inited)
         {
+            block = heapgettup_initial_block(scan, dir);
+
             /*
-             * return null immediately if relation is empty
+             * Check if we have reached the end of the scan already. This
+             * could happen if the table is empty.
              */
-            if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0)
+            if (block == InvalidBlockNumber)
             {
                 Assert(!BufferIsValid(scan->rs_cbuf));
                 tuple->t_data = NULL;
                 return;
             }
 
-            /*
-             * Disable reporting to syncscan logic in a backwards scan; it's
-             * not very likely anyone else is doing the same thing at the same
-             * time, and much more likely that we'll just bollix things for
-             * forward scanners.
-             */
-            scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
-
-            /*
-             * Start from last page of the scan. Ensure we take into account
-             * rs_numblocks if it's been adjusted by heap_setscanlimits().
-             */
-            if (scan->rs_numblocks != InvalidBlockNumber)
-                block = (scan->rs_startblock + scan->rs_numblocks - 1) % scan->rs_nblocks;
-            else if (scan->rs_startblock > 0)
-                block = scan->rs_startblock - 1;
-            else
-                block = scan->rs_nblocks - 1;
             heapgetpage((TableScanDesc) scan, block);
+            page = BufferGetPage(scan->rs_cbuf);
+            TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
+            lines = scan->rs_ntuples;
+            lineindex = lines - 1;
+            scan->rs_inited = true;
         }
         else
        {
             /* continue from previously returned page/tuple */
             block = scan->rs_cblock;    /* current page */
-        }
-
-        page = BufferGetPage(scan->rs_cbuf);
-        TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
-        lines = scan->rs_ntuples;
 
-        if (!scan->rs_inited)
-        {
-            lineindex = lines - 1;
-            scan->rs_inited = true;
-        }
-        else
-        {
+            page = BufferGetPage(scan->rs_cbuf);
+            TestForOldSnapshot(scan->rs_base.rs_snapshot, scan->rs_base.rs_rd, page);
+            lines = scan->rs_ntuples;
             lineindex = scan->rs_cindex - 1;
         }
         /* block and lineindex now reference the previous visible tid */
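
As with the non-pagemode path, the pagemode backward scan now takes its first block from heapgettup_initial_block() and then walks blocks in reverse, wrapping past block 0 when the scan started mid-relation (the wrap-on-decrement step belongs to advance logic not shown in these hunks, so treat it here as an assumption). A small hypothetical harness, again plain C rather than PostgreSQL code, shows that the initial block plus that stepping visits exactly the limited range in reverse:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

typedef uint32_t BlockNumber;

/*
 * Enumerate the blocks of a backward scan limited to numblocks blocks,
 * starting from (startblock + numblocks - 1) % nblocks -- the same
 * arithmetic used for the initial block above -- and decrementing with
 * wrap-around at block 0 (assumed stepping behaviour, not shown here).
 */
static void
walk_backward(BlockNumber startblock, BlockNumber numblocks, BlockNumber nblocks)
{
    BlockNumber block = (startblock + numblocks - 1) % nblocks;

    for (BlockNumber i = 0; i < numblocks; i++)
    {
        printf("%" PRIu32 " ", block);
        block = (block == 0) ? nblocks - 1 : block - 1;
    }
    printf("\n");
}

int
main(void)
{
    /*
     * A 10-block table scanned from block 7 with a 5-block limit covers
     * blocks 7,8,9,0,1; a backward scan should visit them as 1 0 9 8 7.
     */
    walk_backward(7, 5, 10);
    return 0;
}

Running it prints "1 0 9 8 7": the five in-scope blocks in reverse order.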