class Sequel::Postgres::Dataset
Constants
- BindArgumentMethods
- PREPARED_ARG_PLACEHOLDER
- PreparedStatementMethods
Public Instance Methods
# Modules mixed into the dataset to support bound variables.
def bound_variable_modules
  [BindArgumentMethods]
end
# Fetch the rows for +sql+, yielding each as a hash with symbol keys
# and type-converted values.  Delegates to the cursor-based fetch when
# the dataset has the :cursor option set (see #use_cursor).
def fetch_rows(sql)
  if @opts[:cursor]
    cursor_fetch_rows(sql){|h| yield h}
  else
    execute(sql) do |res|
      cols = fetch_rows_set_cols(res)
      yield_hash_rows(res, cols){|h| yield h}
    end
  end
end
Use a cursor for paging.
# Use a cursor for paging.  Without a block, returns an Enumerator.
def paged_each(opts=OPTS, &block)
  return enum_for(:paged_each, opts) unless block_given?
  use_cursor(opts).each(&block)
end
PostgreSQL uses $N for placeholders instead of ?, so use a $ as the placeholder.
# PostgreSQL uses $N for placeholders instead of ?, so use a $
# as the placeholder.
def prepared_arg_placeholder
  PREPARED_ARG_PLACEHOLDER
end
# Modules mixed into the dataset to support prepared statements.
def prepared_statement_modules
  [PreparedStatementMethods]
end
Uses a cursor for fetching records, instead of fetching the entire result set at once. Note this uses a transaction around the cursor usage by default and can be changed using `hold: true` as described below. Cursors can be used to process large datasets without holding all rows in memory (which is what the underlying drivers may do by default). Options:
- :cursor_name
-
The name assigned to the cursor (default 'sequel_cursor'). Nested cursors require different names.
- :hold
-
Declare the cursor WITH HOLD and don't use transaction around the cursor usage.
- :rows_per_fetch
-
The number of rows per fetch (default 1000). Higher numbers result in fewer queries but greater memory use.
Usage:
DB[:huge_table].use_cursor.each{|row| p row}
DB[:huge_table].use_cursor(rows_per_fetch: 10000).each{|row| p row}
DB[:huge_table].use_cursor(cursor_name: 'my_cursor').each{|row| p row}
This is untested with the prepared statement/bound variable support, and unlikely to work with either.
# Return a clone of the dataset that fetches records via a cursor
# instead of loading the whole result set at once.  Supported options:
# :cursor_name, :hold, :rows_per_fetch (default 1000).
def use_cursor(opts=OPTS)
  cursor_opts = {:rows_per_fetch=>1000}.merge!(opts)
  clone(:cursor=>cursor_opts)
end
Replace the WHERE clause with one that uses CURRENT OF with the given cursor name (or the default cursor name). This allows you to update a large dataset by updating individual rows while processing the dataset via a cursor:
DB[:huge_table].use_cursor(rows_per_fetch: 1).each do |row|
  DB[:huge_table].where_current_of.update(column: ruby_method(row))
end
# Replace the WHERE clause with one using CURRENT OF and the given
# cursor name, so individual rows can be updated while iterating the
# dataset via a cursor.
def where_current_of(cursor_name='sequel_cursor')
  current_of = Sequel.lit(['CURRENT OF '], Sequel.identifier(cursor_name))
  clone(:where=>current_of)
end
Private Instance Methods
Generate and execute a procedure call.
# Generate and execute a procedure call (CALL name(args...)),
# returning the first row of the result, if any.
def call_procedure(name, args)
  sql = String.new("CALL ")
  identifier_append(sql, name)
  literal_append(sql, args)
  with_sql_first(sql)
end
Use a cursor to fetch groups of records at a time, yielding them to the block.
# Use a cursor to fetch groups of records at a time, yielding them to the block.
def cursor_fetch_rows(sql)
  server_opts = {:server=>@opts[:server] || :read_only}
  cursor_opts = @opts[:cursor]
  hold = cursor_opts[:hold]
  cursor_name = quote_identifier(cursor_opts[:cursor_name] || 'sequel_cursor')
  rows_per_fetch = cursor_opts[:rows_per_fetch].to_i
  rows_per_fetch = 1000 if rows_per_fetch <= 0

  # WITH HOLD cursors only need a checked-out connection; other cursors
  # must be wrapped in a transaction.
  db.public_send(*(hold ? [:synchronize, server_opts[:server]] : [:transaction, server_opts])) do
    begin
      execute_ddl("DECLARE #{cursor_name} NO SCROLL CURSOR WITH#{'OUT' unless hold} HOLD FOR #{sql}", server_opts)
      fetch_sql = "FETCH FORWARD #{rows_per_fetch} FROM #{cursor_name}"

      # Determine the columns during the first fetch only, so the
      # remaining fetches skip that work.
      cols = nil
      execute(fetch_sql) do |res|
        cols = fetch_rows_set_cols(res)
        yield_hash_rows(res, cols){|h| yield h}
        return if res.ntuples < rows_per_fetch
      end

      while true
        execute(fetch_sql) do |res|
          yield_hash_rows(res, cols){|h| yield h}
          return if res.ntuples < rows_per_fetch
        end
      end
    rescue Exception => e
      # Reraised immediately; captured only so the ensure clause below can
      # prefer the original exception if CLOSE also fails.
      raise
    ensure
      begin
        execute_ddl("CLOSE #{cursor_name}", server_opts)
      rescue
        raise e if e
        raise
      end
    end
  end
end
Set the columns based on the result set, and return the array of field numbers, type conversion procs, and name symbol arrays.
# Set the dataset's columns based on the result set, and return an
# array of [type conversion proc, column name symbol] pairs, one per
# result field, in field order.
def fetch_rows_set_cols(res)
  convertors = db.conversion_procs
  cols = Array.new(res.nfields) do |fieldnum|
    [convertors[res.ftype(fieldnum)], output_identifier(res.fname(fieldnum))]
  end
  self.columns = cols.map{|pair| pair[1]}
  cols
end
Use the driver's escape_bytea
# Use the driver's escape_bytea to literalize blob values.
def literal_blob_append(sql, v)
  escaped = db.synchronize(@opts[:server]){|c| c.escape_bytea(v)}
  sql << "'#{escaped}'"
end
Use the driver's escape_string
# Use the driver's escape_string to literalize string values.
def literal_string_append(sql, v)
  escaped = db.synchronize(@opts[:server]){|c| c.escape_string(v)}
  sql << "'#{escaped}'"
end
For each row in the result set, yield a hash with column name symbol keys and typecasted values.
# For each row in the result set, yield a hash with column name symbol
# keys and typecasted values.  +cols+ is the array of
# [conversion proc, name symbol] pairs from #fetch_rows_set_cols.
def yield_hash_rows(res, cols)
  res.ntuples.times do |recnum|
    converted_rec = {}
    cols.each_with_index do |(type_proc, fieldsym), fieldnum|
      value = res.getvalue(recnum, fieldnum)
      # Only run the conversion proc for non-NULL values that have one.
      converted_rec[fieldsym] = (value && type_proc) ? type_proc.call(value) : value
    end
    yield converted_rec
  end
end