Class Sequel::Dataset
In: lib/sequel/adapters/do.rb
lib/sequel/adapters/jdbc.rb
lib/sequel/adapters/mysql.rb
lib/sequel/adapters/postgres.rb
lib/sequel/adapters/sqlite.rb
lib/sequel/adapters/utils/stored_procedures.rb
lib/sequel/adapters/swift.rb
lib/sequel/dataset.rb
lib/sequel/dataset/actions.rb
lib/sequel/dataset/features.rb
lib/sequel/dataset/graph.rb
lib/sequel/dataset/misc.rb
lib/sequel/dataset/prepared_statements.rb
lib/sequel/dataset/query.rb
lib/sequel/dataset/sql.rb
lib/sequel/dataset/mutation.rb
lib/sequel/extensions/pagination.rb
lib/sequel/extensions/pretty_table.rb
lib/sequel/extensions/query.rb
lib/sequel/extensions/to_dot.rb
lib/sequel/extensions/null_dataset.rb
lib/sequel/extensions/columns_introspection.rb
lib/sequel/extensions/select_remove.rb
Parent: Sequel::Dataset

A dataset represents an SQL query, or more generally, an abstract set of rows in the database. Datasets can be used to create, retrieve, update and delete records.

Query results are always retrieved on demand, so a dataset can be kept around and reused indefinitely (datasets never cache results):

  my_posts = DB[:posts].filter(:author => 'david') # no records are retrieved
  my_posts.all # records are retrieved
  my_posts.all # records are retrieved again

Most dataset methods return modified copies of the dataset (functional style), so you can reuse different datasets to access data:

  posts = DB[:posts]
  davids_posts = posts.filter(:author => 'david')
  old_posts = posts.filter('stamp < ?', Date.today - 7)
  davids_old_posts = davids_posts.filter('stamp < ?', Date.today - 7)

Datasets are Enumerable objects, so they can be manipulated using any of the Enumerable methods, such as map, inject, etc.
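
For example, here is a small sketch (assuming a posts table with an id column); the Enumerable work happens in ruby after the rows are fetched:

  DB[:posts].map{|row| row[:id]}                   # => [1, 2, 3, ...]
  DB[:posts].inject(0){|sum, row| sum + row[:id]}  # => sum of the ids, computed in ruby, not in SQL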

For more information, see the "Dataset Basics" guide.

Methods

<<   ==   []   []=   _import   _insert_sql   _select_map_multiple   _select_map_single   _update_sql   add_graph_aliases   aliased_expression_sql_append   all   and   array_sql_append   avg   bind   boolean_constant_sql_append   call   call   call   case_expression_sql_append   cast_sql_append   clause_methods   clone   column_all_sql_append   columns   columns!   columns_without_introspection   complex_expression_sql_append   compound_clone   compound_from_self   constant_sql_append   count   def_append_methods   def_mutation_method   def_mutation_method   delete   delete_sql   distinct   each   each_page   each_server   empty?   eql?   except   exclude   exclude_having   exclude_where   exists   fetch_rows   fetch_rows   fetch_rows   fetch_rows   fetch_rows   fetch_rows   fetch_rows   filter   first   first_source   first_source_alias   first_source_table   for_update   from   from_self   function_sql_append   get   graph   graph   grep   group   group_and_count   group_by   group_cube   group_rollup   hash   having   identifier_input_method   identifier_output_method   import   insert   insert_multiple   insert_sql   inspect   intersect   interval   introspect_all_columns   invert   join   join_clause_sql_append   join_on_clause_sql_append   join_table   join_using_clause_sql_append   last   limit   literal_append   lock_style   map   max   min   multi_insert   multi_insert_sql   naked   naked!   negative_boolean_constant_sql_append   new   nullify   nullify!   options_overlap   or   order   order_append   order_by   order_more   order_prepend   ordered_expression_sql_append   paginate   placeholder_literal_string_sql_append   prepare   prepare   prepare   prepare   prepared_arg_placeholder   print   provides_accurate_rows_matched?   qualified_identifier_sql_append   qualify   qualify_to   qualify_to_first_source   query   quote_identifier_append   quote_identifiers?   quote_schema_table_append   quoted_identifier_append   range   recursive_cte_requires_column_aliases?   requires_placeholder_type_specifiers?   requires_sql_standard_datetimes?   returning   reverse   reverse_order   row_number_column   schema_and_table   select   select_all   select_append   select_group   select_hash   select_hash_groups   select_map   select_more   select_order_map   select_remove   select_sql   server   set   set_defaults   set_graph_aliases   set_overrides   simple_select_all?   single_record   single_value   split_alias   split_multiple_result_sets   sql   subscript_sql_append   sum   supports_cte?   supports_cte_in_subqueries?   supports_distinct_on?   supports_group_cube?   supports_group_rollup?   supports_insert_select?   supports_intersect_except?   supports_intersect_except_all?   supports_is_true?   supports_join_using?   supports_modifying_joins?   supports_multiple_column_in?   supports_ordered_distinct_on?   supports_returning?   supports_select_all_and_column?   supports_timestamp_timezones?   supports_timestamp_usecs?   supports_where_true?   supports_window_functions?   to_csv   to_dot   to_hash   to_hash_groups   to_prepared_statement   truncate   truncate_sql   unbind   unfiltered   ungraphed   ungrouped   union   unlimited   unordered   unused_table_alias   update   update_sql   use_cursor   where   window_function_sql_append   window_sql_append   with   with_recursive   with_sql   with_sql_delete  

Included Modules

Constants

DatasetClass = self
DatasetClass = self
JAVA_SQL_TIMESTAMP = Java::JavaSQL::Timestamp   Cache Java class constants to speed up lookups
JAVA_SQL_TIME = Java::JavaSQL::Time
JAVA_SQL_DATE = Java::JavaSQL::Date
JAVA_SQL_BLOB = Java::JavaSQL::Blob
JAVA_SQL_CLOB = Java::JavaSQL::Clob
JAVA_BUFFERED_READER = Java::JavaIo::BufferedReader
JAVA_BIG_DECIMAL = Java::JavaMath::BigDecimal
JAVA_BYTE_ARRAY = Java::byte[]
TYPE_TRANSLATOR_INSTANCE = tt = TYPE_TRANSLATOR.new
DECIMAL_METHOD = tt.method(:decimal)   Cache type translator methods so that duplicate Method objects are not created.
TIME_METHOD = tt.method(:time)
DATE_METHOD = tt.method(:date)
BUFFERED_READER_METHOD = tt.method(:buffered_reader)
BYTE_ARRAY_METHOD = tt.method(:byte_array)
BLOB_METHOD = tt.method(:blob)
CLOB_METHOD = tt.method(:clob)
DatasetClass = self
DatasetClass = self
APOS = Sequel::Dataset::APOS
PREPARED_ARG_PLACEHOLDER = LiteralString.new('$').freeze
DatasetClass = self
PREPARED_ARG_PLACEHOLDER = ':'.freeze
DatasetClass = self

Attributes

convert_types  [RW]  Whether to convert some Java types to ruby types when retrieving rows. Uses the database's setting by default; can be set to false to roughly double performance when fetching rows.
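
For example (a minimal sketch for the JDBC adapter; the items table is hypothetical):

  ds = DB[:items]
  ds.convert_types = false  # skip Java-to-ruby type conversion for this dataset only
  ds.all                    # values may come back as Java objects rather than ruby types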

Public Instance methods

Execute the given type of statement with the hash of values.

[Source]

     # File lib/sequel/adapters/postgres.rb, line 673
673:         def call(type, bind_vars={}, *values, &block)
674:           ps = to_prepared_statement(type, values)
675:           ps.extend(BindArgumentMethods)
676:           ps.call(bind_vars, &block)
677:         end

Execute the given type of statement with the hash of values.

[Source]

     # File lib/sequel/adapters/sqlite.rb, line 342
342:       def call(type, bind_vars={}, *values, &block)
343:         ps = to_prepared_statement(type, values)
344:         ps.extend(BindArgumentMethods)
345:         ps.call(bind_vars, &block)
346:       end
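
For example, using dataset bound variables (a sketch assuming an items table with id and name columns):

  ds = DB[:items].filter(:id=>:$i)
  ds.call(:select, :i=>1)                # select rows where id = 1
  ds.call(:update, {:i=>1}, :name=>'a')  # set name = 'a' where id = 1
  ds.call(:delete, :i=>1)                # delete rows where id = 1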

Execute the SQL on the database and yield the rows as hashes with symbol keys.

[Source]

     # File lib/sequel/adapters/do.rb, line 157
157:       def fetch_rows(sql)
158:         execute(sql) do |reader|
159:           cols = @columns = reader.fields.map{|f| output_identifier(f)}
160:           while(reader.next!) do
161:             h = {}
162:             cols.zip(reader.values).each{|k, v| h[k] = v}
163:             yield h
164:           end
165:         end
166:         self
167:       end

Set the columns and yield the hashes to the block.

[Source]

     # File lib/sequel/adapters/swift.rb, line 139
139:       def fetch_rows(sql)
140:         execute(sql) do |res|
141:           @columns = res.fields
142:           res.each{|h| yield h}
143:         end
144:         self
145:       end

Yield all rows matching this dataset. If the dataset is set to split multiple statements, yield arrays of hashes one per statement instead of yielding results for all statements as hashes.

[Source]

     # File lib/sequel/adapters/mysql.rb, line 295
295:       def fetch_rows(sql)
296:         execute(sql) do |r|
297:           i = -1
298:           cps = db.conversion_procs
299:           cols = r.fetch_fields.map do |f| 
300:             # Pretend tinyint is another integer type if its length is not 1, to
301:             # avoid casting to boolean if Sequel::MySQL.convert_tinyint_to_bool
302:             # is set.
303:             type_proc = f.type == 1 && f.length != 1 ? cps[2] : cps[f.type]
304:             [output_identifier(f.name), type_proc, i+=1]
305:           end
306:           @columns = cols.map{|c| c.first}
307:           if opts[:split_multiple_result_sets]
308:             s = []
309:             yield_rows(r, cols){|h| s << h}
310:             yield s
311:           else
312:             yield_rows(r, cols){|h| yield h}
313:           end
314:         end
315:         self
316:       end

Yield all rows returned by executing the given SQL and converting the types.

[Source]

     # File lib/sequel/adapters/postgres.rb, line 564
564:       def fetch_rows(sql)
565:         return cursor_fetch_rows(sql){|h| yield h} if @opts[:cursor]
566:         execute(sql){|res| yield_hash_rows(res, fetch_rows_set_cols(res)){|h| yield h}}
567:       end

Correctly return rows from the database and return them as hashes.

[Source]

     # File lib/sequel/adapters/jdbc.rb, line 611
611:       def fetch_rows(sql, &block)
612:         execute(sql){|result| process_result_set(result, &block)}
613:         self
614:       end

Yield a hash for each row in the dataset.

[Source]

     # File lib/sequel/adapters/sqlite.rb, line 349
349:       def fetch_rows(sql)
350:         execute(sql) do |result|
351:           i = -1
352:           cps = db.conversion_procs
353:           type_procs = result.types.map{|t| cps[base_type_name(t)]}
354:           cols = result.columns.map{|c| i+=1; [output_identifier(c), i, type_procs[i]]}
355:           @columns = cols.map{|c| c.first}
356:           result.each do |values|
357:             row = {}
358:             cols.each do |name,i,type_proc|
359:               v = values[i]
360:               if type_proc && v
361:                 v = type_proc.call(v)
362:               end
363:               row[name] = v
364:             end
365:             yield row
366:           end
367:         end
368:       end

Don't allow graphing a dataset that splits multiple statements.

[Source]

     # File lib/sequel/adapters/mysql.rb, line 319
319:       def graph(*)
320:         raise(Error, "Can't graph a dataset that splits multiple result sets") if opts[:split_multiple_result_sets]
321:         super
322:       end

Prepare the given type of query with the given name and store it in the database. Note that a new native prepared statement is created on each call to this prepared statement.

[Source]

     # File lib/sequel/adapters/sqlite.rb, line 373
373:       def prepare(type, name=nil, *values)
374:         ps = to_prepared_statement(type, values)
375:         ps.extend(PreparedStatementMethods)
376:         if name
377:           ps.prepared_statement_name = name
378:           db.set_prepared_statement(name, ps)
379:         end
380:         ps
381:       end

Prepare the given type of statement with the given name, and store it in the database to be called later.

[Source]

     # File lib/sequel/adapters/postgres.rb, line 681
681:         def prepare(type, name=nil, *values)
682:           ps = to_prepared_statement(type, values)
683:           ps.extend(PreparedStatementMethods)
684:           if name
685:             ps.prepared_statement_name = name
686:             db.set_prepared_statement(name, ps)
687:           end
688:           ps
689:         end

Create a named prepared statement that is stored in the database (and connection) for reuse.

[Source]

     # File lib/sequel/adapters/jdbc.rb, line 618
618:       def prepare(type, name=nil, *values)
619:         ps = to_prepared_statement(type, values)
620:         ps.extend(PreparedStatementMethods)
621:         if name
622:           ps.prepared_statement_name = name
623:           db.set_prepared_statement(name, ps)
624:         end
625:         ps
626:       end

PostgreSQL uses $N for placeholders instead of ?, so use a $ as the placeholder.

[Source]

     # File lib/sequel/adapters/postgres.rb, line 695
695:         def prepared_arg_placeholder
696:           PREPARED_ARG_PLACEHOLDER
697:         end

Makes each yield arrays of rows, with each array containing the rows for a given result set, so you can submit SQL with multiple statements and easily determine which statement returned which results. Does not work with graphing.

Modifies the row_proc of the returned dataset so that it still works as expected (running on the hashes instead of on the arrays of hashes). If you modify the row_proc afterward, note that it will receive an array of hashes instead of a hash.

[Source]

     # File lib/sequel/adapters/mysql.rb, line 333
333:       def split_multiple_result_sets
334:         raise(Error, "Can't split multiple statements on a graphed dataset") if opts[:graph]
335:         ds = clone(:split_multiple_result_sets=>true)
336:         ds.row_proc = proc{|x| x.map{|h| row_proc.call(h)}} if row_proc
337:         ds
338:       end

Uses a cursor for fetching records, instead of fetching the entire result set at once. Can be used to process large datasets without holding all rows in memory (which is what the underlying drivers do by default). Options:

  • :rows_per_fetch - the number of rows per fetch (default 1000). Higher numbers result in fewer queries but greater memory use.

Usage:

  DB[:huge_table].use_cursor.each{|row| p row}
  DB[:huge_table].use_cursor(:rows_per_fetch=>10000).each{|row| p row}

This is untested with the prepared statement/bound variable support, and unlikely to work with either.

[Source]

     # File lib/sequel/adapters/postgres.rb, line 584
584:       def use_cursor(opts={})
585:         clone(:cursor=>{:rows_per_fetch=>1000}.merge(opts))
586:       end

2 - Methods that execute code on the database

These methods all execute the dataset's SQL on the database. They don't return modified datasets, so if used in a method chain they should be the last method called.
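
For example (a sketch assuming a table with active and id columns):

  DB[:table].filter(:active=>true).order(:id).all   # query methods return datasets ...
  DB[:table].filter(:active=>false).delete           # ... the action method comes last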

Constants

ACTION_METHODS = (<<-METHS).split.map{|x| x.to_sym}   Action methods defined by Sequel that execute code on the database.
  << [] []= all avg count columns columns! delete each empty? fetch_rows
  first get import insert insert_multiple interval last map max min
  multi_insert range select_hash select_hash_groups select_map select_order_map
  set single_record single_value sum to_csv to_hash to_hash_groups truncate update
  METHS

Public Instance methods

Inserts the given argument into the database. Returns self so it can be used safely when chaining:

  DB[:items] << {:id=>0, :name=>'Zero'} << DB[:old_items].select(:id, :name)

[Source]

    # File lib/sequel/dataset/actions.rb, line 23
23:     def <<(arg)
24:       insert(arg)
25:       self
26:     end

Returns the first record matching the conditions. Examples:

  DB[:table][:id=>1] # SELECT * FROM table WHERE (id = 1) LIMIT 1
  # => {:id=>1}

[Source]

    # File lib/sequel/dataset/actions.rb, line 32
32:     def [](*conditions)
33:       raise(Error, ARRAY_ACCESS_ERROR_MSG) if (conditions.length == 1 and conditions.first.is_a?(Integer)) or conditions.length == 0
34:       first(*conditions)
35:     end

Update all records matching the conditions with the values specified. Returns the number of rows affected.

  DB[:table][:id=>1] = {:id=>2} # UPDATE table SET id = 2 WHERE id = 1
  # => 1 # number of rows affected

[Source]

    # File lib/sequel/dataset/actions.rb, line 42
42:     def []=(conditions, values)
43:       filter(conditions).update(values)
44:     end

Returns an array with all records in the dataset. If a block is given, the array is iterated over after all items have been loaded.

  DB[:table].all # SELECT * FROM table
  # => [{:id=>1, ...}, {:id=>2, ...}, ...]

  # Iterate over all rows in the table
  DB[:table].all{|row| p row}

[Source]

    # File lib/sequel/dataset/actions.rb, line 54
54:     def all(&block)
55:       a = []
56:       each{|r| a << r}
57:       post_load(a)
58:       a.each(&block) if block
59:       a
60:     end

Returns the average value for the given column.

  DB[:table].avg(:number) # SELECT avg(number) FROM table LIMIT 1
  # => 3

[Source]

    # File lib/sequel/dataset/actions.rb, line 66
66:     def avg(column)
67:       aggregate_dataset.get{avg(column)}
68:     end

Returns the columns in the result set in order as an array of symbols. If the columns are currently cached, returns the cached value. Otherwise, a SELECT query is performed to retrieve a single row in order to get the columns.

If you are looking for all columns for a single table and maybe some information about each column (e.g. database type), see Database#schema.

  DB[:table].columns
  # => [:id, :name]

[Source]

    # File lib/sequel/dataset/actions.rb, line 79
79:     def columns
80:       return @columns if @columns
81:       ds = unfiltered.unordered.clone(:distinct => nil, :limit => 1, :offset=>nil)
82:       ds.each{break}
83:       @columns = ds.instance_variable_get(:@columns)
84:       @columns || []
85:     end

Ignore any cached column information and perform a query to retrieve a row in order to get the columns.

  DB[:table].columns!
  # => [:id, :name]

[Source]

    # File lib/sequel/dataset/actions.rb, line 92
92:     def columns!
93:       @columns = nil
94:       columns
95:     end

Returns the number of records in the dataset.

  DB[:table].count # SELECT COUNT(*) AS count FROM table LIMIT 1
  # => 3

[Source]

     # File lib/sequel/dataset/actions.rb, line 101
101:     def count
102:       aggregate_dataset.get{COUNT(:*){}.as(count)}.to_i
103:     end

Deletes the records in the dataset. The returned value should be the number of records deleted, but that is adapter dependent.

  DB[:table].delete # DELETE FROM table
  # => 3

[Source]

     # File lib/sequel/dataset/actions.rb, line 110
110:     def delete(&block)
111:       sql = delete_sql
112:       if uses_returning?(:delete)
113:         returning_fetch_rows(sql, &block)
114:       else
115:         execute_dui(sql)
116:       end
117:     end

Iterates over the records in the dataset as they are yielded from the database adapter, and returns self.

  DB[:table].each{|row| p row} # SELECT * FROM table

Note that this method is not safe to use on many adapters if you are running additional queries inside the provided block. If you are running queries inside the block, you should use all instead of each for the outer queries, or use a separate thread or shard inside each.
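
For example, a rough sketch with hypothetical artists and albums tables:

  # Risky on many adapters: another query runs while the outer result set is still being fetched
  DB[:artists].each{|artist| p DB[:albums].filter(:artist_id=>artist[:id]).all}

  # Safer: all loads the outer rows before the block runs
  DB[:artists].all{|artist| p DB[:albums].filter(:artist_id=>artist[:id]).all}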

[Source]

     # File lib/sequel/dataset/actions.rb, line 128
128:     def each
129:       if @opts[:graph]
130:         graph_each{|r| yield r}
131:       elsif row_proc = @row_proc
132:         fetch_rows(select_sql){|r| yield row_proc.call(r)}
133:       else
134:         fetch_rows(select_sql){|r| yield r}
135:       end
136:       self
137:     end

Returns true if no records exist in the dataset, false otherwise

  DB[:table].empty? # SELECT 1 AS one FROM table LIMIT 1
  # => false

[Source]

     # File lib/sequel/dataset/actions.rb, line 143
143:     def empty?
144:       get(Sequel::SQL::AliasedExpression.new(1, :one)).nil?
145:     end

Executes a select query and fetches records, yielding each record to the supplied block. The yielded records should be hashes with symbol keys. This method should probably not be called by user code; use each instead.

[Source]

     # File lib/sequel/dataset/actions.rb, line 151
151:     def fetch_rows(sql)
152:       raise NotImplemented, NOTIMPL_MSG
153:     end

If an integer argument is given, it is interpreted as a limit, and an array of all matching records up to that limit is returned. If no argument is passed, the first matching record is returned. If any other type of argument(s) is passed, it is given to filter and the first matching record is returned. If a block is given, it is used to filter the dataset before returning anything. Examples:

  DB[:table].first # SELECT * FROM table LIMIT 1
  # => {:id=>7}

  DB[:table].first(2) # SELECT * FROM table LIMIT 2
  # => [{:id=>6}, {:id=>4}]

  DB[:table].first(:id=>2) # SELECT * FROM table WHERE (id = 2) LIMIT 1
  # => {:id=>2}

  DB[:table].first("id = 3") # SELECT * FROM table WHERE (id = 3) LIMIT 1
  # => {:id=>3}

  DB[:table].first("id = ?", 4) # SELECT * FROM table WHERE (id = 4) LIMIT 1
  # => {:id=>4}

  DB[:table].first{id > 2} # SELECT * FROM table WHERE (id > 2) LIMIT 1
  # => {:id=>5}

  DB[:table].first("id > ?", 4){id < 6} # SELECT * FROM table WHERE ((id > 4) AND (id < 6)) LIMIT 1
  # => {:id=>5}

  DB[:table].first(2){id < 2} # SELECT * FROM table WHERE (id < 2) LIMIT 2
  # => [{:id=>1}]

[Source]

     # File lib/sequel/dataset/actions.rb, line 185
185:     def first(*args, &block)
186:       ds = block ? filter(&block) : self
187: 
188:       if args.empty?
189:         ds.single_record
190:       else
191:         args = (args.size == 1) ? args.first : args
192:         if Integer === args
193:           ds.limit(args).all
194:         else
195:           ds.filter(args).single_record
196:         end
197:       end
198:     end

Returns the column value for the first matching record in the dataset. Raises an error if both an argument and a block are given.

  DB[:table].get(:id) # SELECT id FROM table LIMIT 1
  # => 3

  ds.get{sum(id)} # SELECT sum(id) FROM table LIMIT 1
  # => 6

[Source]

     # File lib/sequel/dataset/actions.rb, line 208
208:     def get(column=(no_arg=true; nil), &block)
209:       if block
210:         raise(Error, ARG_BLOCK_ERROR_MSG) unless no_arg
211:         select(&block).single_value
212:       else
213:         select(column).single_value
214:       end
215:     end

Inserts multiple records into the associated table. This method can be used to efficiently insert a large number of records into a table in a single query if the database supports it. Inserts are automatically wrapped in a transaction.

This method is called with a columns array and an array of value arrays:

  DB[:table].import([:x, :y], [[1, 2], [3, 4]])
  # INSERT INTO table (x, y) VALUES (1, 2)
  # INSERT INTO table (x, y) VALUES (3, 4)

This method also accepts a dataset instead of an array of value arrays:

  DB[:table].import([:x, :y], DB[:table2].select(:a, :b))
  # INSERT INTO table (x, y) SELECT a, b FROM table2

Options:

:commit_every : Open a new transaction for every given number of records. For example, if you provide a value of 50, it will commit after every 50 records (see the sketch after this list).
:server : Set the server/shard to use for the transaction and insert queries.
:slice : Same as :commit_every; :commit_every takes precedence.
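
A minimal sketch of the :commit_every option (the rows array and its size are hypothetical):

  rows = (1..200).map{|i| [i, i.to_s]}
  DB[:table].import([:x, :y], rows, :commit_every=>50)
  # Runs four transactions, each inserting 50 rows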

[Source]

     # File lib/sequel/dataset/actions.rb, line 240
240:     def import(columns, values, opts={})
241:       return @db.transaction{insert(columns, values)} if values.is_a?(Dataset)
242: 
243:       return if values.empty?
244:       raise(Error, IMPORT_ERROR_MSG) if columns.empty?
245:       ds = opts[:server] ? server(opts[:server]) : self
246:       
247:       if slice_size = opts[:commit_every] || opts[:slice]
248:         offset = 0
249:         rows = []
250:         while offset < values.length
251:           rows << ds._import(columns, values[offset, slice_size], opts)
252:           offset += slice_size
253:         end
254:         rows.flatten
255:       else
256:         ds._import(columns, values, opts)
257:       end
258:     end

Inserts values into the associated table. The returned value is generally the value of the primary key for the inserted row, but that is adapter dependent.

insert handles a number of different argument formats:

no arguments or single empty hash : Uses DEFAULT VALUES
single hash : Most common format, treats keys as columns and values as values
single array : Treats entries as values, with no columns
two arrays : Treats first array as columns, second array as values
single Dataset : Treats as an insert based on a selection from the dataset given, with no columns
array and dataset : Treats as an insert based on a selection from the dataset given, with the columns given by the array.

Examples:

  DB[:items].insert
  # INSERT INTO items DEFAULT VALUES

  DB[:items].insert({})
  # INSERT INTO items DEFAULT VALUES

  DB[:items].insert([1,2,3])
  # INSERT INTO items VALUES (1, 2, 3)

  DB[:items].insert([:a, :b], [1,2])
  # INSERT INTO items (a, b) VALUES (1, 2)

  DB[:items].insert(:a => 1, :b => 2)
  # INSERT INTO items (a, b) VALUES (1, 2)

  DB[:items].insert(DB[:old_items])
  # INSERT INTO items SELECT * FROM old_items

  DB[:items].insert([:a, :b], DB[:old_items])
  # INSERT INTO items (a, b) SELECT * FROM old_items

[Source]

     # File lib/sequel/dataset/actions.rb, line 295
295:     def insert(*values, &block)
296:       sql = insert_sql(*values)
297:       if uses_returning?(:insert)
298:         returning_fetch_rows(sql, &block)
299:       else
300:         execute_insert(sql)
301:       end
302:     end

Inserts multiple values. If a block is given it is invoked for each item in the given array before inserting it. See multi_insert as a possibly faster version that may be able to insert multiple records in one SQL statement (if supported by the database). Returns an array of primary keys of inserted rows.

  DB[:table].insert_multiple([{:x=>1}, {:x=>2}])
  # => [4, 5]
  # INSERT INTO table (x) VALUES (1)
  # INSERT INTO table (x) VALUES (2)

  DB[:table].insert_multiple([{:x=>1}, {:x=>2}]){|row| row[:y] = row[:x] * 2}
  # => [6, 7]
  # INSERT INTO table (x, y) VALUES (1, 2)
  # INSERT INTO table (x, y) VALUES (2, 4)

[Source]

     # File lib/sequel/dataset/actions.rb, line 319
319:     def insert_multiple(array, &block)
320:       if block
321:         array.map{|i| insert(block.call(i))}
322:       else
323:         array.map{|i| insert(i)}
324:       end
325:     end

Returns the interval between minimum and maximum values for the given column.

  DB[:table].interval(:id) # SELECT (max(id) - min(id)) FROM table LIMIT 1
  # => 6

[Source]

     # File lib/sequel/dataset/actions.rb, line 332
332:     def interval(column)
333:       aggregate_dataset.get{max(column) - min(column)}
334:     end

Reverses the order and then runs first with the given arguments and block. Note that this will not necessarily give you the last record in the dataset, unless you have an unambiguous order. If there is not currently an order for this dataset, raises an Error.

  DB[:table].order(:id).last # SELECT * FROM table ORDER BY id DESC LIMIT 1
  # => {:id=>10}

  DB[:table].order(:id.desc).last(2) # SELECT * FROM table ORDER BY id ASC LIMIT 2
  # => [{:id=>1}, {:id=>2}]

[Source]

     # File lib/sequel/dataset/actions.rb, line 346
346:     def last(*args, &block)
347:       raise(Error, 'No order specified') unless @opts[:order]
348:       reverse.first(*args, &block)
349:     end

Maps column values for each record in the dataset (if a column name is given), or performs the stock mapping functionality of Enumerable otherwise. Raises an Error if both an argument and block are given.

  DB[:table].map(:id) # SELECT * FROM table
  # => [1, 2, 3, ...]

  DB[:table].map{|r| r[:id] * 2} # SELECT * FROM table
  # => [2, 4, 6, ...]

You can also provide an array of column names:

  DB[:table].map([:id, :name]) # SELECT * FROM table
  # => [[1, 'A'], [2, 'B'], [3, 'C'], ...]

[Source]

     # File lib/sequel/dataset/actions.rb, line 365
365:     def map(column=nil, &block)
366:       if column
367:         raise(Error, ARG_BLOCK_ERROR_MSG) if block
368:         return naked.map(column) if row_proc
369:         if column.is_a?(Array)
370:           super(){|r| r.values_at(*column)}
371:         else
372:           super(){|r| r[column]}
373:         end
374:       else
375:         super(&block)
376:       end
377:     end

Returns the maximum value for the given column.

  DB[:table].max(:id) # SELECT max(id) FROM table LIMIT 1
  # => 10

[Source]

     # File lib/sequel/dataset/actions.rb, line 383
383:     def max(column)
384:       aggregate_dataset.get{max(column)}
385:     end

Returns the minimum value for the given column.

  DB[:table].min(:id) # SELECT min(id) FROM table LIMIT 1
  # => 1

[Source]

     # File lib/sequel/dataset/actions.rb, line 391
391:     def min(column)
392:       aggregate_dataset.get{min(column)}
393:     end

This is a front end for import that allows you to submit an array of hashes instead of arrays of columns and values:

  DB[:table].multi_insert([{:x => 1}, {:x => 2}])
  # INSERT INTO table (x) VALUES (1)
  # INSERT INTO table (x) VALUES (2)

Be aware that all hashes should have the same keys if you use this calling method, otherwise some columns could be missed or set to null instead of to default values.

This respects the same options as import.

[Source]

     # File lib/sequel/dataset/actions.rb, line 407
407:     def multi_insert(hashes, opts={})
408:       return if hashes.empty?
409:       columns = hashes.first.keys
410:       import(columns, hashes.map{|h| columns.map{|c| h[c]}}, opts)
411:     end

Returns a Range instance made from the minimum and maximum values for the given column.

  DB[:table].range(:id) # SELECT max(id) AS v1, min(id) AS v2 FROM table LIMIT 1
  # => 1..10

[Source]

     # File lib/sequel/dataset/actions.rb, line 418
418:     def range(column)
419:       if r = aggregate_dataset.select{[min(column).as(v1), max(column).as(v2)]}.first
420:         (r[:v1]..r[:v2])
421:       end
422:     end

Returns a hash with key_column values as keys and value_column values as values. Similar to to_hash, but only selects the columns given.

  DB[:table].select_hash(:id, :name) # SELECT id, name FROM table
  # => {1=>'a', 2=>'b', ...}

You can also provide an array of column names for either the key_column, the value column, or both:

  DB[:table].select_hash([:id, :foo], [:name, :bar]) # SELECT id, foo, name, bar FROM table
  # {[1, 3]=>['a', 'c'], [2, 4]=>['b', 'd'], ...}

When using this method, you must be sure that each expression has an alias that Sequel can determine. Usually you can do this by calling the as method on the expression and providing an alias.

[Source]

     # File lib/sequel/dataset/actions.rb, line 439
439:     def select_hash(key_column, value_column)
440:       _select_hash(:to_hash, key_column, value_column)
441:     end

Returns a hash with key_column values as keys and an array of value_column values. Similar to to_hash_groups, but only selects the columns given.

  DB[:table].select_hash_groups(:name, :id) # SELECT name, id FROM table
  # => {'a'=>[1, 4, ...], 'b'=>[2, ...], ...}

You can also provide an array of column names for either the key_column, the value column, or both:

  DB[:table].select_hash_groups([:first, :middle], [:last, :id]) # SELECT first, middle, last, id FROM table
  # {['a', 'b']=>[['c', 1], ['d', 2], ...], ...}

When using this method, you must be sure that each expression has an alias that Sequel can determine. Usually you can do this by calling the as method on the expression and providing an alias.

[Source]

     # File lib/sequel/dataset/actions.rb, line 458
458:     def select_hash_groups(key_column, value_column)
459:       _select_hash(:to_hash_groups, key_column, value_column)
460:     end

Selects the column given (either as an argument or as a block), and returns an array of all values of that column in the dataset. If you give a block argument that returns an array with multiple entries, the contents of the resulting array are undefined. Raises an Error if called with both an argument and a block.

  DB[:table].select_map(:id) # SELECT id FROM table
  # => [3, 5, 8, 1, ...]

  DB[:table].select_map{id * 2} # SELECT (id * 2) FROM table
  # => [6, 10, 16, 2, ...]

You can also provide an array of column names:

  DB[:table].select_map([:id, :name]) # SELECT id, name FROM table
  # => [[1, 'A'], [2, 'B'], [3, 'C'], ...]

If you provide an array of expressions, you must be sure that each entry in the array has an alias that Sequel can determine. Usually you can do this by calling the as method on the expression and providing an alias.

[Source]

     # File lib/sequel/dataset/actions.rb, line 482
482:     def select_map(column=nil, &block)
483:       _select_map(column, false, &block)
484:     end

The same as select_map, but in addition orders the array by the column.

  DB[:table].select_order_map(:id) # SELECT id FROM table ORDER BY id
  # => [1, 2, 3, 4, ...]

  DB[:table].select_order_map{id * 2} # SELECT (id * 2) FROM table ORDER BY (id * 2)
  # => [2, 4, 6, 8, ...]

You can also provide an array of column names:

  DB[:table].select_order_map([:id, :name]) # SELECT id, name FROM table ORDER BY id, name
  # => [[1, 'A'], [2, 'B'], [3, 'C'], ...]

If you provide an array of expressions, you must be sure that each entry in the array has an alias that Sequel can determine. Usually you can do this by calling the as method on the expression and providing an alias.

[Source]

     # File lib/sequel/dataset/actions.rb, line 502
502:     def select_order_map(column=nil, &block)
503:       _select_map(column, true, &block)
504:     end

Alias for update, but not aliased directly so subclasses don't have to override both methods.

[Source]

     # File lib/sequel/dataset/actions.rb, line 508
508:     def set(*args)
509:       update(*args)
510:     end
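
For example (a minimal sketch; set simply forwards to update):

  DB[:table].filter(:id=>1).set(:x=>2) # UPDATE table SET x = 2 WHERE (id = 1)
  # => number of rows updated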

Returns the first record in the dataset, or nil if the dataset has no records. Users should probably use first instead of this method.

[Source]

     # File lib/sequel/dataset/actions.rb, line 515
515:     def single_record
516:       clone(:limit=>1).each{|r| return r}
517:       nil
518:     end

Returns the first value of the first record in the dataset. Returns nil if dataset is empty. Users should generally use get instead of this method.

[Source]

     # File lib/sequel/dataset/actions.rb, line 523
523:     def single_value
524:       if r = naked.ungraphed.single_record
525:         r.values.first
526:       end
527:     end
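
For example (a minimal sketch; first and get are the usual public entry points for these):

  DB[:table].single_record            # SELECT * FROM table LIMIT 1, => first row hash or nil
  DB[:table].select(:id).single_value # => the id of the first row, or nil if the dataset is empty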

Returns the sum for the given column.

  DB[:table].sum(:id) # SELECT sum(id) FROM table LIMIT 1
  # => 55

[Source]

     # File lib/sequel/dataset/actions.rb, line 533
533:     def sum(column)
534:       aggregate_dataset.get{sum(column)}
535:     end

Returns a string in CSV format containing the dataset records. By default the CSV representation includes the column titles in the first line. You can turn that off by passing false as the include_column_titles argument.

This does not use a CSV library or handle quoting of values in any way. If any values in any of the rows could include commas or line endings, you shouldn't use this.

  puts DB[:table].to_csv # SELECT * FROM table
  # id,name
  # 1,Jim
  # 2,Bob

[Source]

     # File lib/sequel/dataset/actions.rb, line 550
550:     def to_csv(include_column_titles = true)
551:       n = naked
552:       cols = n.columns
553:       csv = ''
554:       csv << "#{cols.join(COMMA_SEPARATOR)}\r\n" if include_column_titles
555:       n.each{|r| csv << "#{cols.collect{|c| r[c]}.join(COMMA_SEPARATOR)}\r\n"}
556:       csv
557:     end

Returns a hash with one column used as key and another used as value. If rows have duplicate values for the key column, the latter row(s) will overwrite the value of the previous row(s). If the value_column is not given or nil, uses the entire hash as the value.

  DB[:table].to_hash(:id, :name) # SELECT * FROM table
  # {1=>'Jim', 2=>'Bob', ...}

  DB[:table].to_hash(:id) # SELECT * FROM table
  # {1=>{:id=>1, :name=>'Jim'}, 2=>{:id=>2, :name=>'Bob'}, ...}

You can also provide an array of column names for either the key_column, the value column, or both:

  DB[:table].to_hash([:id, :foo], [:name, :bar]) # SELECT * FROM table
  # {[1, 3]=>['Jim', 'bo'], [2, 4]=>['Bob', 'be'], ...}

  DB[:table].to_hash([:id, :name]) # SELECT * FROM table
  # {[1, 'Jim']=>{:id=>1, :name=>'Jim'}, [2, 'Bob']=>{:id=>2, :name=>'Bob'}, ...}

[Source]

     # File lib/sequel/dataset/actions.rb, line 578
578:     def to_hash(key_column, value_column = nil)
579:       h = {}
580:       if value_column
581:         return naked.to_hash(key_column, value_column) if row_proc
582:         if value_column.is_a?(Array)
583:           if key_column.is_a?(Array)
584:             each{|r| h[r.values_at(*key_column)] = r.values_at(*value_column)}
585:           else
586:             each{|r| h[r[key_column]] = r.values_at(*value_column)}
587:           end
588:         else
589:           if key_column.is_a?(Array)
590:             each{|r| h[r.values_at(*key_column)] = r[value_column]}
591:           else
592:             each{|r| h[r[key_column]] = r[value_column]}
593:           end
594:         end
595:       elsif key_column.is_a?(Array)
596:         each{|r| h[r.values_at(*key_column)] = r}
597:       else
598:         each{|r| h[r[key_column]] = r}
599:       end
600:       h
601:     end

Returns a hash with one column used as key and the values being an array of column values. If the value_column is not given or nil, uses the entire hash as the value.

  DB[:table].to_hash_groups(:name, :id) # SELECT * FROM table
  # {'Jim'=>[1, 4, 16, ...], 'Bob'=>[2], ...}

  DB[:table].to_hash_groups(:name) # SELECT * FROM table
  # {'Jim'=>[{:id=>1, :name=>'Jim'}, {:id=>4, :name=>'Jim'}, ...], 'Bob'=>[{:id=>2, :name=>'Bob'}], ...}

You can also provide an array of column names for either the key_column, the value column, or both:

  DB[:table].to_hash_groups([:first, :middle], [:last, :id]) # SELECT * FROM table
  # {['Jim', 'Bob']=>[['Smith', 1], ['Jackson', 4], ...], ...}

  DB[:table].to_hash_groups([:first, :middle]) # SELECT * FROM table
  # {['Jim', 'Bob']=>[{:id=>1, :first=>'Jim', :middle=>'Bob', :last=>'Smith'}, ...], ...}

[Source]

     # File lib/sequel/dataset/actions.rb, line 621
621:     def to_hash_groups(key_column, value_column = nil)
622:       h = {}
623:       if value_column
624:         return naked.to_hash_groups(key_column, value_column) if row_proc
625:         if value_column.is_a?(Array)
626:           if key_column.is_a?(Array)
627:             each{|r| (h[r.values_at(*key_column)] ||= []) << r.values_at(*value_column)}
628:           else
629:             each{|r| (h[r[key_column]] ||= []) << r.values_at(*value_column)}
630:           end
631:         else
632:           if key_column.is_a?(Array)
633:             each{|r| (h[r.values_at(*key_column)] ||= []) << r[value_column]}
634:           else
635:             each{|r| (h[r[key_column]] ||= []) << r[value_column]}
636:           end
637:         end
638:       elsif key_column.is_a?(Array)
639:         each{|r| (h[r.values_at(*key_column)] ||= []) << r}
640:       else
641:         each{|r| (h[r[key_column]] ||= []) << r}
642:       end
643:       h
644:     end

Truncates the dataset. Returns nil.

  DB[:table].truncate # TRUNCATE table
  # => nil

[Source]

     # File lib/sequel/dataset/actions.rb, line 650
650:     def truncate
651:       execute_ddl(truncate_sql)
652:     end

Updates values for the dataset. The returned value is generally the number of rows updated, but that is adapter dependent. values should be a hash where the keys are columns to set and values are the values to which to set the columns.

  DB[:table].update(:x=>nil) # UPDATE table SET x = NULL
  # => 10

  DB[:table].update(:x=>:x+1, :y=>0) # UPDATE table SET x = (x + 1), y = 0
  # => 10

[Source]

     # File lib/sequel/dataset/actions.rb, line 664
664:     def update(values={}, &block)
665:       sql = update_sql(values)
666:       if uses_returning?(:update)
667:         returning_fetch_rows(sql, &block)
668:       else
669:         execute_dui(sql)
670:       end
671:     end

Execute the given SQL and return the number of rows deleted. This exists solely as an optimization, replacing with_sql(sql).delete. It's significantly faster as it does not require cloning the current dataset.

[Source]

     # File lib/sequel/dataset/actions.rb, line 676
676:     def with_sql_delete(sql)
677:       execute_dui(sql)
678:     end

Protected Instance methods

Internals of import. If primary key values are requested, use separate insert commands for each row. Otherwise, call multi_insert_sql and execute each statement it gives separately.

[Source]

     # File lib/sequel/dataset/actions.rb, line 685
685:     def _import(columns, values, opts)
686:       trans_opts = opts.merge(:server=>@opts[:server])
687:       if opts[:return] == :primary_key
688:         @db.transaction(trans_opts){values.map{|v| insert(columns, v)}}
689:       else
690:         stmts = multi_insert_sql(columns, values)
691:         @db.transaction(trans_opts){stmts.each{|st| execute_dui(st)}}
692:       end
693:     end

Return an array of arrays of values given by the symbols in ret_cols.

[Source]

     # File lib/sequel/dataset/actions.rb, line 696
696:     def _select_map_multiple(ret_cols)
697:       map{|r| r.values_at(*ret_cols)}
698:     end

Returns an array of the first value in each row.

[Source]

     # File lib/sequel/dataset/actions.rb, line 701
701:     def _select_map_single
702:       map{|r| r.values.first}
703:     end

4 - Methods that describe what the dataset supports

These methods all return booleans, with most describing whether or not the dataset supports a feature.

Public Instance methods

Whether this dataset will provide an accurate number of rows matched for delete and update statements. Accurate in this case means the number of rows matched by the dataset's filter.

[Source]

    # File lib/sequel/dataset/features.rb, line 23
23:     def provides_accurate_rows_matched?
24:       true
25:     end

Whether this dataset quotes identifiers.

[Source]

    # File lib/sequel/dataset/features.rb, line 10
10:     def quote_identifiers?
11:       if defined?(@quote_identifiers)
12:         @quote_identifiers
13:       elsif db.respond_to?(:quote_identifiers?)
14:         @quote_identifiers = db.quote_identifiers?
15:       else
16:         @quote_identifiers = false
17:       end
18:     end

Whether you must use a column alias list for recursive CTEs (false by default).

[Source]

    # File lib/sequel/dataset/features.rb, line 29
29:     def recursive_cte_requires_column_aliases?
30:       false
31:     end

Whether type specifiers are required for prepared statement/bound variable argument placeholders (e.g. :bv__integer)

[Source]

    # File lib/sequel/dataset/features.rb, line 41
41:     def requires_placeholder_type_specifiers?
42:       false
43:     end

Whether the dataset requires SQL standard datetimes (false by default, as most allow strings with ISO 8601 format).

[Source]

    # File lib/sequel/dataset/features.rb, line 35
35:     def requires_sql_standard_datetimes?
36:       false
37:     end

Whether the dataset supports common table expressions (the WITH clause). If given, type can be :select, :insert, :update, or :delete, in which case it determines whether WITH is supported for the respective statement type.

[Source]

    # File lib/sequel/dataset/features.rb, line 48
48:     def supports_cte?(type=:select)
49:       send(:"#{type}_clause_methods").include?(:"#{type}_with_sql")
50:     end
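
For example, checking support before building a query (a sketch; actual return values depend on the adapter):

  ds = DB[:items]
  ds.supports_cte?           # => true if SELECT supports WITH on this database
  ds.supports_cte?(:insert)  # => true if INSERT supports WITH on this database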

Whether the dataset supports common table expressions (the WITH clause) in subqueries. If false, applies the WITH clause to the main query, which can cause issues if multiple WITH clauses use the same name.

[Source]

    # File lib/sequel/dataset/features.rb, line 55
55:     def supports_cte_in_subqueries?
56:       false
57:     end

Whether the dataset supports or can emulate the DISTINCT ON clause, false by default.

[Source]

    # File lib/sequel/dataset/features.rb, line 60
60:     def supports_distinct_on?
61:       false
62:     end

Whether the dataset supports CUBE with GROUP BY.

[Source]

    # File lib/sequel/dataset/features.rb, line 65
65:     def supports_group_cube?
66:       false
67:     end

Whether the dataset supports ROLLUP with GROUP BY.

[Source]

    # File lib/sequel/dataset/features.rb, line 70
70:     def supports_group_rollup?
71:       false
72:     end

Whether this dataset supports the insert_select method for returning all column values directly from an insert query.

[Source]

    # File lib/sequel/dataset/features.rb, line 76
76:     def supports_insert_select?
77:       supports_returning?(:insert)
78:     end

Whether the dataset supports the INTERSECT and EXCEPT compound operations, true by default.

[Source]

    # File lib/sequel/dataset/features.rb, line 81
81:     def supports_intersect_except?
82:       true
83:     end

Whether the dataset supports the INTERSECT ALL and EXCEPT ALL compound operations, true by default.

[Source]

    # File lib/sequel/dataset/features.rb, line 86
86:     def supports_intersect_except_all?
87:       true
88:     end

Whether the dataset supports the IS TRUE syntax.

[Source]

    # File lib/sequel/dataset/features.rb, line 91
91:     def supports_is_true?
92:       true
93:     end

Whether the dataset supports the JOIN table USING (column1, …) syntax.

[Source]

    # File lib/sequel/dataset/features.rb, line 96
96:     def supports_join_using?
97:       true
98:     end

Whether modifying joined datasets is supported.

[Source]

     # File lib/sequel/dataset/features.rb, line 101
101:     def supports_modifying_joins?
102:       false
103:     end

Whether the IN/NOT IN operators support multiple columns when an array of values is given.

[Source]

     # File lib/sequel/dataset/features.rb, line 107
107:     def supports_multiple_column_in?
108:       true
109:     end

Whether the dataset supports or can fully emulate the DISTINCT ON clause, including respecting the ORDER BY clause, false by default

[Source]

     # File lib/sequel/dataset/features.rb, line 113
113:     def supports_ordered_distinct_on?
114:       supports_distinct_on?
115:     end

Whether the RETURNING clause is supported for the given type of query. type can be :insert, :update, or :delete.

[Source]

     # File lib/sequel/dataset/features.rb, line 119
119:     def supports_returning?(type)
120:       send(:"#{type}_clause_methods").include?(:"#{type}_returning_sql")
121:     end

Whether the database supports SELECT *, column FROM table

[Source]

     # File lib/sequel/dataset/features.rb, line 124
124:     def supports_select_all_and_column?
125:       true
126:     end

Whether the dataset supports timezones in literal timestamps

[Source]

     # File lib/sequel/dataset/features.rb, line 129
129:     def supports_timestamp_timezones?
130:       false
131:     end

Whether the dataset supports fractional seconds in literal timestamps

[Source]

     # File lib/sequel/dataset/features.rb, line 134
134:     def supports_timestamp_usecs?
135:       true
136:     end

Whether the dataset supports WHERE TRUE (or WHERE 1 for databases that use 1 for true).

[Source]

     # File lib/sequel/dataset/features.rb, line 145
145:     def supports_where_true?
146:       true
147:     end

Whether the dataset supports window functions.

[Source]

     # File lib/sequel/dataset/features.rb, line 139
139:     def supports_window_functions?
140:       false
141:     end

5 - Methods related to dataset graphing

Dataset graphing changes the dataset to yield hashes where keys are table name symbols and values are hashes representing the columns related to that table. All of these methods return modified copies of the receiver.

Public Instance methods

Adds the given graph aliases to the list of graph aliases to use, unlike set_graph_aliases, which replaces the list (the equivalent of select_more when graphing). See set_graph_aliases.

  DB[:table].add_graph_aliases(:some_alias=>[:table, :column])
  # SELECT ..., table.column AS some_alias
  # => {:table=>{:column=>some_alias_value, ...}, ...}

[Source]

    # File lib/sequel/dataset/graph.rb, line 17
17:     def add_graph_aliases(graph_aliases)
18:       columns, graph_aliases = graph_alias_columns(graph_aliases)
19:       ds = select_more(*columns)
20:       ds.opts[:graph_aliases] = (ds.opts[:graph_aliases] || (ds.opts[:graph][:column_aliases] rescue {}) || {}).merge(graph_aliases)
21:       ds
22:     end

Allows you to join multiple datasets/tables and have the result set split into component tables.

This differs from the usual usage of join, which returns the result set as a single hash. For example:

  # CREATE TABLE artists (id INTEGER, name TEXT);
  # CREATE TABLE albums (id INTEGER, name TEXT, artist_id INTEGER);

  DB[:artists].left_outer_join(:albums, :artist_id=>:id).first
  #=> {:id=>albums.id, :name=>albums.name, :artist_id=>albums.artist_id}

  DB[:artists].graph(:albums, :artist_id=>:id).first
  #=> {:artists=>{:id=>artists.id, :name=>artists.name}, :albums=>{:id=>albums.id, :name=>albums.name, :artist_id=>albums.artist_id}}

Using a join such as left_outer_join, the attribute names that are shared between the tables are combined in the single return hash. You can get around that by using select with correct aliases for all of the columns, but it is simpler to use graph and have the result set split for you. In addition, graph respects any row_proc of the current dataset and the datasets you use with graph.

If you are graphing a table and all columns for that table are nil, this indicates that no matching rows existed in the table, so graph will return nil instead of a hash with all nil values:

  # If the artist doesn't have any albums
  DB[:artists].graph(:albums, :artist_id=>:id).first
  #=> {:artists=>{:id=>artists.id, :name=>artists.name}, :albums=>nil}

Arguments:

dataset : Can be a symbol (specifying a table), another dataset, or an object that responds to dataset and returns a symbol or a dataset
join_conditions : Any condition(s) allowed by join_table.
block : A block that is passed to join_table.

Options:

:from_self_alias : The alias to use when the receiver is not a graphed dataset but it contains multiple FROM tables or a JOIN. In this case, the receiver is wrapped in a from_self before graphing, and this option determines the alias to use.
:implicit_qualifier : The qualifier of implicit conditions, see join_table.
:join_type : The type of join to use (passed to join_table). Defaults to :left_outer.
:select : An array of columns to select. When not used, selects all columns in the given dataset. When set to false, selects no columns and is like simply joining the tables, though graph keeps some metadata about the join that makes it important to use graph instead of join_table.
:table_alias : The alias to use for the table. If not specified, doesn't alias the table. You will get an error if the alias (or table) name is used more than once.

[Source]

     # File lib/sequel/dataset/graph.rb, line 74
 74:     def graph(dataset, join_conditions = nil, options = {}, &block)
 75:       # Allow the use of a model, dataset, or symbol as the first argument
 76:       # Find the table name/dataset based on the argument
 77:       dataset = dataset.dataset if dataset.respond_to?(:dataset)
 78:       table_alias = options[:table_alias]
 79:       case dataset
 80:       when Symbol
 81:         table = dataset
 82:         dataset = @db[dataset]
 83:         table_alias ||= table
 84:       when ::Sequel::Dataset
 85:         if dataset.simple_select_all?
 86:           table = dataset.opts[:from].first
 87:           table_alias ||= table
 88:         else
 89:           table = dataset
 90:           table_alias ||= dataset_alias((@opts[:num_dataset_sources] || 0)+1)
 91:         end
 92:       else
 93:         raise Error, "The dataset argument should be a symbol, dataset, or model"
 94:       end
 95: 
 96:       # Raise Sequel::Error with explanation that the table alias has been used
 97:       raise_alias_error = lambda do
 98:         raise(Error, "this #{options[:table_alias] ? 'alias' : 'table'} has already been been used, please specify " \
 99:           "#{options[:table_alias] ? 'a different alias' : 'an alias via the :table_alias option'}") 
100:       end
101: 
102:       # Only allow table aliases that haven't been used
103:       raise_alias_error.call if @opts[:graph] && @opts[:graph][:table_aliases] && @opts[:graph][:table_aliases].include?(table_alias)
104:       
105:       # Use a from_self if this is already a joined table
106:       ds = (!@opts[:graph] && (@opts[:from].length > 1 || @opts[:join])) ? from_self(:alias=>options[:from_self_alias] || first_source) : self
107:       
108:       # Join the table early in order to avoid cloning the dataset twice
109:       ds = ds.join_table(options[:join_type] || :left_outer, table, join_conditions, :table_alias=>table_alias, :implicit_qualifier=>options[:implicit_qualifier], &block)
110:       opts = ds.opts
111: 
112:       # Whether to include the table in the result set
113:       add_table = options[:select] == false ? false : true
114:       # Whether to add the columns to the list of column aliases
115:       add_columns = !ds.opts.include?(:graph_aliases)
116: 
117:       # Setup the initial graph data structure if it doesn't exist
118:       if graph = opts[:graph]
119:         opts[:graph] = graph = graph.dup
120:         select = opts[:select].dup
121:         [:column_aliases, :table_aliases, :column_alias_num].each{|k| graph[k] = graph[k].dup}
122:       else
123:         master = alias_symbol(ds.first_source_alias)
124:         raise_alias_error.call if master == table_alias
125:         # Master hash storing all .graph related information
126:         graph = opts[:graph] = {}
127:         # Associates column aliases back to tables and columns
128:         column_aliases = graph[:column_aliases] = {}
129:         # Associates table alias (the master is never aliased)
130:         table_aliases = graph[:table_aliases] = {master=>self}
131:         # Keep track of the alias numbers used
132:         ca_num = graph[:column_alias_num] = Hash.new(0)
133:         # All columns in the master table are never
134:         # aliased, but are not included if set_graph_aliases
135:         # has been used.
136:         if add_columns
137:           if (select = @opts[:select]) && !select.empty? && !(select.length == 1 && (select.first.is_a?(SQL::ColumnAll)))
138:             select = select.each do |sel|
139:               column = case sel
140:               when Symbol
141:                 _, c, a = split_symbol(sel)
142:                 (a || c).to_sym
143:               when SQL::Identifier
144:                 sel.value.to_sym
145:               when SQL::QualifiedIdentifier
146:                 column = sel.column
147:                 column = column.value if column.is_a?(SQL::Identifier)
148:                 column.to_sym
149:               when SQL::AliasedExpression
150:                 column = sel.aliaz
151:                 column = column.value if column.is_a?(SQL::Identifier)
152:                 column.to_sym
153:               else
154:                 raise Error, "can't figure out alias to use for graphing for #{sel.inspect}"
155:               end
156:               column_aliases[column] = [master, column]
157:             end
158:             select = qualified_expression(select, master)
159:           else
160:             select = columns.map do |column|
161:               column_aliases[column] = [master, column]
162:               SQL::QualifiedIdentifier.new(master, column)
163:             end
164:           end
165:         end
166:       end
167: 
168:       # Add the table alias to the list of aliases
169:       # Even if it isn't been used in the result set,
170:       # we add a key for it with a nil value so we can check if it
171:       # is used more than once
172:       table_aliases = graph[:table_aliases]
173:       table_aliases[table_alias] = add_table ? dataset : nil
174: 
175:       # Add the columns to the selection unless we are ignoring them
176:       if add_table && add_columns
177:         column_aliases = graph[:column_aliases]
178:         ca_num = graph[:column_alias_num]
179:         # Which columns to add to the result set
180:         cols = options[:select] || dataset.columns
181:         # If the column hasn't been used yet, don't alias it.
182:         # If it has been used, try table_column.
183:         # If that has been used, try table_column_N 
184:         # using the next value of N that we know hasn't been
185:         # used
186:         cols.each do |column|
187:           col_alias, identifier = if column_aliases[column]
188:             column_alias = "#{table_alias}_#{column}"
189:             if column_aliases[column_alias]
190:               column_alias_num = ca_num[column_alias]
191:               column_alias = "#{column_alias}_#{column_alias_num}"
192:               ca_num[column_alias] += 1
193:             end
194:             [column_alias, SQL::AliasedExpression.new(SQL::QualifiedIdentifier.new(table_alias, column), column_alias)]
195:           else
196:             ident = SQL::QualifiedIdentifier.new(table_alias, column)
197:             [column, ident]
198:           end
199:           column_aliases[col_alias] = [table_alias, column]
200:           select.push(identifier)
201:         end
202:       end
203:       add_columns ? ds.select(*select) : ds
204:     end

This allows you to manually specify the graph aliases to use when using graph. You can use it to only select certain columns, and have those columns mapped to specific aliases in the result set. This is the equivalent of select for a graphed dataset, and must be used instead of select whenever graphing is used.

graph_aliases :Should be a hash with keys being symbols of column aliases, and values being either symbols or arrays with one to three elements. If the value is a symbol, it is assumed to be the same as a one element array containing that symbol. The first element of the array should be the table alias symbol. The second should be the actual column name symbol. If the array only has a single element the column name symbol will be assumed to be the same as the corresponding hash key. If the array has a third element, it is used as the value returned, instead of table_alias.column_name.
  DB[:artists].graph(:albums, :artist_id=>:id).
    set_graph_aliases(:name=>:artists,
                      :album_name=>[:albums, :name],
                      :forty_two=>[:albums, :fourtwo, 42]).first
  # SELECT artists.name, albums.name AS album_name, 42 AS forty_two ...
  # => {:artists=>{:name=>artists.name}, :albums=>{:name=>albums.name, :fourtwo=>42}}

[Source]

     # File lib/sequel/dataset/graph.rb, line 230
230:     def set_graph_aliases(graph_aliases)
231:       columns, graph_aliases = graph_alias_columns(graph_aliases)
232:       ds = select(*columns)
233:       ds.opts[:graph_aliases] = graph_aliases
234:       ds
235:     end

Remove the splitting of results into subhashes, and all metadata related to the current graph (if any).
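
For example (an illustrative sketch, reusing the artists/albums graph from the set_graph_aliases example above):

  DB[:artists].graph(:albums, :artist_id=>:id).ungraphed.all
  # => plain row hashes such as {:id=>..., :name=>..., :albums_id=>...}
  #    instead of {:artists=>{...}, :albums=>{...}} subhashes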

[Source]

     # File lib/sequel/dataset/graph.rb, line 239
239:     def ungraphed
240:       clone(:graph=>nil, :graph_aliases=>nil)
241:     end

6 - Miscellaneous methods

These methods don‘t fit cleanly into another section.

Constants

NOTIMPL_MSG = "This method must be overridden in Sequel adapters".freeze
ARRAY_ACCESS_ERROR_MSG = 'You cannot call Dataset#[] with an integer or with no arguments.'.freeze
ARG_BLOCK_ERROR_MSG = 'Must use either an argument or a block, not both'.freeze
IMPORT_ERROR_MSG = 'Using Sequel::Dataset#import an empty column array is not allowed'.freeze

Attributes

db  [RW]  The database related to this dataset. This is the Database instance that will execute all of this dataset‘s queries.
opts  [RW]  The hash of options for this dataset, keys are symbols.

Public Class methods

Constructs a new Dataset instance with an associated database and options. Datasets are usually constructed by invoking the Database#[] method:

  DB[:posts]

Sequel::Dataset is an abstract class that is not useful by itself. Each database adapter provides a subclass of Sequel::Dataset, and has the Database#dataset method return an instance of that subclass.

[Source]

    # File lib/sequel/dataset/misc.rb, line 28
28:     def initialize(db, opts = nil)
29:       @db = db
30:       @opts = opts || {}
31:     end

Public Instance methods

Define a hash value such that datasets with the same DB, opts, and SQL will be considered equal.
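
For example (illustrative; assumes an items table):

  DB[:items] == DB[:items]             # => true
  DB[:items] == DB[:items].select(:id) # => false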

[Source]

    # File lib/sequel/dataset/misc.rb, line 35
35:     def ==(o)
36:       o.is_a?(self.class) && db == o.db  && opts == o.opts && sql == o.sql
37:     end

Yield a dataset for each server in the connection pool that is tied to that server. Intended for use in sharded environments where all servers need to be modified with the same data:

  DB[:configs].where(:key=>'setting').each_server{|ds| ds.update(:value=>'new_value')}

[Source]

    # File lib/sequel/dataset/misc.rb, line 49
49:     def each_server
50:       db.servers.each{|s| yield server(s)}
51:     end

Alias for ==

[Source]

    # File lib/sequel/dataset/misc.rb, line 40
40:     def eql?(o)
41:       self == o
42:     end

Alias of first_source_alias

[Source]

    # File lib/sequel/dataset/misc.rb, line 54
54:     def first_source
55:       first_source_alias
56:     end

The first source (primary table) for this dataset. If the dataset doesn‘t have a table, raises an Error. If the table is aliased, returns the aliased name.

  DB[:table].first_source_alias
  # => :table

  DB[:table___t].first_source_alias
  # => :t

[Source]

    # File lib/sequel/dataset/misc.rb, line 66
66:     def first_source_alias
67:       source = @opts[:from]
68:       if source.nil? || source.empty?
69:         raise Error, 'No source specified for query'
70:       end
71:       case s = source.first
72:       when SQL::AliasedExpression
73:         s.aliaz
74:       when Symbol
75:         sch, table, aliaz = split_symbol(s)
76:         aliaz ? aliaz.to_sym : s
77:       else
78:         s
79:       end
80:     end

The first source (primary table) for this dataset. If the dataset doesn‘t have a table, raises an error. If the table is aliased, returns the original table, not the alias

  DB[:table].first_source_table
  # => :table

  DB[:table___t].first_source_table
  # => :table

[Source]

     # File lib/sequel/dataset/misc.rb, line 91
 91:     def first_source_table
 92:       source = @opts[:from]
 93:       if source.nil? || source.empty?
 94:         raise Error, 'No source specified for query'
 95:       end
 96:       case s = source.first
 97:       when SQL::AliasedExpression
 98:         s.expression
 99:       when Symbol
100:         sch, table, aliaz = split_symbol(s)
101:         aliaz ? (sch ? SQL::QualifiedIdentifier.new(sch, table) : table.to_sym) : s
102:       else
103:         s
104:       end
105:     end

Define a hash value such that datasets with the same DB, opts, and SQL will have the same hash value
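
For example (illustrative; assumes an items table):

  DB[:items].hash == DB[:items].hash # => true

Together with eql?, this allows equivalent datasets to be used as the same Hash key.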

[Source]

     # File lib/sequel/dataset/misc.rb, line 109
109:     def hash
110:       [db, opts.sort_by{|k, v| k.to_s}, sql].hash
111:     end

The String instance method to call on identifiers before sending them to the database.
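
The return value depends on the database in use (illustrative examples only):

  DB[:items].identifier_input_method
  # => :upcase   # on databases that fold unquoted identifiers to uppercase
  # => nil       # when identifiers are sent to the database unchanged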

[Source]

     # File lib/sequel/dataset/misc.rb, line 115
115:     def identifier_input_method
116:       if defined?(@identifier_input_method)
117:         @identifier_input_method
118:       elsif db.respond_to?(:identifier_input_method)
119:         @identifier_input_method = db.identifier_input_method
120:       else
121:         @identifier_input_method = nil
122:       end
123:     end

The String instance method to call on identifiers returned from the database.
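
The return value depends on the database in use (illustrative examples only):

  DB[:items].identifier_output_method
  # => :downcase # on databases that return identifiers in uppercase
  # => nil       # when identifiers are used exactly as returned by the database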

[Source]

     # File lib/sequel/dataset/misc.rb, line 127
127:     def identifier_output_method
128:       if defined?(@identifier_output_method)
129:         @identifier_output_method
130:       elsif db.respond_to?(:identifier_output_method)
131:         @identifier_output_method = db.identifier_output_method
132:       else
133:         @identifier_output_method = nil
134:       end
135:     end

Returns a string representation of the dataset including the class name and the corresponding SQL select statement.
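
For example (illustrative; the adapter-specific subclass name and identifier quoting vary):

  DB[:items].inspect
  # => "#<Sequel::Dataset: \"SELECT * FROM items\">"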

[Source]

     # File lib/sequel/dataset/misc.rb, line 139
139:     def inspect
140:       c = self.class
141:       c = c.superclass while c.name.nil? || c.name == ''
142:       "#<#{c.name}: #{sql.inspect}>"
143:     end

The alias to use for the row_number column, used when emulating OFFSET support and for eager limit strategies

[Source]

     # File lib/sequel/dataset/misc.rb, line 147
147:     def row_number_column
148:       :x_sequel_row_number_x
149:     end

Splits a possible implicit alias in c, handling both SQL::AliasedExpressions and Symbols. Returns an array of two elements, with the first being the main expression, and the second being the alias.
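
For example (illustrative; assumes an items table):

  DB[:items].split_alias(:a)        # => [:a, nil]
  DB[:items].split_alias(:a.as(:b)) # => [:a, :b]
  DB[:items].split_alias(:t__a)     # => [Sequel::SQL::QualifiedIdentifier (t.a), nil]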

[Source]

     # File lib/sequel/dataset/misc.rb, line 154
154:     def split_alias(c)
155:       case c
156:       when Symbol
157:         c_table, column, aliaz = split_symbol(c)
158:         [c_table ? SQL::QualifiedIdentifier.new(c_table, column.to_sym) : column.to_sym, aliaz]
159:       when SQL::AliasedExpression
160:         [c.expression, c.aliaz]
161:       when SQL::JoinClause
162:         [c.table, c.table_alias]
163:       else
164:         [c, nil]
165:       end
166:     end

Creates a unique table alias that hasn‘t already been used in the dataset. table_alias can be any type of object accepted by alias_symbol. The symbol returned will be the implicit alias in the argument, possibly appended with "_N" if the implicit alias has already been used, where N is an integer starting at 0 and increasing until an unused one is found.

You can provide a second, additional array argument containing symbols that should not be considered valid table aliases. The current aliases for the FROM and JOIN tables are automatically included in this array.

  DB[:table].unused_table_alias(:t)
  # => :t

  DB[:table].unused_table_alias(:table)
  # => :table_0

  DB[:table, :table_0].unused_table_alias(:table)
  # => :table_1

  DB[:table, :table_0].unused_table_alias(:table, [:table_1, :table_2])
  # => :table_3

[Source]

     # File lib/sequel/dataset/misc.rb, line 190
190:     def unused_table_alias(table_alias, used_aliases = [])
191:       table_alias = alias_symbol(table_alias)
192:       used_aliases += opts[:from].map{|t| alias_symbol(t)} if opts[:from]
193:       used_aliases += opts[:join].map{|j| j.table_alias ? alias_alias_symbol(j.table_alias) : alias_symbol(j.table)} if opts[:join]
194:       if used_aliases.include?(table_alias)
195:         i = 0
196:         loop do
197:           ta = "#{table_alias}_#{i}"
198:           return ta unless used_aliases.include?(ta)
199:           i += 1 
200:         end
201:       else
202:         table_alias
203:       end
204:     end

8 - Methods related to prepared statements or bound variables

On some adapters, these use native prepared statements and bound variables, on others support is emulated. For details, see the "Prepared Statements/Bound Variables" guide.

Classes and Modules

Module Sequel::Dataset::UnnumberedArgumentMapper

Constants

PREPARED_ARG_PLACEHOLDER = LiteralString.new('?').freeze

Public Instance methods

Set the bind variables to use for the call. If bind variables have already been set for this dataset, they are updated with the contents of bind_vars.

  DB[:table].filter(:id=>:$id).bind(:id=>1).call(:first)
  # SELECT * FROM table WHERE id = ? LIMIT 1 -- (1)
  # => {:id=>1}

[Source]

     # File lib/sequel/dataset/prepared_statements.rb, line 217
217:     def bind(bind_vars={})
218:       clone(:bind_vars=>@opts[:bind_vars] ? @opts[:bind_vars].merge(bind_vars) : bind_vars)
219:     end

For the given type (:select, :first, :insert, :insert_select, :update, or :delete), run the sql with the bind variables specified in the hash. values is a hash passed to insert or update (if one of those types is used), which may contain placeholders.

  DB[:table].filter(:id=>:$id).call(:first, :id=>1)
  # SELECT * FROM table WHERE id = ? LIMIT 1 -- (1)
  # => {:id=>1}

[Source]

     # File lib/sequel/dataset/prepared_statements.rb, line 228
228:     def call(type, bind_variables={}, *values, &block)
229:       prepare(type, nil, *values).call(bind_variables, &block)
230:     end

Prepare an SQL statement for later execution. Takes a type similar to call, and the name symbol of the prepared statement. While name defaults to nil, it should always be provided as a symbol for the name of the prepared statement, as some databases require that prepared statements have names.

This returns a clone of the dataset extended with PreparedStatementMethods, which you can call with the hash of bind variables to use. The prepared statement is also stored in the associated database, where it can be called by name. The following usage is identical:

  ps = DB[:table].filter(:name=>:$name).prepare(:first, :select_by_name)

  ps.call(:name=>'Blah')
  # SELECT * FROM table WHERE name = ? -- ('Blah')
  # => {:id=>1, :name=>'Blah'}

  DB.call(:select_by_name, :name=>'Blah') # Same thing

[Source]

     # File lib/sequel/dataset/prepared_statements.rb, line 250
250:     def prepare(type, name=nil, *values)
251:       ps = to_prepared_statement(type, values)
252:       db.set_prepared_statement(name, ps) if name
253:       ps
254:     end

Protected Instance methods

Return a cloned copy of the current dataset extended with PreparedStatementMethods, setting the type and modify values.

[Source]

     # File lib/sequel/dataset/prepared_statements.rb, line 260
260:     def to_prepared_statement(type, values=nil)
261:       ps = bind
262:       ps.extend(PreparedStatementMethods)
263:       ps.orig_dataset = self
264:       ps.prepared_type = type
265:       ps.prepared_modify_values = values
266:       ps
267:     end

1 - Methods that return modified datasets

These methods all return modified copies of the receiver.

Constants

COLUMN_CHANGE_OPTS = [:select, :sql, :from, :join].freeze   The dataset options that require the removal of cached columns if changed.
NON_SQL_OPTIONS = [:server, :defaults, :overrides, :graph, :eager_graph, :graph_aliases]   Which options don‘t affect the SQL generation. Used by simple_select_all? to determine if this is a simple SELECT * FROM table.
CONDITIONED_JOIN_TYPES = [:inner, :full_outer, :right_outer, :left_outer, :full, :right, :left]   These symbols have _join methods created (e.g. inner_join) that call join_table with the symbol, passing along the arguments and block from the method call.
UNCONDITIONED_JOIN_TYPES = [:natural, :natural_left, :natural_right, :natural_full, :cross]   These symbols have _join methods created (e.g. natural_join) that call join_table with the symbol. They only accept a single table argument which is passed to join_table, and they raise an error if called with a block.
JOIN_METHODS = (CONDITIONED_JOIN_TYPES + UNCONDITIONED_JOIN_TYPES).map{|x| "#{x}_join".to_sym} + [:join, :join_table]   All methods that return modified datasets with a joined table added.
QUERY_METHODS = (<<-METHS).split.map{|x| x.to_sym} + JOIN_METHODS add_graph_aliases and distinct except exclude exclude_having exclude_where filter for_update from from_self graph grep group group_and_count group_by having intersect invert limit lock_style naked or order order_append order_by order_more order_prepend paginate qualify query reverse reverse_order select select_all select_append select_group select_more server set_defaults set_graph_aliases set_overrides unfiltered ungraphed ungrouped union unlimited unordered where with with_recursive with_sql METHS   Methods that return modified datasets

Public Instance methods

Adds a further filter to an existing filter using AND. If no filter exists, an error is raised. This method is identical to filter, except that it expects an existing filter.

  DB[:table].filter(:a).and(:b) # SELECT * FROM table WHERE a AND b

[Source]

    # File lib/sequel/dataset/query.rb, line 46
46:     def and(*cond, &block)
47:       raise(InvalidOperation, "No existing filter found.") unless @opts[:having] || @opts[:where]
48:       filter(*cond, &block)
49:     end

Returns a new clone of the dataset with the given options merged. If the options changed include options in COLUMN_CHANGE_OPTS, the cached columns are deleted. This method should generally not be called directly by user code.
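
For example (illustrative; the query methods below are normally used instead of calling clone directly):

  DB[:items].clone(:limit=>2) # SELECT * FROM items LIMIT 2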

[Source]

    # File lib/sequel/dataset/query.rb, line 55
55:     def clone(opts = {})
56:       c = super()
57:       c.opts = @opts.merge(opts)
58:       c.instance_variable_set(:@columns, nil) if opts.keys.any?{|o| COLUMN_CHANGE_OPTS.include?(o)}
59:       c
60:     end

Returns a copy of the dataset with the SQL DISTINCT clause. The DISTINCT clause is used to remove duplicate rows from the output. If arguments are provided, uses a DISTINCT ON clause, in which case it will only be distinct on those columns, instead of all returned columns. Raises an error if arguments are given and DISTINCT ON is not supported.

 DB[:items].distinct # SQL: SELECT DISTINCT * FROM items
 DB[:items].order(:id).distinct(:id) # SQL: SELECT DISTINCT ON (id) * FROM items ORDER BY id

[Source]

    # File lib/sequel/dataset/query.rb, line 71
71:     def distinct(*args)
72:       raise(InvalidOperation, "DISTINCT ON not supported") if !args.empty? && !supports_distinct_on?
73:       clone(:distinct => args)
74:     end

Adds an EXCEPT clause using a second dataset object. An EXCEPT compound dataset returns all rows in the current dataset that are not in the given dataset. Raises an InvalidOperation if the operation is not supported. Options:

:alias :Use the given value as the from_self alias
:all :Set to true to use EXCEPT ALL instead of EXCEPT, so duplicate rows can occur
:from_self :Set to false to not wrap the returned dataset in a from_self, use with care.
  DB[:items].except(DB[:other_items])
  # SELECT * FROM (SELECT * FROM items EXCEPT SELECT * FROM other_items) AS t1

  DB[:items].except(DB[:other_items], :all=>true, :from_self=>false)
  # SELECT * FROM items EXCEPT ALL SELECT * FROM other_items

  DB[:items].except(DB[:other_items], :alias=>:i)
  # SELECT * FROM (SELECT * FROM items EXCEPT SELECT * FROM other_items) AS i

[Source]

    # File lib/sequel/dataset/query.rb, line 93
93:     def except(dataset, opts={})
94:       opts = {:all=>opts} unless opts.is_a?(Hash)
95:       raise(InvalidOperation, "EXCEPT not supported") unless supports_intersect_except?
96:       raise(InvalidOperation, "EXCEPT ALL not supported") if opts[:all] && !supports_intersect_except_all?
97:       compound_clone(:except, dataset, opts)
98:     end

Performs the inverse of Dataset#filter. Note that if you have multiple filter conditions, this is not the same as a negation of all conditions.

  DB[:items].exclude(:category => 'software')
  # SELECT * FROM items WHERE (category != 'software')

  DB[:items].exclude(:category => 'software', :id=>3)
  # SELECT * FROM items WHERE ((category != 'software') OR (id != 3))

[Source]

     # File lib/sequel/dataset/query.rb, line 108
108:     def exclude(*cond, &block)
109:       _filter_or_exclude(true, @opts[:having] ? :having : :where, *cond, &block)
110:     end

Inverts the given conditions and adds them to the HAVING clause.

  DB[:items].select_group(:name).exclude_having{count(name) < 2}
  # SELECT name FROM items GROUP BY name HAVING (count(name) >= 2)

[Source]

     # File lib/sequel/dataset/query.rb, line 116
116:     def exclude_having(*cond, &block)
117:       _filter_or_exclude(true, :having, *cond, &block)
118:     end

Inverts the given conditions and adds them to the WHERE clause.

  DB[:items].select_group(:name).exclude_where(:category => 'software')
  # SELECT * FROM items WHERE (category != 'software')

  DB[:items].select_group(:name).
    exclude_having{count(name) < 2}.
    exclude_where(:category => 'software')
  # SELECT name FROM items WHERE (category != 'software')
  # GROUP BY name HAVING (count(name) >= 2)

[Source]

     # File lib/sequel/dataset/query.rb, line 130
130:     def exclude_where(*cond, &block)
131:       _filter_or_exclude(true, :where, *cond, &block)
132:     end

Returns a copy of the dataset with the given conditions imposed upon it. If the query already has a HAVING clause, then the conditions are imposed in the HAVING clause. If not, then they are imposed in the WHERE clause.

filter accepts the following argument types:

  • Hash - list of equality/inclusion expressions
  • Array - depends:
    • If first member is a string, assumes the rest of the arguments are parameters and interpolates them into the string.
    • If all members are arrays of length two, treats the same way as a hash, except it allows for duplicate keys to be specified.
    • Otherwise, treats each argument as a separate condition.
  • String - taken literally
  • Symbol - taken as a boolean column argument (e.g. WHERE active)
  • Sequel::SQL::BooleanExpression - an existing condition expression, probably created using the Sequel expression filter DSL.

filter also takes a block, which should return one of the above argument types, and is treated the same way. This block yields a virtual row object, which is easy to use to create identifiers and functions. For more details on the virtual row support, see the "Virtual Rows" guide

If both a block and regular argument are provided, they get ANDed together.

Examples:

  DB[:items].filter(:id => 3)
  # SELECT * FROM items WHERE (id = 3)

  DB[:items].filter('price < ?', 100)
  # SELECT * FROM items WHERE price < 100

  DB[:items].filter([[:id, [1,2,3]], [:id, 0..10]])
  # SELECT * FROM items WHERE ((id IN (1, 2, 3)) AND ((id >= 0) AND (id <= 10)))

  DB[:items].filter('price < 100')
  # SELECT * FROM items WHERE price < 100

  DB[:items].filter(:active)
  # SELECT * FROM items WHERE active

  DB[:items].filter{price < 100}
  # SELECT * FROM items WHERE (price < 100)

Multiple filter calls can be chained for scoping:

  software = dataset.filter(:category => 'software').filter{price < 100}
  # SELECT * FROM items WHERE ((category = 'software') AND (price < 100))

See the "Dataset Filtering" guide for more examples and details.

[Source]

     # File lib/sequel/dataset/query.rb, line 186
186:     def filter(*cond, &block)
187:       _filter(@opts[:having] ? :having : :where, *cond, &block)
188:     end

Returns a cloned dataset with a :update lock style.

  DB[:table].for_update # SELECT * FROM table FOR UPDATE

[Source]

     # File lib/sequel/dataset/query.rb, line 193
193:     def for_update
194:       lock_style(:update)
195:     end

Returns a copy of the dataset with the source changed. If no source is given, removes all tables. If multiple sources are given, it is the same as using a CROSS JOIN (cartesian product) between all tables.

  DB[:items].from # SQL: SELECT *
  DB[:items].from(:blah) # SQL: SELECT * FROM blah
  DB[:items].from(:blah, :foo) # SQL: SELECT * FROM blah, foo

[Source]

     # File lib/sequel/dataset/query.rb, line 204
204:     def from(*source)
205:       table_alias_num = 0
206:       sources = []
207:       ctes = nil
208:       source.each do |s|
209:         case s
210:         when Hash
211:           s.each{|k,v| sources << SQL::AliasedExpression.new(k,v)}
212:         when Dataset
213:           if hoist_cte?(s)
214:             ctes ||= []
215:             ctes += s.opts[:with]
216:             s = s.clone(:with=>nil)
217:           end
218:           sources << SQL::AliasedExpression.new(s, dataset_alias(table_alias_num+=1))
219:         when Symbol
220:           sch, table, aliaz = split_symbol(s)
221:           if aliaz
222:             s = sch ? SQL::QualifiedIdentifier.new(sch, table) : SQL::Identifier.new(table)
223:             sources << SQL::AliasedExpression.new(s, aliaz.to_sym)
224:           else
225:             sources << s
226:           end
227:         else
228:           sources << s
229:         end
230:       end
231:       o = {:from=>sources.empty? ? nil : sources}
232:       o[:with] = (opts[:with] || []) + ctes if ctes
233:       o[:num_dataset_sources] = table_alias_num if table_alias_num > 0
234:       clone(o)
235:     end

Returns a dataset selecting from the current dataset. Supplying the :alias option controls the alias of the result.

  ds = DB[:items].order(:name).select(:id, :name)
  # SELECT id, name FROM items ORDER BY name

  ds.from_self
  # SELECT * FROM (SELECT id, name FROM items ORDER BY name) AS t1

  ds.from_self(:alias=>:foo)
  # SELECT * FROM (SELECT id, name FROM items ORDER BY name) AS foo

[Source]

     # File lib/sequel/dataset/query.rb, line 248
248:     def from_self(opts={})
249:       fs = {}
250:       @opts.keys.each{|k| fs[k] = nil unless NON_SQL_OPTIONS.include?(k)}
251:       clone(fs).from(opts[:alias] ? as(opts[:alias]) : self)
252:     end

Match any of the columns to any of the patterns. The terms can be strings (which use LIKE) or regular expressions (which are only supported on MySQL and PostgreSQL). Note that the total number of pattern matches will be Array(columns).length * Array(terms).length, which could cause performance issues.

Options (all are boolean):

:all_columns :All columns must be matched to any of the given patterns.
:all_patterns :All patterns must match at least one of the columns.
:case_insensitive :Use a case insensitive pattern match (the default is case sensitive if the database supports it).

If both :all_columns and :all_patterns are true, all columns must match all patterns.

Examples:

  dataset.grep(:a, '%test%')
  # SELECT * FROM items WHERE (a LIKE '%test%')

  dataset.grep([:a, :b], %w'%test% foo')
  # SELECT * FROM items WHERE ((a LIKE '%test%') OR (a LIKE 'foo') OR (b LIKE '%test%') OR (b LIKE 'foo'))

  dataset.grep([:a, :b], %w'%foo% %bar%', :all_patterns=>true)
  # SELECT * FROM a WHERE (((a LIKE '%foo%') OR (b LIKE '%foo%')) AND ((a LIKE '%bar%') OR (b LIKE '%bar%')))

  dataset.grep([:a, :b], %w'%foo% %bar%', :all_columns=>true)
  # SELECT * FROM a WHERE (((a LIKE '%foo%') OR (a LIKE '%bar%')) AND ((b LIKE '%foo%') OR (b LIKE '%bar%')))

  dataset.grep([:a, :b], %w'%foo% %bar%', :all_patterns=>true, :all_columns=>true)
  # SELECT * FROM a WHERE ((a LIKE '%foo%') AND (b LIKE '%foo%') AND (a LIKE '%bar%') AND (b LIKE '%bar%'))

[Source]

     # File lib/sequel/dataset/query.rb, line 285
285:     def grep(columns, patterns, opts={})
286:       if opts[:all_patterns]
287:         conds = Array(patterns).map do |pat|
288:           SQL::BooleanExpression.new(opts[:all_columns] ? :AND : :OR, *Array(columns).map{|c| SQL::StringExpression.like(c, pat, opts)})
289:         end
290:         filter(SQL::BooleanExpression.new(opts[:all_patterns] ? :AND : :OR, *conds))
291:       else
292:         conds = Array(columns).map do |c|
293:           SQL::BooleanExpression.new(:OR, *Array(patterns).map{|pat| SQL::StringExpression.like(c, pat, opts)})
294:         end
295:         filter(SQL::BooleanExpression.new(opts[:all_columns] ? :AND : :OR, *conds))
296:       end
297:     end

Returns a copy of the dataset with the results grouped by the value of the given columns. If a block is given, it is treated as a virtual row block, similar to filter.

  DB[:items].group(:id) # SELECT * FROM items GROUP BY id
  DB[:items].group(:id, :name) # SELECT * FROM items GROUP BY id, name
  DB[:items].group{[a, sum(b)]} # SELECT * FROM items GROUP BY a, sum(b)

[Source]

     # File lib/sequel/dataset/query.rb, line 306
306:     def group(*columns, &block)
307:       virtual_row_columns(columns, block)
308:       clone(:group => (columns.compact.empty? ? nil : columns))
309:     end

Returns a dataset grouped by the given column with count by group. Column aliases may be supplied, and will be included in the select clause. If a block is given, it is treated as a virtual row block, similar to filter.

Examples:

  DB[:items].group_and_count(:name).all
  # SELECT name, count(*) AS count FROM items GROUP BY name
  # => [{:name=>'a', :count=>1}, ...]

  DB[:items].group_and_count(:first_name, :last_name).all
  # SELECT first_name, last_name, count(*) AS count FROM items GROUP BY first_name, last_name
  # => [{:first_name=>'a', :last_name=>'b', :count=>1}, ...]

  DB[:items].group_and_count(:first_name___name).all
  # SELECT first_name AS name, count(*) AS count FROM items GROUP BY first_name
  # => [{:name=>'a', :count=>1}, ...]

  DB[:items].group_and_count{substr(first_name, 1, 1).as(initial)}.all
  # SELECT substr(first_name, 1, 1) AS initial, count(*) AS count FROM items GROUP BY substr(first_name, 1, 1)
  # => [{:initial=>'a', :count=>1}, ...]

[Source]

     # File lib/sequel/dataset/query.rb, line 337
337:     def group_and_count(*columns, &block)
338:       select_group(*columns, &block).select_more(COUNT_OF_ALL_AS_COUNT)
339:     end

Alias of group

[Source]

     # File lib/sequel/dataset/query.rb, line 312
312:     def group_by(*columns, &block)
313:       group(*columns, &block)
314:     end

Adds the appropriate CUBE syntax to GROUP BY.
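
For example (illustrative; the exact syntax emitted depends on the database):

  DB[:items].group(:a, :b).group_cube
  # e.g. SELECT * FROM items GROUP BY CUBE(a, b)
  #  or  SELECT * FROM items GROUP BY a, b WITH CUBE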

[Source]

     # File lib/sequel/dataset/query.rb, line 342
342:     def group_cube
343:       raise Error, "GROUP BY CUBE not supported on #{db.database_type}" unless supports_group_cube?
344:       clone(:group_options=>:cube)
345:     end

Adds the appropriate ROLLUP syntax to GROUP BY.
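
For example (illustrative; the exact syntax emitted depends on the database):

  DB[:items].group(:a, :b).group_rollup
  # e.g. SELECT * FROM items GROUP BY ROLLUP(a, b)
  #  or  SELECT * FROM items GROUP BY a, b WITH ROLLUP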

[Source]

     # File lib/sequel/dataset/query.rb, line 348
348:     def group_rollup
349:       raise Error, "GROUP BY ROLLUP not supported on #{db.database_type}" unless supports_group_rollup?
350:       clone(:group_options=>:rollup)
351:     end

Returns a copy of the dataset with the HAVING conditions changed. See filter for argument types.

  DB[:items].group(:sum).having(:sum=>10)
  # SELECT * FROM items GROUP BY sum HAVING (sum = 10)

[Source]

     # File lib/sequel/dataset/query.rb, line 357
357:     def having(*cond, &block)
358:       _filter(:having, *cond, &block)
359:     end

Adds an INTERSECT clause using a second dataset object. An INTERSECT compound dataset returns all rows in both the current dataset and the given dataset. Raises an InvalidOperation if the operation is not supported. Options:

:alias :Use the given value as the from_self alias
:all :Set to true to use INTERSECT ALL instead of INTERSECT, so duplicate rows can occur
:from_self :Set to false to not wrap the returned dataset in a from_self, use with care.
  DB[:items].intersect(DB[:other_items])
  # SELECT * FROM (SELECT * FROM items INTERSECT SELECT * FROM other_items) AS t1

  DB[:items].intersect(DB[:other_items], :all=>true, :from_self=>false)
  # SELECT * FROM items INTERSECT ALL SELECT * FROM other_items

  DB[:items].intersect(DB[:other_items], :alias=>:i)
  # SELECT * FROM (SELECT * FROM items INTERSECT SELECT * FROM other_items) AS i

[Source]

     # File lib/sequel/dataset/query.rb, line 378
378:     def intersect(dataset, opts={})
379:       opts = {:all=>opts} unless opts.is_a?(Hash)
380:       raise(InvalidOperation, "INTERSECT not supported") unless supports_intersect_except?
381:       raise(InvalidOperation, "INTERSECT ALL not supported") if opts[:all] && !supports_intersect_except_all?
382:       compound_clone(:intersect, dataset, opts)
383:     end

Inverts the current filter.

  DB[:items].filter(:category => 'software').invert
  # SELECT * FROM items WHERE (category != 'software')

  DB[:items].filter(:category => 'software', :id=>3).invert
  # SELECT * FROM items WHERE ((category != 'software') OR (id != 3))

[Source]

     # File lib/sequel/dataset/query.rb, line 392
392:     def invert
393:       having, where = @opts[:having], @opts[:where]
394:       raise(Error, "No current filter") unless having || where
395:       o = {}
396:       o[:having] = SQL::BooleanExpression.invert(having) if having
397:       o[:where] = SQL::BooleanExpression.invert(where) if where
398:       clone(o)
399:     end

Alias of inner_join

[Source]

     # File lib/sequel/dataset/query.rb, line 402
402:     def join(*args, &block)
403:       inner_join(*args, &block)
404:     end

Returns a joined dataset. Not usually called directly; users should use the appropriate join method (e.g. join, left_join, natural_join, cross_join), which fills in the type argument.

Takes the following arguments:

  • type - The type of join to do (e.g. :inner)
  • table - Depends on type:
    • Dataset - a subselect is performed with an alias of tN for some value of N
    • Model (or anything responding to :table_name) - table.table_name
    • String, Symbol: table
  • expr - specifies conditions, depends on type:
    • Hash, Array of two element arrays - Assumes key (1st arg) is column of joined table (unless already qualified), and value (2nd arg) is column of the last joined or primary table (or the :implicit_qualifier option). To specify multiple conditions on a single joined table column, you must use an array. Uses a JOIN with an ON clause.
    • Array - If all members of the array are symbols, considers them as columns and uses a JOIN with a USING clause. Most databases will remove duplicate columns from the result set if this is used.
    • nil - If a block is not given, doesn‘t use ON or USING, so the JOIN should be a NATURAL or CROSS join. If a block is given, uses an ON clause based on the block, see below.
    • Everything else - pretty much the same as using the argument in a call to filter, so strings are considered literal, symbols specify boolean columns, and Sequel expressions can be used. Uses a JOIN with an ON clause.
  • options - a hash of options, with any of the following keys:
    • :table_alias - the name of the table‘s alias when joining, necessary for joining to the same table more than once. No alias is used by default.
    • :implicit_qualifier - The name to use for qualifying implicit conditions. By default, the last joined or primary table is used.
  • block - The block argument should only be given if a JOIN with an ON clause is used, in which case it yields the table alias/name for the table currently being joined, the table alias/name for the last joined (or first table), and an array of previous SQL::JoinClause. Unlike filter, this block is not treated as a virtual row block.

Examples:

  DB[:a].join_table(:cross, :b)
  # SELECT * FROM a CROSS JOIN b

  DB[:a].join_table(:inner, DB[:b], :c=>:d)
  # SELECT * FROM a INNER JOIN (SELECT * FROM b) AS t1 ON (t1.c = a.d)

  DB[:a].join_table(:left, :b___c, [:d])
  # SELECT * FROM a LEFT JOIN b AS c USING (d)

  DB[:a].natural_join(:b).join_table(:inner, :c) do |ta, jta, js|
    (:d.qualify(ta) > :e.qualify(jta)) & {:f.qualify(ta)=>DB.from(js.first.table).select(:g)}
  end
  # SELECT * FROM a NATURAL JOIN b INNER JOIN c
  #   ON ((c.d > b.e) AND (c.f IN (SELECT g FROM b)))

[Source]

     # File lib/sequel/dataset/query.rb, line 457
457:     def join_table(type, table, expr=nil, options={}, &block)
458:       if hoist_cte?(table)
459:         s, ds = hoist_cte(table)
460:         return s.join_table(type, ds, expr, options, &block)
461:       end
462: 
463:       using_join = expr.is_a?(Array) && !expr.empty? && expr.all?{|x| x.is_a?(Symbol)}
464:       if using_join && !supports_join_using?
465:         h = {}
466:         expr.each{|s| h[s] = s}
467:         return join_table(type, table, h, options)
468:       end
469: 
470:       case options
471:       when Hash
472:         table_alias = options[:table_alias]
473:         last_alias = options[:implicit_qualifier]
474:       when Symbol, String, SQL::Identifier
475:         table_alias = options
476:         last_alias = nil 
477:       else
478:         raise Error, "invalid options format for join_table: #{options.inspect}"
479:       end
480: 
481:       if Dataset === table
482:         if table_alias.nil?
483:           table_alias_num = (@opts[:num_dataset_sources] || 0) + 1
484:           table_alias = dataset_alias(table_alias_num)
485:         end
486:         table_name = table_alias
487:       else
488:         table = table.table_name if table.respond_to?(:table_name)
489:         table, implicit_table_alias = split_alias(table)
490:         table_alias ||= implicit_table_alias
491:         table_name = table_alias || table
492:       end
493: 
494:       join = if expr.nil? and !block
495:         SQL::JoinClause.new(type, table, table_alias)
496:       elsif using_join
497:         raise(Sequel::Error, "can't use a block if providing an array of symbols as expr") if block
498:         SQL::JoinUsingClause.new(expr, type, table, table_alias)
499:       else
500:         last_alias ||= @opts[:last_joined_table] || first_source_alias
501:         if Sequel.condition_specifier?(expr)
502:           expr = expr.collect do |k, v|
503:             k = qualified_column_name(k, table_name) if k.is_a?(Symbol)
504:             v = qualified_column_name(v, last_alias) if v.is_a?(Symbol)
505:             [k,v]
506:           end
507:           expr = SQL::BooleanExpression.from_value_pairs(expr)
508:         end
509:         if block
510:           expr2 = yield(table_name, last_alias, @opts[:join] || [])
511:           expr = expr ? SQL::BooleanExpression.new(:AND, expr, expr2) : expr2
512:         end
513:         SQL::JoinOnClause.new(expr, type, table, table_alias)
514:       end
515: 
516:       opts = {:join => (@opts[:join] || []) + [join], :last_joined_table => table_name}
517:       opts[:num_dataset_sources] = table_alias_num if table_alias_num
518:       clone(opts)
519:     end

If given an integer, the dataset will contain only the first l results. If given a range, it will contain only those at offsets within that range. If a second argument is given, it is used as an offset. To use an offset without a limit, pass nil as the first argument.

  DB[:items].limit(10) # SELECT * FROM items LIMIT 10
  DB[:items].limit(10, 20) # SELECT * FROM items LIMIT 10 OFFSET 20
  DB[:items].limit(10...20) # SELECT * FROM items LIMIT 10 OFFSET 10
  DB[:items].limit(10..20) # SELECT * FROM items LIMIT 11 OFFSET 10
  DB[:items].limit(nil, 20) # SELECT * FROM items OFFSET 20

[Source]

     # File lib/sequel/dataset/query.rb, line 538
538:     def limit(l, o = nil)
539:       return from_self.limit(l, o) if @opts[:sql]
540: 
541:       if Range === l
542:         o = l.first
543:         l = l.last - l.first + (l.exclude_end? ? 0 : 1)
544:       end
545:       l = l.to_i if l.is_a?(String) && !l.is_a?(LiteralString)
546:       if l.is_a?(Integer)
547:         raise(Error, 'Limits must be greater than or equal to 1') unless l >= 1
548:       end
549:       opts = {:limit => l}
550:       if o
551:         o = o.to_i if o.is_a?(String) && !o.is_a?(LiteralString)
552:         if o.is_a?(Integer)
553:           raise(Error, 'Offsets must be greater than or equal to 0') unless o >= 0
554:         end
555:         opts[:offset] = o
556:       end
557:       clone(opts)
558:     end

Returns a cloned dataset with the given lock style. If style is a string, it will be used directly. Otherwise, a symbol may be used for database independent locking. Currently :update is respected by most databases, and :share is supported by some.

  DB[:items].lock_style('FOR SHARE') # SELECT * FROM items FOR SHARE

[Source]

     # File lib/sequel/dataset/query.rb, line 566
566:     def lock_style(style)
567:       clone(:lock => style)
568:     end

Returns a cloned dataset without a row_proc.

  ds = DB[:items]
  ds.row_proc = proc{|r| r.invert}
  ds.all # => [{2=>:id}]
  ds.naked.all # => [{:id=>2}]

[Source]

     # File lib/sequel/dataset/query.rb, line 576
576:     def naked
577:       ds = clone
578:       ds.row_proc = nil
579:       ds
580:     end

Adds an alternate filter to an existing filter using OR. If no filter exists an Error is raised.

  DB[:items].filter(:a).or(:b) # SELECT * FROM items WHERE a OR b

[Source]

     # File lib/sequel/dataset/query.rb, line 586
586:     def or(*cond, &block)
587:       clause = (@opts[:having] ? :having : :where)
588:       raise(InvalidOperation, "No existing filter found.") unless @opts[clause]
589:       cond = cond.first if cond.size == 1
590:       clone(clause => SQL::BooleanExpression.new(:OR, @opts[clause], filter_expr(cond, &block)))
591:     end

Returns a copy of the dataset with the order changed. If the dataset has an existing order, it is ignored and overwritten with this order. If a nil is given the returned dataset has no order. This can accept multiple arguments of varying kinds, such as SQL functions. If a block is given, it is treated as a virtual row block, similar to filter.

  DB[:items].order(:name) # SELECT * FROM items ORDER BY name
  DB[:items].order(:a, :b) # SELECT * FROM items ORDER BY a, b
  DB[:items].order('a + b'.lit) # SELECT * FROM items ORDER BY a + b
  DB[:items].order(:a + :b) # SELECT * FROM items ORDER BY (a + b)
  DB[:items].order(:name.desc) # SELECT * FROM items ORDER BY name DESC
  DB[:items].order(:name.asc(:nulls=>:last)) # SELECT * FROM items ORDER BY name ASC NULLS LAST
  DB[:items].order{sum(name).desc} # SELECT * FROM items ORDER BY sum(name) DESC
  DB[:items].order(nil) # SELECT * FROM items

[Source]

     # File lib/sequel/dataset/query.rb, line 607
607:     def order(*columns, &block)
608:       virtual_row_columns(columns, block)
609:       clone(:order => (columns.compact.empty?) ? nil : columns)
610:     end

Alias of order_more, for naming consistency with order_prepend.

[Source]

     # File lib/sequel/dataset/query.rb, line 613
613:     def order_append(*columns, &block)
614:       order_more(*columns, &block)
615:     end

Alias of order

[Source]

     # File lib/sequel/dataset/query.rb, line 618
618:     def order_by(*columns, &block)
619:       order(*columns, &block)
620:     end

Returns a copy of the dataset with the order columns added to the end of the existing order.

  DB[:items].order(:a).order(:b) # SELECT * FROM items ORDER BY b
  DB[:items].order(:a).order_more(:b) # SELECT * FROM items ORDER BY a, b

[Source]

     # File lib/sequel/dataset/query.rb, line 627
627:     def order_more(*columns, &block)
628:       columns = @opts[:order] + columns if @opts[:order]
629:       order(*columns, &block)
630:     end

Returns a copy of the dataset with the order columns added to the beginning of the existing order.

  DB[:items].order(:a).order(:b) # SELECT * FROM items ORDER BY b
  DB[:items].order(:a).order_prepend(:b) # SELECT * FROM items ORDER BY b, a

[Source]

     # File lib/sequel/dataset/query.rb, line 637
637:     def order_prepend(*columns, &block)
638:       ds = order(*columns, &block)
639:       @opts[:order] ? ds.order_more(*@opts[:order]) : ds
640:     end

Qualify to the given table, or first source if no table is given.

  DB[:items].filter(:id=>1).qualify
  # SELECT items.* FROM items WHERE (items.id = 1)

  DB[:items].filter(:id=>1).qualify(:i)
  # SELECT i.* FROM items WHERE (i.id = 1)

[Source]

     # File lib/sequel/dataset/query.rb, line 649
649:     def qualify(table=first_source)
650:       qualify_to(table)
651:     end

Return a copy of the dataset with unqualified identifiers in the SELECT, WHERE, GROUP, HAVING, and ORDER clauses qualified by the given table. If no columns are currently selected, select all columns of the given table.

  DB[:items].filter(:id=>1).qualify_to(:i)
  # SELECT i.* FROM items WHERE (i.id = 1)

[Source]

     # File lib/sequel/dataset/query.rb, line 660
660:     def qualify_to(table)
661:       o = @opts
662:       return clone if o[:sql]
663:       h = {}
664:       (o.keys & QUALIFY_KEYS).each do |k|
665:         h[k] = qualified_expression(o[k], table)
666:       end
667:       h[:select] = [SQL::ColumnAll.new(table)] if !o[:select] || o[:select].empty?
668:       clone(h)
669:     end

Qualify the dataset to its current first source. This is useful if you have unqualified identifiers in the query that all refer to the first source, and you want to join to another table which has columns with the same name as columns in the current dataset. See qualify_to.

  DB[:items].filter(:id=>1).qualify_to_first_source
  # SELECT items.* FROM items WHERE (items.id = 1)

[Source]

     # File lib/sequel/dataset/query.rb, line 679
679:     def qualify_to_first_source
680:       qualify_to(first_source)
681:     end

Modify the RETURNING clause, only supported on a few databases. If returning is used, instead of insert returning the autogenerated primary key or update/delete returning the number of modified rows, results are returned using fetch_rows.

  DB[:items].returning # RETURNING *
  DB[:items].returning(nil) # RETURNING NULL
  DB[:items].returning(:id, :name) # RETURNING id, name

[Source]

     # File lib/sequel/dataset/query.rb, line 691
691:     def returning(*values)
692:       clone(:returning=>values)
693:     end

Returns a copy of the dataset with the order reversed. If no order is given, the existing order is inverted.

  DB[:items].reverse(:id) # SELECT * FROM items ORDER BY id DESC
  DB[:items].order(:id).reverse # SELECT * FROM items ORDER BY id DESC
  DB[:items].order(:id).reverse(:name.asc) # SELECT * FROM items ORDER BY name ASC

[Source]

     # File lib/sequel/dataset/query.rb, line 701
701:     def reverse(*order)
702:       order(*invert_order(order.empty? ? @opts[:order] : order))
703:     end

Alias of reverse

[Source]

     # File lib/sequel/dataset/query.rb, line 706
706:     def reverse_order(*order)
707:       reverse(*order)
708:     end

Returns a copy of the dataset with the columns selected changed to the given columns. This also takes a virtual row block, similar to filter.

  DB[:items].select(:a) # SELECT a FROM items
  DB[:items].select(:a, :b) # SELECT a, b FROM items
  DB[:items].select{[a, sum(b)]} # SELECT a, sum(b) FROM items

[Source]

     # File lib/sequel/dataset/query.rb, line 717
717:     def select(*columns, &block)
718:       virtual_row_columns(columns, block)
719:       m = []
720:       columns.each do |i|
721:         i.is_a?(Hash) ? m.concat(i.map{|k, v| SQL::AliasedExpression.new(k,v)}) : m << i
722:       end
723:       clone(:select => m)
724:     end

Returns a copy of the dataset selecting the wildcard if no arguments are given. If arguments are given, treat them as tables and select all columns (using the wildcard) from each table.

  DB[:items].select(:a).select_all # SELECT * FROM items
  DB[:items].select_all(:items) # SELECT items.* FROM items
  DB[:items].select_all(:items, :foo) # SELECT items.*, foo.* FROM items

[Source]

     # File lib/sequel/dataset/query.rb, line 733
733:     def select_all(*tables)
734:       if tables.empty?
735:         clone(:select => nil)
736:       else
737:         select(*tables.map{|t| i, a = split_alias(t); a || i}.map{|t| SQL::ColumnAll.new(t)})
738:       end
739:     end

Returns a copy of the dataset with the given columns added to the existing selected columns. If no columns are currently selected, it will select the columns given in addition to *.

  DB[:items].select(:a).select(:b) # SELECT b FROM items
  DB[:items].select(:a).select_append(:b) # SELECT a, b FROM items
  DB[:items].select_append(:b) # SELECT *, b FROM items

[Source]

     # File lib/sequel/dataset/query.rb, line 748
748:     def select_append(*columns, &block)
749:       cur_sel = @opts[:select]
750:       if !cur_sel || cur_sel.empty?
751:         unless supports_select_all_and_column?
752:           return select_all(*(Array(@opts[:from]) + Array(@opts[:join]))).select_more(*columns, &block)
753:         end
754:         cur_sel = [WILDCARD]
755:       end
756:       select(*(cur_sel + columns), &block)
757:     end

Set both the select and group clauses with the given columns. Column aliases may be supplied, and will be included in the select clause. This also takes a virtual row block similar to filter.

  DB[:items].select_group(:a, :b)
  # SELECT a, b FROM items GROUP BY a, b

  DB[:items].select_group(:c___a){f(c2)}
  # SELECT c AS a, f(c2) FROM items GROUP BY c, f(c2)

[Source]

     # File lib/sequel/dataset/query.rb, line 768
768:     def select_group(*columns, &block)
769:       virtual_row_columns(columns, block)
770:       select(*columns).group(*columns.map{|c| unaliased_identifier(c)})
771:     end

Returns a copy of the dataset with the given columns added to the existing selected columns. If no columns are currently selected it will just select the columns given.

  DB[:items].select(:a).select(:b) # SELECT b FROM items
  DB[:items].select(:a).select_more(:b) # SELECT a, b FROM items
  DB[:items].select_more(:b) # SELECT b FROM items

[Source]

     # File lib/sequel/dataset/query.rb, line 780
780:     def select_more(*columns, &block)
781:       columns = @opts[:select] + columns if @opts[:select]
782:       select(*columns, &block)
783:     end

Set the server for this dataset to use. Used to pick a specific database shard to run a query against, or to override the default (where SELECT uses :read_only database and all other queries use the :default database). This method is always available but is only useful when database sharding is being used.

  DB[:items].all # Uses the :read_only or :default server
  DB[:items].delete # Uses the :default server
  DB[:items].server(:blah).delete # Uses the :blah server

[Source]

     # File lib/sequel/dataset/query.rb, line 794
794:     def server(servr)
795:       clone(:server=>servr)
796:     end

Set the default values for insert and update statements. The values hash passed to insert or update is merged into this hash, so any values in the hash passed to insert or update will override values passed to this method.

  DB[:items].set_defaults(:a=>'a', :c=>'c').insert(:a=>'d', :b=>'b')
  # INSERT INTO items (a, c, b) VALUES ('d', 'c', 'b')

[Source]

     # File lib/sequel/dataset/query.rb, line 804
804:     def set_defaults(hash)
805:       clone(:defaults=>(@opts[:defaults]||{}).merge(hash))
806:     end

Set values that override hash arguments given to insert and update statements. This hash is merged into the hash provided to insert or update, so values will override any values given in the insert/update hashes.

  DB[:items].set_overrides(:a=>'a', :c=>'c').insert(:a=>'d', :b=>'b')
  # INSERT INTO items (a, c, b) VALUES ('a', 'c', 'b')

[Source]

     # File lib/sequel/dataset/query.rb, line 814
814:     def set_overrides(hash)
815:       clone(:overrides=>hash.merge(@opts[:overrides]||{}))
816:     end

Unbind bound variables from this dataset‘s filter and return an array of two objects. The first object is a modified dataset where the filter has been replaced with one that uses bound variable placeholders. The second object is the hash of unbound variables. You can then prepare and execute (or just call) the dataset with the bound variables to get results.

  ds, bv = DB[:items].filter(:a=>1).unbind
  ds # SELECT * FROM items WHERE (a = $a)
  bv #  {:a => 1}
  ds.call(:select, bv)

[Source]

     # File lib/sequel/dataset/query.rb, line 828
828:     def unbind
829:       u = Unbinder.new
830:       ds = clone(:where=>u.transform(opts[:where]), :join=>u.transform(opts[:join]))
831:       [ds, u.binds]
832:     end

Returns a copy of the dataset with no filters (HAVING or WHERE clause) applied.

  DB[:items].group(:a).having(:a=>1).where(:b).unfiltered
  # SELECT * FROM items GROUP BY a

[Source]

     # File lib/sequel/dataset/query.rb, line 838
838:     def unfiltered
839:       clone(:where => nil, :having => nil)
840:     end

Returns a copy of the dataset with no grouping (GROUP or HAVING clause) applied.

  DB[:items].group(:a).having(:a=>1).where(:b).ungrouped
  # SELECT * FROM items WHERE b

[Source]

     # File lib/sequel/dataset/query.rb, line 846
846:     def ungrouped
847:       clone(:group => nil, :having => nil)
848:     end

Adds a UNION clause using a second dataset object. A UNION compound dataset returns all rows in either the current dataset or the given dataset. Options:

:alias :Use the given value as the from_self alias
:all :Set to true to use UNION ALL instead of UNION, so duplicate rows can occur
:from_self :Set to false to not wrap the returned dataset in a from_self, use with care.
  DB[:items].union(DB[:other_items])
  # SELECT * FROM (SELECT * FROM items UNION SELECT * FROM other_items) AS t1

  DB[:items].union(DB[:other_items], :all=>true, :from_self=>false)
  # SELECT * FROM items UNION ALL SELECT * FROM other_items

  DB[:items].union(DB[:other_items], :alias=>:i)
  # SELECT * FROM (SELECT * FROM items UNION SELECT * FROM other_items) AS i

[Source]

     # File lib/sequel/dataset/query.rb, line 866
866:     def union(dataset, opts={})
867:       opts = {:all=>opts} unless opts.is_a?(Hash)
868:       compound_clone(:union, dataset, opts)
869:     end

Returns a copy of the dataset with no limit or offset.

  DB[:items].limit(10, 20).unlimited # SELECT * FROM items

[Source]

     # File lib/sequel/dataset/query.rb, line 874
874:     def unlimited
875:       clone(:limit=>nil, :offset=>nil)
876:     end

Returns a copy of the dataset with no order.

  DB[:items].order(:a).unordered # SELECT * FROM items

[Source]

     # File lib/sequel/dataset/query.rb, line 881
881:     def unordered
882:       order(nil)
883:     end

Add a condition to the WHERE clause. See filter for argument types.

  DB[:items].group(:a).having(:a).filter(:b)
  # SELECT * FROM items GROUP BY a HAVING a AND b

  DB[:items].group(:a).having(:a).where(:b)
  # SELECT * FROM items WHERE b GROUP BY a HAVING a

[Source]

     # File lib/sequel/dataset/query.rb, line 892
892:     def where(*cond, &block)
893:       _filter(:where, *cond, &block)
894:     end

Add a common table expression (CTE) with the given name and a dataset that defines the CTE. A common table expression acts as an inline view for the query. Options:

:args :Specify the arguments/columns for the CTE, should be an array of symbols.
:recursive :Specify that this is a recursive CTE
  DB[:items].with(:items, DB[:syx].filter(:name.like('A%')))
  # WITH items AS (SELECT * FROM syx WHERE (name LIKE 'A%')) SELECT * FROM items

[Source]

     # File lib/sequel/dataset/query.rb, line 904
904:     def with(name, dataset, opts={})
905:       raise(Error, 'This datatset does not support common table expressions') unless supports_cte?
906:       if hoist_cte?(dataset)
907:         s, ds = hoist_cte(dataset)
908:         s.with(name, ds, opts)
909:       else
910:         clone(:with=>(@opts[:with]||[]) + [opts.merge(:name=>name, :dataset=>dataset)])
911:       end
912:     end

Add a recursive common table expression (CTE) with the given name, a dataset that defines the nonrecursive part of the CTE, and a dataset that defines the recursive part of the CTE. Options:

:args :Specify the arguments/columns for the CTE, should be an array of symbols.
:union_all :Set to false to use UNION instead of UNION ALL combining the nonrecursive and recursive parts.
  DB[:t].select(:i___id, :pi___parent_id).
   with_recursive(:t,
                  DB[:i1].filter(:parent_id=>nil),
                  DB[:t].join(:t, :i=>:parent_id).select(:i1__id, :i1__parent_id),
                  :args=>[:i, :pi])
  # WITH RECURSIVE t(i, pi) AS (
  #   SELECT * FROM i1 WHERE (parent_id IS NULL)
  #   UNION ALL
  #   SELECT i1.id, i1.parent_id FROM t INNER JOIN t ON (t.i = t.parent_id)
  # )
  # SELECT i AS id, pi AS parent_id FROM t

[Source]

     # File lib/sequel/dataset/query.rb, line 931
931:     def with_recursive(name, nonrecursive, recursive, opts={})
932:       raise(Error, 'This datatset does not support common table expressions') unless supports_cte?
933:       if hoist_cte?(nonrecursive)
934:         s, ds = hoist_cte(nonrecursive)
935:         s.with_recursive(name, ds, recursive, opts)
936:       elsif hoist_cte?(recursive)
937:         s, ds = hoist_cte(recursive)
938:         s.with_recursive(name, nonrecursive, ds, opts)
939:       else
940:         clone(:with=>(@opts[:with]||[]) + [opts.merge(:recursive=>true, :name=>name, :dataset=>nonrecursive.union(recursive, {:all=>opts[:union_all] != false, :from_self=>false}))])
941:       end
942:     end

Returns a copy of the dataset with the static SQL used. This is useful if you want to keep the same row_proc/graph, but change the SQL used to custom SQL.

  DB[:items].with_sql('SELECT * FROM foo') # SELECT * FROM foo

You can use placeholders in your SQL and provide arguments for those placeholders:

  DB[:items].with_sql('SELECT ? FROM foo', 1) # SELECT 1 FROM foo

You can also provide a method name and arguments to call to get the SQL:

  DB[:items].with_sql(:insert_sql, :b=>1) # INSERT INTO items (b) VALUES (1)

[Source]

     # File lib/sequel/dataset/query.rb, line 956
956:     def with_sql(sql, *args)
957:       if sql.is_a?(Symbol)
958:         sql = send(sql, *args)
959:       else
960:         sql = SQL::PlaceholderLiteralString.new(sql, args) unless args.empty?
961:       end
962:       clone(:sql=>sql)
963:     end

Protected Instance methods

Add the dataset to the list of compounds

[Source]

     # File lib/sequel/dataset/query.rb, line 968
968:     def compound_clone(type, dataset, opts)
969:       if hoist_cte?(dataset)
970:         s, ds = hoist_cte(dataset)
971:         return s.compound_clone(type, ds, opts)
972:       end
973:       ds = compound_from_self.clone(:compounds=>Array(@opts[:compounds]).map{|x| x.dup} + [[type, dataset.compound_from_self, opts[:all]]])
974:       opts[:from_self] == false ? ds : ds.from_self(opts)
975:     end

Return true if the dataset has a non-nil value for any key in opts.

[Source]

     # File lib/sequel/dataset/query.rb, line 978
978:     def options_overlap(opts)
979:       !(@opts.collect{|k,v| k unless v.nil?}.compact & opts).empty?
980:     end
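
For example (options_overlap is protected, so outside of a subclass it has to be called via send; the items table is purely illustrative):

  DB[:items].limit(10).send(:options_overlap, [:limit]) # => true
  DB[:items].send(:options_overlap, [:limit])           # => false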

Whether this dataset is a simple SELECT * FROM table.

[Source]

     # File lib/sequel/dataset/query.rb, line 983
983:     def simple_select_all?
984:       o = @opts.reject{|k,v| v.nil? || NON_SQL_OPTIONS.include?(k)}
985:       o.length == 1 && (f = o[:from]) && f.length == 1 && (f.first.is_a?(Symbol) || f.first.is_a?(SQL::AliasedExpression))
986:     end
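
For example (again via send, since this is a protected method):

  DB[:items].send(:simple_select_all?)            # => true
  DB[:items].select(:a).send(:simple_select_all?) # => false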

3 - User Methods relating to SQL Creation

These are methods you can call to see what SQL will be generated by the dataset.

Public Instance methods

Returns a DELETE SQL query string. See delete.

  dataset.filter{|o| o.price >= 100}.delete_sql
  # => "DELETE FROM items WHERE (price >= 100)"

[Source]

    # File lib/sequel/dataset/sql.rb, line 12
12:     def delete_sql
13:       return static_sql(opts[:sql]) if opts[:sql]
14:       check_modification_allowed!
15:       clause_sql(:delete)
16:     end

Returns an EXISTS clause for the dataset as a LiteralString.

  DB.select(1).where(DB[:items].exists)
  # SELECT 1 WHERE (EXISTS (SELECT * FROM items))

[Source]

    # File lib/sequel/dataset/sql.rb, line 22
22:     def exists
23:       SQL::PlaceholderLiteralString.new(EXISTS, [self], true)
24:     end

Returns an INSERT SQL query string. See insert.

  DB[:items].insert_sql(:a=>1)
  # => "INSERT INTO items (a) VALUES (1)"

[Source]

    # File lib/sequel/dataset/sql.rb, line 30
30:     def insert_sql(*values)
31:       return static_sql(@opts[:sql]) if @opts[:sql]
32: 
33:       check_modification_allowed!
34: 
35:       columns = []
36: 
37:       case values.size
38:       when 0
39:         return insert_sql({})
40:       when 1
41:         case vals = values.at(0)
42:         when Hash
43:           vals = @opts[:defaults].merge(vals) if @opts[:defaults]
44:           vals = vals.merge(@opts[:overrides]) if @opts[:overrides]
45:           values = []
46:           vals.each do |k,v| 
47:             columns << k
48:             values << v
49:           end
50:         when Dataset, Array, LiteralString
51:           values = vals
52:         else
53:           if vals.respond_to?(:values) && (v = vals.values).is_a?(Hash)
54:             return insert_sql(v) 
55:           end
56:         end
57:       when 2
58:         if (v0 = values.at(0)).is_a?(Array) && ((v1 = values.at(1)).is_a?(Array) || v1.is_a?(Dataset) || v1.is_a?(LiteralString))
59:           columns, values = v0, v1
60:           raise(Error, "Different number of values and columns given to insert_sql") if values.is_a?(Array) and columns.length != values.length
61:         end
62:       end
63: 
64:       if values.is_a?(Array) && values.empty? && !insert_supports_empty_values? 
65:         columns = [columns().last]
66:         values = [DEFAULT]
67:       end
68:       clone(:columns=>columns, :values=>values)._insert_sql
69:     end

Returns a literal representation of a value to be used as part of an SQL expression.

  DB[:items].literal("abc'def\\") #=> "'abc''def\\\\'"
  DB[:items].literal(:items__id) #=> "items.id"
  DB[:items].literal([1, 2, 3]) #=> "(1, 2, 3)"
  DB[:items].literal(DB[:items]) #=> "(SELECT * FROM items)"
  DB[:items].literal(:x + 1 > :y) #=> "((x + 1) > y)"

If an unsupported object is given, an Error is raised.

[Source]

     # File lib/sequel/dataset/sql.rb, line 81
 81:     def literal_append(sql, v)
 82:       case v
 83:       when Symbol
 84:         literal_symbol_append(sql, v)
 85:       when String
 86:         case v
 87:         when LiteralString
 88:           sql << v
 89:         when SQL::Blob
 90:           literal_blob_append(sql, v)
 91:         else
 92:           literal_string_append(sql, v)
 93:         end
 94:       when Integer
 95:         sql << literal_integer(v)
 96:       when Hash
 97:         literal_hash_append(sql, v)
 98:       when SQL::Expression
 99:         literal_expression_append(sql, v)
100:       when Float
101:         sql << literal_float(v)
102:       when BigDecimal
103:         sql << literal_big_decimal(v)
104:       when NilClass
105:         sql << literal_nil
106:       when TrueClass
107:         sql << literal_true
108:       when FalseClass
109:         sql << literal_false
110:       when Array
111:         literal_array_append(sql, v)
112:       when Time
113:         sql << (v.is_a?(SQLTime) ? literal_sqltime(v) : literal_time(v))
114:       when DateTime
115:         sql << literal_datetime(v)
116:       when Date
117:         sql << literal_date(v)
118:       when Dataset
119:         literal_dataset_append(sql, v)
120:       else
121:         literal_other_append(sql, v)
122:       end
123:     end

Returns an array of insert statements for inserting multiple records. This method is used by multi_insert to format insert statements and expects a keys array and an array of value arrays.

This method should be overridden by descendants if they support inserting multiple records in a single SQL statement.

[Source]

     # File lib/sequel/dataset/sql.rb, line 131
131:     def multi_insert_sql(columns, values)
132:       values.map{|r| insert_sql(columns, r)}
133:     end
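
With the default implementation, one statement is generated per value array, e.g.:

  DB[:items].multi_insert_sql([:a, :b], [[1, 2], [3, 4]])
  # => ["INSERT INTO items (a, b) VALUES (1, 2)",
  #     "INSERT INTO items (a, b) VALUES (3, 4)"]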

Returns a SELECT SQL query string.

  dataset.select_sql # => "SELECT * FROM items"

[Source]

     # File lib/sequel/dataset/sql.rb, line 138
138:     def select_sql
139:       return static_sql(@opts[:sql]) if @opts[:sql]
140:       clause_sql(:select)
141:     end

Same as select_sql, not aliased directly to make subclassing simpler.

[Source]

     # File lib/sequel/dataset/sql.rb, line 144
144:     def sql
145:       select_sql
146:     end

Returns a TRUNCATE SQL query string. See truncate.

  DB[:items].truncate_sql # => 'TRUNCATE items'

[Source]

     # File lib/sequel/dataset/sql.rb, line 151
151:     def truncate_sql
152:       if opts[:sql]
153:         static_sql(opts[:sql])
154:       else
155:         check_truncation_allowed!
156:         raise(InvalidOperation, "Can't truncate filtered datasets") if opts[:where] || opts[:having]
157:         _truncate_sql(source_list(opts[:from]))
158:       end
159:     end

Formats an UPDATE statement using the given values. See update.

  DB[:items].update_sql(:price => 100, :category => 'software')
  # => "UPDATE items SET price = 100, category = 'software'

Raises an Error if the dataset is grouped or includes more than one table.

[Source]

     # File lib/sequel/dataset/sql.rb, line 168
168:     def update_sql(values = {})
169:       return static_sql(opts[:sql]) if opts[:sql]
170:       check_modification_allowed!
171:       clone(:values=>values)._update_sql
172:     end

9 - Internal Methods relating to SQL Creation

These methods, while public, are not designed to be used directly by the end user.

Constants

WILDCARD = LiteralString.new('*').freeze
ALL = ' ALL'.freeze
AND_SEPARATOR = " AND ".freeze
APOS = "'".freeze
APOS_RE = /'/.freeze
ARRAY_EMPTY = '(NULL)'.freeze
AS = ' AS '.freeze
ASC = ' ASC'.freeze
BOOL_FALSE = "'f'".freeze
BOOL_TRUE = "'t'".freeze
BRACKET_CLOSE = ']'.freeze
BRACKET_OPEN = '['.freeze
CASE_ELSE = " ELSE ".freeze
CASE_END = " END)".freeze
CASE_OPEN = '(CASE'.freeze
CASE_THEN = " THEN ".freeze
CASE_WHEN = " WHEN ".freeze
CAST_OPEN = 'CAST('.freeze
COLUMN_REF_RE1 = /\A((?:(?!__).)+)__((?:(?!___).)+)___(.+)\z/.freeze
COLUMN_REF_RE2 = /\A((?:(?!___).)+)___(.+)\z/.freeze
COLUMN_REF_RE3 = /\A((?:(?!__).)+)__(.+)\z/.freeze
COMMA = ', '.freeze
COMMA_SEPARATOR = COMMA
CONDITION_FALSE = '(1 = 0)'.freeze
CONDITION_TRUE = '(1 = 1)'.freeze
COUNT_FROM_SELF_OPTS = [:distinct, :group, :sql, :limit, :compounds]
COUNT_OF_ALL_AS_COUNT = SQL::Function.new(:count, WILDCARD).as(:count)
DATASET_ALIAS_BASE_NAME = 't'.freeze
DEFAULT = LiteralString.new('DEFAULT').freeze
DEFAULT_VALUES = " DEFAULT VALUES".freeze
DELETE = 'DELETE'.freeze
DELETE_CLAUSE_METHODS = clause_methods(:delete, %w'delete from where')
DESC = ' DESC'.freeze
DISTINCT = " DISTINCT".freeze
DOT = '.'.freeze
DOUBLE_APOS = "''".freeze
DOUBLE_QUOTE = '""'.freeze
EQUAL = ' = '.freeze
EXTRACT = 'extract('.freeze
EXISTS = ['EXISTS '.freeze].freeze
FOR_UPDATE = ' FOR UPDATE'.freeze
FORMAT_DATE = "'%Y-%m-%d'".freeze
FORMAT_DATE_STANDARD = "DATE '%Y-%m-%d'".freeze
FORMAT_OFFSET = "%+03i%02i".freeze
FORMAT_TIMESTAMP_RE = /%[Nz]/.freeze
FORMAT_TIMESTAMP_USEC = ".%06d".freeze
FORMAT_USEC = '%N'.freeze
FRAME_ALL = "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING".freeze
FRAME_ROWS = "ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW".freeze
FROM = ' FROM '.freeze
FUNCTION_EMPTY = '()'.freeze
GROUP_BY = " GROUP BY ".freeze
HAVING = " HAVING ".freeze
INSERT = "INSERT".freeze
INSERT_CLAUSE_METHODS = clause_methods(:insert, %w'insert into columns values')
INTO = " INTO ".freeze
IS_LITERALS = {nil=>'NULL'.freeze, true=>'TRUE'.freeze, false=>'FALSE'.freeze}.freeze
IS_OPERATORS = ::Sequel::SQL::ComplexExpression::IS_OPERATORS
LIMIT = " LIMIT ".freeze
N_ARITY_OPERATORS = ::Sequel::SQL::ComplexExpression::N_ARITY_OPERATORS
NOT_SPACE = 'NOT '.freeze
NULL = "NULL".freeze
NULLS_FIRST = " NULLS FIRST".freeze
NULLS_LAST = " NULLS LAST".freeze
OFFSET = " OFFSET ".freeze
ON = ' ON '.freeze
ON_PAREN = " ON (".freeze
ORDER_BY = " ORDER BY ".freeze
ORDER_BY_NS = "ORDER BY ".freeze
OVER = ' OVER '.freeze
PAREN_CLOSE = ')'.freeze
PAREN_OPEN = '('.freeze
PAREN_SPACE_OPEN = ' ('.freeze
PARTITION_BY = "PARTITION BY ".freeze
QUALIFY_KEYS = [:select, :where, :having, :order, :group]
QUESTION_MARK = '?'.freeze
QUESTION_MARK_RE = /\?/.freeze
QUOTE = '"'.freeze
QUOTE_RE = /"/.freeze
RETURNING = " RETURNING ".freeze
SELECT = 'SELECT'.freeze
SELECT_CLAUSE_METHODS = clause_methods(:select, %w'with select distinct columns from join where group having compounds order limit lock')
SET = ' SET '.freeze
SPACE = ' '.freeze
SQL_WITH = "WITH ".freeze
SPACE_WITH = " WITH ".freeze
TILDE = '~'.freeze
TIMESTAMP_FORMAT = "'%Y-%m-%d %H:%M:%S%N%z'".freeze
STANDARD_TIMESTAMP_FORMAT = "TIMESTAMP #{TIMESTAMP_FORMAT}".freeze
TWO_ARITY_OPERATORS = ::Sequel::SQL::ComplexExpression::TWO_ARITY_OPERATORS
UNDERSCORE = '_'.freeze
UPDATE = 'UPDATE'.freeze
UPDATE_CLAUSE_METHODS = clause_methods(:update, %w'update table set where')
USING = ' USING ('.freeze
VALUES = " VALUES ".freeze
V190 = '1.9.0'.freeze
WHERE = " WHERE ".freeze
PUBLIC_APPEND_METHODS = (<<-END).split.map{|x| x.to_sym} literal aliased_expression_sql array_sql boolean_constant_sql case_expression_sql cast_sql column_all_sql complex_expression_sql constant_sql function_sql join_clause_sql join_on_clause_sql join_using_clause_sql negative_boolean_constant_sql ordered_expression_sql placeholder_literal_string_sql qualified_identifier_sql quote_identifier quote_schema_table quoted_identifier subscript_sql window_sql window_function_sql END ).split.map{|x| x.to_sym}
PRIVATE_APPEND_METHODS = (<<-END).split.map{|x| x.to_sym} argument_list as_sql column_list compound_dataset_sql expression_list literal_array literal_blob literal_dataset literal_expression literal_hash literal_other literal_string literal_symbol source_list subselect_sql table_ref END ).split.map{|x| x.to_sym}

Public Class methods

Given a type (e.g. select) and an array of clauses, return an array of methods to call to build the SQL string.

[Source]

     # File lib/sequel/dataset/sql.rb, line 181
181:     def self.clause_methods(type, clauses)
182:       clauses.map{|clause| :"#{type}_#{clause}_sql"}.freeze
183:     end
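
For example, the DELETE_CLAUSE_METHODS constant above is built with:

  Sequel::Dataset.clause_methods(:delete, %w'delete from where')
  # => [:delete_delete_sql, :delete_from_sql, :delete_where_sql]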

[Source]

     # File lib/sequel/dataset/sql.rb, line 328
328:     def self.def_append_methods(meths)
329:       meths.each do |meth|
330:         class_eval("def #{meth}(*args, &block)\ns = ''\n#{meth}_append(s, *args, &block)\ns\nend\n", __FILE__, __LINE__ + 1)
331:       end
332:     end
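
This defines, for each method name in meths, a non-append version that creates a new string, passes it to the corresponding _append method, and returns it. For instance, for :literal the generated method is roughly:

  def literal(*args, &block)
    s = ''
    literal_append(s, *args, &block)
    s
  end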

Public Instance methods

SQL fragment for AliasedExpression

[Source]

     # File lib/sequel/dataset/sql.rb, line 344
344:     def aliased_expression_sql_append(sql, ae)
345:       literal_append(sql, ae.expression)
346:       as_sql_append(sql, ae.aliaz)
347:     end
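
This is the fragment produced when an aliased expression is literalized, e.g.:

  DB[:items].literal(Sequel::SQL::AliasedExpression.new(:a, :b)) # => "a AS b"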

SQL fragment for Array

[Source]

     # File lib/sequel/dataset/sql.rb, line 350
350:     def array_sql_append(sql, a)
351:       if a.empty?
352:         sql << ARRAY_EMPTY
353:       else
354:         sql << PAREN_OPEN
355:         expression_list_append(sql, a)
356:         sql << PAREN_CLOSE
357:       end
358:     end

SQL fragment for BooleanConstants

[Source]

     # File lib/sequel/dataset/sql.rb, line 361
361:     def boolean_constant_sql_append(sql, constant)
362:       if (constant == true || constant == false) && !supports_where_true?
363:         sql << (constant == true ? CONDITION_TRUE : CONDITION_FALSE)
364:       else
365:         literal_append(sql, constant)
366:       end
367:     end

SQL fragment for CaseExpression

[Source]

     # File lib/sequel/dataset/sql.rb, line 370
370:     def case_expression_sql_append(sql, ce)
371:       sql << CASE_OPEN
372:       if ce.expression?
373:         sql << SPACE
374:         literal_append(sql, ce.expression)
375:       end
376:       w = CASE_WHEN
377:       t = CASE_THEN
378:       ce.conditions.each do |c,r|
379:         sql << w
380:         literal_append(sql, c)
381:         sql << t
382:         literal_append(sql, r)
383:       end
384:       sql << CASE_ELSE
385:       literal_append(sql, ce.default)
386:       sql << CASE_END
387:     end

SQL fragment for the SQL CAST expression

[Source]

     # File lib/sequel/dataset/sql.rb, line 390
390:     def cast_sql_append(sql, expr, type)
391:       sql << CAST_OPEN
392:       literal_append(sql, expr)
393:       sql << AS << db.cast_type_literal(type).to_s
394:       sql << PAREN_CLOSE
395:     end
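
For example (assuming the core extensions that add Symbol#cast are available; the exact type name comes from the database's cast_type_literal):

  DB[:items].literal(:a.cast(Integer)) # => "CAST(a AS integer)"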

SQL fragment for specifying all columns in a given table

[Source]

     # File lib/sequel/dataset/sql.rb, line 398
398:     def column_all_sql_append(sql, ca)
399:       qualified_identifier_sql_append(sql, ca.table, WILDCARD)
400:     end

SQL fragment for the complex expression.

[Source]

     # File lib/sequel/dataset/sql.rb, line 403
403:     def complex_expression_sql_append(sql, op, args)
404:       case op
405:       when *IS_OPERATORS
406:         r = args.at(1)
407:         if r.nil? || supports_is_true?
408:           raise(InvalidOperation, 'Invalid argument used for IS operator') unless v = IS_LITERALS[r]
409:           sql << PAREN_OPEN
410:           literal_append(sql, args.at(0))
411:           sql << SPACE << op.to_s << SPACE
412:           sql << v << PAREN_CLOSE
413:         elsif op == :IS
414:           complex_expression_sql_append(sql, :"=", args)
415:         else
416:           complex_expression_sql_append(sql, :OR, [SQL::BooleanExpression.new(:"!=", *args), SQL::BooleanExpression.new(:IS, args.at(0), nil)])
417:         end
418:       when :IN, :"NOT IN"
419:         cols = args.at(0)
420:         vals = args.at(1)
421:         col_array = true if cols.is_a?(Array)
422:         if vals.is_a?(Array)
423:           val_array = true
424:           empty_val_array = vals == []
425:         end
426:         if empty_val_array
427:           literal_append(sql, empty_array_value(op, cols))
428:         elsif col_array
429:           if !supports_multiple_column_in?
430:             if val_array
431:               expr = SQL::BooleanExpression.new(:OR, *vals.to_a.map{|vs| SQL::BooleanExpression.from_value_pairs(cols.to_a.zip(vs).map{|c, v| [c, v]})})
432:               literal_append(sql, op == :IN ? expr : ~expr)
433:             else
434:               old_vals = vals
435:               vals = vals.naked if vals.is_a?(Sequel::Dataset)
436:               vals = vals.to_a
437:               val_cols = old_vals.columns
438:               complex_expression_sql_append(sql, op, [cols, vals.map!{|x| x.values_at(*val_cols)}])
439:             end
440:           else
441:             # If the columns and values are both arrays, use array_sql instead of
442:             # literal so that if values is an array of two element arrays, it
443:             # will be treated as a value list instead of a condition specifier.
444:             sql << PAREN_OPEN
445:             literal_append(sql, cols)
446:             sql << SPACE << op.to_s << SPACE
447:             if val_array
448:               array_sql_append(sql, vals)
449:             else
450:               literal_append(sql, vals)
451:             end
452:             sql << PAREN_CLOSE
453:           end
454:         else
455:           sql << PAREN_OPEN
456:           literal_append(sql, cols)
457:           sql << SPACE << op.to_s << SPACE
458:           literal_append(sql, vals)
459:           sql << PAREN_CLOSE
460:         end
461:       when *TWO_ARITY_OPERATORS
462:         sql << PAREN_OPEN
463:         literal_append(sql, args.at(0))
464:         sql << SPACE << op.to_s << SPACE
465:         literal_append(sql, args.at(1))
466:         sql << PAREN_CLOSE
467:       when *N_ARITY_OPERATORS
468:         sql << PAREN_OPEN
469:         c = false
470:         op_str = " #{op} "
471:         args.each do |a|
472:           sql << op_str if c
473:           literal_append(sql, a)
474:           c ||= true
475:         end
476:         sql << PAREN_CLOSE
477:       when :NOT
478:         sql << NOT_SPACE
479:         literal_append(sql, args.at(0))
480:       when :NOOP
481:         literal_append(sql, args.at(0))
482:       when :'B~'
483:         sql << TILDE
484:         literal_append(sql, args.at(0))
485:       when :extract
486:         sql << EXTRACT << args.at(0).to_s << FROM
487:         literal_append(sql, args.at(1))
488:         sql << PAREN_CLOSE
489:       else
490:         raise(InvalidOperation, "invalid operator #{op}")
491:       end
492:     end

SQL fragment for constants

[Source]

     # File lib/sequel/dataset/sql.rb, line 495
495:     def constant_sql_append(sql, constant)
496:       sql << constant.to_s
497:     end

SQL fragment specifying an SQL function call

[Source]

     # File lib/sequel/dataset/sql.rb, line 500
500:     def function_sql_append(sql, f)
501:       sql << f.f.to_s
502:       args = f.args
503:       if args.empty?
504:         sql << FUNCTION_EMPTY
505:       else
506:         literal_append(sql, args)
507:       end
508:     end

SQL fragment specifying a JOIN clause without ON or USING.

[Source]

     # File lib/sequel/dataset/sql.rb, line 511
511:     def join_clause_sql_append(sql, jc)
512:       table = jc.table
513:       table_alias = jc.table_alias
514:       table_alias = nil if table == table_alias
515:       sql << SPACE << join_type_sql(jc.join_type) << SPACE
516:       identifier_append(sql, table)
517:       as_sql_append(sql, table_alias) if table_alias
518:     end

SQL fragment specifying a JOIN clause with ON.

[Source]

     # File lib/sequel/dataset/sql.rb, line 521
521:     def join_on_clause_sql_append(sql, jc)
522:       join_clause_sql_append(sql, jc)
523:       sql << ON
524:       literal_append(sql, filter_expr(jc.on))
525:     end

SQL fragment specifying a JOIN clause with USING.

[Source]

     # File lib/sequel/dataset/sql.rb, line 528
528:     def join_using_clause_sql_append(sql, jc)
529:       join_clause_sql_append(sql, jc)
530:       sql << USING
531:       column_list_append(sql, jc.using)
532:       sql << PAREN_CLOSE
533:     end

SQL fragment for NegativeBooleanConstants

[Source]

     # File lib/sequel/dataset/sql.rb, line 536
536:     def negative_boolean_constant_sql_append(sql, constant)
537:       sql << NOT_SPACE
538:       boolean_constant_sql_append(sql, constant)
539:     end

SQL fragment for the ordered expression, used in the ORDER BY clause.

[Source]

     # File lib/sequel/dataset/sql.rb, line 543
543:     def ordered_expression_sql_append(sql, oe)
544:       literal_append(sql, oe.expression)
545:       sql << (oe.descending ? DESC : ASC)
546:       case oe.nulls
547:       when :first
548:         sql << NULLS_FIRST
549:       when :last
550:         sql << NULLS_LAST
551:       end
552:     end

SQL fragment for a literal string with placeholders

[Source]

     # File lib/sequel/dataset/sql.rb, line 555
555:     def placeholder_literal_string_sql_append(sql, pls)
556:       args = pls.args
557:       str = pls.str
558:       sql << PAREN_OPEN if pls.parens
559:       if args.is_a?(Hash)
560:         re = /:(#{args.keys.map{|k| Regexp.escape(k.to_s)}.join('|')})\b/
561:         loop do
562:           previous, q, str = str.partition(re)
563:           sql << previous
564:           literal_append(sql, args[($1||q[1..-1].to_s).to_sym]) unless q.empty?
565:           break if str.empty?
566:         end
567:       elsif str.is_a?(Array)
568:         len = args.length
569:         str.each_with_index do |s, i|
570:           sql << s
571:           literal_append(sql, args[i]) unless i == len
572:         end
573:       else
574:         i = -1
575:         loop do
576:           previous, q, str = str.partition(QUESTION_MARK)
577:           sql << previous
578:           literal_append(sql, args.at(i+=1)) unless q.empty?
579:           break if str.empty?
580:         end
581:       end
582:       sql << PAREN_CLOSE if pls.parens
583:     end

SQL fragment for the qualified identifier, specifying a table and a column (or schema and table). If 3 arguments are given, the 2nd should be the table/qualifier and the third should be the column/qualified identifier. If 2 arguments are given, the 2nd should be an SQL::QualifiedIdentifier.

[Source]

     # File lib/sequel/dataset/sql.rb, line 589
589:     def qualified_identifier_sql_append(sql, table, column=(c = table.column; table = table.table; c))
590:       identifier_append(sql, table)
591:       sql << DOT
592:       identifier_append(sql, column)
593:     end
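
For example, literalizing a qualified identifier produces:

  DB[:items].literal(Sequel::SQL::QualifiedIdentifier.new(:table, :column)) # => "table.column"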

Adds quoting to identifiers (columns and tables). If identifiers are not being quoted, returns the name as a string. If identifiers are being quoted, quotes the name with quoted_identifier_append.

[Source]

     # File lib/sequel/dataset/sql.rb, line 598
598:     def quote_identifier_append(sql, name)
599:       if name.is_a?(LiteralString)
600:         sql << name
601:       else
602:         name = name.value if name.is_a?(SQL::Identifier)
603:         name = input_identifier(name)
604:         if quote_identifiers?
605:           quoted_identifier_append(sql, name)
606:         else
607:           sql << name
608:         end
609:       end
610:     end

Separates the schema from the table and returns a string with them quoted (if quoting identifiers)

[Source]

     # File lib/sequel/dataset/sql.rb, line 614
614:     def quote_schema_table_append(sql, table)
615:       schema, table = schema_and_table(table)
616:       if schema
617:         quote_identifier_append(sql, schema)
618:         sql << DOT
619:       end
620:       quote_identifier_append(sql, table)
621:     end

This method quotes the given name with the SQL standard double quote. It should be overridden by subclasses to provide quoting that does not match the SQL standard, such as backticks (used by MySQL and SQLite).

[Source]

     # File lib/sequel/dataset/sql.rb, line 626
626:     def quoted_identifier_append(sql, name)
627:       sql << QUOTE << name.to_s.gsub(QUOTE_RE, DOUBLE_QUOTE) << QUOTE
628:     end

Split the schema information from the table

[Source]

     # File lib/sequel/dataset/sql.rb, line 631
631:     def schema_and_table(table_name)
632:       sch = db.default_schema if db
633:       case table_name
634:       when Symbol
635:         s, t, a = split_symbol(table_name)
636:         [s||sch, t]
637:       when SQL::QualifiedIdentifier
638:         [table_name.table, table_name.column]
639:       when SQL::Identifier
640:         [sch, table_name.value]
641:       when String
642:         [sch, table_name]
643:       else
644:         raise Error, 'table_name should be a Symbol, SQL::QualifiedIdentifier, SQL::Identifier, or String'
645:       end
646:     end
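
For example (assuming no default schema is set on the database):

  DB[:items].schema_and_table(:s__t)  # => ["s", "t"]
  DB[:items].schema_and_table(:items) # => [nil, "items"]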

SQL fragment for specifying subscripts (SQL array accesses)

[Source]

     # File lib/sequel/dataset/sql.rb, line 649
649:     def subscript_sql_append(sql, s)
650:       literal_append(sql, s.f)
651:       sql << BRACKET_OPEN
652:       expression_list_append(sql, s.sub)
653:       sql << BRACKET_CLOSE
654:     end

The SQL fragment for the given window function's function and window.

[Source]

     # File lib/sequel/dataset/sql.rb, line 698
698:     def window_function_sql_append(sql, function, window)
699:       literal_append(sql, function)
700:       sql << OVER
701:       literal_append(sql, window)
702:     end

The SQL fragment for the given window's options.

[Source]

     # File lib/sequel/dataset/sql.rb, line 657
657:     def window_sql_append(sql, opts)
658:       raise(Error, 'This dataset does not support window functions') unless supports_window_functions?
659:       sql << PAREN_OPEN
660:       window, part, order, frame = opts.values_at(:window, :partition, :order, :frame)
661:       space = false
662:       space_s = SPACE
663:       if window
664:         literal_append(sql, window)
665:         space = true
666:       end
667:       if part
668:         sql << space_s if space
669:         sql << PARTITION_BY
670:         expression_list_append(sql, Array(part))
671:         space = true
672:       end
673:       if order
674:         sql << space_s if space
675:         sql << ORDER_BY_NS
676:         expression_list_append(sql, Array(order))
677:         space = true
678:       end
679:       case frame
680:         when nil
681:           # nothing
682:         when :all
683:           sql << space_s if space
684:           sql << FRAME_ALL
685:         when :rows
686:           sql << space_s if space
687:           sql << FRAME_ROWS
688:         when String
689:           sql << space_s if space
690:           sql << frame
691:         else
692:           raise Error, "invalid window frame clause, should be :all, :rows, a string, or nil"
693:       end
694:       sql << PAREN_CLOSE
695:     end

Protected Instance methods

Formats an INSERT statement using the stored columns and values.

[Source]

     # File lib/sequel/dataset/sql.rb, line 707
707:     def _insert_sql
708:       clause_sql(:insert)
709:     end

Formats an UPDATE statement using the stored values.

[Source]

     # File lib/sequel/dataset/sql.rb, line 712
712:     def _update_sql
713:       clause_sql(:update)
714:     end

Return a from_self dataset if an order or limit is specified, so it works as expected with UNION, EXCEPT, and INTERSECT clauses.

[Source]

     # File lib/sequel/dataset/sql.rb, line 718
718:     def compound_from_self
719:       (@opts[:limit] || @opts[:order]) ? from_self : self
720:     end

7 - Mutation methods

These methods modify the receiving dataset and should be used with care.

Constants

MUTATION_METHODS = QUERY_METHODS - [:paginate, :naked]   All methods that should have a ! method added that modifies the receiver.

Attributes

identifier_input_method  [W]  Set the method to call on identifiers going into the database for this dataset
identifier_output_method  [W]  Set the method to call on identifiers coming out of the database for this dataset
quote_identifiers  [W]  Whether to quote identifiers for this dataset
row_proc  [RW]  The row_proc for this dataset; should be any object that responds to call with a single hash argument and returns the object you want each to return.

Public Class methods

Setup mutation (e.g. filter!) methods. These operate the same as the non-! methods, but replace the options of the current dataset with the options of the resulting dataset.

[Source]

    # File lib/sequel/dataset/mutation.rb, line 14
14:     def self.def_mutation_method(*meths)
15:       meths.each do |meth|
16:         class_eval("def #{meth}!(*args, &block); mutation_method(:#{meth}, *args, &block) end", __FILE__, __LINE__)
17:       end
18:     end
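
The generated ! methods mutate the receiver instead of returning a modified copy, e.g.:

  ds = DB[:items]
  ds.filter!(:id=>1)
  ds.sql # => "SELECT * FROM items WHERE (id = 1)"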

Enable column introspection for every dataset.

[Source]

    # File lib/sequel/extensions/columns_introspection.rb, line 56
56:     def self.introspect_all_columns
57:       include ColumnsIntrospection
58:       remove_method(:columns) if instance_methods(false).map{|x| x.to_s}.include?('columns')
59:     end
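
A sketch of the intended effect (the extension guesses columns from the selected expressions, avoiding a database query where possible):

  Sequel::Dataset.introspect_all_columns
  DB[:items].select(:a, :b).columns # => [:a, :b], without querying the database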

Public Instance methods

columns_without_introspection()

Alias for columns

Add a mutation method to this dataset instance.

[Source]

    # File lib/sequel/dataset/mutation.rb, line 37
37:     def def_mutation_method(*meths)
38:       meths.each do |meth|
39:         instance_eval("def #{meth}!(*args, &block); mutation_method(:#{meth}, *args, &block) end", __FILE__, __LINE__)
40:       end
41:     end

Yields a paginated dataset for each page and returns the receiver. Does a count to find the total number of records for this dataset.

[Source]

    # File lib/sequel/extensions/pagination.rb, line 20
20:     def each_page(page_size)
21:       raise(Error, "You cannot paginate a dataset that already has a limit") if @opts[:limit]
22:       record_count = count
23:       total_pages = (record_count / page_size.to_f).ceil
24:       (1..total_pages).each{|page_no| yield paginate(page_no, page_size, record_count)}
25:       self
26:     end
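
For example (assuming the pagination extension is loaded):

  DB[:items].each_page(10) do |page|
    # each block call receives a dataset limited to 10 rows at the proper offset
    puts page.sql
  end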

Remove the row_proc from the current dataset.

[Source]

    # File lib/sequel/dataset/mutation.rb, line 44
44:     def naked!
45:       self.row_proc = nil
46:       self
47:     end

Return a cloned nullified dataset.

[Source]

    # File lib/sequel/extensions/null_dataset.rb, line 81
81:     def nullify
82:       clone.nullify!
83:     end
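
A sketch of the intended behavior (assuming the null_dataset extension is loaded; the items table is only illustrative):

  ds = DB[:items].nullify
  ds.all           # => [] (no query is sent to the database)
  ds.insert(:a=>1) # no-op, nothing is inserted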

Nullify the current dataset

[Source]

    # File lib/sequel/extensions/null_dataset.rb, line 86
86:     def nullify!
87:       extend NullDataset
88:     end

Returns a paginated dataset. The returned dataset is limited to the page size at the correct offset, and extended with the Pagination module. If a record count is not provided, does a count of total number of records for this dataset.

[Source]

    # File lib/sequel/extensions/pagination.rb, line 11
11:     def paginate(page_no, page_size, record_count=nil)
12:       raise(Error, "You cannot paginate a dataset that already has a limit") if @opts[:limit]
13:       paginated = limit(page_size, (page_no - 1) * page_size)
14:       paginated.extend(Pagination)
15:       paginated.set_pagination_info(page_no, page_size, record_count || count)
16:     end
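
For example (assuming the pagination extension is loaded):

  ds = DB[:items].paginate(2, 10) # SELECT * FROM items LIMIT 10 OFFSET 10
  ds.current_page                 # => 2
  ds.page_count                   # total number of pages, based on the record count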

Pretty prints the records in the dataset as plain-text table.

[Source]

    # File lib/sequel/extensions/pretty_table.rb, line 10
10:     def print(*cols)
11:       ds = naked
12:       rows = ds.all
13:       Sequel::PrettyTable.print(rows, cols.empty? ? ds.columns : cols)
14:     end
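
For example:

  DB[:items].print(:id, :name) # prints a plain-text table of the id and name columns to standard output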

Translates a query block into a dataset. Query blocks can be useful when expressing complex SELECT statements, e.g.:

  dataset = DB[:items].query do
    select :x, :y, :z
    filter{|o| (o.x > 1) & (o.y > 2)}
    order :z.desc
  end

Which is the same as:

 dataset = DB[:items].select(:x, :y, :z).filter{|o| (o.x > 1) & (o.y > 2)}.order(:z.desc)

Note that inside a call to query, you cannot call each, insert, update, or delete (or any method that calls those), or Sequel will raise an error.

[Source]

    # File lib/sequel/extensions/query.rb, line 30
30:     def query(&block)
31:       copy = clone({})
32:       copy.extend(QueryBlockCopy)
33:       copy.instance_eval(&block)
34:       clone(copy.opts)
35:     end

Remove columns from the list of selected columns. If any of the currently selected columns use expressions/aliases, this will remove selected columns with the given aliases. It will also remove entries from the selection that match exactly:

  # Assume columns a, b, and c in items table
  DB[:items] # SELECT * FROM items
  DB[:items].select_remove(:c) # SELECT a, b FROM items
  DB[:items].select(:a, :b___c, :c___b).select_remove(:c) # SELECT a, c AS b FROM items
  DB[:items].select(:a, :b___c, :c___b).select_remove(:c___b) # SELECT a, b AS c FROM items

Note that there are a few cases where this method may not work correctly:

  • This dataset joins multiple tables and does not have an existing explicit selection. In this case, the code will currently use unqualified column names for all columns the dataset returns, except for the columns given.
  • This dataset has an existing explicit selection containing an item that returns multiple database columns (e.g. :table.*, 'column1, column2'.lit). In this case, the behavior is undefined and this method should not be used.

There may be other cases where this method does not work correctly, use it with caution.

[Source]

    # File lib/sequel/extensions/select_remove.rb, line 27
27:     def select_remove(*cols)
28:       if (sel = @opts[:select]) && !sel.empty?
29:         select(*(columns.zip(sel).reject{|c, s| cols.include?(c)}.map{|c, s| s} - cols))
30:       else
31:         select(*(columns - cols))
32:       end
33:     end

Return a string that can be processed by the dot program (included with graphviz) in order to see a visualization of the dataset's abstract syntax tree.

[Source]

     # File lib/sequel/extensions/to_dot.rb, line 145
145:     def to_dot
146:       ToDot.output(self)
147:     end