module Tapsoob::Utils
Public Instance Methods
base64decode(data)
    # File lib/tapsoob/utils.rb, line 35
    def base64decode(data)
      data.unpack("m").first
    end
base64encode(data)
    # File lib/tapsoob/utils.rb, line 31
    def base64encode(data)
      [data].pack("m")
    end
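Taken together, base64encode and base64decode form a lossless round trip for binary values. A minimal sketch (assuming Tapsoob::Utils extends itself so its helpers are callable directly; otherwise mix the module in first):

    payload = "\xFF\x00raw bytes".b
    encoded = Tapsoob::Utils.base64encode(payload)
    Tapsoob::Utils.base64decode(encoded) == payload  # => true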
bin(cmd)
    # File lib/tapsoob/utils.rb, line 18
    def bin(cmd)
      cmd = "#{cmd}.cmd" if windows?
      cmd
    end
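bin resolves an executable name in a platform-independent way, since batch scripts carry a .cmd extension on Windows. A sketch:

    Tapsoob::Utils.bin("tapsoob")
    # => "tapsoob" on POSIX systems, "tapsoob.cmd" on Windows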
calculate_chunksize(old_chunksize) { |c| ... }
    # File lib/tapsoob/utils.rb, line 104
    def calculate_chunksize(old_chunksize)
      c = Tapsoob::Chunksize.new(old_chunksize)
      begin
        c.start_time = Time.now
        c.time_in_db = yield c
      rescue Errno::EPIPE
        c.retries += 1
        raise if c.retries > 2

        # we got disconnected, the chunksize could be too large
        # reset the chunksize based on the number of retries
        c.reset_chunksize
        retry
      end
      c.end_time = Time.now
      c.calc_new_chunksize
    end
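The block receives the Tapsoob::Chunksize object and must return the time spent in the database for that pass; the method's return value is the chunk size to use next time around. A hedged sketch of one adaptive pass (fetch_chunk is hypothetical):

    chunksize = 1000
    chunksize = Tapsoob::Utils.calculate_chunksize(chunksize) do |c|
      db_start = Time.now
      fetch_chunk(c.chunksize)  # hypothetical: read c.chunksize rows
      Time.now - db_start       # stored as c.time_in_db
    end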
checksum(data)
    # File lib/tapsoob/utils.rb, line 23
    def checksum(data)
      Zlib.crc32(data)
    end
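checksum pairs with valid_data? below to verify a chunk after it has been moved around; a minimal sketch:

    payload = '{"table_name":"users"}'        # any serialized chunk
    crc = Tapsoob::Utils.checksum(payload)    # => CRC32 integer
    Tapsoob::Utils.valid_data?(payload, crc)  # => true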
encode_blobs(row, columns)
    # File lib/tapsoob/utils.rb, line 96
    def encode_blobs(row, columns)
      return row if columns.size == 0
      columns.each do |c|
        row[c] = base64encode(row[c]) unless row[c].nil?
      end
      row
    end
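Given a list of blob column names (for example from incorrect_blobs), the named values are Base64-wrapped in place; the row contents here are illustrative:

    row = { "id" => 1, "body" => "\x89PNG...".b }
    Tapsoob::Utils.encode_blobs(row, ["body"])
    # => { "id" => 1, "body" => <Base64 of the binary value> }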
export_indexes(dump_path, table, index_data)
    # File lib/tapsoob/utils.rb, line 130
    def export_indexes(dump_path, table, index_data)
      data = [index_data]
      if File.exists?(File.join(dump_path, "indexes", "#{table}.json"))
        previous_data = JSON.parse(File.read(File.join(dump_path, "indexes", "#{table}.json")))
        data = data + previous_data
      end

      File.open(File.join(dump_path, "indexes", "#{table}.json"), 'w') do |file|
        file.write(JSON.generate(data))
      end
    end
export_rows(dump_path, table, row_data)
    # File lib/tapsoob/utils.rb, line 142
    def export_rows(dump_path, table, row_data)
      data = row_data

      if File.exists?(File.join(dump_path, "data", "#{table}.json"))
        previous_data = JSON.parse(File.read(File.join(dump_path, "data", "#{table}.json")))
        data[:data] = previous_data["data"] + row_data[:data] unless row_data[:data].nil?
      end

      File.open(File.join(dump_path, "data", "#{table}.json"), 'w') do |file|
        file.write(JSON.generate(data))
      end
    end
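Successive calls append to the same per-table JSON file (export_indexes follows the same pattern). A sketch, with row_data shaped the way format_data returns it:

    row_data = { table_name: "users", header: ["id"], data: [[1], [2]] }
    Tapsoob::Utils.export_rows("dump", "users", row_data)
    # writes or extends dump/data/users.json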
export_schema(dump_path, table, schema_data)
    # File lib/tapsoob/utils.rb, line 124
    def export_schema(dump_path, table, schema_data)
      File.open(File.join(dump_path, "schemas", "#{table}.rb"), 'w') do |file|
        file.write(schema_data)
      end
    end
format_data(data, opts = {})
    # File lib/tapsoob/utils.rb, line 39
    def format_data(data, opts = {})
      return {} if data.size == 0
      string_columns = opts[:string_columns] || []
      schema = opts[:schema] || []
      table = opts[:table]

      max_lengths = schema.inject({}) do |hash, (column, meta)|
        if meta[:db_type] =~ /^varchar\((\d+)\)/
          hash.update(column => $1.to_i)
        end
        hash
      end

      header = data[0].keys
      only_data = data.collect do |row|
        row = encode_blobs(row, string_columns)
        row.each do |column, data|
          if data.to_s.length > (max_lengths[column] || data.to_s.length)
            raise Tapsoob::InvalidData.new(<<-ERROR)
    Detected data that exceeds the length limitation of its column. This is
    generally due to the fact that SQLite does not enforce length restrictions.

    Table  : #{table}
    Column : #{column}
    Type   : #{schema.detect{|s| s.first == column}.last[:db_type]}
    Data   : #{data}
            ERROR
          end

          # Type conversion
          row[column] = data.strftime('%Y-%m-%d %H:%M:%S') if data.is_a?(Time)
        end
        header.collect { |h| row[h] }
      end

      res = { table_name: table, header: header, data: only_data }

      # Add types if schema isn't empty
      res[:types] = schema.map { |c| c.last[:type] } unless schema.empty?

      res
    end
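Without a schema the result is just the header plus row-major values; a minimal sketch:

    rows = [{ "id" => 1, "name" => "Anna" }]
    Tapsoob::Utils.format_data(rows, table: "users")
    # => { table_name: "users", header: ["id", "name"], data: [[1, "Anna"]] }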
incorrect_blobs(db, table)
MySQL text and blob fields are handled the same way internally. This is not true for other databases, so we must check whether the field is actually text and manually convert it back to a string.
    # File lib/tapsoob/utils.rb, line 85
    def incorrect_blobs(db, table)
      return [] if (db.url =~ /(mysql|mysql2):\/\//).nil?

      columns = []
      db.schema(table).each do |data|
        column, cdata = data
        columns << column if cdata[:type] == :blob
      end
      columns
    end
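A sketch against a Sequel connection (the URL and column names are illustrative):

    db = Sequel.connect("mysql2://user:pass@localhost/app")
    Tapsoob::Utils.incorrect_blobs(db, :articles)
    # => e.g. [:body], the blob-typed columns that may really hold text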
load_indexes(database_url, index)
    # File lib/tapsoob/utils.rb, line 159
    def load_indexes(database_url, index)
      Tapsoob::Schema.load_indexes(database_url, index)
    end
load_schema(dump_path, database_url, table)
    # File lib/tapsoob/utils.rb, line 154
    def load_schema(dump_path, database_url, table)
      schema = File.join(dump_path, "schemas", "#{table}.rb")
      schema_bin(:load, database_url, schema.to_s)
    end
order_by(db, table)
    # File lib/tapsoob/utils.rb, line 180
    def order_by(db, table)
      pkey = primary_key(db, table)
      if pkey
        pkey.kind_of?(Array) ? pkey : [pkey.to_sym]
      else
        table = table.to_sym unless table.kind_of?(Sequel::SQL::Identifier)
        db[table].columns
      end
    end
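order_by keeps chunked reads deterministic by sorting on the primary-key columns; a sketch (assuming db is a Sequel connection and users has an id primary key):

    Tapsoob::Utils.order_by(db, :users)  # => [:id]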
primary_key(db, table)
    # File lib/tapsoob/utils.rb, line 170
    def primary_key(db, table)
      db.schema(table).select { |c| c[1][:primary_key] }.map { |c| c[0] }
    end
schema_bin(command, *args)
    # File lib/tapsoob/utils.rb, line 163
    def schema_bin(command, *args)
      require 'tapsoob/cli'
      subcommand = "schema"
      script = Tapsoob::CLI::Schema.new
      script.invoke(command, args.map { |a| "#{a}" })
    end
single_integer_primary_key(db, table)
    # File lib/tapsoob/utils.rb, line 174
    def single_integer_primary_key(db, table)
      table = table.to_sym unless table.kind_of?(Sequel::SQL::Identifier)
      keys = db.schema(table).select { |c| c[1][:primary_key] and c[1][:type] == :integer }
      not keys.nil? and keys.size == 1
    end
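True only when the primary key is exactly one integer column, which is what allows a transfer to paginate on that key; a sketch:

    Tapsoob::Utils.single_integer_primary_key(db, :users)
    # => true when id is the lone integer primary key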
valid_data?(data, crc32)
    # File lib/tapsoob/utils.rb, line 27
    def valid_data?(data, crc32)
      Zlib.crc32(data) == crc32.to_i
    end
windows?()
    # File lib/tapsoob/utils.rb, line 12
    def windows?
      return @windows if defined?(@windows)
      require 'rbconfig'
      @windows = !!(::RbConfig::CONFIG['host_os'] =~ /mswin|mingw/)
    end