Initial Commit

This commit is contained in:
Riley Schneider
2025-12-03 16:38:10 +01:00
parent c5e26bf594
commit b732d8d4b5
17680 changed files with 5977495 additions and 2 deletions

2634
database/perl/vendor/lib/DBI/Changes.pm vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,238 @@
# $Id: ANSI.pm 8696 2007-01-24 23:12:38Z Tim $
#
# Copyright (c) 2002 Tim Bunce Ireland
#
# Constant data describing ANSI CLI info types and return values for the
# SQLGetInfo() method of ODBC.
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.
use strict;
package DBI::Const::GetInfo::ANSI;
our (%InfoTypes,%ReturnTypes,%ReturnValues,);
=head1 NAME
DBI::Const::GetInfo::ANSI - ISO/IEC SQL/CLI Constants for GetInfo
=head1 SYNOPSIS
The API for this module is private and subject to change.
=head1 DESCRIPTION
Information requested by GetInfo().
See: A.1 C header file SQLCLI.H, Page 316, 317.
The API for this module is private and subject to change.
=head1 REFERENCES
ISO/IEC FCD 9075-3:200x Information technology - Database Languages -
SQL - Part 3: Call-Level Interface (SQL/CLI)
SC32 N00744 = WG3:VIE-005 = H2-2002-007
Date: 2002-01-15
=cut
my
$VERSION = "2.008697";
# %InfoTypes maps each ANSI SQL/CLI information type name to its numeric
# code, as passed to SQLGetInfo() / DBI's get_info().
%InfoTypes =
(
SQL_ALTER_TABLE => 86
, SQL_CATALOG_NAME => 10003
, SQL_COLLATING_SEQUENCE => 10004
, SQL_CURSOR_COMMIT_BEHAVIOR => 23
, SQL_CURSOR_SENSITIVITY => 10001
, SQL_DATA_SOURCE_NAME => 2
, SQL_DATA_SOURCE_READ_ONLY => 25
, SQL_DBMS_NAME => 17
, SQL_DBMS_VERSION => 18
, SQL_DEFAULT_TRANSACTION_ISOLATION => 26
, SQL_DESCRIBE_PARAMETER => 10002
, SQL_FETCH_DIRECTION => 8
, SQL_GETDATA_EXTENSIONS => 81
, SQL_IDENTIFIER_CASE => 28
, SQL_INTEGRITY => 73
, SQL_MAXIMUM_CATALOG_NAME_LENGTH => 34
, SQL_MAXIMUM_COLUMNS_IN_GROUP_BY => 97
, SQL_MAXIMUM_COLUMNS_IN_ORDER_BY => 99
, SQL_MAXIMUM_COLUMNS_IN_SELECT => 100
, SQL_MAXIMUM_COLUMNS_IN_TABLE => 101
, SQL_MAXIMUM_COLUMN_NAME_LENGTH => 30
, SQL_MAXIMUM_CONCURRENT_ACTIVITIES => 1
, SQL_MAXIMUM_CURSOR_NAME_LENGTH => 31
, SQL_MAXIMUM_DRIVER_CONNECTIONS => 0
, SQL_MAXIMUM_IDENTIFIER_LENGTH => 10005
, SQL_MAXIMUM_SCHEMA_NAME_LENGTH => 32
, SQL_MAXIMUM_STMT_OCTETS => 20000
, SQL_MAXIMUM_STMT_OCTETS_DATA => 20001
, SQL_MAXIMUM_STMT_OCTETS_SCHEMA => 20002
, SQL_MAXIMUM_TABLES_IN_SELECT => 106
, SQL_MAXIMUM_TABLE_NAME_LENGTH => 35
, SQL_MAXIMUM_USER_NAME_LENGTH => 107
, SQL_NULL_COLLATION => 85
, SQL_ORDER_BY_COLUMNS_IN_SELECT => 90
, SQL_OUTER_JOIN_CAPABILITIES => 115
, SQL_SCROLL_CONCURRENCY => 43
, SQL_SEARCH_PATTERN_ESCAPE => 14
, SQL_SERVER_NAME => 13
, SQL_SPECIAL_CHARACTERS => 94
, SQL_TRANSACTION_CAPABLE => 46
, SQL_TRANSACTION_ISOLATION_OPTION => 72
, SQL_USER_NAME => 47
);
=head2 %ReturnTypes
See: Codes and data types for implementation information (Table 28), Page 85, 86.
Mapped to ODBC datatype names.
=cut
# %ReturnTypes maps each info type name to the ODBC data type name of the
# value get_info() returns for it. The trailing comments give the SQL
# standard's type (and, for CHARACTER, the maximum length).
%ReturnTypes = # maxlen
(
SQL_ALTER_TABLE => 'SQLUINTEGER bitmask' # INTEGER
, SQL_CATALOG_NAME => 'SQLCHAR' # CHARACTER (1)
, SQL_COLLATING_SEQUENCE => 'SQLCHAR' # CHARACTER (254)
, SQL_CURSOR_COMMIT_BEHAVIOR => 'SQLUSMALLINT' # SMALLINT
, SQL_CURSOR_SENSITIVITY => 'SQLUINTEGER' # INTEGER
, SQL_DATA_SOURCE_NAME => 'SQLCHAR' # CHARACTER (128)
, SQL_DATA_SOURCE_READ_ONLY => 'SQLCHAR' # CHARACTER (1)
, SQL_DBMS_NAME => 'SQLCHAR' # CHARACTER (254)
, SQL_DBMS_VERSION => 'SQLCHAR' # CHARACTER (254)
, SQL_DEFAULT_TRANSACTION_ISOLATION => 'SQLUINTEGER' # INTEGER
, SQL_DESCRIBE_PARAMETER => 'SQLCHAR' # CHARACTER (1)
, SQL_FETCH_DIRECTION => 'SQLUINTEGER bitmask' # INTEGER
, SQL_GETDATA_EXTENSIONS => 'SQLUINTEGER bitmask' # INTEGER
, SQL_IDENTIFIER_CASE => 'SQLUSMALLINT' # SMALLINT
, SQL_INTEGRITY => 'SQLCHAR' # CHARACTER (1)
, SQL_MAXIMUM_CATALOG_NAME_LENGTH => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_COLUMNS_IN_GROUP_BY => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_COLUMNS_IN_ORDER_BY => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_COLUMNS_IN_SELECT => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_COLUMNS_IN_TABLE => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_COLUMN_NAME_LENGTH => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_CONCURRENT_ACTIVITIES => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_CURSOR_NAME_LENGTH => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_DRIVER_CONNECTIONS => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_IDENTIFIER_LENGTH => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_SCHEMA_NAME_LENGTH => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_STMT_OCTETS => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_STMT_OCTETS_DATA => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_STMT_OCTETS_SCHEMA => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_TABLES_IN_SELECT => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_TABLE_NAME_LENGTH => 'SQLUSMALLINT' # SMALLINT
, SQL_MAXIMUM_USER_NAME_LENGTH => 'SQLUSMALLINT' # SMALLINT
, SQL_NULL_COLLATION => 'SQLUSMALLINT' # SMALLINT
, SQL_ORDER_BY_COLUMNS_IN_SELECT => 'SQLCHAR' # CHARACTER (1)
, SQL_OUTER_JOIN_CAPABILITIES => 'SQLUINTEGER bitmask' # INTEGER
, SQL_SCROLL_CONCURRENCY => 'SQLUINTEGER bitmask' # INTEGER
, SQL_SEARCH_PATTERN_ESCAPE => 'SQLCHAR' # CHARACTER (1)
, SQL_SERVER_NAME => 'SQLCHAR' # CHARACTER (128)
, SQL_SPECIAL_CHARACTERS => 'SQLCHAR' # CHARACTER (254)
, SQL_TRANSACTION_CAPABLE => 'SQLUSMALLINT' # SMALLINT
, SQL_TRANSACTION_ISOLATION_OPTION => 'SQLUINTEGER bitmask' # INTEGER
, SQL_USER_NAME => 'SQLCHAR' # CHARACTER (128)
);
=head2 %ReturnValues
See: A.1 C header file SQLCLI.H, Page 317, 318.
=cut
# %ReturnValues maps an info type name to a hash of symbolic constant
# name => numeric value, describing the possible results for that info
# type (bit flags for bitmask types, enumerations otherwise).
$ReturnValues{SQL_ALTER_TABLE} =
{
SQL_AT_ADD_COLUMN => 0x00000001
, SQL_AT_DROP_COLUMN => 0x00000002
, SQL_AT_ALTER_COLUMN => 0x00000004
, SQL_AT_ADD_CONSTRAINT => 0x00000008
, SQL_AT_DROP_CONSTRAINT => 0x00000010
};
$ReturnValues{SQL_CURSOR_COMMIT_BEHAVIOR} =
{
SQL_CB_DELETE => 0
, SQL_CB_CLOSE => 1
, SQL_CB_PRESERVE => 2
};
$ReturnValues{SQL_FETCH_DIRECTION} =
{
SQL_FD_FETCH_NEXT => 0x00000001
, SQL_FD_FETCH_FIRST => 0x00000002
, SQL_FD_FETCH_LAST => 0x00000004
, SQL_FD_FETCH_PRIOR => 0x00000008
, SQL_FD_FETCH_ABSOLUTE => 0x00000010
, SQL_FD_FETCH_RELATIVE => 0x00000020
};
$ReturnValues{SQL_GETDATA_EXTENSIONS} =
{
SQL_GD_ANY_COLUMN => 0x00000001
, SQL_GD_ANY_ORDER => 0x00000002
};
$ReturnValues{SQL_IDENTIFIER_CASE} =
{
SQL_IC_UPPER => 1
, SQL_IC_LOWER => 2
, SQL_IC_SENSITIVE => 3
, SQL_IC_MIXED => 4
};
$ReturnValues{SQL_NULL_COLLATION} =
{
SQL_NC_HIGH => 1
, SQL_NC_LOW => 2
};
$ReturnValues{SQL_OUTER_JOIN_CAPABILITIES} =
{
SQL_OUTER_JOIN_LEFT => 0x00000001
, SQL_OUTER_JOIN_RIGHT => 0x00000002
, SQL_OUTER_JOIN_FULL => 0x00000004
, SQL_OUTER_JOIN_NESTED => 0x00000008
, SQL_OUTER_JOIN_NOT_ORDERED => 0x00000010
, SQL_OUTER_JOIN_INNER => 0x00000020
, SQL_OUTER_JOIN_ALL_COMPARISON_OPS => 0x00000040
};
$ReturnValues{SQL_SCROLL_CONCURRENCY} =
{
SQL_SCCO_READ_ONLY => 0x00000001
, SQL_SCCO_LOCK => 0x00000002
, SQL_SCCO_OPT_ROWVER => 0x00000004
, SQL_SCCO_OPT_VALUES => 0x00000008
};
# NOTE: the next two keys (SQL_TRANSACTION_ACCESS_MODE and
# SQL_TRANSACTION_ISOLATION) have no matching entries in %InfoTypes
# above; see the TODO section at the end of this file.
$ReturnValues{SQL_TRANSACTION_ACCESS_MODE} =
{
SQL_TRANSACTION_READ_ONLY => 0x00000001
, SQL_TRANSACTION_READ_WRITE => 0x00000002
};
$ReturnValues{SQL_TRANSACTION_CAPABLE} =
{
SQL_TC_NONE => 0
, SQL_TC_DML => 1
, SQL_TC_ALL => 2
, SQL_TC_DDL_COMMIT => 3
, SQL_TC_DDL_IGNORE => 4
};
$ReturnValues{SQL_TRANSACTION_ISOLATION} =
{
SQL_TRANSACTION_READ_UNCOMMITTED => 0x00000001
, SQL_TRANSACTION_READ_COMMITTED => 0x00000002
, SQL_TRANSACTION_REPEATABLE_READ => 0x00000004
, SQL_TRANSACTION_SERIALIZABLE => 0x00000008
};
1;
=head1 TODO
Corrections, e.g.:
SQL_TRANSACTION_ISOLATION_OPTION vs. SQL_TRANSACTION_ISOLATION
=cut

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,104 @@
# $Id: GetInfoReturn.pm 8696 2007-01-24 23:12:38Z Tim $
#
# Copyright (c) 2002 Tim Bunce Ireland
#
# Constant data describing return values from the DBI getinfo function.
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.
package DBI::Const::GetInfoReturn;
use strict;
use Exporter ();
use vars qw(@ISA @EXPORT @EXPORT_OK %GetInfoReturnTypes %GetInfoReturnValues);
@ISA = qw(Exporter);
@EXPORT = qw(%GetInfoReturnTypes %GetInfoReturnValues);
my
$VERSION = "2.008697";
=head1 NAME
DBI::Const::GetInfoReturn - Data and functions for describing GetInfo results
=head1 SYNOPSIS
The interface to this module is undocumented and liable to change.
=head1 DESCRIPTION
Data and functions for describing GetInfo results
=cut
use DBI::Const::GetInfoType;
use DBI::Const::GetInfo::ANSI ();
use DBI::Const::GetInfo::ODBC ();
# %GetInfoReturnTypes: merge the ANSI and ODBC return-type maps. The ODBC
# entries are listed second, so they override ANSI entries for any info
# type present in both.
%GetInfoReturnTypes =
(
%DBI::Const::GetInfo::ANSI::ReturnTypes
, %DBI::Const::GetInfo::ODBC::ReturnTypes
);
# %GetInfoReturnValues: merge the per-info-type symbolic value maps.
# Unlike the shallow merge above, the value hashes themselves are merged
# for info types known to both sources.
%GetInfoReturnValues = ();
{
my $A = \%DBI::Const::GetInfo::ANSI::ReturnValues;
my $O = \%DBI::Const::GetInfo::ODBC::ReturnValues;
# First pass: every ANSI info type; when ODBC also defines it, the two
# value hashes are merged with ODBC values winning (listed last).
while ( my ($k, $v) = each %$A ) {
my %h = ( exists $O->{$k} ) ? ( %$v, %{$O->{$k}} ) : %$v;
$GetInfoReturnValues{$k} = \%h;
}
# Second pass: ODBC-only info types (those handled above are skipped).
# Each value hash is copied so the source hashes stay unshared.
while ( my ($k, $v) = each %$O ) {
next if exists $A->{$k};
my %h = %$v;
$GetInfoReturnValues{$k} = \%h;
}
}
# -----------------------------------------------------------------------------
# Format($info_type, $value)
#
# Render a get_info() return value for display: values whose declared
# return type is a bitmask are shown as zero-padded hex; anything else
# is returned unchanged. Returns '' when $value is undefined.
sub Format {
    my $InfoType = shift;
    my $Value = shift;

    return '' unless defined $Value;

    # FIX: an unknown $InfoType has no entry in %GetInfoReturnTypes, so
    # $ReturnType was undef and each 'eq' below emitted an
    # uninitialized-value warning. Default to '' instead.
    my $ReturnType = $GetInfoReturnTypes{$InfoType};
    $ReturnType = '' unless defined $ReturnType;

    return sprintf '0x%08X', $Value if $ReturnType eq 'SQLUINTEGER bitmask';
    return sprintf '0x%08X', $Value if $ReturnType eq 'SQLINTEGER bitmask';
    # return '"' . $Value . '"'    if $ReturnType eq 'SQLCHAR';
    return $Value;
}
# Explain($info_type, $value)
#
# Translate a numeric get_info() result into the symbolic constant
# name(s) it represents. For bitmask return types, returns the names of
# all set bits (a list in list context, space-joined in scalar context).
# For enumerated types, returns the single matching name or '?'.
# Returns '' when $value is undefined or the info type is unknown.
sub Explain {
    my ($info_type, $value) = @_;

    return '' unless defined $value;
    return '' unless exists $GetInfoReturnValues{$info_type};

    $value = int $value;
    my $return_type = $GetInfoReturnTypes{$info_type};
    # Invert the name => number map so we can look names up by value.
    my %name_for = reverse %{ $GetInfoReturnValues{$info_type} };

    if ($return_type eq 'SQLUINTEGER bitmask' || $return_type eq 'SQLINTEGER bitmask') {
        # Collect the name of every bit set in $value, lowest bit first.
        my @names;
        foreach my $bit (sort { $a <=> $b } keys %name_for) {
            push @names, $name_for{$bit} if $value & $bit;
        }
        return wantarray ? @names : join ' ', @names;
    }
    return $name_for{$value} || '?';
}
1;

View File

@@ -0,0 +1,53 @@
# $Id: GetInfoType.pm 8696 2007-01-24 23:12:38Z Tim $
#
# Copyright (c) 2002 Tim Bunce Ireland
#
# Constant data describing info type codes for the DBI getinfo function.
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.
package DBI::Const::GetInfoType;
use strict;
use Exporter ();
use vars qw(@ISA @EXPORT @EXPORT_OK %GetInfoType);
@ISA = qw(Exporter);
@EXPORT = qw(%GetInfoType);
my
$VERSION = "2.008697";
=head1 NAME
DBI::Const::GetInfoType - Data describing GetInfo type codes
=head1 SYNOPSIS
use DBI::Const::GetInfoType;
=head1 DESCRIPTION
Imports a %GetInfoType hash which maps names for GetInfo Type Codes
into their corresponding numeric values. For example:
$database_version = $dbh->get_info( $GetInfoType{SQL_DBMS_VER} );
The interface to this module is new and nothing beyond what is
written here is guaranteed.
=cut
use DBI::Const::GetInfo::ANSI (); # liable to change
use DBI::Const::GetInfo::ODBC (); # liable to change
# Build the exported %GetInfoType map (info type name => numeric code) by
# merging the ANSI and ODBC hashes. The ODBC entries are listed second
# and therefore override ANSI entries with the same name.
%GetInfoType =
(
%DBI::Const::GetInfo::ANSI::InfoTypes # liable to change
, %DBI::Const::GetInfo::ODBC::InfoTypes # liable to change
);
1;

3500
database/perl/vendor/lib/DBI/DBD.pm vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,492 @@
package DBI::DBD::Metadata;
# $Id: Metadata.pm 14213 2010-06-30 19:29:18Z Martin $
#
# Copyright (c) 1997-2003 Jonathan Leffler, Jochen Wiedmann,
# Steffen Goeldner and Tim Bunce
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.
use strict;
use Exporter ();
use Carp;
use DBI;
use DBI::Const::GetInfoType qw(%GetInfoType);
our @ISA = qw(Exporter);
our @EXPORT = qw(write_getinfo_pm write_typeinfo_pm);
our $VERSION = "2.014214";
=head1 NAME
DBI::DBD::Metadata - Generate the code and data for some DBI metadata methods
=head1 SYNOPSIS
The idea is to extract metadata information from a good quality
ODBC driver and use it to generate code and data to use in your own
DBI driver for the same database.
To generate code to support the get_info method:
perl -MDBI::DBD::Metadata -e "write_getinfo_pm('dbi:ODBC:dsn-name','user','pass','Driver')"
perl -MDBI::DBD::Metadata -e write_getinfo_pm dbi:ODBC:foo_db username password Driver
To generate code to support the type_info method:
perl -MDBI::DBD::Metadata -e "write_typeinfo_pm('dbi:ODBC:dsn-name','user','pass','Driver')"
perl -MDBI::DBD::Metadata -e write_typeinfo_pm dbi:ODBC:dsn-name user pass Driver
Where C<dbi:ODBC:dsn-name> is the connection to use to extract the
data, and C<Driver> is the name of the driver you want the code
generated for (the driver name gets embedded into the output in
numerous places).
=head1 Generating a GetInfo package for a driver
The C<write_getinfo_pm> in the DBI::DBD::Metadata module generates a
DBD::Driver::GetInfo package on standard output.
This method generates a DBD::Driver::GetInfo package from the data
source you specified in the parameter list or in the environment
variable DBI_DSN.
DBD::Driver::GetInfo should help a DBD author implement the DBI
get_info() method.
Because you are just creating this package, it is very unlikely that
DBD::Driver already provides a good implementation for get_info().
Thus you will probably connect via DBD::ODBC.
Once you are sure that it is producing reasonably sane data, you should
typically redirect the standard output to lib/DBD/Driver/GetInfo.pm, and
then hand edit the result.
Do not forget to update your Makefile.PL and MANIFEST to include this as
an extra PM file that should be installed.
If you connect via DBD::ODBC, you should use version 0.38 or greater;
Please take a critical look at the data returned!
ODBC drivers vary dramatically in their quality.
The generator assumes that most values are static and places these
values directly in the %info hash.
A few examples show the use of CODE references and the implementation
via subroutines.
It is very likely that you will have to write additional subroutines for
values depending on the session state or server version, e.g.
SQL_DBMS_VER.
A possible implementation of DBD::Driver::db::get_info() may look like:
sub get_info {
my($dbh, $info_type) = @_;
require DBD::Driver::GetInfo;
my $v = $DBD::Driver::GetInfo::info{int($info_type)};
$v = $v->($dbh) if ref $v eq 'CODE';
return $v;
}
Please replace Driver (or "<foo>") with the name of your driver.
Note that this stub function is generated for you by write_getinfo_pm
function, but you must manually transfer the code to Driver.pm.
=cut
# write_getinfo_pm($dsn, $user, $pass, $driver)
#
# Connect to the given data source (typically via DBD::ODBC) and print,
# on standard output, the source of a DBD::<driver>::GetInfo module plus
# a get_info() stub to be transferred into the driver's main .pm file.
# Arguments are taken from @ARGV when none are passed. Dies (RaiseError)
# if the connection fails.
sub write_getinfo_pm
{
my ($dsn, $user, $pass, $driver) = @_ ? @_ : @ARGV;
my $dbh = DBI->connect($dsn, $user, $pass, {RaiseError=>1});
$driver = "<foo>" unless defined $driver;
print <<PERL;
# Transfer this to ${driver}.pm
# The get_info function was automatically generated by
# DBI::DBD::Metadata::write_getinfo_pm v$DBI::DBD::Metadata::VERSION.
package DBD::${driver}::db; # This line can be removed once transferred.
sub get_info {
my(\$dbh, \$info_type) = \@_;
require DBD::${driver}::GetInfo;
my \$v = \$DBD::${driver}::GetInfo::info{int(\$info_type)};
\$v = \$v->(\$dbh) if ref \$v eq 'CODE';
return \$v;
}
# Transfer this to lib/DBD/${driver}/GetInfo.pm
# The \%info hash was automatically generated by
# DBI::DBD::Metadata::write_getinfo_pm v$DBI::DBD::Metadata::VERSION.
package DBD::${driver}::GetInfo;
use strict;
use DBD::${driver};
# Beware: not officially documented interfaces...
# use DBI::Const::GetInfoType qw(\%GetInfoType);
# use DBI::Const::GetInfoReturn qw(\%GetInfoReturnTypes \%GetInfoReturnValues);
my \$sql_driver = '${driver}';
my \$sql_ver_fmt = '%02d.%02d.%04d'; # ODBC version string: ##.##.#####
my \$sql_driver_ver = sprintf \$sql_ver_fmt, split (/\\./, \$DBD::${driver}::VERSION);
PERL
# $kw_map records whether the data source reported any SQL keywords, so
# the %info generation below knows whether sql_keywords() was emitted.
my $kw_map = 0;
{
# Informix CLI (ODBC) v3.81.0000 does not return a list of keywords.
# $\ and $, are localized so the prints below emit one keyword per line.
local $\ = "\n";
local $, = "\n";
my ($kw) = $dbh->get_info($GetInfoType{SQL_KEYWORDS});
if ($kw)
{
print "\nmy \@Keywords = qw(\n";
print sort split /,/, $kw;
print ");\n\n";
print "sub sql_keywords {\n";
print q% return join ',', @Keywords;%;
print "\n}\n\n";
$kw_map = 1;
}
}
# Emit helper subs for values that depend on the session rather than
# being static (data source name, user name).
print <<'PERL';
sub sql_data_source_name {
my $dbh = shift;
return "dbi:$sql_driver:" . $dbh->{Name};
}
sub sql_user_name {
my $dbh = shift;
# CURRENT_USER is a non-standard attribute, probably undef
# Username is a standard DBI attribute
return $dbh->{CURRENT_USER} || $dbh->{Username};
}
PERL
# Emit the %info hash itself: one entry per known GetInfo type, in
# sorted name order. Entries whose value is undef are commented out
# (see the printf at the bottom of the loop).
print "\nour \%info = (\n";
foreach my $key (sort keys %GetInfoType)
{
my $num = $GetInfoType{$key};
my $val = eval { $dbh->get_info($num); };
# A few entries get code references or expressions instead of the
# static value reported by the driver:
if ($key eq 'SQL_DATA_SOURCE_NAME') {
$val = '\&sql_data_source_name';
}
elsif ($key eq 'SQL_KEYWORDS') {
$val = ($kw_map) ? '\&sql_keywords' : 'undef';
}
elsif ($key eq 'SQL_DRIVER_NAME') {
$val = "\$INC{'DBD/$driver.pm'}";
}
elsif ($key eq 'SQL_DRIVER_VER') {
$val = '$sql_driver_ver';
}
elsif ($key eq 'SQL_USER_NAME') {
$val = '\&sql_user_name';
}
elsif (not defined $val) {
$val = 'undef';
}
elsif ($val eq '') {
$val = "''";
}
elsif ($val =~ /\D/) {
# Non-numeric static value: emit as a single-quoted Perl string,
# escaping backslashes and embedded single quotes.
$val =~ s/\\/\\\\/g;
$val =~ s/'/\\'/g;
$val = "'$val'";
}
printf "%s %5d => %-30s # %s\n", (($val eq 'undef') ? '#' : ' '), $num, "$val,", $key;
}
print ");\n\n1;\n\n__END__\n";
}
=head1 Generating a TypeInfo package for a driver
The C<write_typeinfo_pm> function in the DBI::DBD::Metadata module generates
on standard output the data needed for a driver's type_info_all method.
It also provides default implementations of the type_info_all
method for inclusion in the driver's main implementation file.
The driver parameter is the name of the driver for which the methods
will be generated; for the sake of examples, this will be "Driver".
Typically, the dsn parameter will be of the form "dbi:ODBC:odbc_dsn",
where the odbc_dsn is a DSN for one of the driver's databases.
The user and pass parameters are the other optional connection
parameters that will be provided to the DBI connect method.
Once you are sure that it is producing reasonably sane data, you should
typically redirect the standard output to lib/DBD/Driver/TypeInfo.pm,
and then hand edit the result if necessary.
Do not forget to update your Makefile.PL and MANIFEST to include this as
an extra PM file that should be installed.
Please take a critical look at the data returned!
ODBC drivers vary dramatically in their quality.
The generator assumes that all the values are static and places these
values directly in the %info hash.
A possible implementation of DBD::Driver::type_info_all() may look like:
sub type_info_all {
my ($dbh) = @_;
require DBD::Driver::TypeInfo;
return [ @$DBD::Driver::TypeInfo::type_info_all ];
}
Please replace Driver (or "<foo>") with the name of your driver.
Note that this stub function is generated for you by the write_typeinfo_pm
function, but you must manually transfer the code to Driver.pm.
=cut
# These two are used by fmt_value...
my %dbi_inv;
my %sql_type_inv;
#-DEBUGGING-#
#sub print_hash
#{
# my ($name, %hash) = @_;
# print "Hash: $name\n";
# foreach my $key (keys %hash)
# {
# print "$key => $hash{$key}\n";
# }
#}
#-DEBUGGING-#
# inverse_hash(%hash)
#
# Return the inversion of %hash (value => key). Dies if two keys map to
# the same value, since the inversion would then be ambiguous.
sub inverse_hash
{
    my (%forward) = @_;
    my %backward;
    while (my ($key, $value) = each %forward)
    {
        die "Double mapping for key value $value ($backward{$value}, $key)!"
            if defined $backward{$value};
        $backward{$value} = $key;
    }
    return %backward;
}
# fmt_value($field_num, $value)
#
# Render one type_info_all field for inclusion in generated Perl source:
# undef becomes the literal string "undef"; non-integers become quoted
# string literals; integers in a (SQL_)DATA_TYPE column are replaced by
# their symbolic SQL type constant name when one is known.
# Uses the file-scoped %dbi_inv and %sql_type_inv maps.
sub fmt_value
{
    my ($field_num, $value) = @_;
    if (!defined $value)
    {
        $value = "undef";
    }
    elsif ($value !~ m/^[-+]?\d+$/)
    {
        # All the numbers in type_info_all are integers, so anything
        # that isn't an integer is a string: emit it double-quoted,
        # escaping any embedded double quotes first.
        $value =~ s/"/\\"/g;
        $value = qq{"$value"};
    }
    elsif ($dbi_inv{$field_num} =~ m/^(SQL_)?DATA_TYPE$/)
    {
        # All numeric; prefer the symbolic type constant if known.
        $value = $sql_type_inv{$value}
            if defined $sql_type_inv{$value};
    }
    return $value;
}
# write_typeinfo_pm($dsn, $user, $pass, $driver)
#
# Connect to the given data source (typically via DBD::ODBC) and print,
# on standard output, the source of a DBD::<driver>::TypeInfo module
# plus a type_info_all() stub to be transferred into the driver's main
# .pm file. Arguments are taken from @ARGV when none are passed.
sub write_typeinfo_pm
{
    my ($dsn, $user, $pass, $driver) = @_ ? @_ : @ARGV;
    my $dbh = DBI->connect($dsn, $user, $pass, {AutoCommit=>1, RaiseError=>1});
    $driver = "<foo>" unless defined $driver;
    print <<PERL;
# Transfer this to ${driver}.pm
# The type_info_all function was automatically generated by
# DBI::DBD::Metadata::write_typeinfo_pm v$DBI::DBD::Metadata::VERSION.
package DBD::${driver}::db; # This line can be removed once transferred.
sub type_info_all
{
my (\$dbh) = \@_;
require DBD::${driver}::TypeInfo;
return [ \@\$DBD::${driver}::TypeInfo::type_info_all ];
}
# Transfer this to lib/DBD/${driver}/TypeInfo.pm.
# Don't forget to add version and intellectual property control information.
# The \%type_info_all hash was automatically generated by
# DBI::DBD::Metadata::write_typeinfo_pm v$DBI::DBD::Metadata::VERSION.
package DBD::${driver}::TypeInfo;
{
require Exporter;
require DynaLoader;
\@ISA = qw(Exporter DynaLoader);
\@EXPORT = qw(type_info_all);
use DBI qw(:sql_types);
PERL
    # Generate SQL type name mapping hashes from DBI's :sql_types exports.
    # See code fragment in DBI specification.
    my %sql_type_map;
    foreach (@{$DBI::EXPORT_TAGS{sql_types}})
    {
        no strict 'refs';
        $sql_type_map{$_} = &{"DBI::$_"}();
        $sql_type_inv{$sql_type_map{$_}} = $_;
    }
    #-DEBUG-# print_hash("sql_type_map", %sql_type_map);
    #-DEBUG-# print_hash("sql_type_inv", %sql_type_inv);

    # Canonical DBI column name => index mapping for type_info_all rows.
    my %dbi_map =
    (
        TYPE_NAME          =>  0,
        DATA_TYPE          =>  1,
        COLUMN_SIZE        =>  2,
        LITERAL_PREFIX     =>  3,
        LITERAL_SUFFIX     =>  4,
        CREATE_PARAMS      =>  5,
        NULLABLE           =>  6,
        CASE_SENSITIVE     =>  7,
        SEARCHABLE         =>  8,
        UNSIGNED_ATTRIBUTE =>  9,
        FIXED_PREC_SCALE   => 10,
        AUTO_UNIQUE_VALUE  => 11,
        LOCAL_TYPE_NAME    => 12,
        MINIMUM_SCALE      => 13,
        MAXIMUM_SCALE      => 14,
        SQL_DATA_TYPE      => 15,
        SQL_DATETIME_SUB   => 16,
        NUM_PREC_RADIX     => 17,
        INTERVAL_PRECISION => 18,
    );
    #-DEBUG-# print_hash("dbi_map", %dbi_map);
    # %dbi_inv (file-scoped; also used by fmt_value) is index => name.
    %dbi_inv = inverse_hash(%dbi_map);
    #-DEBUG-# print_hash("dbi_inv", %dbi_inv);

    # Width of the longest column name, for aligned output.
    my $maxlen = 0;
    foreach my $key (keys %dbi_map)
    {
        $maxlen = length($key) if length($key) > $maxlen;
    }

    # Print the name/value mapping entry in the type_info_all array.
    my $fmt = " \%-${maxlen}s => \%2d,\n";
    my $numkey = 0;
    my $maxkey = 0;
    print " \$type_info_all = [\n {\n";
    foreach my $i (sort { $a <=> $b } keys %dbi_inv)
    {
        printf($fmt, $dbi_inv{$i}, $i);
        $numkey++;
        $maxkey = $i;
    }
    print " },\n";
    # BUG FIX: this previously read "unless $numkey = $maxkey + 1" - an
    # assignment, not a comparison - which both clobbered $numkey and
    # unconditionally suppressed the warning.
    print STDERR "### WARNING - Non-dense set of keys ($numkey keys, $maxkey max key)\n"
        unless $numkey == $maxkey + 1;

    my $h = $dbh->type_info_all;
    my @tia = @$h;
    # The first element of type_info_all is the driver's own column
    # name => index map; normalize its keys to upper case.
    my %odbc_map = map { uc $_ => $tia[0]->{$_} } keys %{$tia[0]};
    shift @tia; # Remove the mapping reference.
    my $numtyp = $#tia;
    #-DEBUG-# print_hash("odbc_map", %odbc_map);

    # In theory, the key/number mapping sequence for %dbi_map
    # should be the same as the one from the ODBC driver. However, to
    # prevent the possibility of mismatches, and to deal with older
    # missing attributes or unexpected new ones, we chase back through
    # the %dbi_inv and %odbc_map hashes, generating @dbi_to_odbc
    # to map our new key number to the old one.
    my @dbi_to_odbc;
    foreach my $num (sort { $a <=> $b } keys %dbi_inv)
    {
        # Find the name in %dbi_inv that matches this index number,
        # then the index in %odbc_map that has this name (undef when
        # the driver does not supply that column).
        my $dbi_key = $dbi_inv{$num};
        $dbi_to_odbc[$num] = (defined $odbc_map{$dbi_key}) ? $odbc_map{$dbi_key} : undef;
    }

    # Determine the length of the longest formatted value in each field.
    my @len;
    for (my $i = 0; $i <= $numtyp; $i++)
    {
        my @odbc_val = @{$tia[$i]};
        for (my $num = 0; $num <= $maxkey; $num++)
        {
            my $val = (defined $dbi_to_odbc[$num]) ? $odbc_val[$dbi_to_odbc[$num]] : undef;
            $val = fmt_value($num, $val);
            $val = "$val,";
            $len[$num] = length($val) if !defined $len[$num] || length($val) > $len[$num];
        }
    }

    # Generate format strings to left justify each string in maximum
    # field width.
    my @fmt;
    for (my $i = 0; $i <= $maxkey; $i++)
    {
        $fmt[$i] = "%-$len[$i]s";
    }

    # Format and print the data rows from type_info_all.
    for (my $i = 0; $i <= $numtyp; $i++)
    {
        my @odbc_val = @{$tia[$i]};
        print " [ ";
        for (my $num = 0; $num <= $maxkey; $num++)
        {
            my $val = (defined $dbi_to_odbc[$num]) ? $odbc_val[$dbi_to_odbc[$num]] : undef;
            $val = fmt_value($num, $val);
            printf $fmt[$num], "$val,";
        }
        print " ],\n";
    }
    print " ];\n\n 1;\n}\n\n__END__\n";
}
1;
__END__
=head1 AUTHORS
Jonathan Leffler <jleffler@us.ibm.com> (previously <jleffler@informix.com>),
Jochen Wiedmann <joe@ispsoft.de>,
Steffen Goeldner <sgoeldner@cpan.org>,
and Tim Bunce <dbi-users@perl.org>.
=cut

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,851 @@
=head1 NAME
DBI::DBD::SqlEngine::Developers - Developers documentation for DBI::DBD::SqlEngine
=head1 SYNOPSIS
package DBD::myDriver;
use base qw(DBI::DBD::SqlEngine);
sub driver
{
...
my $drh = $proto->SUPER::driver($attr);
...
return $drh->{class};
}
sub CLONE { ... }
package DBD::myDriver::dr;
@ISA = qw(DBI::DBD::SqlEngine::dr);
sub data_sources { ... }
...
package DBD::myDriver::db;
@ISA = qw(DBI::DBD::SqlEngine::db);
sub init_valid_attributes { ... }
sub init_default_attributes { ... }
sub set_versions { ... }
sub validate_STORE_attr { my ($dbh, $attrib, $value) = @_; ... }
sub validate_FETCH_attr { my ($dbh, $attrib) = @_; ... }
sub get_myd_versions { ... }
sub get_avail_tables { ... }
package DBD::myDriver::st;
@ISA = qw(DBI::DBD::SqlEngine::st);
sub FETCH { ... }
sub STORE { ... }
package DBD::myDriver::Statement;
@ISA = qw(DBI::DBD::SqlEngine::Statement);
sub open_table { ... }
package DBD::myDriver::Table;
@ISA = qw(DBI::DBD::SqlEngine::Table);
my %reset_on_modify = (
myd_abc => "myd_foo",
myd_mno => "myd_bar",
);
__PACKAGE__->register_reset_on_modify( \%reset_on_modify );
my %compat_map = (
abc => 'foo_abc',
xyz => 'foo_xyz',
);
__PACKAGE__->register_compat_map( \%compat_map );
sub bootstrap_table_meta { ... }
sub init_table_meta { ... }
sub table_meta_attr_changed { ... }
sub open_data { ... }
sub new { ... }
sub fetch_row { ... }
sub push_row { ... }
sub push_names { ... }
sub seek { ... }
sub truncate { ... }
sub drop { ... }
# optimize the SQL engine by add one or more of
sub update_current_row { ... }
# or
sub update_specific_row { ... }
# or
sub update_one_row { ... }
# or
sub insert_new_row { ... }
# or
sub delete_current_row { ... }
# or
sub delete_one_row { ... }
=head1 DESCRIPTION
This document describes the interface of DBI::DBD::SqlEngine for DBD
developers who write DBI::DBD::SqlEngine based DBI drivers. It supplements
L<DBI::DBD> and L<DBI::DBD::SqlEngine::HowTo>, which you should read first.
=head1 CLASSES
Each DBI driver must provide a package global C<< driver >> method and
three DBI related classes:
=over 4
=item DBI::DBD::SqlEngine::dr
Driver package, contains the methods DBI calls indirectly via DBI
interface:
DBI->connect ('DBI:DBM:', undef, undef, {})
# invokes
package DBD::DBM::dr;
@DBD::DBM::dr::ISA = qw(DBI::DBD::SqlEngine::dr);
sub connect ($$;$$$)
{
...
}
Similar for C<data_sources ()> and C<disconnect_all()>.
Pure Perl DBI drivers derived from DBI::DBD::SqlEngine usually don't need to
override any of the methods provided through the DBD::XXX::dr package.
However if you need additional initialization not fitting in
C<init_valid_attributes()> and C<init_default_attributes()> of your ::db
class, the connect method might be the final place to be modified.
=item DBI::DBD::SqlEngine::db
Contains the methods which are called through DBI database handles
(C<< $dbh >>). e.g.,
$sth = $dbh->prepare ("select * from foo");
# returns the f_encoding setting for table foo
$dbh->csv_get_meta ("foo", "f_encoding");
DBI::DBD::SqlEngine provides the typical methods required here. Developers who
write DBI drivers based on DBI::DBD::SqlEngine need to override the methods
C<< set_versions >> and C<< init_valid_attributes >>.
=item DBI::DBD::SqlEngine::TieMeta;
Provides the tie-magic for C<< $dbh->{$drv_pfx . "_meta"} >>. Routes
C<STORE> through C<< $drv->set_sql_engine_meta() >> and C<FETCH> through
C<< $drv->get_sql_engine_meta() >>. C<DELETE> is not supported, you have
to execute a C<DROP TABLE> statement, where applicable.
=item DBI::DBD::SqlEngine::TieTables;
Provides the tie-magic for tables in C<< $dbh->{$drv_pfx . "_meta"} >>.
Routes C<STORE> though C<< $tblClass->set_table_meta_attr() >> and C<FETCH>
though C<< $tblClass->get_table_meta_attr() >>. C<DELETE> removes an
attribute from the I<meta object> retrieved by
C<< $tblClass->get_table_meta() >>.
=item DBI::DBD::SqlEngine::st
Contains the methods to deal with prepared statement handles. e.g.,
$sth->execute () or die $sth->errstr;
=item DBI::DBD::SqlEngine::TableSource;
Base class for 3rd party table sources:
$dbh->{sql_table_source} = "DBD::Foo::TableSource";
=item DBI::DBD::SqlEngine::DataSource;
Base class for 3rd party data sources:
$dbh->{sql_data_source} = "DBD::Foo::DataSource";
=item DBI::DBD::SqlEngine::Statement;
Base class for derived drivers statement engine. Implements C<open_table>.
=item DBI::DBD::SqlEngine::Table;
Contains tailoring between SQL engine's requirements and
C<DBI::DBD::SqlEngine> magic for finding the right tables and storage.
Builds bridges between C<sql_meta> handling of C<DBI::DBD::SqlEngine::db>,
table initialization for SQL engines and I<meta object>'s attribute
management for derived drivers.
=back
=head2 DBI::DBD::SqlEngine
This is the main package containing the routines to initialize
DBI::DBD::SqlEngine based DBI drivers. Primarily the
C<< DBI::DBD::SqlEngine::driver >> method is invoked, either directly
from DBI when the driver is initialized or from the derived class.
package DBD::DBM;
use base qw( DBI::DBD::SqlEngine );
sub driver
{
my ( $class, $attr ) = @_;
...
my $drh = $class->SUPER::driver( $attr );
...
return $drh;
}
It is not necessary to implement your own driver method as long as
additional initialization (e.g. installing more private driver
methods) is not required. You do not need to call C<< setup_driver >>
as DBI::DBD::SqlEngine takes care of it.
=head2 DBI::DBD::SqlEngine::dr
The driver package contains the methods DBI calls indirectly via the DBI
interface (see L<DBI/DBI Class Methods>).
DBI::DBD::SqlEngine based DBI drivers usually do not need to implement anything here,
it is enough to do the basic initialization:
package DBD:XXX::dr;
@DBD::XXX::dr::ISA = qw (DBI::DBD::SqlEngine::dr);
$DBD::XXX::dr::imp_data_size = 0;
$DBD::XXX::dr::data_sources_attr = undef;
$DBD::XXX::ATTRIBUTION = "DBD::XXX $DBD::XXX::VERSION by Hans Mustermann";
=head3 Methods provided by C<< DBI::DBD::SqlEngine::dr >>:
=over 4
=item connect
Supervises the driver bootstrap when calling
DBI->connect( "dbi:Foo", , , { ... } );
First it instantiates a new driver using C<DBI::_new_dbh>. After that,
initial bootstrap of the newly instantiated driver is done by
$dbh->func( 0, "init_default_attributes" );
The first argument (C<0>) signals that this is the very first call to
C<init_default_attributes>. Modern drivers understand that and do early
stage setup here after calling
package DBD::Foo::db;
our @DBD::Foo::db::ISA = qw(DBI::DBD::SqlEngine::db);
sub init_default_attributes
{
my ($dbh, $phase) = @_;
$dbh->SUPER::init_default_attributes($phase);
...; # own setup code, maybe separated by phases
}
When the C<$phase> argument is passed down until
C<DBI::DBD::SqlEngine::db::init_default_attributes>, C<connect()> recognizes
a I<modern> driver and initializes the attributes from I<DSN> and I<$attr>
arguments passed via C<< DBI->connect( $dsn, $user, $pass, \%attr ) >>.
At the end of the attribute initialization after I<phase 0>, C<connect()>
invokes C<init_default_attributes> again for I<phase 1>:
$dbh->func( 1, "init_default_attributes" );
=item data_sources
Returns a list of I<DSN>'s using the C<data_sources> method of the
class specified in C<< $dbh->{sql_table_source} >> or via C<\%attr>:
@ary = DBI->data_sources($driver);
@ary = DBI->data_sources($driver, \%attr);
=item disconnect_all
C<DBI::DBD::SqlEngine> doesn't have an overall driver cache, so nothing
happens here at all.
=back
=head2 DBI::DBD::SqlEngine::db
This package defines the database methods, which are called via the DBI
database handle C<< $dbh >>.
=head3 Methods provided by C<< DBI::DBD::SqlEngine::db >>:
=over 4
=item ping
Simply returns the content of the C<< Active >> attribute. Override
when your driver needs more complicated actions here.
=item prepare
Prepares a new SQL statement to execute. Returns a statement handle,
C<< $sth >> - instance of the DBD::XXX::st. It is neither required nor
recommended to override this method.
=item validate_FETCH_attr
Called by C<FETCH> to allow inherited drivers to do their own attribute
name validation. Calling convention is similar to C<FETCH> and the
return value is the approved attribute name.
return $validated_attribute_name;
In case validation fails (e.g. accessing a private attribute or similar),
C<validate_FETCH_attr> is permitted to throw an exception.
=item FETCH
Fetches an attribute of a DBI database object. Private handle attributes
must have a prefix (this is mandatory). If a requested attribute is
detected as a private attribute without a valid prefix, the driver prefix
(written as C<$drv_prefix>) is added.
The driver prefix is extracted from the attribute name and verified against
C<< $dbh->{ $drv_prefix . "valid_attrs" } >> (when it exists). If the
requested attribute value is not listed as a valid attribute, this method
croaks. If the attribute is valid and readonly (listed in C<< $dbh->{
$drv_prefix . "readonly_attrs" } >> when it exists), a real copy of the
attribute value is returned. So it's not possible to modify
C<f_valid_attrs> from outside of DBI::DBD::SqlEngine::db or a derived class.
=item validate_STORE_attr
Called by C<STORE> to allow inherited drivers to do their own attribute
name validation. Calling convention is similar to C<STORE> and the
return value is the approved attribute name followed by the approved
new value.
return ($validated_attribute_name, $validated_attribute_value);
In case validation fails (e.g. accessing a private attribute or similar),
C<validate_STORE_attr> is permitted to throw an exception
(C<DBI::DBD::SqlEngine::db::validate_STORE_attr> throws an exception when
someone tries to assign value other than C<SQL_IC_UPPER .. SQL_IC_MIXED>
to C<< $dbh->{sql_identifier_case} >> or
C<< $dbh->{sql_quoted_identifier_case} >>).
=item STORE
Stores a database private attribute. Private handle attributes must have a
prefix (this is mandatory). If a requested attribute is detected as a private
attribute without a valid prefix, the driver prefix (written as
C<$drv_prefix>) is added. If the database handle has an attribute
C<${drv_prefix}_valid_attrs> - for attribute names which are not listed in
that hash, this method croaks. If the database handle has an attribute
C<${drv_prefix}_readonly_attrs>, only attributes which are not listed there
can be stored (once they are initialized). Trying to overwrite such an
immutable attribute forces this method to croak.
An example of a valid attributes list can be found in
C<< DBI::DBD::SqlEngine::db::init_valid_attributes >>.
=item set_versions
This method sets the attributes C<< f_version >>, C<< sql_nano_version >>,
C<< sql_statement_version >> and (if not prohibited by a restrictive
C<< ${prefix}_valid_attrs >>) C<< ${prefix}_version >>.
This method is called at the end of the C<< connect () >> phase.
When overriding this method, do not forget to invoke the superior one.
=item init_valid_attributes
This method is called after the database handle is instantiated as the
first attribute initialization.
C<< DBI::DBD::SqlEngine::db::init_valid_attributes >> initializes the
attributes C<sql_valid_attrs> and C<sql_readonly_attrs>.
When overriding this method, do not forget to invoke the superior one,
preferably before doing anything else.
=item init_default_attributes
This method is called after the database handle is instantiated to
initialize the default attributes. It expects one argument: C<$phase>.
If C<$phase> is not given, C<connect> of C<DBI::DBD::SqlEngine::dr>
expects this is an old-fashioned driver which isn't capable of multi-phased
initialization.
C<< DBI::DBD::SqlEngine::db::init_default_attributes >> initializes the
attributes C<sql_identifier_case>, C<sql_quoted_identifier_case>,
C<sql_handler>, C<sql_init_order>, C<sql_meta>, C<sql_engine_version>,
C<sql_nano_version> and C<sql_statement_version> when L<SQL::Statement>
is available.
It sets C<sql_init_order> to the given C<$phase>.
When the derived implementor class provides the attribute to validate
attributes (e.g. C<< $dbh->{dbm_valid_attrs} = {...}; >>) or the attribute
containing the immutable attributes (e.g. C<< $dbh->{dbm_readonly_attrs}
= {...}; >>), the attributes C<drv_valid_attrs>, C<drv_readonly_attrs> and
C<drv_version> are added (when available) to the list of valid and
immutable attributes (where C<drv_> is interpreted as the driver prefix).
=item get_versions
This method is called by the code injected into the instantiated driver to
provide the user callable driver method C<< ${prefix}versions >> (e.g.
C<< dbm_versions >>, C<< csv_versions >>, ...).
The DBI::DBD::SqlEngine implementation returns all version information known by
DBI::DBD::SqlEngine (e.g. DBI version, Perl version, DBI::DBD::SqlEngine version and
the SQL handler version).
C<get_versions> takes the C<$dbh> as the first argument and optionally a
second argument containing a table name. The second argument is not
evaluated in C<< DBI::DBD::SqlEngine::db::get_versions >> itself - but
might be in the future.
If the derived implementor class provides a method named
C<get_${drv_prefix}versions>, this is invoked and the return value of
it is associated to the derived driver name:
if (my $dgv = $dbh->{ImplementorClass}->can ("get_" . $drv_prefix . "versions")) {
(my $derived_driver = $dbh->{ImplementorClass}) =~ s/::db$//;
$versions{$derived_driver} = &$dgv ($dbh, $table);
}
Override it to add more version information about your module, (e.g.
some kind of parser version in case of DBD::CSV, ...), if one line is not
enough room to provide all relevant information.
=item sql_parser_object
Returns a L<SQL::Parser> instance, when C<< sql_handler >> is set to
"SQL::Statement". The parser instance is stored in C<< sql_parser_object >>.
It is not recommended to override this method.
=item disconnect
Disconnects from a database. All local table information is discarded and
the C<< Active >> attribute is set to 0.
=item type_info_all
Returns information about all the types supported by DBI::DBD::SqlEngine.
=item table_info
Returns a statement handle which is prepared to deliver information about
all known tables.
=item list_tables
Returns a list of all known table names.
=item quote
Quotes a string for use in SQL statements.
=item commit
Warns about a useless call (if warnings enabled) and returns.
DBI::DBD::SqlEngine is typically a driver which commits every action
instantly when executed.
=item rollback
Warns about a useless call (if warnings enabled) and returns.
DBI::DBD::SqlEngine is typically a driver which commits every action
instantly when executed.
=back
=head3 Attributes used by C<< DBI::DBD::SqlEngine::db >>:
This section describes attributes which are important to developers of DBI
Database Drivers derived from C<DBI::DBD::SqlEngine>.
=over 4
=item sql_init_order
This attribute contains a hash with priorities as key and an array
containing the C<$dbh> attributes to be initialized before/after
other attributes.
C<DBI::DBD::SqlEngine> initializes following attributes:
$dbh->{sql_init_order} = {
0 => [qw( Profile RaiseError PrintError AutoCommit )],
90 => [ "sql_meta", $dbh->{$drv_pfx_meta} ? $dbh->{$drv_pfx_meta} : () ]
}
The default priority of not listed attribute keys is C<50>. It is well
known that a lot of attributes needed to be set before some table settings
are initialized. For example, for L<DBD::DBM>, when using
my $dbh = DBI->connect( "dbi:DBM:", undef, undef, {
f_dir => "/path/to/dbm/databases",
dbm_type => "BerkeleyDB",
dbm_mldbm => "JSON", # use MLDBM::Serializer::JSON
dbm_tables => {
quick => {
dbm_type => "GDBM_File",
dbm_MLDBM => "FreezeThaw"
}
}
});
This defines a known table C<quick> which uses the L<GDBM_File> backend and
L<FreezeThaw> as serializer instead of the overall default L<BerkeleyDB> and
L<JSON>. B<But> all files containing the table data have to be searched in
C<< $dbh->{f_dir} >>, which requires C<< $dbh->{f_dir} >> must be initialized
before C<< $dbh->{sql_meta}->{quick} >> is initialized by
C<bootstrap_table_meta> method of L</DBI::DBD::SqlEngine::Table> to get
C<< $dbh->{sql_meta}->{quick}->{f_dir} >> being initialized properly.
=item sql_init_phase
This attribute is only set during the initialization steps of the DBI
Database Driver. It contains the value of the currently run initialization
phase. Currently supported phases are I<phase 0> and I<phase 1>. This
attribute is set in C<init_default_attributes> and removed in C<init_done>.
=item sql_engine_in_gofer
This value has a true value in case of this driver is operated via
L<DBD::Gofer>. The impact of being operated via Gofer is a read-only
driver (not read-only databases!), so you cannot modify any attributes
later - neither any table settings. B<But> you won't get an error in
cases you modify table attributes, so please carefully watch
C<sql_engine_in_gofer>.
=item sql_table_source
Names a class which is responsible for delivering I<data sources> and
I<available tables> (Database Driver related). I<data sources> here
refers to L<DBI/data_sources>, not C<sql_data_source>.
See L</DBI::DBD::SqlEngine::TableSource> for details.
=item sql_data_source
Name a class which is responsible for handling table resources open
and completing table names requested via SQL statements.
See L</DBI::DBD::SqlEngine::DataSource> for details.
=item sql_dialect
Controls the dialect understood by SQL::Parser. Possible values (delivery
state of SQL::Statement):
* ANSI
* CSV
* AnyData
Defaults to "CSV". Because an SQL::Parser is instantiated only once and
SQL::Parser doesn't allow one to modify the dialect once instantiated,
it's strongly recommended to set this flag before any statement is
executed (best place is connect attribute hash).
=back
=head2 DBI::DBD::SqlEngine::st
Contains the methods to deal with prepared statement handles:
=over 4
=item bind_param
Common routine to bind placeholders to a statement for execution. It
is dangerous to override this method without detailed knowledge about
the DBI::DBD::SqlEngine internal storage structure.
=item execute
Executes a previously prepared statement (with placeholders, if any).
=item finish
Finishes a statement handle, discards all buffered results. The prepared
statement is not discarded so the statement can be executed again.
=item fetch
Fetches the next row from the result-set. This method may be rewritten
in a later version and if it's overridden in a derived class, the
derived implementation should not rely on the storage details.
=item fetchrow_arrayref
Alias for C<< fetch >>.
=item FETCH
Fetches statement handle attributes. Supported attributes (for full overview
see L<DBI/Statement Handle Attributes>) are C<NAME>, C<TYPE>, C<PRECISION>
and C<NULLABLE>. Each column is returned as C<NULLABLE> which might be wrong
depending on the derived backend storage. If the statement handle has
private attributes, they can be fetched using this method, too. B<Note> that
statement attributes are not associated with any table used in this statement.
This method usually requires extending in a derived implementation.
See L<DBD::CSV> or L<DBD::DBM> for some example.
=item STORE
Allows storing of statement private attributes. No special handling is
currently implemented here.
=item rows
Returns the number of rows affected by the last execute. This method might
return C<undef>.
=back
=head2 DBI::DBD::SqlEngine::TableSource
Provides data sources and table information on database driver and database
handle level.
package DBI::DBD::SqlEngine::TableSource;
sub data_sources ($;$)
{
my ( $class, $drh, $attrs ) = @_;
...
}
sub avail_tables
{
my ( $class, $drh ) = @_;
...
}
The C<data_sources> method is called when the user invokes any of the
following:
@ary = DBI->data_sources($driver);
@ary = DBI->data_sources($driver, \%attr);
@ary = $dbh->data_sources();
@ary = $dbh->data_sources(\%attr);
The C<avail_tables> method is called when the user invokes any of the
following:
@names = $dbh->tables( $catalog, $schema, $table, $type );
$sth = $dbh->table_info( $catalog, $schema, $table, $type );
$sth = $dbh->table_info( $catalog, $schema, $table, $type, \%attr );
$dbh->func( "list_tables" );
Every time where an C<\%attr> argument can be specified, this C<\%attr>
object's C<sql_table_source> attribute is preferred over the C<$dbh>
attribute or the driver default.
=head2 DBI::DBD::SqlEngine::DataSource
Provides base functionality for dealing with tables. It is primarily
designed for allowing transparent access to files on disk or already
opened (file-)streams (e.g. for DBD::CSV).
Derived classes shall be restricted to similar functionality, too (e.g.
opening streams from an archive, transparently compress/uncompress
log files before parsing them, etc.).
package DBI::DBD::SqlEngine::DataSource;
sub complete_table_name ($$;$)
{
my ( $self, $meta, $table, $respect_case ) = @_;
...
}
The method C<complete_table_name> is called when first setting up the
I<meta information> for a table:
"SELECT user.id, user.name, user.shell FROM user WHERE ..."
results in opening the table C<user>. First step of the table open
process is completing the name. Let's imagine you're having a L<DBD::CSV>
handle with following settings:
$dbh->{sql_identifier_case} = SQL_IC_LOWER;
$dbh->{f_ext} = '.lst';
$dbh->{f_dir} = '/data/web/adrmgr';
Those settings will result in looking for files matching
C<[Uu][Ss][Ee][Rr](\.lst)?$> in C</data/web/adrmgr/>. The scanning of the
directory C</data/web/adrmgr/> and the pattern match check will be done
in C<DBD::File::DataSource::File> by the C<complete_table_name> method.
If you intend to provide other sources of data streams than files, in
addition to provide an appropriate C<complete_table_name> method, a method
to open the resource is required:
package DBI::DBD::SqlEngine::DataSource;
sub open_data ($)
{
my ( $self, $meta, $attrs, $flags ) = @_;
...
}
After the method C<open_data> has been run successfully, the table's meta
information is in a state which allows the table's data accessor methods
to fetch/store row information. Implementation details heavily
depends on the table implementation, whereby the most famous is surely
L<DBD::File::Table|DBD::File/DBD::File::Table>.
=head2 DBI::DBD::SqlEngine::Statement
Derives from DBI::SQL::Nano::Statement for unified naming when deriving
new drivers. No additional feature is provided from here.
=head2 DBI::DBD::SqlEngine::Table
Derives from DBI::SQL::Nano::Table for unified naming when deriving
new drivers.
You should consult the documentation of C<< SQL::Eval::Table >> (see
L<SQL::Eval>) to get more information about the abstract methods of the
table's base class you have to override and a description of the table
meta information expected by the SQL engines.
=over 4
=item bootstrap_table_meta
Initializes a table meta structure. Can be safely overridden in a
derived class, as long as the C<< SUPER >> method is called at the end
of the overridden method.
It copies the following attributes from the database into the table meta data
C<< $dbh->{ReadOnly} >> into C<< $meta->{readonly} >>, C<sql_identifier_case>
and C<sql_data_source> and makes them sticky to the table.
This method should be called before you attempt to map between file
name and table name to ensure the correct directory, extension etc. are
used.
=item init_table_meta
Initializes more attributes of the table meta data - usually more
expensive ones (e.g. those which require class instantiations) - when
the file name and the table name could be mapped.
=item get_table_meta
Returns the table meta data. If there are none for the required table,
a new one is initialized. When after bootstrapping a new I<table_meta>
and L<completing the table name|/DBI::DBD::SqlEngine::DataSource> a
mapping can be established between an existing I<table_meta> and the
new bootstrapped one, the already existing is used and a mapping
shortcut between the recent used table name and the already known
table name is held in C<< $dbh->{sql_meta_map} >>. When it fails,
nothing is returned. On success, the name of the table and the meta data
structure is returned.
=item get_table_meta_attr
Returns a single attribute from the table meta data. If the attribute
name appears in C<%compat_map>, the attribute name is updated from
there.
=item set_table_meta_attr
Sets a single attribute in the table meta data. If the attribute
name appears in C<%compat_map>, the attribute name is updated from
there.
=item table_meta_attr_changed
Called when an attribute of the meta data is modified.
If the modified attribute requires to reset a calculated attribute, the
calculated attribute is reset (deleted from meta data structure) and
the I<initialized> flag is removed, too. The decision is made based on
C<%register_reset_on_modify>.
=item register_reset_on_modify
Allows C<set_table_meta_attr> to reset meta attributes when special
attributes are modified. For DBD::File, modifying one of C<f_file>, C<f_dir>,
C<f_ext> or C<f_lockfile> will reset C<f_fqfn>. DBD::DBM extends the
list for C<dbm_type> and C<dbm_mldbm> to reset the value of C<dbm_tietype>.
If your DBD has calculated values in the meta data area, then call
C<register_reset_on_modify>:
my %reset_on_modify = ( "xxx_foo" => "xxx_bar" );
__PACKAGE__->register_reset_on_modify( \%reset_on_modify );
=item register_compat_map
Allows C<get_table_meta_attr> and C<set_table_meta_attr> to update the
attribute name to the current favored one:
# from DBD::DBM
my %compat_map = ( "dbm_ext" => "f_ext" );
__PACKAGE__->register_compat_map( \%compat_map );
=item open_data
Called to open the table's data storage. This is silently forwarded
to C<< $meta->{sql_data_source}->open_data() >>.
After this is done, a derived class might add more steps in an overridden
C<< open_file >> method.
=item new
Instantiates the table. This is done in 3 steps:
1. get the table meta data
2. open the data file
3. bless the table data structure using inherited constructor new
It is not recommended to override the constructor of the table class.
Find a reasonable place to add your extensions in one of the above four
methods.
=back
=head1 AUTHOR
The module DBI::DBD::SqlEngine is currently maintained by
H.Merijn Brand < h.m.brand at xs4all.nl > and
Jens Rehsack < rehsack at googlemail.com >
=head1 COPYRIGHT AND LICENSE
Copyright (C) 2010 by H.Merijn Brand & Jens Rehsack
All rights reserved.
You may freely distribute and/or modify this module under the terms of
either the GNU General Public License (GPL) or the Artistic License, as
specified in the Perl README file.
=cut

View File

@@ -0,0 +1,333 @@
=head1 NAME
DBI::DBD::SqlEngine::HowTo - Guide to create DBI::DBD::SqlEngine based driver
=head1 SYNOPSIS
perldoc DBI::DBD::SqlEngine::HowTo
perldoc DBI
perldoc DBI::DBD
perldoc DBI::DBD::SqlEngine::Developers
perldoc SQL::Eval
perldoc DBI::DBD::SqlEngine
perldoc DBI::DBD::SqlEngine::HowTo
perldoc SQL::Statement::Embed
=head1 DESCRIPTION
This document provides a step-by-step guide, how to create a new
C<DBI::DBD::SqlEngine> based DBD. It expects that you carefully read the
L<DBI> documentation and that you're familiar with L<DBI::DBD> and had
read and understood L<DBD::ExampleP>.
This document addresses experienced developers who are really sure that
they need to invest time when writing a new DBI Driver. Writing a DBI
Driver is neither a weekend project nor an easy job for hobby coders
after work. Expect one or two man-month of time for the first start.
Those who are still reading, should be able to sing the rules of
L<DBI::DBD/CREATING A NEW DRIVER>.
=head1 CREATING DRIVER CLASSES
Do you have an entry in DBI's DBD registry? DBI::DBD::SqlEngine expects
having a unique prefix for every driver class in inheritance chain.
It's easy to get a prefix - just drop the DBI team a note
(L<DBI/GETTING_HELP>). If you want for some reason hide your work, take
a look at L<Class::Method::Modifiers> how to wrap a private prefix method
around existing C<driver_prefix>.
For this guide, a prefix of C<foo_> is assumed.
=head2 Sample Skeleton
package DBD::Foo;
use strict;
use warnings;
use vars qw($VERSION);
use base qw(DBI::DBD::SqlEngine);
use DBI ();
$VERSION = "0.001";
package DBD::Foo::dr;
use vars qw(@ISA $imp_data_size);
@ISA = qw(DBI::DBD::SqlEngine::dr);
$imp_data_size = 0;
package DBD::Foo::db;
use vars qw(@ISA $imp_data_size);
@ISA = qw(DBI::DBD::SqlEngine::db);
$imp_data_size = 0;
package DBD::Foo::st;
use vars qw(@ISA $imp_data_size);
@ISA = qw(DBI::DBD::SqlEngine::st);
$imp_data_size = 0;
package DBD::Foo::Statement;
use vars qw(@ISA);
@ISA = qw(DBI::DBD::SqlEngine::Statement);
package DBD::Foo::Table;
use vars qw(@ISA);
@ISA = qw(DBI::DBD::SqlEngine::Table);
1;
Tiny, eh? And all you have now is a DBD named foo which is able to
deal with temporary tables, as long as you use L<SQL::Statement>. In
L<DBI::SQL::Nano> environments, this DBD can do nothing.
=head2 Deal with own attributes
Before we start doing usable stuff with our DBI driver, we need to think
about what we want to do and how we want to do it.
Do we need tunable knobs accessible by users? Do we need status
information? All this is handled in attributes of the database handles (be
careful when your DBD is running "behind" a L<DBD::Gofer> proxy).
How do the attributes come into the DBD and how are they fetchable by the
user? Good question, but you should know because you've read the L<DBI>
documentation.
C<DBI::DBD::SqlEngine::db::FETCH> and C<DBI::DBD::SqlEngine::db::STORE>
take care of this for you - all they need to know is which attribute names
are valid and mutable or immutable. Tell them by adding
C<init_valid_attributes> to your db class:
sub init_valid_attributes
{
my $dbh = $_[0];
$dbh->SUPER::init_valid_attributes ();
$dbh->{foo_valid_attrs} = {
foo_version => 1, # contains version of this driver
foo_valid_attrs => 1, # contains the valid attributes of foo drivers
foo_readonly_attrs => 1, # contains immutable attributes of foo drivers
foo_bar => 1, # contains the bar attribute
foo_baz => 1, # contains the baz attribute
foo_manager => 1, # contains the manager of the driver instance
foo_manager_type => 1, # contains the manager class of the driver instance
};
$dbh->{foo_readonly_attrs} = {
foo_version => 1, # ensure no-one modifies the driver version
foo_valid_attrs => 1, # do not permit one to add more valid attributes ...
foo_readonly_attrs => 1, # ... or make the immutable mutable
foo_manager => 1, # manager is set internally only
};
return $dbh;
}
Woooho - but now the user cannot assign new managers? This is intended,
overwrite C<STORE> to handle it!
sub STORE ($$$)
{
my ( $dbh, $attrib, $value ) = @_;
$dbh->SUPER::STORE( $attrib, $value );
# we're still alive, so no exception is thrown ...
# by DBI::DBD::SqlEngine::db::STORE
if ( $attrib eq "foo_manager_type" )
{
$dbh->{foo_manager} = $dbh->{foo_manager_type}->new();
# ... probably correct some states based on the new
# foo_manager_type - see DBD::Sys for an example
}
}
But ... my driver runs without a manager until someone first assigns
a C<foo_manager_type>. Well, no - there're two places where you can
initialize defaults:
sub init_default_attributes
{
my ($dbh, $phase) = @_;
$dbh->SUPER::init_default_attributes($phase);
if( 0 == $phase )
{
# init all attributes which have no knowledge about
# user settings from DSN or the attribute hash
$dbh->{foo_manager_type} = "DBD::Foo::Manager";
}
elsif( 1 == $phase )
{
# init phase with more knowledge from DSN or attribute
# hash
$dbh->{foo_manager} = $dbh->{foo_manager_type}->new();
}
return $dbh;
}
So far we can prevent users from using our database driver as data
storage for anything and everything. We care only about the real important
stuff for peace on earth and alike attributes. But in fact, the driver
still can't do anything. It can do less than nothing - meanwhile it's
not a stupid storage area anymore.
=head2 User comfort
C<DBI::DBD::SqlEngine> since C<0.05> consolidates all persistent meta data
of a table into a single structure stored in C<< $dbh->{sql_meta} >>. While
DBI::DBD::SqlEngine provides only readonly access to this structure,
modifications are still allowed.
Primarily DBI::DBD::SqlEngine provides access via the setters
C<new_sql_engine_meta>, C<get_sql_engine_meta>, C<get_single_table_meta>,
C<set_single_table_meta>, C<set_sql_engine_meta> and C<clear_sql_engine_meta>.
Those methods are easily accessible by the users via the C<< $dbh->func () >>
interface provided by DBI. Well, many users don't feel comfortable calling
# don't require extension for tables cars
$dbh->func ("cars", "f_ext", ".csv", "set_sql_engine_meta");
DBI::DBD::SqlEngine will inject a method into your driver to increase the
user comfort to allow:
# don't require extension for tables cars
$dbh->foo_set_meta ("cars", "f_ext", ".csv");
Better, but here and there users likes to do:
# don't require extension for tables cars
$dbh->{foo_tables}->{cars}->{f_ext} = ".csv";
This interface is provided when derived DBD's define following in
C<init_valid_attributes> (re-capture L</Deal with own attributes>):
sub init_valid_attributes
{
my $dbh = $_[0];
$dbh->SUPER::init_valid_attributes ();
$dbh->{foo_valid_attrs} = {
foo_version => 1, # contains version of this driver
foo_valid_attrs => 1, # contains the valid attributes of foo drivers
foo_readonly_attrs => 1, # contains immutable attributes of foo drivers
foo_bar => 1, # contains the bar attribute
foo_baz => 1, # contains the baz attribute
foo_manager => 1, # contains the manager of the driver instance
foo_manager_type => 1, # contains the manager class of the driver instance
foo_meta => 1, # contains the public interface to modify table meta attributes
};
$dbh->{foo_readonly_attrs} = {
foo_version => 1, # ensure no-one modifies the driver version
foo_valid_attrs => 1, # do not permit one to add more valid attributes ...
foo_readonly_attrs => 1, # ... or make the immutable mutable
foo_manager => 1, # manager is set internally only
foo_meta => 1, # ensure public interface to modify table meta attributes are immutable
};
$dbh->{foo_meta} = "foo_tables";
return $dbh;
}
This provides a tied hash in C<< $dbh->{foo_tables} >> and a tied hash for
each table's meta data in C<< $dbh->{foo_tables}->{$table_name} >>.
Modifications on the table meta attributes are done using the table
methods:
sub get_table_meta_attr { ... }
sub set_table_meta_attr { ... }
Both methods can adjust the attribute name for compatibility reasons, e.g.
when former versions of the DBD allowed different names to be used for the
same flag:
my %compat_map = (
abc => 'foo_abc',
xyz => 'foo_xyz',
);
__PACKAGE__->register_compat_map( \%compat_map );
If any user modification on a meta attribute needs reinitialization of
the meta structure (in case of C<DBI::DBD::SqlEngine> these are the attributes
C<f_file>, C<f_dir>, C<f_ext> and C<f_lockfile>), inform DBI::DBD::SqlEngine by
doing
my %reset_on_modify = (
foo_xyz => "foo_bar",
foo_abc => "foo_bar",
);
__PACKAGE__->register_reset_on_modify( \%reset_on_modify );
The next access to the table meta data will force DBI::DBD::SqlEngine to re-do the
entire meta initialization process.
Any further action which needs to be taken can handled in
C<table_meta_attr_changed>:
sub table_meta_attr_changed
{
my ($class, $meta, $attrib, $value) = @_;
...
$class->SUPER::table_meta_attr_changed ($meta, $attrib, $value);
}
This is done before the new value is set in C<$meta>, so the attribute
changed handler can act depending on the old value.
=head2 Dealing with Tables
Let's put some life into it - it's going to be time for it.
This is a good point where a quick side step to L<SQL::Statement::Embed>
will help to shorten the next paragraph. The documentation in
SQL::Statement::Embed regarding embedding in own DBD's works pretty
fine with SQL::Statement and DBI::SQL::Nano.
Second look should go to L<DBI::DBD::SqlEngine::Developers> to get a
picture over the driver part of the table API. Usually there isn't much
to do for an easy driver.
=head2 Testing
Now you should have your first own DBD. Was easy, wasn't it? But does
it work well? Prove it by writing tests and remember to use
dbd_edit_mm_attribs from L<DBI::DBD> to ensure testing even rare cases.
=head1 AUTHOR
This guide is written by Jens Rehsack. DBI::DBD::SqlEngine is written by
Jens Rehsack using code from DBD::File originally written by Jochen
Wiedmann and Jeff Zucker.
The module DBI::DBD::SqlEngine is currently maintained by
H.Merijn Brand < h.m.brand at xs4all.nl > and
Jens Rehsack < rehsack at googlemail.com >
=head1 COPYRIGHT AND LICENSE
Copyright (C) 2010 by H.Merijn Brand & Jens Rehsack
All rights reserved.
You may freely distribute and/or modify this module under the terms of
either the GNU General Public License (GPL) or the Artistic License, as
specified in the Perl README file.
=cut

View File

@@ -0,0 +1,900 @@
package DBI::Gofer::Execute;
# $Id: Execute.pm 14282 2010-07-26 00:12:54Z David $
#
# Copyright (c) 2007, Tim Bunce, Ireland
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.
use strict;
use warnings;
use Carp;
use DBI qw(dbi_time);
use DBI::Gofer::Request;
use DBI::Gofer::Response;
use base qw(DBI::Util::_accessor);
our $VERSION = "0.014283";
our @all_dbh_methods = sort map { keys %$_ } $DBI::DBI_methods{db}, $DBI::DBI_methods{common};
our %all_dbh_methods = map { $_ => (DBD::_::db->can($_)||undef) } @all_dbh_methods;
our $local_log = $ENV{DBI_GOFER_LOCAL_LOG}; # do extra logging to stderr
our $current_dbh; # the dbh we're using for this request
# set trace for server-side gofer
# Could use DBI_TRACE env var when it's an unrelated separate process
# but using DBI_GOFER_TRACE makes testing easier for subprocesses (eg stream)
DBI->trace(split /=/, $ENV{DBI_GOFER_TRACE}, 2) if $ENV{DBI_GOFER_TRACE};
# define valid configuration attributes (args to new())
# the values here indicate the basic type of values allowed
# (a plain scalar placeholder, an empty hash ref, or an empty code ref;
# they are only used by valid_configuration_attributes() for introspection)
my %configuration_attributes = (
gofer_execute_class => 1,
default_connect_dsn => 1,
forced_connect_dsn => 1,
default_connect_attributes => {},
forced_connect_attributes => {},
track_recent => 1,
check_request_sub => sub {},
check_response_sub => sub {},
forced_single_resultset => 1,
max_cached_dbh_per_drh => 1,
max_cached_sth_per_dbh => 1,
forced_response_attributes => {},
forced_gofer_random => 1,
stats => {},
);
# generate a read/write accessor method for every configuration attribute
__PACKAGE__->mk_accessors(
keys %configuration_attributes
);
sub new {
    my ($class, $args) = @_;

    # Apply the documented defaults before handing off to the generic
    # accessor-based constructor. Each ||= is independent, so the order
    # in which they are applied does not matter.
    my %defaults = (
        default_connect_attributes => {},
        forced_connect_attributes  => {},
        max_cached_sth_per_dbh     => 1000,
        stats                      => {},
    );
    for my $attr_name (keys %defaults) {
        $args->{$attr_name} ||= $defaults{$attr_name};
    }

    return $class->SUPER::new($args);
}
sub valid_configuration_attributes {
    my ($self) = @_;
    # Hand back a shallow copy so callers can't modify the master list.
    my %copy = %configuration_attributes;
    return \%copy;
}
my %extra_attr = (
# Only referenced if the driver doesn't support private_attribute_info method.
# What driver-specific attributes should be returned for the driver being used?
# keyed by $dbh->{Driver}{Name}
# each entry lists, per handle type ('dbh' or 'sth'), the attribute names
# whose values should be FETCHed and sent back to the client
# XXX for sth should split into attr specific to resultsets (where NUM_OF_FIELDS > 0) and others
# which would reduce processing/traffic for non-select statements
mysql => {
dbh => [qw(
mysql_errno mysql_error mysql_hostinfo mysql_info mysql_insertid
mysql_protoinfo mysql_serverinfo mysql_stat mysql_thread_id
)],
sth => [qw(
mysql_is_blob mysql_is_key mysql_is_num mysql_is_pri_key mysql_is_auto_increment
mysql_length mysql_max_length mysql_table mysql_type mysql_type_name mysql_insertid
)],
# XXX this dbh_after_sth stuff is a temporary, but important, hack.
# should be done via hash instead of arrays where the hash value contains
# flags that can indicate which attributes need to be handled in this way
# (dbh_after_sth attributes are fetched from the dbh after sth methods ran,
# see execute_sth_request)
dbh_after_sth => [qw(
mysql_insertid
)],
},
Pg => {
dbh => [qw(
pg_protocol pg_lib_version pg_server_version
pg_db pg_host pg_port pg_default_port
pg_options pg_pid
)],
sth => [qw(
pg_size pg_type pg_oid_status pg_cmd_status
)],
},
Sybase => {
dbh => [qw(
syb_dynamic_supported syb_oc_version syb_server_version syb_server_version_string
)],
sth => [qw(
syb_types syb_proc_status syb_result_type
)],
},
SQLite => {
dbh => [qw(
sqlite_version
)],
sth => [qw(
)],
},
ExampleP => {
dbh => [qw(
examplep_private_dbh_attrib
)],
sth => [qw(
examplep_private_sth_attrib
)],
dbh_after_sth => [qw(
examplep_insertid
)],
},
);
# Establish (or reuse via connect_cached) the database connection for a
# request. Records the handle in $current_dbh (used for error reporting
# if the request later dies) and returns it. Dies if no DSN can be
# determined.
sub _connect {
    my ($self, $request) = @_;
    my $stats = $self->{stats};

    # discard CachedKids from time to time so long-running gofer processes
    # don't accumulate an unbounded number of cached connections
    if (++$stats->{_requests_served} % 1000 == 0 # XXX config?
        and my $max_cached_dbh_per_drh = $self->{max_cached_dbh_per_drh}
    ) {
        my %drivers = DBI->installed_drivers();
        while ( my ($driver, $drh) = each %drivers ) {
            next unless my $CK = $drh->{CachedKids};
            next unless keys %$CK > $max_cached_dbh_per_drh;
            next if $driver eq 'Gofer'; # ie transport=null when testing
            # fix: the limit used to be passed to sprintf but the format had
            # only one %d, so the second argument was silently dropped
            DBI->trace_msg(sprintf "Clearing %d cached dbh from $driver (max %d)",
                scalar keys %$CK, $self->{max_cached_dbh_per_drh});
            $_->{Active} && $_->disconnect for values %$CK;
            %$CK = ();
        }
    }

    # local $ENV{...} can leak, so only do it if required
    local $ENV{DBI_AUTOPROXY} if $ENV{DBI_AUTOPROXY};

    my ($connect_method, $dsn, $username, $password, $attr) = @{ $request->dbh_connect_call };
    $connect_method ||= 'connect_cached';
    $stats->{method_calls_dbh}->{$connect_method}++;

    # delete attributes we don't want to affect the server-side
    # (Could just do this on client-side and trust the client. DoS?)
    delete @{$attr}{qw(Profile InactiveDestroy AutoInactiveDestroy HandleError HandleSetErr TraceLevel Taint TaintIn TaintOut)};

    $dsn = $self->forced_connect_dsn || $dsn || $self->default_connect_dsn
        or die "No forced_connect_dsn, requested dsn, or default_connect_dsn for request";

    my $random = $self->{forced_gofer_random} || $ENV{DBI_GOFER_RANDOM} || '';

    my $connect_attr = {
        # the configured default attributes, if any
        %{ $self->default_connect_attributes },

        # pass username and password as attributes
        # then they can be overridden by forced_connect_attributes
        Username => $username,
        Password => $password,

        # the requested attributes
        %$attr,

        # force some attributes the way we'd like them
        PrintWarn  => $local_log,
        PrintError => $local_log,

        # the configured forced attributes, if any
        %{ $self->forced_connect_attributes },

        # RaiseError must be enabled
        RaiseError => 1,

        # reset Executed flag (of the cached handle) so we can use it to tell
        # if errors happened before the main part of the request was executed
        Executed => 0,

        # ensure this connect_cached doesn't have the same args as the client
        # because that causes subtle issues if in the same process (ie transport=null)
        # include pid to avoid problems with forking (ie null transport in mod_perl)
        # include gofer-random to avoid random behaviour leaking to other handles
        dbi_go_execute_unique => join("|", __PACKAGE__, $$, $random),
    };

    # XXX implement our own private connect_cached method? (with rate-limited ping)
    my $dbh = DBI->$connect_method($dsn, undef, undef, $connect_attr);

    $dbh->{ShowErrorStatement} = 1 if $local_log;

    # XXX should probably just be a Callbacks => arg to connect_cached
    # with a cache of pre-built callback hooks (memoized, without $self)
    if (my $random = $self->{forced_gofer_random} || $ENV{DBI_GOFER_RANDOM}) {
        $self->_install_rand_callbacks($dbh, $random);
    }

    # cap the number of cached statement handles held by this dbh
    my $CK = $dbh->{CachedKids};
    if ($CK && keys %$CK > $self->{max_cached_sth_per_dbh}) {
        %$CK = (); # clear all statement handles
    }

    #$dbh->trace(0);
    $current_dbh = $dbh;
    return $dbh;
}
sub reset_dbh {
    my ($self, $dbh) = @_;
    # Wipe any lingering error state so it can't leak into a later request.
    $dbh->set_err(undef, undef);
}
# Build a DBI::Gofer::Response from a return value plus the current DBI
# error globals ($DBI::err/errstr/state), merging in $eval_error (the $@
# from the eval that ran the request). Used for both success and failure.
sub new_response_with_err {
my ($self, $rv, $eval_error, $dbh) = @_;
# this is the usual way to create a response for both success and failure
# capture err+errstr etc and merge in $eval_error ($@)
my ($err, $errstr, $state) = ($DBI::err, $DBI::errstr, $DBI::state);
if ($eval_error) {
$err ||= $DBI::stderr || 1; # ensure err is true
if ($errstr) {
# remove the errstr text from the eval error so it isn't duplicated,
# then append what's left (typically the "at FILE line N" part)
$eval_error =~ s/(?: : \s)? \Q$errstr//x if $errstr;
chomp $errstr;
$errstr .= "; $eval_error";
}
else {
$errstr = $eval_error;
}
}
chomp $errstr if $errstr;
my $flags;
# record whether a statement was executed before any error occurred
# (XXX if we ever add transaction support then we'll need to take extra
# steps because the commit/rollback would reset Executed before we get here)
$flags |= GOf_RESPONSE_EXECUTED if $dbh && $dbh->{Executed};
my $response = DBI::Gofer::Response->new({
rv => $rv,
err => $err,
errstr => $errstr,
state => $state,
flags => $flags,
});
return $response;
}
# Top-level entry point: execute a DBI::Gofer::Request and return a
# DBI::Gofer::Response. Must never throw - any error, including internal
# ones, is converted into an error response. Warnings raised while the
# request runs are captured and attached to the response.
sub execute_request {
my ($self, $request) = @_;
# should never throw an exception
DBI->trace_msg("-----> execute_request\n");
my @warnings;
# capture warnings so they can be shipped back to the client
local $SIG{__WARN__} = sub {
push @warnings, @_;
warn @_ if $local_log;
};
my $response = eval {
# give the configured hook a chance to veto or replace the request
if (my $check_request_sub = $self->check_request_sub) {
$request = $check_request_sub->($request, $self)
or die "check_request_sub failed";
}
# reject requests from incompatible client protocol versions
my $version = $request->version || 0;
die ref($request)." version $version is not supported"
if $version < 0.009116 or $version >= 1;
($request->is_sth_request)
? $self->execute_sth_request($request)
: $self->execute_dbh_request($request);
};
# if the eval died we build an error response from $@ here
$response ||= $self->new_response_with_err(undef, $@, $current_dbh);
if (my $check_response_sub = $self->check_response_sub) {
# not protected with an eval so it can choose to throw an exception
my $new = $check_response_sub->($response, $self, $request);
$response = $new if ref $new;
}
undef $current_dbh;
$response->warnings(\@warnings) if @warnings;
DBI->trace_msg("<----- execute_request\n");
return $response;
}
# Execute a dbh-level method call (e.g. do, table_info) described by the
# request and return a response carrying the return value, any requested
# dbh attributes, and - for metadata methods returning an sth - the
# fetched resultsets.
sub execute_dbh_request {
my ($self, $request) = @_;
my $stats = $self->{stats};
my $dbh;
my $rv_ref = eval {
$dbh = $self->_connect($request);
my $args = $request->dbh_method_call; # [ wantarray, 'method_name', @args ]
my $wantarray = shift @$args;
my $meth = shift @$args;
$stats->{method_calls_dbh}->{$meth}++;
my @rv = ($wantarray)
? $dbh->$meth(@$args)
: scalar $dbh->$meth(@$args);
\@rv;
} || []; # empty array ref means the eval died; $@ is captured below
my $response = $self->new_response_with_err($rv_ref, $@, $dbh);
# if even _connect failed there's nothing more we can gather
return $response if not $dbh;
# does this request also want any dbh attributes returned?
if (my $dbh_attributes = $request->dbh_attributes) {
$response->dbh_attributes( $self->gather_dbh_attributes($dbh, $dbh_attributes) );
}
if ($rv_ref and my $lid_args = $request->dbh_last_insert_id_args) {
$stats->{method_calls_dbh}->{last_insert_id}++;
my $id = $dbh->last_insert_id( @$lid_args );
$response->last_insert_id( $id );
}
if ($rv_ref and UNIVERSAL::isa($rv_ref->[0],'DBI::st')) {
# dbh_method_call was probably a metadata method like table_info
# that returns a statement handle, so turn the $sth into resultset
my $sth = $rv_ref->[0];
$response->sth_resultsets( $self->gather_sth_resultsets($sth, $request, $response) );
$response->rv("(sth)"); # don't try to return actual sth
}
# we're finished with this dbh for this request
$self->reset_dbh($dbh);
return $response;
}
sub gather_dbh_attributes {
    my ($self, $dbh, $dbh_attributes) = @_;
    # Fetch the requested attribute values from $dbh and return them as a
    # hashref keyed by attribute name.
    my @wanted = @$dbh_attributes;

    # a leading '*' is shorthand for "standard plus driver-private attributes"
    if ($wanted[0] eq '*') {
        shift @wanted;
        push @wanted, @{ $self->_std_response_attribute_names($dbh) };
    }

    my %values;
    @values{@wanted} = $dbh->FETCH_many(@wanted);

    # XXX piggyback installed_methods onto dbh_attributes for now
    $values{dbi_installed_methods} = { DBI->installed_methods };

    # XXX piggyback default_methods onto dbh_attributes for now
    $values{dbi_default_methods} = _get_default_methods($dbh);

    return \%values;
}
# Work out which attribute names should be returned to the client for a
# given handle ($h may be a dbh or an sth). Combines a fixed per-type
# list, the driver's private_attribute_info() (or our %extra_attr
# fallback), and any configured forced_response_attributes. The result
# is cached on the dbh, keyed by handle type.
sub _std_response_attribute_names {
my ($self, $h) = @_;
$h = tied(%$h) || $h; # switch to inner handle
# cache the private_attribute_info data for each handle
# XXX might be better to cache it in the executor
# as it's unlikely to change
# or perhaps at least cache it in the dbh even for sth
# as the sth are typically very short lived
my ($dbh, $h_type, $driver_name, @attr_names);
if ($dbh = $h->{Database}) { # is an sth
# does the dbh already have the answer cached?
return $dbh->{private_gofer_std_attr_names_sth} if $dbh->{private_gofer_std_attr_names_sth};
($h_type, $driver_name) = ('sth', $dbh->{Driver}{Name});
push @attr_names, qw(NUM_OF_PARAMS NUM_OF_FIELDS NAME TYPE NULLABLE PRECISION SCALE);
}
else { # is a dbh
return $h->{private_gofer_std_attr_names_dbh} if $h->{private_gofer_std_attr_names_dbh};
($h_type, $driver_name, $dbh) = ('dbh', $h->{Driver}{Name}, $h);
# explicitly add these because drivers may have different defaults
# add Name so the client gets the real Name of the connection
push @attr_names, qw(ChopBlanks LongReadLen LongTruncOk ReadOnly Name);
}
# prefer the driver's own list of private attributes, falling back to
# the hard-coded %extra_attr table for drivers that don't provide one
if (my $pai = $h->private_attribute_info) {
push @attr_names, keys %$pai;
}
else {
push @attr_names, @{ $extra_attr{ $driver_name }{$h_type} || []};
}
if (my $fra = $self->{forced_response_attributes}) {
push @attr_names, @{ $fra->{ $driver_name }{$h_type} || []}
}
$dbh->trace_msg("_std_response_attribute_names for $driver_name $h_type: @attr_names\n");
# cache into the dbh even for sth, as the dbh is usually longer lived
return $dbh->{"private_gofer_std_attr_names_$h_type"} = \@attr_names;
}
# Execute an sth-level request: prepare a statement, replay the recorded
# sth method calls (bind_param, execute, ...), then gather the resultsets
# and any requested dbh attributes into the response.
sub execute_sth_request {
my ($self, $request) = @_;
my $dbh;
my $sth;
my $last_insert_id;
my $stats = $self->{stats};
my $rv = eval {
$dbh = $self->_connect($request);
my $args = $request->dbh_method_call; # [ wantarray, 'method_name', @args ]
shift @$args; # discard wantarray
my $meth = shift @$args;
$stats->{method_calls_sth}->{$meth}++;
$sth = $dbh->$meth(@$args);
my $last = '(sth)'; # a true value (don't try to return actual sth)
# execute methods on the sth, e.g., bind_param & execute
if (my $calls = $request->sth_method_calls) {
for my $meth_call (@$calls) {
my $method = shift @$meth_call;
$stats->{method_calls_sth}->{$method}++;
$last = $sth->$method(@$meth_call);
}
}
if (my $lid_args = $request->dbh_last_insert_id_args) {
$stats->{method_calls_sth}->{last_insert_id}++;
$last_insert_id = $dbh->last_insert_id( @$lid_args );
}
$last;
};
my $response = $self->new_response_with_err($rv, $@, $dbh);
# if even _connect failed there's nothing more we can gather
return $response if not $dbh;
$response->last_insert_id( $last_insert_id )
if defined $last_insert_id;
# even if the eval failed we still want to try to gather attribute values
# (XXX would be nice to be able to support streaming of results.
# which would reduce memory usage and latency for large results)
if ($sth) {
$response->sth_resultsets( $self->gather_sth_resultsets($sth, $request, $response) );
$sth->finish;
}
# does this request also want any dbh attributes returned?
my $dbh_attr_set;
if (my $dbh_attributes = $request->dbh_attributes) {
$dbh_attr_set = $self->gather_dbh_attributes($dbh, $dbh_attributes);
}
# some attributes (e.g. mysql_insertid) only have meaningful values on
# the dbh after the sth methods have run, so fetch them now
# XXX needs to be integrated with private_attribute_info() etc
if (my $dbh_attr = $extra_attr{$dbh->{Driver}{Name}}{dbh_after_sth}) {
@{$dbh_attr_set}{@$dbh_attr} = $dbh->FETCH_many(@$dbh_attr);
}
$response->dbh_attributes($dbh_attr_set) if $dbh_attr_set && %$dbh_attr_set;
$self->reset_dbh($dbh);
return $response;
}
# Fetch all result sets from $sth (following more_results where the
# driver supports it) and return them as an array ref of per-resultset
# hashrefs. Any error while gathering is recorded on the response via
# add_err rather than thrown.
sub gather_sth_resultsets {
my ($self, $sth, $request, $response) = @_;
my $resultsets = eval {
# start from the standard attribute list for this driver/handle type
my $attr_names = $self->_std_response_attribute_names($sth);
my $sth_attr = {};
$sth_attr->{$_} = 1 for @$attr_names;
# let the client add/remove sth attributes
if (my $sth_result_attr = $request->sth_result_attr) {
$sth_attr->{$_} = $sth_result_attr->{$_}
for keys %$sth_result_attr;
}
# only attributes with true values are actually fetched
my @sth_attr = grep { $sth_attr->{$_} } keys %$sth_attr;
my $row_count = 0;
my $rs_list = [];
while (1) {
my $rs = $self->fetch_result_set($sth, \@sth_attr);
push @$rs_list, $rs;
if (my $rows = $rs->{rowset}) {
$row_count += @$rows;
}
last if $self->{forced_single_resultset};
# syb_more_results is checked for drivers predating the standard
# more_results method
last if !($sth->more_results || $sth->{syb_more_results});
}
my $stats = $self->{stats};
$stats->{rows_returned_total} += $row_count;
$stats->{rows_returned_max} = $row_count
if $row_count > ($stats->{rows_returned_max}||0);
$rs_list;
};
$response->add_err(1, $@) if $@;
return $resultsets;
}
# Fetch a single result set: the requested sth attribute values plus,
# for SELECT-like statements, all the rows. Returns a hashref; on error
# the err/errstr/state keys record the problem instead of throwing.
sub fetch_result_set {
my ($self, $sth, $sth_attr) = @_;
my %meta;
eval {
@meta{ @$sth_attr } = $sth->FETCH_many(@$sth_attr);
# we assume @$sth_attr contains NUM_OF_FIELDS
$meta{rowset} = $sth->fetchall_arrayref()
if (($meta{NUM_OF_FIELDS}||0) > 0); # is SELECT
# the fetchall_arrayref may fail with a 'not executed' kind of error
# because gather_sth_resultsets/fetch_result_set are called even if
# execute() failed, or even if there was no execute() call at all.
# The corresponding error goes into the resultset err, not the top-level
# response err, so in most cases this resultset err is never noticed.
};
if ($@) {
# capture the DBI error globals while they're still fresh from the eval
chomp $@;
$meta{err} = $DBI::err || 1;
$meta{errstr} = $DBI::errstr || $@;
$meta{state} = $DBI::state;
}
return \%meta;
}
sub _get_default_methods {
    my ($dbh) = @_;
    # Returns a hashref naming the dbh methods which the driver has not
    # overridden, i.e., quote(). These don't need to be forwarded via gofer.
    my $imp_class = $dbh->{ImplementorClass} or die;

    # The "|| 42" turns an undefined lookup into a sentinel value that can
    # never equal a real code ref, keeping the comparison below safe.
    my %default_methods =
        map  { $_ => 1 }
        grep { ($all_dbh_methods{$_} || 42) == ($imp_class->can($_) || 42) }
        @all_dbh_methods;

    return \%default_methods;
}
# XXX would be nice to make this a generic DBI module
# Parse a DBI_GOFER_RANDOM-style spec string (comma separated tokens:
# fail=R%, err=N, delayN=R%, methodname) and install DBI Callbacks on
# $dbh that randomly fail and/or delay the named methods. Idempotent for
# the same handle+spec combination.
sub _install_rand_callbacks {
my ($self, $dbh, $dbi_gofer_random) = @_;
my $callbacks = $dbh->{Callbacks} || {};
my $prev = $dbh->{private_gofer_rand_fail_callbacks} || {};
# return if we've already setup this handle with callbacks for these specs
return if (($callbacks->{_dbi_gofer_random_spec}||'') eq $dbi_gofer_random);
#warn "$dbh # $callbacks->{_dbi_gofer_random_spec}";
$callbacks->{_dbi_gofer_random_spec} = $dbi_gofer_random;
my ($fail_percent, $fail_err, $delay_percent, $delay_duration, %spec_part, @spec_note);
my @specs = split /,/, $dbi_gofer_random;
for my $spec (@specs) {
# config tokens set the current fail/err/delay values; they apply to
# every method name token that follows them
if ($spec =~ m/^fail=(-?[.\d]+)%?$/) {
$fail_percent = $1;
$spec_part{fail} = $spec;
next;
}
if ($spec =~ m/^err=(-?\d+)$/) {
$fail_err = $1;
$spec_part{err} = $spec;
next;
}
if ($spec =~ m/^delay([.\d]+)=(-?[.\d]+)%?$/) {
$delay_duration = $1;
$delay_percent = $2;
$spec_part{delay} = $spec;
next;
}
elsif ($spec !~ m/^(\w+|\*)$/) {
warn "Ignored DBI_GOFER_RANDOM item '$spec' which isn't a config or a dbh method name";
next;
}
# anything else is a method name to attach the current settings to
my $method = $spec;
if ($callbacks->{$method} && $prev->{$method} && $callbacks->{$method} != $prev->{$method}) {
warn "Callback for $method method already installed so DBI_GOFER_RANDOM callback not installed\n";
next;
}
unless (defined $fail_percent or defined $delay_percent) {
warn "Ignored DBI_GOFER_RANDOM item '$spec' because not preceded by 'fail=N' and/or 'delayN=N'";
next;
}
push @spec_note, join(",", values(%spec_part), $method);
$callbacks->{$method} = $self->_mk_rand_callback($method, $fail_percent, $delay_percent, $delay_duration, $fail_err);
}
warn "DBI_GOFER_RANDOM failures/delays enabled: @spec_note\n"
if @spec_note;
$dbh->{Callbacks} = $callbacks;
$dbh->{private_gofer_rand_fail_callbacks} = $callbacks;
}
# per-method call counters shared by all the callbacks created below,
# used when failures/delays are sequence-based rather than random
my %_mk_rand_callback_seqn;
# Build a DBI callback closure that randomly (positive percent) or
# periodically (negative percent: every Nth call) fails and/or delays
# the named method. Returns the closure.
sub _mk_rand_callback {
my ($self, $method, $fail_percent, $delay_percent, $delay_duration, $fail_err) = @_;
my ($fail_modrate, $delay_modrate);
# a negative percent P converts to a modrate of 100/-P, i.e. trigger on
# every (100/-P)th call instead of randomly
$fail_percent ||= 0; $fail_modrate = int(1/(-$fail_percent )*100) if $fail_percent;
$delay_percent ||= 0; $delay_modrate = int(1/(-$delay_percent)*100) if $delay_percent;
# note that $method may be "*" but that's not recommended or documented or wise
return sub {
my ($h) = @_;
my $seqn = ++$_mk_rand_callback_seqn{$method};
my $delay = ($delay_percent > 0) ? rand(100) < $delay_percent :
($delay_percent < 0) ? !($seqn % $delay_modrate): 0;
my $fail = ($fail_percent > 0) ? rand(100) < $fail_percent :
($fail_percent < 0) ? !($seqn % $fail_modrate) : 0;
#no warnings 'uninitialized';
#warn "_mk_rand_callback($fail_percent:$fail_modrate, $delay_percent:$delay_modrate): seqn=$seqn fail=$fail delay=$delay";
if ($delay) {
my $msg = "DBI_GOFER_RANDOM delaying execution of $method() by $delay_duration seconds\n";
# Note what's happening in a trace message. If the delay percent is an even
# number then use warn() instead so it's sent back to the client.
($delay_percent % 2 == 1) ? warn($msg) : $h->trace_msg($msg);
select undef, undef, undef, $delay_duration; # allows floating point value
}
if ($fail) {
undef $_; # tell DBI to not call the method
# the "induced by DBI_GOFER_RANDOM" is special and must be included in errstr
# as it's checked for in a few places, such as the gofer retry logic
return $h->set_err($fail_err || $DBI::stderr,
"fake error from $method method induced by DBI_GOFER_RANDOM env var ($fail_percent%)");
}
return;
}
}
# Record size statistics for a completed request/response pair and, when
# track_recent is enabled, keep bounded queues of the most recent items
# (and of recent errors) for later diagnostics. Returns the 'recent'
# record, or undef when tracking is disabled.
sub update_stats {
    my ($self,
        $request, $response,
        $frozen_request, $frozen_response,
        $time_received,
        $store_meta, $other_meta,
    ) = @_;

    # should always have a response object here
    # (fix: this previously tested $request, so a missing response object
    # was never actually reported)
    carp("No response object provided") unless $response;

    my $stats = $self->{stats};
    $stats->{frozen_request_max_bytes} = length($frozen_request)
        if $frozen_request
        && length($frozen_request) > ($stats->{frozen_request_max_bytes}||0);
    $stats->{frozen_response_max_bytes} = length($frozen_response)
        if $frozen_response
        && length($frozen_response) > ($stats->{frozen_response_max_bytes}||0);

    my $recent;
    if (my $track_recent = $self->{track_recent}) {
        $recent = {
            request => $frozen_request,
            response => $frozen_response,
            time_received => $time_received,
            duration => dbi_time()-$time_received,
            # for any other info
            ($store_meta) ? (meta => $store_meta) : (),
        };
        # keep the unfrozen objects when no frozen form was supplied
        $recent->{request_object} = $request
            if !$frozen_request && $request;
        $recent->{response_object} = $response
            if !$frozen_response;
        # error responses go onto the recent_errors queue as well
        my @queues = ($stats->{recent_requests} ||= []);
        push @queues, ($stats->{recent_errors} ||= [])
            if !$response or $response->err;
        for my $queue (@queues) {
            push @$queue, $recent;
            shift @$queue if @$queue > $track_recent; # bound the queue length
        }
    }
    return $recent;
}
1;
__END__
=head1 NAME
DBI::Gofer::Execute - Executes Gofer requests and returns Gofer responses
=head1 SYNOPSIS
$executor = DBI::Gofer::Execute->new( { ...config... });
$response = $executor->execute_request( $request );
=head1 DESCRIPTION
Accepts a DBI::Gofer::Request object, executes the requested DBI method calls,
and returns a DBI::Gofer::Response object.
Any error, including any internal 'fatal' errors are caught and converted into
a DBI::Gofer::Response object.
This module is usually invoked by a 'server-side' Gofer transport module.
They usually have names in the "C<DBI::Gofer::Transport::*>" namespace.
Examples include: L<DBI::Gofer::Transport::stream> and L<DBI::Gofer::Transport::mod_perl>.
=head1 CONFIGURATION
=head2 check_request_sub
If defined, it must be a reference to a subroutine that will 'check' the request.
It is passed the request object and the executor as its only arguments.
The subroutine can either return the original request object or die with a
suitable error message (which will be turned into a Gofer response).
It can also construct and return a new request that should be executed instead
of the original request.
=head2 check_response_sub
If defined, it must be a reference to a subroutine that will 'check' the response.
It is passed the response object, the executor, and the request object.
The sub may alter the response object and return undef, or return a new response object.
This mechanism can be used to, for example, terminate the service if specific
database errors are seen.
=head2 forced_connect_dsn
If set, this DSN is always used instead of the one in the request.
=head2 default_connect_dsn
If set, this DSN is used if C<forced_connect_dsn> is not set and the request does not contain a DSN itself.
=head2 forced_connect_attributes
A reference to a hash of connect() attributes. Individual attributes in
C<forced_connect_attributes> will take precedence over corresponding attributes
in the request.
=head2 default_connect_attributes
A reference to a hash of connect() attributes. Individual attributes in the
request take precedence over corresponding attributes in C<default_connect_attributes>.
=head2 max_cached_dbh_per_drh
If set, the loaded drivers will be checked to ensure they don't have more than
this number of cached connections. There is no default value. This limit is not
enforced for every request.
=head2 max_cached_sth_per_dbh
If set, all the cached statement handles will be cleared once the number of
cached statement handles rises above this limit. The default is 1000.
=head2 forced_single_resultset
If true, then only the first result set will be fetched and returned in the response.
=head2 forced_response_attributes
A reference to a data structure that can specify extra attributes to be returned in responses.
forced_response_attributes => {
DriverName => {
dbh => [ qw(dbh_attrib_name) ],
sth => [ qw(sth_attrib_name) ],
},
},
This can be useful in cases where the driver has not implemented the
private_attribute_info() method and DBI::Gofer::Execute's own fallback list of
private attributes doesn't include the driver or attributes you need.
=head2 track_recent
If set, specifies the number of recent requests and responses that should be
kept by the update_stats() method for diagnostics. See L<DBI::Gofer::Transport::mod_perl>.
Note that this setting can significantly increase memory use. Use with caution.
=head2 forced_gofer_random
Enable forced random failures and/or delays for testing. See L</DBI_GOFER_RANDOM> below.
=head1 DRIVER-SPECIFIC ISSUES
Gofer needs to know about any driver-private attributes that should have their
values sent back to the client.
If the driver doesn't support the private_attribute_info() method, and very few do,
then the module falls back to using some hard-coded details, if available, for
the driver being used. Currently hard-coded details are available for the
mysql, Pg, Sybase, and SQLite drivers.
=head1 TESTING
DBD::Gofer, DBI::Gofer::Execute, and related packages are well tested by executing the
DBI test suite with DBI_AUTOPROXY configured to route all DBI calls via DBD::Gofer.
Because Gofer includes timeout and 'retry on error' mechanisms there is a need
for some way to trigger delays and/or errors. This can be done via the
C<forced_gofer_random> configuration item, or else the DBI_GOFER_RANDOM environment
variable.
=head2 DBI_GOFER_RANDOM
The value of the C<forced_gofer_random> configuration item (or else the
DBI_GOFER_RANDOM environment variable) is treated as a series of tokens
separated by commas.
The tokens can be one of three types:
=over 4
=item fail=R%
Set the current failure rate to R where R is a percentage.
The value R can be floating point, e.g., C<fail=0.05%>.
Negative values for R have special meaning, see below.
=item err=N
Sets the current failure err value to N (instead of the DBI's default 'standard
err value' of 2000000000). This is useful when you want to simulate a
specific error.
=item delayN=R%
Set the current random delay rate to R where R is a percentage, and set the
current delay duration to N seconds. The values of R and N can be floating point,
e.g., C<delay0.5=0.2%>. Negative values for R have special meaning, see below.
If R is an odd number (R % 2 == 1) then a message is logged via warn() which
will be returned to, and echoed at, the client.
=item methodname
Applies the current fail, err, and delay values to the named method.
If neither a fail nor delay have been set yet then a warning is generated.
=back
For example:
$executor = DBI::Gofer::Execute->new( {
forced_gofer_random => "fail=0.01%,do,delay60=1%,execute",
});
will cause the do() method to fail for 0.01% of calls, and the execute() method to
fail 0.01% of calls and be delayed by 60 seconds on 1% of calls.
If the percentage value (C<R>) is negative then instead of the failures being
triggered randomly (via the rand() function) they are triggered via a sequence
number. In other words "C<fail=-20%>" will mean every fifth call will fail.
Each method has a distinct sequence number.
=head1 AUTHOR
Tim Bunce, L<http://www.tim.bunce.name>
=head1 LICENCE AND COPYRIGHT
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
This module is free software; you can redistribute it and/or
modify it under the same terms as Perl itself. See L<perlartistic>.
=cut

View File

@@ -0,0 +1,200 @@
package DBI::Gofer::Request;
# $Id: Request.pm 12536 2009-02-24 22:37:09Z Tim $
#
# Copyright (c) 2007, Tim Bunce, Ireland
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.
use strict;
use DBI qw(neat neat_list);
use base qw(DBI::Util::_accessor);
our $VERSION = "0.012537";
use constant GOf_REQUEST_IDEMPOTENT => 0x0001;
use constant GOf_REQUEST_READONLY => 0x0002;
our @EXPORT = qw(GOf_REQUEST_IDEMPOTENT GOf_REQUEST_READONLY);
__PACKAGE__->mk_accessors(qw(
version
flags
dbh_connect_call
dbh_method_call
dbh_attributes
dbh_last_insert_id_args
sth_method_calls
sth_result_attr
));
__PACKAGE__->mk_accessors_using(make_accessor_autoviv_hashref => qw(
meta
));
sub new {
    my ($class, $args) = @_;
    # Stamp the request with our protocol version unless the caller
    # supplied one explicitly, then defer to the generic constructor.
    $args->{version} ||= $VERSION;
    return $class->SUPER::new($args);
}
sub reset {
    my ($self, $flags) = @_;
    # Throw away everything except the connect info and protocol version
    # so the object can be reused for the next call on the same handle.
    my %keep = (
        version          => $self->{version},
        dbh_connect_call => $self->{dbh_connect_call},
    );
    %$self = %keep;
    $self->{flags} = $flags if $flags;
}
sub init_request {
    my ($self, $method_and_args, $dbh) = @_;
    # A read-only handle marks the whole request as read-only, which lets
    # the transport treat it as safely retryable.
    my $initial_flags = $dbh->{ReadOnly} ? GOf_REQUEST_READONLY : 0;
    $self->reset($initial_flags);
    $self->dbh_method_call($method_and_args);
}
sub is_sth_request {
    my ($self) = @_;
    # Statement-handle requests always carry sth_result_attr, so its
    # presence distinguishes them from plain dbh requests.
    return $self->{sth_result_attr};
}
sub statements {
    my ($self) = @_;
    # Extract any SQL statement text from the recorded dbh method call.
    # Only do() and prepare() style calls carry a statement as their
    # first argument.
    my @sql;
    if (my $call = $self->dbh_method_call) {
        my (undef, $method_name, $first_arg) = @$call;
        push @sql, $first_arg
            if $method_name and $method_name =~ m/^(?:do|prepare)$/;
    }
    return @sql;
}
# Returns true if the request is safe to retry: either the client flagged
# it as idempotent/read-only, or every statement it carries looks like a
# plain SELECT without FOR UPDATE.
sub is_idempotent {
my $self = shift;
if (my $flags = $self->flags) {
return 1 if $flags & (GOf_REQUEST_IDEMPOTENT|GOf_REQUEST_READONLY);
}
# else check if all statements are SELECT statement that don't include FOR UPDATE
my @statements = $self->statements;
# XXX this is very minimal for now, doesn't even allow comments before the select
# (and can't ever work for "exec stored_procedure_name" kinds of statements)
# XXX it also doesn't deal with multiple statements: prepare("select foo; update bar")
# true only when *every* statement matches (grep count equals list size);
# note this is also true when there are no statements at all
return 1 if @statements == grep {
m/^ \s* SELECT \b /xmsi && !m/ \b FOR \s+ UPDATE \b /xmsi
} @statements;
return 0;
}
# Render a multi-line human-readable description of the request for
# tracing/diagnostics. $context is an optional hashref of extra info
# (shown first). Passwords are masked. Returns a string ending in "\n".
sub summary_as_text {
    my $self = shift;
    my ($context) = @_;

    my @s = '';

    if ($context && %$context) {
        my @keys = sort keys %$context;
        push @s, join(", ", map { "$_=>".$context->{$_} } @keys);
    }

    my ($method, $dsn, $user, $pass, $attr) = @{ $self->dbh_connect_call };
    $method ||= 'connect_cached';
    $pass = '***' if defined $pass; # don't expose passwords

    my $tmp = '';
    if ($attr) {
        $tmp = { %{$attr||{}} }; # copy so we can edit
        $tmp->{Password} = '***' if exists $tmp->{Password};
        $tmp = "{ ".neat_list([ %$tmp ])." }";
    }
    push @s, sprintf "dbh= $method(%s, %s)", neat_list([$dsn, $user, $pass]), $tmp;

    if (my $flags = $self->flags) {
        push @s, sprintf "flags: 0x%x", $flags;
    }

    if (my $dbh_attr = $self->dbh_attributes) {
        # fix: sprintf was given the whole list but the format had only one
        # %s, so every attribute after the first was silently dropped
        push @s, sprintf "dbh->FETCH: %s", join(", ", @$dbh_attr)
            if @$dbh_attr;
    }

    my ($wantarray, $meth, @args) = @{ $self->dbh_method_call };
    my $args = neat_list(\@args);
    $args =~ s/\n+/ /g;
    push @s, sprintf "dbh->%s(%s)", $meth, $args;

    if (my $lii_args = $self->dbh_last_insert_id_args) {
        push @s, sprintf "dbh->last_insert_id(%s)", neat_list($lii_args);
    }

    for my $call (@{ $self->sth_method_calls || [] }) {
        my ($meth, @args) = @$call;
        ($args = neat_list(\@args)) =~ s/\n+/ /g;
        push @s, sprintf "sth->%s(%s)", $meth, $args;
    }

    if (my $sth_attr = $self->sth_result_attr) {
        # fix: as above - the flattened hash used to supply only its first
        # element to the single %s; show all key/value pairs instead
        push @s, sprintf "sth->FETCH: %s", neat_list([ %$sth_attr ])
            if %$sth_attr;
    }

    return join("\n\t", @s) . "\n";
}
sub outline_as_text { # one-line version of summary_as_text
    my ($self) = @_;
    my $max_neat = 80;
    my @parts = '';

    if (my $flags = $self->flags) {
        push @parts, sprintf "flags=0x%x", $flags;
    }

    my (undef, $dbh_meth, @dbh_args) = @{ $self->dbh_method_call };
    push @parts, sprintf "%s(%s)", $dbh_meth, neat_list(\@dbh_args, $max_neat);

    for my $call (@{ $self->sth_method_calls || [] }) {
        my ($sth_meth, @sth_args) = @$call;
        push @parts, sprintf "%s(%s)", $sth_meth, neat_list(\@sth_args, $max_neat);
    }

    # dsn last as it's usually less interesting
    my ($connect_meth, $dsn) = @{ $self->dbh_connect_call };
    push @parts, "$connect_meth($dsn,...)";

    # squish whitespace, incl newlines
    (my $outline = join("; ", @parts)) =~ s/\s+/ /g;
    return $outline;
}
1;
=head1 NAME
DBI::Gofer::Request - Encapsulate a request from DBD::Gofer to DBI::Gofer::Execute
=head1 DESCRIPTION
This is an internal class.
=head1 AUTHOR
Tim Bunce, L<http://www.tim.bunce.name>
=head1 LICENCE AND COPYRIGHT
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
This module is free software; you can redistribute it and/or
modify it under the same terms as Perl itself. See L<perlartistic>.
=cut

View File

@@ -0,0 +1,218 @@
package DBI::Gofer::Response;
# $Id: Response.pm 11565 2008-07-22 20:17:33Z Tim $
#
# Copyright (c) 2007, Tim Bunce, Ireland
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.
use strict;
use Carp;
use DBI qw(neat neat_list);
use base qw(DBI::Util::_accessor Exporter);
our $VERSION = "0.011566";
use constant GOf_RESPONSE_EXECUTED => 0x0001;
our @EXPORT = qw(GOf_RESPONSE_EXECUTED);
__PACKAGE__->mk_accessors(qw(
version
rv
err
errstr
state
flags
last_insert_id
dbh_attributes
sth_resultsets
warnings
));
__PACKAGE__->mk_accessors_using(make_accessor_autoviv_hashref => qw(
meta
));
# Constructor: defaults the protocol version to this module's $VERSION
# and trims any trailing newline from errstr before delegating to the
# accessor base class.
sub new {
    my ($class, $args) = @_;
    $args->{version} ||= $VERSION;
    if ($args->{errstr}) {
        chomp $args->{errstr};
    }
    return $class->SUPER::new($args);
}
# Return the (err, errstr, state) triple for this response as a list.
sub err_errstr_state {
    my $self = shift;
    return ($self->{err}, $self->{errstr}, $self->{state});
}
# True if the GOf_RESPONSE_EXECUTED bit is set in the response flags,
# i.e. the request was actually executed by the server. Returns 0 when
# no flags are set at all.
sub executed_flag_set {
    my $self = shift;
    my $flags = $self->flags;
    return 0 unless $flags;
    return $flags & GOf_RESPONSE_EXECUTED;
}
# Merge an (err, errstr, state) triple into this response, following the
# same precedence rules as the DBI's set_err() method: err > "0" (warning)
# > "" (info) > undef. Existing errstr text is kept and annotated with any
# change of err/state. Always returns undef, like set_err().
sub add_err {
    my ($self, $err, $errstr, $state, $trace) = @_;

    # acts like the DBI's set_err method.
    # this code copied from DBI::PurePerl's set_err method.

    chomp $errstr if $errstr;
    $state ||= '';
    carp ref($self)."->add_err($err, $errstr, $state)"
        if $trace and defined($err) || $errstr;

    my ($r_err, $r_errstr, $r_state) = ($self->{err}, $self->{errstr}, $self->{state});

    # append new message to any existing errstr, noting err/state changes
    if ($r_errstr) {
        $r_errstr .= sprintf " [err was %s now %s]", $r_err, $err
            if $r_err && $err && $r_err ne $err;
        $r_errstr .= sprintf " [state was %s now %s]", $r_state, $state
            if $r_state and $r_state ne "S1000" && $state && $r_state ne $state;
        $r_errstr .= "\n$errstr" if $r_errstr ne $errstr;
    }
    else {
        $r_errstr = $errstr;
    }

    # assign if higher priority: err > "0" > "" > undef
    my $err_changed;
    if ($err                 # new error: so assign
        or !defined $r_err   # no existing warn/info: so assign
        # new warn ("0" len 1) > info ("" len 0): so assign
        or defined $err && length($err) > length($r_err)
    ) {
        $r_err = $err;
        ++$err_changed;
    }

    # "00000" means success, which is represented by an empty state
    $r_state = ($state eq "00000") ? "" : $state
        if $state && $err_changed;

    ($self->{err}, $self->{errstr}, $self->{state}) = ($r_err, $r_errstr, $r_state);

    return undef;
}
# Render this response as a multi-line, tab-indented string for trace
# output: the rv/err/flags line, last_insert_id, selected dbh attributes,
# one line per resultset, any warnings, and finally the optional $context
# hashref's key=>value pairs. Returns the string (ends with a newline).
sub summary_as_text {
    my $self = shift;
    my ($context) = @_;

    my ($rv, $err, $errstr, $state) = ($self->{rv}, $self->{err}, $self->{errstr}, $self->{state});

    my @s = sprintf("\trv=%s", (ref $rv) ? "[".neat_list($rv)."]" : neat($rv));
    $s[-1] .= sprintf(", err=%s, errstr=%s", $err, neat($errstr))
        if defined $err;
    $s[-1] .= sprintf(", flags=0x%x", $self->{flags})
        if defined $self->{flags};

    # BUG FIX: was `push @s, "last_insert_id=%s", $self->last_insert_id`
    # (no sprintf), which pushed the raw format string and the value as
    # two separate summary lines instead of one formatted line.
    push @s, sprintf("last_insert_id=%s", $self->last_insert_id)
        if defined $self->last_insert_id;

    if (my $dbh_attr = $self->dbh_attributes) {
        my @keys = sort keys %$dbh_attr;
        push @s, sprintf "dbh= { %s }", join(", ", map { "$_=>".neat($dbh_attr->{$_},100) } @keys)
            if @keys;
    }

    for my $rs (@{$self->sth_resultsets || []}) {
        my ($rowset, $err, $errstr, $state)
            = @{$rs}{qw(rowset err errstr state)};
        my $summary = "rowset: ";
        my $NUM_OF_FIELDS = $rs->{NUM_OF_FIELDS} || 0;
        my $rows = $rowset ? @$rowset : 0;
        if ($rowset || $NUM_OF_FIELDS > 0) {
            $summary .= sprintf "%d rows, %d columns", $rows, $NUM_OF_FIELDS;
        }
        $summary .= sprintf ", err=%s, errstr=%s", $err, neat($errstr) if defined $err;
        if ($rows) {
            my $NAME = $rs->{NAME};
            # show the first row's values keyed by column name
            my @colinfo = map { "$NAME->[$_]=".neat($rowset->[0][$_], 30) } 0..@{$NAME}-1;
            $summary .= sprintf " [%s]", join ", ", @colinfo;
            $summary .= ",..." if $rows > 1;
            # we can be a little more helpful for Sybase/MSSQL users
            $summary .= " syb_result_type=$rs->{syb_result_type}"
                if $rs->{syb_result_type} and $rs->{syb_result_type} != 4040;
        }
        push @s, $summary;
    }
    for my $w (@{$self->warnings || []}) {
        chomp $w;
        push @s, "warning: $w";
    }
    if ($context && %$context) {
        my @keys = sort keys %$context;
        push @s, join(", ", map { "$_=>".$context->{$_} } @keys);
    }
    return join("\n\t", @s). "\n";
}
# Render this response as a single-line string for low-level trace output:
# rv, err/errstr, flags, then a bracketed "[NrxNc]" summary per resultset.
# $context is accepted for symmetry with summary_as_text() but is not
# currently used here.
sub outline_as_text { # one-line version of summary_as_text
    my $self = shift;
    my ($context) = @_;

    my ($rv, $err, $errstr, $state) = ($self->{rv}, $self->{err}, $self->{errstr}, $self->{state});

    my $s = sprintf("rv=%s", (ref $rv) ? "[".neat_list($rv)."]" : neat($rv));
    $s .= sprintf(", err=%s %s", $err, neat($errstr))
        if defined $err;
    $s .= sprintf(", flags=0x%x", $self->{flags})
        if $self->{flags};

    if (my $sth_resultsets = $self->sth_resultsets) {
        $s .= sprintf(", %d resultsets ", scalar @$sth_resultsets);

        my @rs;
        for my $rs (@{$self->sth_resultsets || []}) {
            my $summary = "";
            my ($rowset, $err, $errstr)
                = @{$rs}{qw(rowset err errstr)};
            my $NUM_OF_FIELDS = $rs->{NUM_OF_FIELDS} || 0;
            my $rows = $rowset ? @$rowset : 0;
            # e.g. "12r x 3c" - row count by column count
            if ($rowset || $NUM_OF_FIELDS > 0) {
                $summary .= sprintf "%dr x %dc", $rows, $NUM_OF_FIELDS;
            }
            $summary .= sprintf "%serr %s %s", ($summary?", ":""), $err, neat($errstr)
                if defined $err;
            push @rs, $summary;
        }
        $s .= join "; ", map { "[$_]" } @rs;
    }
    return $s;
}
1;
=head1 NAME
DBI::Gofer::Response - Encapsulate a response from DBI::Gofer::Execute to DBD::Gofer
=head1 DESCRIPTION
This is an internal class.
=head1 AUTHOR
Tim Bunce, L<http://www.tim.bunce.name>
=head1 LICENCE AND COPYRIGHT
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
This module is free software; you can redistribute it and/or
modify it under the same terms as Perl itself. See L<perlartistic>.
=cut

View File

@@ -0,0 +1,64 @@
package DBI::Gofer::Serializer::Base;
# $Id: Base.pm 9949 2007-09-18 09:38:15Z Tim $
#
# Copyright (c) 2007, Tim Bunce, Ireland
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.
=head1 NAME
DBI::Gofer::Serializer::Base - base class for Gofer serialization
=head1 SYNOPSIS
$serializer = $serializer_class->new();
$string = $serializer->serialize( $data );
($string, $deserializer_class) = $serializer->serialize( $data );
$data = $serializer->deserialize( $string );
=head1 DESCRIPTION
DBI::Gofer::Serializer::* classes implement a very minimal subset of the L<Data::Serializer> API.
Gofer serializers are expected to be very fast and are not required to deal
with anything other than non-blessed references to arrays and hashes, and plain scalars.
=cut
use strict;
use warnings;
use Carp qw(croak);
our $VERSION = "0.009950";
# Constructor: caches the short deserializer class name (derived from the
# package name) so serialize() can return it cheaply in list context.
sub new {
    my $class = shift;
    return bless {
        deserializer_class => $class->deserializer_class,
    } => $class;
}
# Return the class name with any leading "DBI::Gofer::Serializer::"
# prefix removed, e.g. "DBI::Gofer::Serializer::Storable" -> "Storable".
# Works as both an instance and a class method.
sub deserializer_class {
    my $self = shift;
    my $name = ref $self ? ref $self : $self;
    $name =~ s/\ADBI::Gofer::Serializer:://;
    return $name;
}
# Abstract method: subclasses must override serialize($data) to return the
# serialized string (and, in list context, the deserializer class name).
sub serialize {
    my $self = shift;
    croak ref($self)." has not implemented the serialize method";
}

# Abstract method: subclasses must override deserialize($string) to return
# the reconstituted data structure.
sub deserialize {
    my $self = shift;
    croak ref($self)." has not implemented the deserialize method";
}
1;

View File

@@ -0,0 +1,53 @@
package DBI::Gofer::Serializer::DataDumper;
use strict;
use warnings;
our $VERSION = "0.009950";
# $Id: DataDumper.pm 9949 2007-09-18 09:38:15Z Tim $
#
# Copyright (c) 2007, Tim Bunce, Ireland
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.
=head1 NAME
DBI::Gofer::Serializer::DataDumper - Gofer serialization using DataDumper
=head1 SYNOPSIS
$serializer = DBI::Gofer::Serializer::DataDumper->new();
$string = $serializer->serialize( $data );
=head1 DESCRIPTION
Uses DataDumper to serialize. Deserialization is not supported.
The output of this class is only meant for human consumption.
See also L<DBI::Gofer::Serializer::Base>.
=cut
use Data::Dumper;
use base qw(DBI::Gofer::Serializer::Base);
# Serialize $data to a human-readable string via Data::Dumper.
# In list context also returns the deserializer class name (though this
# serializer's output is intended for humans, not for deserialization).
sub serialize {
    my ($self, $data) = @_;

    # configure Data::Dumper for compact, stable, readable output
    local $Data::Dumper::Indent    = 1;
    local $Data::Dumper::Terse     = 1;
    local $Data::Dumper::Useqq     = 0; # enabling this disables xs
    local $Data::Dumper::Sortkeys  = 1;
    local $Data::Dumper::Quotekeys = 0;
    local $Data::Dumper::Deparse   = 0;
    local $Data::Dumper::Purity    = 0;

    my $frozen = Data::Dumper::Dumper($data);
    return wantarray ? ($frozen, $self->{deserializer_class}) : $frozen;
}
1;

View File

@@ -0,0 +1,60 @@
package DBI::Gofer::Serializer::Storable;
use strict;
use warnings;
use base qw(DBI::Gofer::Serializer::Base);
# $Id: Storable.pm 15585 2013-03-22 20:31:22Z Tim $
#
# Copyright (c) 2007, Tim Bunce, Ireland
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.
=head1 NAME
DBI::Gofer::Serializer::Storable - Gofer serialization using Storable
=head1 SYNOPSIS
$serializer = DBI::Gofer::Serializer::Storable->new();
$string = $serializer->serialize( $data );
($string, $deserializer_class) = $serializer->serialize( $data );
$data = $serializer->deserialize( $string );
=head1 DESCRIPTION
Uses Storable::nfreeze() to serialize and Storable::thaw() to deserialize.
The serialize() method sets local $Storable::forgive_me = 1; so it doesn't
croak if it encounters any data types that can't be serialized, such as code refs.
See also L<DBI::Gofer::Serializer::Base>.
=cut
use Storable qw(nfreeze thaw);
our $VERSION = "0.015586";
use base qw(DBI::Gofer::Serializer::Base);
# Serialize $data with Storable::nfreeze (network byte order).
# forgive_me stops Storable croaking on unserializable values like CODE
# refs; canonical gives byte-stable output (relied on by go_cache).
# In list context also returns the deserializer class name.
sub serialize {
    my ($self, $data) = @_;
    local $Storable::forgive_me = 1; # for CODE refs etc
    local $Storable::canonical  = 1; # for go_cache
    my $frozen = nfreeze($data);
    return wantarray ? ($frozen, $self->{deserializer_class}) : $frozen;
}
# Reconstitute a data structure previously frozen with Storable.
sub deserialize {
    my ($self, $frozen) = @_;
    return thaw($frozen);
}
1;

View File

@@ -0,0 +1,174 @@
package DBI::Gofer::Transport::Base;
# $Id: Base.pm 12536 2009-02-24 22:37:09Z Tim $
#
# Copyright (c) 2007, Tim Bunce, Ireland
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.
use strict;
use warnings;
use DBI;
use base qw(DBI::Util::_accessor);
use DBI::Gofer::Serializer::Storable;
use DBI::Gofer::Serializer::DataDumper;
our $VERSION = "0.012537";
__PACKAGE__->mk_accessors(qw(
trace
keep_meta_frozen
serializer_obj
));
# see also $ENV{DBI_GOFER_TRACE} in DBI::Gofer::Execute
# Default trace level, taken from the numeric part of DBI_GOFER_TRACE
# (which may be "level" or "level=filename"); 0 when the variable is unset.
sub _init_trace {
    my @fields = split /=/, ($ENV{DBI_GOFER_TRACE} || 0);
    return $fields[0];
}
# Constructor: defaults trace from DBI_GOFER_TRACE and serializer_obj to
# a Storable serializer, then applies every supplied argument via its
# accessor method.
sub new {
    my ($class, $args) = @_;
    $args->{trace} ||= $class->_init_trace;
    $args->{serializer_obj} ||= DBI::Gofer::Serializer::Storable->new();
    my $self = bless {} => $class;
    for my $attr (keys %$args) {
        $self->$attr( $args->{$attr} );
    }
    $self->trace_msg("$class->new({ @{[ %$args ]} })\n") if $self->trace;
    return $self;
}
my $packet_header_text = "GoFER1:";
my $packet_header_regex = qr/^GoFER(\d+):/;
# Serialize a request/response object and prepend the gofer packet header.
# $serializer overrides the configured serializer_obj if given; $skip_trace
# suppresses the debug dump. Dies with a descriptive message on failure.
sub _freeze_data {
    my ($self, $data, $serializer, $skip_trace) = @_;
    my $frozen = eval {
        $self->_dump("freezing $self->{trace} ".ref($data), $data)
            if !$skip_trace and $self->trace;

        # temporarily blank out meta so it's not included in the serialization
        local $data->{meta}; # don't include meta in serialization
        $serializer ||= $self->{serializer_obj};
        # NOTE: this inner $data deliberately shadows the outer one; the
        # deserializer class returned by serialize() is unused here
        my ($data, $deserializer_class) = $serializer->serialize($data);
        $packet_header_text . $data; # eval's result: header + payload
    };
    if ($@) {
        chomp $@;
        die "Error freezing ".ref($data)." object: $@";
    }

    # stash the frozen data into the data structure itself
    # to make life easy for the client caching code in DBD::Gofer::Transport::Base
    $data->{meta}{frozen} = $frozen if $self->keep_meta_frozen;

    return $frozen;
}
# public aliases used by subclasses
*freeze_request  = \&_freeze_data;
*freeze_response = \&_freeze_data;
# Inverse of _freeze_data: strip and validate the gofer packet header,
# then deserialize the remainder back into a request/response object.
# Records the header's protocol version in $data->{_transport}{version}.
# Dies (after a Carp::cluck stack trace) if the header is missing, the
# payload won't thaw, or the result isn't a reference.
sub _thaw_data {
    my ($self, $frozen_data, $serializer, $skip_trace) = @_;
    my $data;
    eval {
        # check for and extract our gofer header and the info it contains
        (my $frozen = $frozen_data) =~ s/$packet_header_regex//o
            or die "does not have gofer header\n";
        my ($t_version) = $1; # protocol version captured by the header regex
        $serializer ||= $self->{serializer_obj};
        $data = $serializer->deserialize($frozen);
        die ref($serializer)."->deserialize didn't return a reference"
            unless ref $data;
        $data->{_transport}{version} = $t_version;

        # keep the raw frozen form around for client-side caching, if asked
        $data->{meta}{frozen} = $frozen_data if $self->keep_meta_frozen;
    };
    if ($@) {
        chomp(my $err = $@);
        # remove extra noise from Storable
        $err =~ s{ at \S+?/Storable.pm \(autosplit into \S+?/Storable/thaw.al\) line \d+(, \S+ line \d+)?}{};
        my $msg = sprintf "Error thawing: %s (data=%s)", $err, DBI::neat($frozen_data,50);
        Carp::cluck("$msg, pid $$ stack trace follows:"); # XXX if $self->trace;
        die $msg;
    }
    $self->_dump("thawing $self->{trace} ".ref($data), $data)
        if !$skip_trace and $self->trace;

    return $data;
}
# public aliases used by subclasses
*thaw_request  = \&_thaw_data;
*thaw_response = \&_thaw_data;
# this should probably live in the request and response classes
# and the trace level passed in
#
# Write a labelled dump of $data to the trace stream. Verbosity depends on
# the current trace level: >=4 gives a full Data::Dumper dump, >=2 a
# multi-line summary, otherwise a one-line outline.
sub _dump {
    my ($self, $label, $data) = @_;

    # don't dump the binary frozen copy, if one is stashed in meta
    local $data->{meta}{frozen} if $data->{meta} && $data->{meta}{frozen};

    my $trace_level = $self->trace;
    my $summary;
    if ($trace_level >= 4) {
        require Data::Dumper;
        local $Data::Dumper::Indent    = 1;
        local $Data::Dumper::Terse     = 1;
        local $Data::Dumper::Useqq     = 0;
        local $Data::Dumper::Sortkeys  = 1;
        local $Data::Dumper::Quotekeys = 0;
        local $Data::Dumper::Deparse   = 0;
        local $Data::Dumper::Purity    = 0;
        $summary = Data::Dumper::Dumper($data);
    }
    elsif ($trace_level >= 2) {
        # fall back to the eval error, then a placeholder, if summarizing dies
        $summary = eval { $data->summary_as_text } || $@ || "no summary available\n";
    }
    else {
        $summary = eval { $data->outline_as_text."\n" } || $@ || "no summary available\n";
    }
    $self->trace_msg("$label: $summary");
}
# Emit a "gofer "-prefixed message via DBI->trace_msg. $min_level defaults
# to 1; when this transport's own trace level meets or exceeds it, the
# message is forced out (level 0) regardless of the DBI's trace level.
sub trace_msg {
    my ($self, $msg, $min_level) = @_;
    $min_level = 1 if not defined $min_level;
    # transport trace level can override DBI's trace level
    $min_level = 0 if $self->trace >= $min_level;
    return DBI->trace_msg("gofer " . $msg, $min_level);
}
1;
=head1 NAME
DBI::Gofer::Transport::Base - Base class for Gofer transports
=head1 DESCRIPTION
This is the base class for server-side Gofer transports.
It's also the base class for the client-side base class L<DBD::Gofer::Transport::Base>.
This is an internal class.
=head1 AUTHOR
Tim Bunce, L<http://www.tim.bunce.name>
=head1 LICENCE AND COPYRIGHT
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
This module is free software; you can redistribute it and/or
modify it under the same terms as Perl itself. See L<perlartistic>.
=cut

View File

@@ -0,0 +1,64 @@
package DBI::Gofer::Transport::pipeone;
# $Id: pipeone.pm 12536 2009-02-24 22:37:09Z Tim $
#
# Copyright (c) 2007, Tim Bunce, Ireland
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.
use strict;
use warnings;
use DBI::Gofer::Execute;
use base qw(DBI::Gofer::Transport::Base Exporter);
our $VERSION = "0.012537";
our @EXPORT = qw(run_one_stdio);
my $executor = DBI::Gofer::Execute->new();
# Service exactly one Gofer request over stdio: slurp the entire frozen
# request from STDIN, execute it, and print the frozen response to STDOUT.
# Intended to be invoked once per request (e.g. via ssh) - hence "pipeone".
sub run_one_stdio {

    # frozen payloads are binary, so no newline translation
    binmode STDIN;
    binmode STDOUT;

    my $transport = DBI::Gofer::Transport::pipeone->new();

    # slurp the whole of STDIN - the stream is a single frozen request
    my $frozen_request = do { local $/; <STDIN> };

    my $response = $executor->execute_request( $transport->thaw_request($frozen_request) );

    my $frozen_response = $transport->freeze_response($response);

    print $frozen_response;

    # no point calling $executor->update_stats(...) for pipeONE
}
1;
__END__
=head1 NAME
DBI::Gofer::Transport::pipeone - DBD::Gofer server-side transport for pipeone
=head1 SYNOPSIS
See L<DBD::Gofer::Transport::pipeone>.
=head1 AUTHOR
Tim Bunce, L<http://www.tim.bunce.name>
=head1 LICENCE AND COPYRIGHT
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
This module is free software; you can redistribute it and/or
modify it under the same terms as Perl itself. See L<perlartistic>.
=cut

View File

@@ -0,0 +1,76 @@
package DBI::Gofer::Transport::stream;
# $Id: stream.pm 12536 2009-02-24 22:37:09Z Tim $
#
# Copyright (c) 2007, Tim Bunce, Ireland
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.
use strict;
use warnings;
use DBI qw(dbi_time);
use DBI::Gofer::Execute;
use base qw(DBI::Gofer::Transport::pipeone Exporter);
our $VERSION = "0.012537";
our @EXPORT = qw(run_stdio_hex);
my $executor = DBI::Gofer::Execute->new();
# Run the server side of the 'stream' transport: loop reading hex-encoded
# frozen requests, one per line, from STDIN, executing each and writing the
# hex-encoded frozen response back to STDOUT, until EOF.
sub run_stdio_hex {

    my $transport = DBI::Gofer::Transport::stream->new();
    local $| = 1; # autoflush so each response line is sent immediately

    DBI->trace_msg("$0 started (pid $$)\n");

    local $\; # OUTPUT_RECORD_SEPARATOR
    local $/ = "\012"; # INPUT_RECORD_SEPARATOR
    while ( defined( my $encoded_request = <STDIN> ) ) {
        my $time_received = dbi_time();
        $encoded_request =~ s/\015?\012$//; # strip CRLF or LF line ending
        my $frozen_request = pack "H*", $encoded_request; # hex -> binary
        my $request = $transport->thaw_request( $frozen_request );
        my $response = $executor->execute_request( $request );
        my $frozen_response = $transport->freeze_response($response);
        my $encoded_response = unpack "H*", $frozen_response; # binary -> hex
        print $encoded_response, "\015\012"; # autoflushed due to $|=1

        # there's no way to access the stats currently
        # so this just serves as a basic test and illustration of update_stats()
        $executor->update_stats($request, $response, $frozen_request, $frozen_response, $time_received, 1);
    }
    DBI->trace_msg("$0 ending (pid $$)\n");
}
1;
__END__
=head1 NAME
DBI::Gofer::Transport::stream - DBD::Gofer server-side transport for stream
=head1 SYNOPSIS
See L<DBD::Gofer::Transport::stream>.
=head1 AUTHOR
Tim Bunce, L<http://www.tim.bunce.name>
=head1 LICENCE AND COPYRIGHT
Copyright (c) 2007, Tim Bunce, Ireland. All rights reserved.
This module is free software; you can redistribute it and/or
modify it under the same terms as Perl itself. See L<perlartistic>.
=cut

954
database/perl/vendor/lib/DBI/Profile.pm vendored Normal file
View File

@@ -0,0 +1,954 @@
package DBI::Profile;
=head1 NAME
DBI::Profile - Performance profiling and benchmarking for the DBI
=head1 SYNOPSIS
The easiest way to enable DBI profiling is to set the DBI_PROFILE
environment variable to 2 and then run your code as usual:
DBI_PROFILE=2 prog.pl
This will profile your program and then output a textual summary
grouped by query when the program exits. You can also enable profiling by
setting the Profile attribute of any DBI handle:
$dbh->{Profile} = 2;
Then the summary will be printed when the handle is destroyed.
Many other values apart from 2 are possible - see L<"ENABLING A PROFILE"> below.
=head1 DESCRIPTION
The DBI::Profile module provides a simple interface to collect and
report performance and benchmarking data from the DBI.
For a more elaborate interface, suitable for larger programs, see
L<DBI::ProfileDumper|DBI::ProfileDumper> and L<dbiprof|dbiprof>.
For Apache/mod_perl applications see
L<DBI::ProfileDumper::Apache|DBI::ProfileDumper::Apache>.
=head1 OVERVIEW
Performance data collection for the DBI is built around several
concepts which are important to understand clearly.
=over 4
=item Method Dispatch
Every method call on a DBI handle passes through a single 'dispatch'
function which manages all the common aspects of DBI method calls,
such as handling the RaiseError attribute.
=item Data Collection
If profiling is enabled for a handle then the dispatch code takes
a high-resolution timestamp soon after it is entered. Then, after
calling the appropriate method and just before returning, it takes
another high-resolution timestamp and calls a function to record
the information. That function is passed the two timestamps
plus the DBI handle and the name of the method that was called.
That data about a single DBI method call is called a I<profile sample>.
=item Data Filtering
If the method call was invoked by the DBI or by a driver then the call is
ignored for profiling because the time spent will be accounted for by the
original 'outermost' call for your code.
For example, the calls that the selectrow_arrayref() method makes
to prepare() and execute() etc. are not counted individually
because the time spent in those methods is going to be allocated
to the selectrow_arrayref() method when it returns. If this was not
done then it would be very easy to double count time spent inside
the DBI.
=item Data Storage Tree
The profile data is accumulated as 'leaves on a tree'. The 'path' through the
branches of the tree to a particular leaf is determined dynamically for each sample.
This is a key feature of DBI profiling.
For each profiled method call the DBI walks along the Path and uses each value
in the Path to step into and grow the Data tree.
For example, if the Path is
[ 'foo', 'bar', 'baz' ]
then the new profile sample data will be I<merged> into the tree at
$h->{Profile}->{Data}->{foo}->{bar}->{baz}
But it's not very useful to merge all the call data into one leaf node (except
to get an overall 'time spent inside the DBI' total). It's more common to want
the Path to include dynamic values such as the current statement text and/or
the name of the method called to show what the time spent inside the DBI was for.
The Path can contain some 'magic cookie' values that are automatically replaced
by corresponding dynamic values when they're used. These magic cookies always
start with a punctuation character.
For example a value of 'C<!MethodName>' in the Path causes the corresponding
entry in the Data to be the name of the method that was called.
For example, if the Path was:
[ 'foo', '!MethodName', 'bar' ]
and the selectall_arrayref() method was called, then the profile sample data
for that call will be merged into the tree at:
$h->{Profile}->{Data}->{foo}->{selectall_arrayref}->{bar}
=item Profile Data
Profile data is stored at the 'leaves' of the tree as references
to an array of numeric values. For example:
[
106, # 0: count of samples at this node
0.0312958955764771, # 1: total duration
0.000490069389343262, # 2: first duration
0.000176072120666504, # 3: shortest duration
0.00140702724456787, # 4: longest duration
1023115819.83019, # 5: time of first sample
1023115819.86576, # 6: time of last sample
]
After the first sample, later samples always update elements 0, 1, and 6, and
may update 3 or 4 depending on the duration of the sampled call.
=back
=head1 ENABLING A PROFILE
Profiling is enabled for a handle by assigning to the Profile
attribute. For example:
$h->{Profile} = DBI::Profile->new();
The Profile attribute holds a blessed reference to a hash object
that contains the profile data and attributes relating to it.
The class the Profile object is blessed into is expected to
provide at least a DESTROY method which will dump the profile data
to the DBI trace file handle (STDERR by default).
All these examples have the same effect as each other:
$h->{Profile} = 0;
$h->{Profile} = "/DBI::Profile";
$h->{Profile} = DBI::Profile->new();
$h->{Profile} = {};
$h->{Profile} = { Path => [] };
Similarly, these examples have the same effect as each other:
$h->{Profile} = 6;
$h->{Profile} = "6/DBI::Profile";
$h->{Profile} = "!Statement:!MethodName/DBI::Profile";
$h->{Profile} = { Path => [ '!Statement', '!MethodName' ] };
If a non-blessed hash reference is given then the DBI::Profile
module is automatically C<require>'d and the reference is blessed
into that class.
If a string is given then it is processed like this:
($path, $module, $args) = split /\//, $string, 3
@path = split /:/, $path
@args = split /:/, $args
eval "require $module" if $module
$module ||= "DBI::Profile"
$module->new( Path => \@Path, @args )
So the first value is used to select the Path to be used (see below).
The second value, if present, is used as the name of a module which
will be loaded and its C<new> method called. If not present it
defaults to DBI::Profile. Any other values are passed as arguments
to the C<new> method. For example: "C<2/DBIx::OtherProfile/Foo:42>".
Numbers can be used as a shorthand way to enable common Path values.
The simplest way to explain how the values are interpreted is to show the code:
push @Path, "DBI" if $path_elem & 0x01;
push @Path, "!Statement" if $path_elem & 0x02;
push @Path, "!MethodName" if $path_elem & 0x04;
push @Path, "!MethodClass" if $path_elem & 0x08;
push @Path, "!Caller2" if $path_elem & 0x10;
So "2" is the same as "!Statement" and "6" (2+4) is the same as
"!Statement:!MethodName". Those are the two most commonly used values. Using a
negative number will reverse the path. Thus "-6" will group by method name then
statement.
The splitting and parsing of string values assigned to the Profile
attribute may seem a little odd, but there's a good reason for it.
Remember that attributes can be embedded in the Data Source Name
string which can be passed in to a script as a parameter. For
example:
dbi:DriverName(Profile=>2):dbname
dbi:DriverName(Profile=>{Username}:!Statement/MyProfiler/Foo:42):dbname
And also, if the C<DBI_PROFILE> environment variable is set then
the DBI arranges for every driver handle to share the same profile
object. When perl exits a single profile summary will be generated
that reflects (as nearly as practical) the total use of the DBI by
the application.
=head1 THE PROFILE OBJECT
The DBI core expects the Profile attribute value to be a hash
reference and if the following values don't exist it will create
them as needed:
=head2 Data
A reference to a hash containing the collected profile data.
=head2 Path
The Path value is a reference to an array. Each element controls the
value to use at the corresponding level of the profile Data tree.
If the value of Path is anything other than an array reference,
it is treated as if it was:
[ '!Statement' ]
The elements of Path array can be one of the following types:
=head3 Special Constant
B<!Statement>
Use the current Statement text. Typically that's the value of the Statement
attribute for the handle the method was called with. Some methods, like
commit() and rollback(), are unrelated to a particular statement. For those
methods !Statement records an empty string.
For statement handles this is always simply the string that was
given to prepare() when the handle was created. For database handles
this is the statement that was last prepared or executed on that
database handle. That can lead to a little 'fuzzyness' because, for
example, calls to the quote() method to build a new statement will
typically be associated with the previous statement. In practice
this isn't a significant issue and the dynamic Path mechanism can
be used to setup your own rules.
B<!MethodName>
Use the name of the DBI method that the profile sample relates to.
B<!MethodClass>
Use the fully qualified name of the DBI method, including
the package, that the profile sample relates to. This shows you
where the method was implemented. For example:
'DBD::_::db::selectrow_arrayref' =>
0.022902s
'DBD::mysql::db::selectrow_arrayref' =>
2.244521s / 99 = 0.022445s avg (first 0.022813s, min 0.022051s, max 0.028932s)
The "DBD::_::db::selectrow_arrayref" shows that the driver has
inherited the selectrow_arrayref method provided by the DBI.
But you'll note that there is only one call to
DBD::_::db::selectrow_arrayref but another 99 to
DBD::mysql::db::selectrow_arrayref. Currently the first
call doesn't record the true location. That may change.
B<!Caller>
Use a string showing the filename and line number of the code calling the method.
B<!Caller2>
Use a string showing the filename and line number of the code calling the
method, as for !Caller, but also include filename and line number of the code
that called that. Calls from DBI:: and DBD:: packages are skipped.
B<!File>
Same as !Caller above except that only the filename is included, not the line number.
B<!File2>
Same as !Caller2 above except that only the filenames are included, not the line number.
B<!Time>
Use the current value of time(). Rarely used. See the more useful C<!Time~N> below.
B<!Time~N>
Where C<N> is an integer. Use the current value of time() but with reduced precision.
The value used is determined in this way:
int( time() / N ) * N
This is a useful way to segregate a profile into time slots. For example:
[ '!Time~60', '!Statement' ]
=head3 Code Reference
The subroutine is passed the handle it was called on and the DBI method name.
The current Statement is in $_. The statement string should not be modified,
so most subs start with C<local $_ = $_;>.
The list of values it returns is used at that point in the Profile Path.
Any undefined values are treated as the string "C<undef>".
The sub can 'veto' (reject) a profile sample by including a reference to undef
(C<\undef>) in the returned list. That can be useful when you want to only profile
statements that match a certain pattern, or only profile certain methods.
=head3 Subroutine Specifier
A Path element that begins with 'C<&>' is treated as the name of a subroutine
in the DBI::ProfileSubs namespace and replaced with the corresponding code reference.
Currently this only works when the Path is specified by the C<DBI_PROFILE>
environment variable.
Also, currently, the only subroutine in the DBI::ProfileSubs namespace is
C<'&norm_std_n3'>. That's a very handy subroutine when profiling code that
doesn't use placeholders. See L<DBI::ProfileSubs> for more information.
=head3 Attribute Specifier
A string enclosed in braces, such as 'C<{Username}>', specifies that the current
value of the corresponding database handle attribute should be used at that
point in the Path.
=head3 Reference to a Scalar
Specifies that the current value of the referenced scalar be used at that point
in the Path. This provides an efficient way to get 'contextual' values into
your profile.
=head3 Other Values
Any other values are stringified and used literally.
(References, and values that begin with punctuation characters are reserved.)
=head1 REPORTING
=head2 Report Format
The current accumulated profile data can be formatted and output using
print $h->{Profile}->format;
To discard the profile data and start collecting fresh data
you can do:
$h->{Profile}->{Data} = undef;
The default results format looks like this:
DBI::Profile: 0.001015s 42.7% (5 calls) programname @ YYYY-MM-DD HH:MM:SS
'' =>
0.000024s / 2 = 0.000012s avg (first 0.000015s, min 0.000009s, max 0.000015s)
'SELECT mode,size,name FROM table' =>
0.000991s / 3 = 0.000330s avg (first 0.000678s, min 0.000009s, max 0.000678s)
Which shows the total time spent inside the DBI, with a count of
the total number of method calls and the name of the script being
run, then a formatted version of the profile data tree.
If the results are being formatted when the perl process is exiting
(which is usually the case when the DBI_PROFILE environment variable
is used) then the percentage of time the process spent inside the
DBI is also shown. If the process is not exiting then the percentage is
calculated using the time between the first and last call to the DBI.
In the example above the paths in the tree are only one level deep and
use the Statement text as the value (that's the default behaviour).
The merged profile data at the 'leaves' of the tree are presented
as total time spent, count, average time spent (which is simply total
time divided by the count), then the time spent on the first call,
the time spent on the fastest call, and finally the time spent on
the slowest call.
The 'avg', 'first', 'min' and 'max' times are not particularly
useful when the profile data path only contains the statement text.
Here's an extract of a more detailed example using both statement
text and method name in the path:
'SELECT mode,size,name FROM table' =>
'FETCH' =>
0.000076s
'fetchrow_hashref' =>
0.036203s / 108 = 0.000335s avg (first 0.000490s, min 0.000152s, max 0.002786s)
Here you can see the 'avg', 'first', 'min' and 'max' for the
108 calls to fetchrow_hashref() become rather more interesting.
Also the data for FETCH just shows a time value because it was only
called once.
Currently the profile data is output sorted by branch names. That
may change in a later version so the leaf nodes are sorted by total
time per leaf node.
=head2 Report Destination
The default method of reporting is for the DESTROY method of the
Profile object to format the results and write them using:
DBI->trace_msg($results, 0); # see $ON_DESTROY_DUMP below
to write them to the DBI trace() filehandle (which defaults to
STDERR). To direct the DBI trace filehandle to write to a file
without enabling tracing the trace() method can be called with a
trace level of 0. For example:
DBI->trace(0, $filename);
The same effect can be achieved without changing the code by
setting the C<DBI_TRACE> environment variable to C<0=filename>.
The $DBI::Profile::ON_DESTROY_DUMP variable holds a code ref
that's called to perform the output of the formatted results.
The default value is:
$ON_DESTROY_DUMP = sub { DBI->trace_msg($results, 0) };
Apart from making it easy to send the dump elsewhere, it can also
be useful as a simple way to disable dumping results.
=head1 CHILD HANDLES
Child handles inherit a reference to the Profile attribute value
of their parent. So if profiling is enabled for a database handle
then by default the statement handles created from it all contribute
to the same merged profile data tree.
=head1 PROFILE OBJECT METHODS
=head2 format
See L</REPORTING>.
=head2 as_node_path_list
@ary = $dbh->{Profile}->as_node_path_list();
@ary = $dbh->{Profile}->as_node_path_list($node, $path);
Returns the collected data ($dbh->{Profile}{Data}) restructured into a list of
array refs, one for each leaf node in the Data tree. This 'flat' structure is
often much simpler for applications to work with.
The first element of each array ref is a reference to the leaf node.
The remaining elements are the 'path' through the data tree to that node.
For example, given a data tree like this:
{key1a}{key2a}[node1]
{key1a}{key2b}[node2]
{key1b}{key2a}{key3a}[node3]
The as_node_path_list() method will return this list:
[ [node1], 'key1a', 'key2a' ]
[ [node2], 'key1a', 'key2b' ]
[ [node3], 'key1b', 'key2a', 'key3a' ]
The nodes are ordered by key, depth-first.
The $node argument can be used to focus on a sub-tree.
If not specified it defaults to $dbh->{Profile}{Data}.
The $path argument can be used to specify a list of path elements that will be
added to each element of the returned list. If not specified it defaults to a
ref to an empty array.
=head2 as_text
@txt = $dbh->{Profile}->as_text();
$txt = $dbh->{Profile}->as_text({
node => undef,
path => [],
separator => " > ",
format => '%1$s: %11$fs / %10$d = %2$fs avg (first %12$fs, min %13$fs, max %14$fs)'."\n",
sortsub => sub { ... },
});
Returns the collected data ($dbh->{Profile}{Data}) reformatted into a list of formatted strings.
In scalar context the list is returned as a single concatenated string.
A hashref can be used to pass in arguments, the default values are shown in the example above.
The C<node> and C<path> arguments are passed to as_node_path_list().
The C<separator> argument is used to join the elements of the path for each leaf node.
The C<sortsub> argument is used to pass in a ref to a sub that will order the list.
The subroutine will be passed a reference to the array returned by
as_node_path_list() and should sort the contents of the array in place.
The return value from the sub is ignored. For example, to sort the nodes by the
second level key you could use:
sortsub => sub { my $ary=shift; @$ary = sort { $a->[2] cmp $b->[2] } @$ary }
The C<format> argument is a C<sprintf> format string that specifies the format
to use for each leaf node. It uses the explicit format parameter index
mechanism to specify which of the arguments should appear where in the string.
The arguments to sprintf are:
1: path to node, joined with the separator
2: average duration (total duration/count)
(3 thru 9 are currently unused)
10: count
11: total duration
12: first duration
13: smallest duration
14: largest duration
15: time of first call
16: time of last call
=head1 CUSTOM DATA MANIPULATION
Recall that C<< $h->{Profile}->{Data} >> is a reference to the collected data.
Either to a 'leaf' array (when the Path is empty, i.e., DBI_PROFILE env var is 1),
or a reference to hash containing values that are either further hash
references or leaf array references.
Sometimes it's useful to be able to summarise some or all of the collected data.
The dbi_profile_merge_nodes() function can be used to merge leaf node values.
=head2 dbi_profile_merge_nodes
use DBI qw(dbi_profile_merge_nodes);
$time_in_dbi = dbi_profile_merge_nodes(my $totals=[], @$leaves);
Merges profile data node. Given a reference to a destination array, and zero or
more references to profile data, merges the profile data into the destination array.
For example:
$time_in_dbi = dbi_profile_merge_nodes(
my $totals=[],
[ 10, 0.51, 0.11, 0.01, 0.22, 1023110000, 1023110010 ],
[ 15, 0.42, 0.12, 0.02, 0.23, 1023110005, 1023110009 ],
);
$totals will then contain
[ 25, 0.93, 0.11, 0.01, 0.23, 1023110000, 1023110010 ]
and $time_in_dbi will be 0.93;
The second argument need not be just leaf nodes. If given a reference to a hash
then the hash is recursively searched for leaf nodes and all those found
are merged.
For example, to get the time spent 'inside' the DBI during an http request,
your logging code run at the end of the request (i.e. mod_perl LogHandler)
could use:
my $time_in_dbi = 0;
if (my $Profile = $dbh->{Profile}) { # if DBI profiling is enabled
$time_in_dbi = dbi_profile_merge_nodes(my $total=[], $Profile->{Data});
$Profile->{Data} = {}; # reset the profile data
}
If profiling has been enabled then $time_in_dbi will hold the time spent inside
the DBI for that handle (and any other handles that share the same profile data)
since the last request.
Prior to DBI 1.56 the dbi_profile_merge_nodes() function was called dbi_profile_merge().
That name still exists as an alias.
=head1 CUSTOM DATA COLLECTION
=head2 Using The Path Attribute
XXX example to be added later using a selectall_arrayref call
XXX nested inside a fetch loop where the first column of the
XXX outer loop is bound to the profile Path using
XXX bind_column(1, \${ $dbh->{Profile}->{Path}->[0] })
XXX so you end up with separate profiles for each loop
XXX (patches welcome to add this to the docs :)
=head2 Adding Your Own Samples
The dbi_profile() function can be used to add extra sample data
into the profile data tree. For example:
use DBI;
use DBI::Profile qw(dbi_profile dbi_time);
my $t1 = dbi_time(); # floating point high-resolution time
... execute code you want to profile here ...
my $t2 = dbi_time();
dbi_profile($h, $statement, $method, $t1, $t2);
The $h parameter is the handle the extra profile sample should be
associated with. The $statement parameter is the string to use where
the Path specifies !Statement. If $statement is undef
then $h->{Statement} will be used. Similarly $method is the string
to use if the Path specifies !MethodName. There is no
default value for $method.
The $h->{Profile}{Path} attribute is processed by dbi_profile() in
the usual way.
The $h parameter is usually a DBI handle but it can also be a reference to a
hash, in which case the dbi_profile() acts on each defined value in the hash.
This is an efficient way to update multiple profiles with a single sample,
and is used by the L<DashProfiler> module.
=head1 SUBCLASSING
Alternate profile modules must subclass DBI::Profile to help ensure
they work with future versions of the DBI.
=head1 CAVEATS
Applications which generate many different statement strings
(typically because they don't use placeholders) and profile with
!Statement in the Path (the default) will consume memory
in the Profile Data structure for each statement. Use a code ref
in the Path to return an edited (simplified) form of the statement.
If a method throws an exception itself (not via RaiseError) then
it won't be counted in the profile.
If a HandleError subroutine throws an exception (rather than returning
0 and letting RaiseError do it) then the method call won't be counted
in the profile.
Time spent in DESTROY is added to the profile of the parent handle.
Time spent in DBI->*() methods is not counted. The time spent in
the driver connect method, $drh->connect(), when it's called by
DBI->connect is counted if the DBI_PROFILE environment variable is set.
Time spent fetching tied variables, $DBI::errstr, is counted.
Time spent in FETCH for $h->{Profile} is not counted, so getting the profile
data doesn't alter it.
DBI::PurePerl does not support profiling (though it could in theory).
For asynchronous queries, time spent while the query is running on the
backend is not counted.
A few platforms don't support the gettimeofday() high resolution
time function used by the DBI (and available via the dbi_time() function).
In which case you'll get integer resolution time which is mostly useless.
On Windows platforms the dbi_time() function is limited to millisecond
resolution. Which isn't sufficiently fine for our needs, but still
much better than integer resolution. This limited resolution means
that fast method calls will often register as taking 0 time. And
timings in general will have much more 'jitter' depending on where
within the 'current millisecond' the start and end timing was taken.
This documentation could be more clear. Probably needs to be reordered
to start with several examples and build from there. Trying to
explain the concepts first seems painful and to lead to just as
many forward references. (Patches welcome!)
=cut
use strict;
use vars qw(@ISA @EXPORT @EXPORT_OK $VERSION);
use Exporter ();
use UNIVERSAL ();
use Carp;

# Re-export the profiling helpers implemented in DBI itself.
use DBI qw(dbi_time dbi_profile dbi_profile_merge_nodes dbi_profile_merge);

$VERSION = "2.015065";

@ISA = qw(Exporter);
# Exported by default: the standard Path-element constants plus the
# profiling helper functions imported from DBI above.
@EXPORT = qw(
    DBIprofile_Statement
    DBIprofile_MethodName
    DBIprofile_MethodClass
    dbi_profile
    dbi_profile_merge_nodes
    dbi_profile_merge
    dbi_time
);
@EXPORT_OK = qw(
    format_profile_thingy
);

# Magic Path-element strings recognized by the DBI profiling core.
use constant DBIprofile_Statement   => '!Statement';
use constant DBIprofile_MethodName  => '!MethodName';
use constant DBIprofile_MethodClass => '!MethodClass';

# Code refs called to emit the formatted results at DESTROY/flush time.
# May be replaced (or set to undef to disable output) - see the POD.
our $ON_DESTROY_DUMP = sub { DBI->trace_msg(shift, 0) };
our $ON_FLUSH_DUMP   = sub { DBI->trace_msg(shift, 0) };
# Construct a profile object from the supplied key/value attributes
# (e.g. Path => [...], Trace => 1). No defaults are applied here.
sub new {
    my ($class, %attr) = @_;
    my $self = bless { %attr }, $class;
    return $self;
}
# Called by the DBI internals when a non-hash-ref value is assigned to
# the Profile attribute. Parses a "path/module/k1:v1:k2:v2" style
# specification and returns a suitable profile object (an instance of
# $package if given and loadable, otherwise of this class).
sub _auto_new {
    my $class = shift;
    my ($arg) = @_;

    # This sub is called by DBI internals when a non-hash-ref is
    # assigned to the Profile attribute. For example
    #   dbi:mysql(RaiseError=>1,Profile=>!Statement:!MethodName/DBIx::MyProfile/arg1:arg2):dbname
    # This sub works out what to do and returns a suitable hash ref.

    # Compatibility: rewrite an old-style bare "DBI::..." class name into
    # the modern "path/module" form (path 2 == !Statement).
    $arg =~ s/^DBI::/2\/DBI::/
        and carp "Automatically changed old-style DBI::Profile specification to $arg";

    # it's a path/module/k1:v1:k2:v2:... list
    my ($path, $package, $args) = split /\//, $arg, 3;
    my @args = (defined $args) ? split(/:/, $args, -1) : ();

    my @Path;
    for my $element (split /:/, $path) {
        if (DBI::looks_like_number($element)) {
            # Numeric element: a bit-mask selecting standard path elements.
            # A negative value selects the same elements in reverse order.
            my $reverse = ($element < 0) ? ($element=-$element, 1) : 0;
            my @p;
            # a single "DBI" is special-cased in format()
            push @p, "DBI"                  if $element & 0x01;
            push @p, DBIprofile_Statement   if $element & 0x02;
            push @p, DBIprofile_MethodName  if $element & 0x04;
            push @p, DBIprofile_MethodClass if $element & 0x08;
            push @p, '!Caller2'             if $element & 0x10;
            push @Path, ($reverse ? reverse @p : @p);
        }
        elsif ($element =~ m/^&(\w.*)/) {
            # "&name" element: resolve to a code ref in DBI::ProfileSubs.
            my $name = "DBI::ProfileSubs::$1"; # capture $1 early
            require DBI::ProfileSubs;
            my $code = do { no strict; *{$name}{CODE} };
            if (defined $code) {
                push @Path, $code;
            }
            else {
                # Fall back to the literal string so profiling still works.
                warn "$name: subroutine not found\n";
                push @Path, $element;
            }
        }
        else {
            # Anything else is used as a literal path element.
            push @Path, $element;
        }
    }

    eval "require $package" if $package; # silently ignores errors
    $package ||= $class;

    return $package->new(Path => \@Path, @args);
}
# Discard all accumulated profile data for this handle.
# Logs a trace message first when the Trace attribute is set.
sub empty {
    my $self = shift;
    if ($self->{Trace}) {
        DBI->trace_msg("profile data discarded\n", 0);
    }
    $self->{Data} = undef;
}
# Baseclass stub: plain DBI::Profile writes no file, so there is no
# filename. Subclasses that dump to disk (see DBI::ProfileDumper)
# override this to return their output file name.
sub filename {  # baseclass method, see DBI::ProfileDumper
    return undef;
}
# Format and emit the current profile data via $ON_FLUSH_DUMP, without
# resetting it. Baseclass behaviour; DBI::ProfileDumper and
# DashProfiler::Core override this to write to their own files.
sub flush_to_disk {
    my $self = shift;
    return unless $ON_FLUSH_DUMP && $self->{Data};
    my $formatted = $self->format();
    $ON_FLUSH_DUMP->($formatted) if $formatted;
}
# Flatten the Data tree into a list of array refs, one per leaf node.
# Each returned element is [ $leaf_ref, @path_keys ]. Branches are
# visited depth-first with hash keys in sorted order, e.g.
#   {key1a}{key2a}[node1]  ->  [ [node1], 'key1a', 'key2a' ]
sub as_node_path_list {
    my ($self, $node, $path) = @_;
    $node ||= $self->{Data} or return;
    $path ||= [];

    # A leaf (anything that isn't a plain hash): emit it with its path.
    return [ $node, @$path ] unless ref $node eq 'HASH';

    # A branch: recurse into each true-valued child, extending the path.
    my @flattened;
    for my $key (sort keys %$node) {
        my $child = $node->{$key} or next;
        push @flattened, $self->as_node_path_list($child, [ @$path, $key ]);
    }
    return @flattened;
}
# Render the profile data as a list of formatted one-line strings (or a
# single concatenated string in scalar context). Supported $args_ref
# keys: node, path, separator, format, sortsub, format_path_element -
# see the POD above for their meaning and defaults.
sub as_text {
    my ($self, $args_ref) = @_;
    my $separator = $args_ref->{separator} || " > ";
    my $format_path_element = $args_ref->{format_path_element}
        || "%s"; # or e.g., " key%2$d='%s'"
    # The format uses explicit sprintf parameter indexes; see the POD
    # for the meaning of arguments 1, 2 and 10..16.
    my $format = $args_ref->{format}
        || '%1$s: %11$fs / %10$d = %2$fs avg (first %12$fs, min %13$fs, max %14$fs)'."\n";

    my @node_path_list = $self->as_node_path_list(undef, $args_ref->{path});

    # Optional caller-supplied in-place ordering of the leaf list.
    $args_ref->{sortsub}->(\@node_path_list) if $args_ref->{sortsub};

    # Compile the separator into a regex (used below to neutralize path
    # elements that contain it); fall back to the quotemeta'd string.
    my $eval = "qr/".quotemeta($separator)."/";
    my $separator_re = eval($eval) || quotemeta($separator);
    #warn "[$eval] = [$separator_re]";

    my @text;
    my @spare_slots = (undef) x 7;  # pads sprintf arguments 3..9 (unused)
    for my $node_path (@node_path_list) {
        my ($node, @path) = @$node_path;
        my $idx = 0;
        for (@path) {
            # Flatten newlines and embedded separators so each leaf
            # stays on a single, unambiguous line.
            s/[\r\n]+/ /g;
            s/$separator_re/ /g;
            ++$idx;
            if ($format_path_element eq "%s") {
                $_ = sprintf $format_path_element, $_;
            } else {
                # Custom element formats may also use the 1-based index.
                $_ = sprintf $format_path_element, $_, $idx;
            }
        }
        push @text, sprintf $format,
            join($separator, @path),                  # 1=path
            ($node->[0] ? $node->[1]/$node->[0] : 0), # 2=avg
            @spare_slots,
            @$node; # 10=count, 11=dur, 12=first_dur, 13=min, 14=max, 15=first_called, 16=last_called
    }
    return @text if wantarray;
    return join "", @text;
}
# Format the whole profile as a human-readable report: a one-line
# summary prologue (time spent in DBI, percentage of process time, call
# count, program name and timestamp) followed by the formatted data
# tree. Returns ($prologue, $detail) in list context, their
# concatenation in scalar context.
sub format {
    my $self = shift;
    my $class = ref($self) || $self;

    my $prologue = "$class: ";
    my $detail = $self->format_profile_thingy(
        $self->{Data}, 0, " ",
        my $path = [],
        my $leaves = [],    # collects every leaf seen, for the totals
    )."\n";

    if (@$leaves) {
        # Merge all leaves to get overall totals for the prologue.
        dbi_profile_merge_nodes(my $totals=[], @$leaves);
        my ($count, $time_in_dbi, undef, undef, undef, $t1, $t2) = @$totals;
        (my $progname = $0) =~ s:.*/::;
        if ($count) {
            $prologue .= sprintf "%fs ", $time_in_dbi;
            # At interpreter exit use total process age, otherwise the
            # span between the first and last profiled calls.
            my $perl_time = ($DBI::PERL_ENDING) ? time() - $^T : $t2-$t1;
            $prologue .= sprintf "%.2f%% ", $time_in_dbi/$perl_time*100 if $perl_time;
            my @lt = localtime(time);
            my $ts = sprintf "%d-%02d-%02d %02d:%02d:%02d",
                1900+$lt[5], $lt[4]+1, @lt[3,2,1,0];
            $prologue .= sprintf "(%d calls) $progname \@ $ts\n", $count;
        }
        # DBI_PROFILE=1 produces a single "DBI" branch; the prologue
        # already says everything, so suppress the redundant detail.
        if (@$leaves == 1 && ref($self->{Data}) eq 'HASH' && $self->{Data}->{DBI}) {
            $detail = ""; # hide the "DBI" from DBI_PROFILE=1
        }
    }
    return ($prologue, $detail) if wantarray;
    return $prologue.$detail;
}
# Render one leaf node at the given depth. Leaves with a single sample
# show just the total time; multi-sample leaves also show the count,
# average, first, min and max. Appends the leaf to @$leaves when a
# collector array is supplied (used by format() to compute totals).
sub format_profile_leaf {
    my ($self, $thingy, $depth, $pad, $path, $leaves) = @_;
    croak "format_profile_leaf called on non-leaf ($thingy)"
        unless UNIVERSAL::isa($thingy,'ARRAY');

    push @$leaves, $thingy if $leaves;
    my ($count, $total_time, $first_time, $min, $max, $first_called, $last_called) = @$thingy;

    my $indent = $pad x $depth;
    if ($count <= 1) {
        return sprintf "%s%fs\n", $indent, $total_time;
    }
    my $avg = $count ? $total_time/$count : 0;
    return sprintf "%s%fs / %d = %fs avg (first %fs, min %fs, max %fs)\n",
        $indent, $total_time, $count, $avg, $first_time, $min, $max;
}
# Render one hash branch: each key (in sorted order) on its own line,
# followed by its recursively formatted subtree one level deeper.
# Maintains @$path so recursive calls can see the current position.
sub format_profile_branch {
    my ($self, $thingy, $depth, $pad, $path, $leaves) = @_;
    croak "format_profile_branch called on non-branch ($thingy)"
        unless UNIVERSAL::isa($thingy,'HASH');

    my $indent = $pad x $depth;
    my @out;
    for my $key (sort keys %$thingy) {
        push @$path, $key;
        push @out, sprintf "%s'%s' =>\n%s",
            $indent, $key,
            $self->format_profile_thingy($thingy->{$key}, $depth+1, $pad, $path, $leaves);
        pop @$path;
    }
    return join "", @out;
}
# Dispatch a node of the Data tree to the right formatter: array refs
# are leaves, hash refs are branches, undef renders as "undef" and
# anything else is stringified as-is.
sub format_profile_thingy {
    my ($self, $thingy, $depth, $pad, $path, $leaves) = @_;
    if (!defined $thingy) {
        return "undef";
    }
    if (UNIVERSAL::isa($thingy, 'ARRAY')) {
        return $self->format_profile_leaf($thingy, $depth, $pad, $path, $leaves);
    }
    if (UNIVERSAL::isa($thingy, 'HASH')) {
        return $self->format_profile_branch($thingy, $depth, $pad, $path, $leaves);
    }
    return "$thingy\n";
}
# Format and emit the profile via $ON_DESTROY_DUMP, then discard the
# data so it cannot be reported twice. Called from DESTROY.
sub on_destroy {
    my $self = shift;
    return unless $ON_DESTROY_DUMP && $self->{Data};
    my $formatted = $self->format();
    $ON_DESTROY_DUMP->($formatted) if $formatted;
    $self->{Data} = undef;
}
# Destructor: delegates reporting to on_destroy(), shielding the caller
# from any exception it throws (and localizing $@, which DESTROY must
# not clobber). Failures are reported via the DBI trace stream.
sub DESTROY {
    my $self = shift;
    local $@;
    if (($self->{Trace}||0) >= 2) {
        DBI->trace_msg("profile data DESTROY\n",0);
    }
    eval { $self->on_destroy };
    if ($@) {
        chomp $@;
        my $class = ref($self) || $self;
        DBI->trace_msg("$class on_destroy failed: $@", 0);
    }
}
1;

View File

@@ -0,0 +1,736 @@
package DBI::ProfileData;
use strict;
=head1 NAME
DBI::ProfileData - manipulate DBI::ProfileDumper data dumps
=head1 SYNOPSIS
The easiest way to use this module is through the dbiprof frontend
(see L<dbiprof> for details):
dbiprof --number 15 --sort count
This module can also be used to roll your own profile analysis:
# load data from dbi.prof
$prof = DBI::ProfileData->new(File => "dbi.prof");
# get a count of the records (unique paths) in the data set
$count = $prof->count();
# sort by longest overall time
$prof->sort(field => "longest");
# sort by longest overall time, least to greatest
$prof->sort(field => "longest", reverse => 1);
# exclude records with key2 eq 'disconnect'
$prof->exclude(key2 => 'disconnect');
# exclude records with key1 matching /^UPDATE/i
$prof->exclude(key1 => qr/^UPDATE/i);
# remove all records except those where key1 matches /^SELECT/i
$prof->match(key1 => qr/^SELECT/i);
# produce a formatted report with the given number of items
$report = $prof->report(number => 10);
# clone the profile data set
$clone = $prof->clone();
# get access to hash of header values
$header = $prof->header();
# get access to sorted array of nodes
$nodes = $prof->nodes();
# format a single node in the same style as report()
$text = $prof->format($nodes->[0]);
# get access to Data hash in DBI::Profile format
$Data = $prof->Data();
=head1 DESCRIPTION
This module offers the ability to read, manipulate and format
L<DBI::ProfileDumper> profile data.
Conceptually, a profile consists of a series of records, or nodes,
each of which has a set of statistics and a set of keys. Each record
must have a unique set of keys, but there is no requirement that every
record have the same number of keys.
=head1 METHODS
The following methods are supported by DBI::ProfileData objects.
=cut
our $VERSION = "2.010008";
use Carp qw(croak);
use Symbol;
use Fcntl qw(:flock);
use DBI::Profile qw(dbi_profile_merge);
# Index constants for the per-node data arrays: elements 0..6 hold the
# statistics (same order as DBI::Profile leaf nodes) and elements
# PATH..$#node hold the node's path key strings.
sub COUNT    () { 0 };  # number of samples merged into this node
sub TOTAL    () { 1 };  # total duration
sub FIRST    () { 2 };  # duration of the first sample
sub SHORTEST () { 3 };  # smallest duration
sub LONGEST  () { 4 };  # largest duration
sub FIRST_AT () { 5 };  # epoch time of the first sample
sub LAST_AT  () { 6 };  # epoch time of the last sample
sub PATH     () { 7 };  # index of the first path key
# Whether flock() is usable on this platform, probed with a harmless
# no-op flock on STDOUT. Can be forced on or off via the
# DBI_PROFILE_FLOCK environment variable.
my $HAS_FLOCK = (defined $ENV{DBI_PROFILE_FLOCK})
    ? $ENV{DBI_PROFILE_FLOCK}
    : do { local $@; eval { flock STDOUT, 0; 1 } };
=head2 $prof = DBI::ProfileData->new(File => "dbi.prof")
=head2 $prof = DBI::ProfileData->new(File => "dbi.prof", Filter => sub { ... })
=head2 $prof = DBI::ProfileData->new(Files => [ "dbi.prof.1", "dbi.prof.2" ])
Creates a new DBI::ProfileData object. Takes either a single file
through the File option or a list of Files in an array ref. If
multiple files are specified then the header data from the first file
is used.
=head3 Files
Reference to an array of file names to read.
=head3 File
Name of file to read. Takes precedence over C<Files>.
=head3 DeleteFiles
If true, the files are deleted after being read.
Actually the files are renamed with a C<deleteme> suffix before being read,
and then, after reading all the files, they're all deleted together.
The files are locked while being read which, combined with the rename, makes it
safe to 'consume' files that are still being generated by L<DBI::ProfileDumper>.
=head3 Filter
The C<Filter> parameter can be used to supply a code reference that can
manipulate the profile data as it is being read. This is most useful for
editing SQL statements so that slightly different statements in the raw data
will be merged and aggregated in the loaded data. For example:
Filter => sub {
my ($path_ref, $data_ref) = @_;
s/foo = '.*?'/foo = '...'/ for @$path_ref;
}
Here's an example that performs some normalization on the SQL. It converts all
numbers to C<N> and all quoted strings to C<S>. It can also convert digits to
N within names. Finally, it summarizes long "IN (...)" clauses.
It's aggressive and simplistic, but it's often sufficient, and serves as an
example that you can tailor to suit your own needs:
Filter => sub {
my ($path_ref, $data_ref) = @_;
local $_ = $path_ref->[0]; # whichever element contains the SQL Statement
s/\b\d+\b/N/g; # 42 -> N
s/\b0x[0-9A-Fa-f]+\b/N/g; # 0xFE -> N
s/'.*?'/'S'/g; # single quoted strings (doesn't handle escapes)
s/".*?"/"S"/g; # double quoted strings (doesn't handle escapes)
# convert names like log_20001231 into log_NNNNNNNN, controlled by $opt{n}
s/([a-z_]+)(\d{$opt{n},})/$1.('N' x length($2))/ieg if $opt{n};
# abbreviate massive "in (...)" statements and similar
s!(([NS],){100,})!sprintf("$2,{repeated %d times}",length($1)/2)!eg;
}
It's often better to perform these kinds of normalization in the DBI while the
data is being collected, to avoid too much memory being used by storing profile
data for many different SQL statements. See L<DBI::Profile>.
=cut
# Construct a profile-data object and immediately load the file(s).
# Caller options (File/Files, Filter, DeleteFiles, LockFile) override
# the defaults below; internal fields start empty.
sub new {
    my $pkg = shift;
    my %defaults = (
        Files        => [ "dbi.prof" ],
        Filter       => undef,
        DeleteFiles  => 0,
        LockFile     => $HAS_FLOCK,
        _header      => {},
        _nodes       => [],
        _node_lookup => {},
        _sort        => 'none',
    );
    my $self = bless { %defaults, @_ }, $pkg;

    # File (singular) takes precedence over Files (plural)
    $self->{Files} = [ $self->{File} ] if exists $self->{File};

    $self->_read_files();
    return $self;
}
# Read every file in $self->{Files} into _header and _nodes.
# With DeleteFiles set, each file is first renamed (so a concurrent
# DBI::ProfileDumper can safely keep writing fresh files) and all the
# renamed files are deleted together after reading.
sub _read_files {
    my $self = shift;
    my $files = $self->{Files};
    my $read_header = 0;
    my @files_to_delete;

    my $fh = gensym;
    foreach (@$files) {
        my $filename = $_;

        if ($self->{DeleteFiles}) {
            my $newfilename = $filename . ".deleteme";
            if ($^O eq 'VMS') {
                # VMS default filesystem can only have one period
                $newfilename = $filename . 'deleteme';
            }
            # will clobber an existing $newfilename
            rename($filename, $newfilename)
                or croak "Can't rename($filename, $newfilename): $!";
            # On a versioned filesystem we want old versions to be removed
            1 while (unlink $filename);
            $filename = $newfilename;
        }

        open($fh, "<", $filename)
            or croak("Unable to read profile file '$filename': $!");

        # lock the file in case it's still being written to
        # (we'll be forced to wait till the write is complete)
        flock($fh, LOCK_SH) if $self->{LockFile};

        if (-s $fh) {   # not empty
            # Only the first non-empty file's header values are kept.
            $self->_read_header($fh, $filename, $read_header ? 0 : 1);
            $read_header = 1;
            $self->_read_body($fh, $filename);
        }
        close($fh); # and release lock

        push @files_to_delete, $filename
            if $self->{DeleteFiles};
    }

    for (@files_to_delete){
        # for versioned file systems
        1 while (unlink $_);
        if(-e $_){
            warn "Can't delete '$_': $!";
        }
    }

    # discard node_lookup now that all files are read
    delete $self->{_node_lookup};
}
# Read the header section from $fh (named $filename, for error
# messages). The first line identifies the profiler module; subsequent
# "key = value" lines run up to the first empty line. Parsed values are
# stored on $self only when $keep is true (i.e. for the first file).
sub _read_header {
    my ($self, $fh, $filename, $keep) = @_;

    # get profiler module id
    my $first = <$fh>;
    chomp $first;
    $self->{_profiler} = $first if $keep;

    # collect variables from the header
    local $_;
    while (<$fh>) {
        chomp;
        last unless length $_;  # blank line ends the header
        /^(\S+)\s*=\s*(.*)/
            or croak("Syntax error in header in $filename line $.: $_");
        # XXX should compare new with existing (from previous file)
        # and warn if they differ (different program or path)
        $self->{_header}{$1} = unescape_key($2) if $keep;
    }
}
# Inverse of escape_key() in DBI::ProfileDumper: expand "\n" and "\r"
# escapes back into real newline/carriage-return characters (unless the
# backslash itself is escaped), then collapse "\\" to a single "\".
sub unescape_key {
    my ($key) = @_;
    for ($key) {
        s/(?<!\\)\\n/\n/g;  # \n -> newline, but leave \\n alone
        s/(?<!\\)\\r/\r/g;  # \r -> CR, but leave \\r alone
        s/\\\\/\\/g;        # \\ -> \
    }
    return $key;
}
# Read the body of the profile data from $fh: "+ <level> <key>" lines
# update the current path, "= <7 numbers>" lines are leaf samples that
# are merged into the node for the current path. Populates _nodes and
# the temporary _node_lookup index (path string -> node array index).
sub _read_body {
    my ($self, $fh, $filename) = @_;
    my $nodes = $self->{_nodes};
    my $lookup = $self->{_node_lookup};
    my $filter = $self->{Filter};

    # build up node array
    my @path = ("");
    my (@data, $path_key);
    local $_;
    while (<$fh>) {
        chomp;
        if (/^\+\s+(\d+)\s?(.*)/) {
            # it's a key
            my ($key, $index) = ($2, $1 - 1);

            $#path = $index;    # truncate path to new length
            $path[$index] = unescape_key($key); # place new key at end
        }
        elsif (s/^=\s+//) {
            # it's data - fill in the node array with the path in index 0
            # (a leading minus sign is accepted to be robust against systems
            # with unstable high-res clocks - typically due to poor NTP config
            # or kernel SMP behaviour, i.e. min time may be -0.000008)
            @data = split / /, $_;

            # corrupt data?
            croak("Invalid number of fields in $filename line $.: $_")
                unless @data == 7;
            croak("Invalid leaf node characters $filename line $.: $_")
                unless m/^[-+ 0-9eE\.]+$/;

            # hook to enable pre-processing of the data - such as mangling SQL
            # so that slightly different statements get treated as the same
            # and so merged in the results
            $filter->(\@path, \@data) if $filter;

            # elements of @path can't have NULLs in them, so this
            # forms a unique string per @path. If there's some way I
            # can get this without arbitrarily stripping out a
            # character I'd be happy to hear it!
            $path_key = join("\0",@path);

            # look for previous entry
            if (exists $lookup->{$path_key}) {
                # merge in the new data
                dbi_profile_merge($nodes->[$lookup->{$path_key}], \@data);
            } else {
                # insert a new node - nodes are arrays with data in 0-6
                # and path data after that
                push(@$nodes, [ @data, @path ]);

                # record its position so later samples can be merged
                $lookup->{$path_key} = $#$nodes;
            }
        }
        else {
            croak("Invalid line type syntax error in $filename line $.: $_");
        }
    }
}
=head2 $copy = $prof->clone();
Clone a profile data set creating a new object.
=cut
# Return an independent copy of this profile data set. The node arrays
# and the header hash are copied one level deep, which covers all the
# sharing the original structure has.
sub clone {
    my $self = shift;
    my $copy = bless { %$self }, ref($self);
    $copy->{_nodes}  = [ map { [ @$_ ] } @{ $self->{_nodes} } ];
    $copy->{_header} = { %{ $self->{_header} } };
    return $copy;
}
=head2 $header = $prof->header();
Returns a reference to a hash of header values. These are the key
value pairs included in the header section of the L<DBI::ProfileDumper>
data format. For example:
$header = {
Path => [ '!Statement', '!MethodName' ],
Program => 't/42profile_data.t',
};
Note that modifying this hash will modify the header data stored
inside the profile object.
=cut
# Accessor: the hash of key/value pairs parsed from the data file
# header. Returns the live internal hash ref, not a copy.
sub header {
    my $self = shift;
    return $self->{_header};
}
=head2 $nodes = $prof->nodes()
Returns a reference the sorted nodes array. Each element in the array
is a single record in the data set. The first seven elements are the
same as the elements provided by L<DBI::Profile>. After that each key is
in a separate element. For example:
$nodes = [
[
2, # 0, count
0.0312958955764771, # 1, total duration
0.000490069389343262, # 2, first duration
0.000176072120666504, # 3, shortest duration
0.00140702724456787, # 4, longest duration
1023115819.83019, # 5, time of first event
1023115819.86576, # 6, time of last event
'SELECT foo FROM bar' # 7, key1
'execute' # 8, key2
# 6+N, keyN
],
# ...
];
Note that modifying this array will modify the node data stored inside
the profile object.
=cut
# Accessor: the sorted array of node records. Returns the live internal
# array ref, not a copy.
sub nodes {
    my $self = shift;
    return $self->{_nodes};
}
=head2 $count = $prof->count()
Returns the number of items in the profile data set.
=cut
# Accessor: number of records (unique paths) in the data set.
sub count {
    my $self = shift;
    return scalar @{ $self->{_nodes} };
}
=head2 $prof->sort(field => "field")
=head2 $prof->sort(field => "field", reverse => 1)
Sorts data by the given field. Available fields are:
longest
total
count
shortest
The default sort is greatest to smallest, which is the opposite of the
normal Perl meaning. This, however, matches the expected behavior of
the dbiprof frontend.
=cut
# sorts data by one of the available fields
{
    # Map the user-visible sort field names onto node-array indices.
    # Note that keyN fields are compared numerically (<=>) like the
    # statistic fields.
    my %FIELDS = (
        longest  => LONGEST,
        total    => TOTAL,
        count    => COUNT,
        shortest => SHORTEST,
        key1     => PATH+0,
        key2     => PATH+1,
        key3     => PATH+2,
    );

    # Sort the nodes in place by the named field. Default order is
    # greatest-to-smallest (as the dbiprof frontend expects);
    # reverse => 1 gives smallest-to-greatest. Returns $self.
    sub sort {
        my $self = shift;
        my $nodes = $self->{_nodes};
        my %opt = @_;
        croak("Missing required field option.") unless $opt{field};
        my $index = $FIELDS{$opt{field}};
        croak("Unrecognized sort field '$opt{field}'.")
            unless defined $index;

        # sort over index
        if ($opt{reverse}) {
            @$nodes = sort {
                $a->[$index] <=> $b->[$index]
            } @$nodes;
        } else {
            @$nodes = sort {
                $b->[$index] <=> $a->[$index]
            } @$nodes;
        }

        # remember how we're sorted (reported by _report_header)
        $self->{_sort} = $opt{field};

        return $self;
    }
}
=head2 $count = $prof->exclude(key2 => "disconnect")
=head2 $count = $prof->exclude(key2 => "disconnect", case_sensitive => 1)
=head2 $count = $prof->exclude(key1 => qr/^SELECT/i)
Removes records from the data set that match the given string or
regular expression. This method modifies the data in a permanent
fashion - use clone() first to maintain the original data after
exclude(). Returns the number of nodes left in the profile data set.
=cut
# Remove from the data set every record whose keyN value matches the
# given string (case-insensitive unless case_sensitive is set) or
# regular expression. Records too short to have a keyN are kept.
# Modifies the data in place; returns the number of nodes remaining.
sub exclude {
    my $self = shift;
    my $nodes = $self->{_nodes};
    my %opt = @_;

    # find key index number from the (single) keyN option
    my ($index, $val);
    foreach (keys %opt) {
        if (/^key(\d+)$/) {
            $index = PATH + $1 - 1;
            $val = $opt{$_};
            last;
        }
    }
    croak("Missing required keyN option.") unless $index;

    if (UNIVERSAL::isa($val,"Regexp")) {
        # regex match
        @$nodes = grep {
            $#$_ < $index or $_->[$index] !~ /$val/
        } @$nodes;
    } else {
        if ($opt{case_sensitive}) {
            @$nodes = grep {
                $#$_ < $index or $_->[$index] ne $val;
            } @$nodes;
        } else {
            $val = lc $val;
            @$nodes = grep {
                $#$_ < $index or lc($_->[$index]) ne $val;
            } @$nodes;
        }
    }

    return scalar @$nodes;
}
=head2 $count = $prof->match(key2 => "disconnect")
=head2 $count = $prof->match(key2 => "disconnect", case_sensitive => 1)
=head2 $count = $prof->match(key1 => qr/^SELECT/i)
Removes records from the data set that do not match the given string
or regular expression. This method modifies the data in a permanent
fashion - use clone() first to maintain the original data after
match(). Returns the number of nodes left in the profile data set.
=cut
# Keep only the records whose keyN value matches the given string
# (case-insensitive unless case_sensitive is set) or regular
# expression; records too short to have a keyN are removed. Modifies
# the data in place; returns the number of nodes remaining.
sub match {
    my ($self, %opt) = @_;
    my $nodes = $self->{_nodes};

    # locate the keyN option: node-array index plus the value to match
    my ($index, $val);
    for my $name (keys %opt) {
        next unless $name =~ /^key(\d+)$/;
        $index = PATH + $1 - 1;
        $val   = $opt{$name};
        last;
    }
    croak("Missing required keyN option.") unless $index;

    # build a keep-this-node predicate, then filter in place
    my $want;
    if (UNIVERSAL::isa($val, "Regexp")) {
        $want = sub { my $n = shift; $#$n >= $index and $n->[$index] =~ /$val/ };
    }
    elsif ($opt{case_sensitive}) {
        $want = sub { my $n = shift; $#$n >= $index and $n->[$index] eq $val };
    }
    else {
        my $folded = lc $val;
        $want = sub { my $n = shift; $#$n >= $index and lc($n->[$index]) eq $folded };
    }
    @$nodes = grep { $want->($_) } @$nodes;

    return scalar @$nodes;
}
=head2 $Data = $prof->Data()
Returns the same Data hash structure as seen in L<DBI::Profile>. This
structure is not sorted. The nodes() structure probably makes more
sense for most analysis.
=cut
# Rebuild and return a nested hash in the same shape as DBI::Profile's
# Data attribute: each path key becomes a hash level and the final key
# maps to a fresh copy of the node's 7-element statistics array.
sub Data {
    my $self = shift;
    my %Data;

    foreach my $node (@{ $self->{_nodes} }) {
        my @keys = @{$node}[PATH .. $#$node];
        my $leaf_key = pop @keys;

        # walk (creating as needed) the hash levels for all but the last key
        my $ptr = \%Data;
        for my $key (@keys) {
            $ptr->{$key} = {} unless exists $ptr->{$key};
            $ptr = $ptr->{$key};
        }

        # attach a copy of the statistics under the final key
        $ptr->{$leaf_key} = [ @{$node}[0 .. 6] ];
    }
    return \%Data;
}
=head2 $text = $prof->format($nodes->[0])
Formats a single node into a human-readable block of text.
=cut
# Format a single node as a human-readable block of text: the
# statistics (long form for multi-sample nodes, short form for
# single-sample nodes) followed by one "Key N : ..." line per path key.
sub format {
    my ($self, $node) = @_;
    my $format;

    # setup keys
    my $keys = "";
    for (my $i = PATH; $i <= $#$node; $i++) {
        my $key = $node->[$i];

        # remove leading and trailing space
        $key =~ s/^\s+//;
        $key =~ s/\s+$//;

        # if key has newlines or is long take special precautions
        if (length($key) > 72 or $key =~ /\n/) {
            $keys .= " Key " . ($i - PATH + 1) . " :\n\n$key\n\n";
        } else {
            $keys .= " Key " . ($i - PATH + 1) . " : $key\n";
        }
    }

    # nodes with multiple runs get the long entry format, nodes with
    # just one run get a single count.
    if ($node->[COUNT] > 1) {
        $format = <<END;
Count : %d
Total Time : %3.6f seconds
Longest Time : %3.6f seconds
Shortest Time : %3.6f seconds
Average Time : %3.6f seconds
END
        return sprintf($format, @{$node}[COUNT,TOTAL,LONGEST,SHORTEST],
                       $node->[TOTAL] / $node->[COUNT]) . $keys;
    } else {
        $format = <<END;
Count : %d
Time : %3.6f seconds
END
        return sprintf($format, @{$node}[COUNT,TOTAL]) . $keys;
    }
}
=head2 $text = $prof->report(number => 10)
Produces a report with the given number of items.
=cut
# Produce a formatted report of the top $opt{number} records (clamped
# to the number of records available), preceded by the summary header.
sub report {
    my ($self, %opt) = @_;
    croak("Missing required number option") unless exists $opt{number};

    my $nodes = $self->{_nodes};
    my $limit = $opt{number} > @$nodes ? scalar @$nodes : $opt{number};

    my $report = $self->_report_header($limit);
    for my $rank (1 .. $limit) {
        # banner line: #####[ N ]#####...
        $report .= sprintf("#" x 5 . "[ %d ]". "#" x 59 . "\n", $rank);
        $report .= $self->format($nodes->[$rank - 1]);
        $report .= "\n";
    }
    return $report;
}
# Build the header section for report(): the profiler id line, the
# key/value pairs read from the data file header, and summary totals
# (record count, shown count, sort order, total samples, total runtime).
sub _report_header {
    my ($self, $number) = @_;
    my $nodes = $self->{_nodes};
    my $node_count = @$nodes;

    # find total runtime and method count
    my ($time, $count) = (0,0);
    foreach my $node (@$nodes) {
        $time += $node->[TOTAL];
        $count += $node->[COUNT];
    }

    my $header = <<END;
DBI Profile Data ($self->{_profiler})
END

    # output header fields
    while (my ($key, $value) = each %{$self->{_header}}) {
        $header .= sprintf(" %-13s : %s\n", $key, $value);
    }

    # output summary data fields
    $header .= sprintf(<<END, $node_count, $number, $self->{_sort}, $count, $time);
Total Records : %d (showing %d, sorted by %s)
Total Count : %d
Total Runtime : %3.6f seconds
END

    return $header;
}
1;
__END__
=head1 AUTHOR
Sam Tregar <sam@tregar.com>
=head1 COPYRIGHT AND LICENSE
Copyright (C) 2002 Sam Tregar
This program is free software; you can redistribute it and/or modify
it under the same terms as Perl 5 itself.
=cut

View File

@@ -0,0 +1,351 @@
package DBI::ProfileDumper;
use strict;
=head1 NAME
DBI::ProfileDumper - profile DBI usage and output data to a file
=head1 SYNOPSIS
To profile an existing program using DBI::ProfileDumper, set the
DBI_PROFILE environment variable and run your program as usual. For
example, using bash:
DBI_PROFILE=2/DBI::ProfileDumper program.pl
Then analyze the generated file (F<dbi.prof>) with L<dbiprof|dbiprof>:
dbiprof
You can also activate DBI::ProfileDumper from within your code:
use DBI;
# profile with default path (2) and output file (dbi.prof)
$dbh->{Profile} = "!Statement/DBI::ProfileDumper";
# same thing, spelled out
$dbh->{Profile} = "!Statement/DBI::ProfileDumper/File:dbi.prof";
# another way to say it
use DBI::ProfileDumper;
$dbh->{Profile} = DBI::ProfileDumper->new(
Path => [ '!Statement' ],
File => 'dbi.prof' );
# using a custom path
$dbh->{Profile} = DBI::ProfileDumper->new(
Path => [ "foo", "bar" ],
File => 'dbi.prof',
);
=head1 DESCRIPTION
DBI::ProfileDumper is a subclass of L<DBI::Profile|DBI::Profile> which
dumps profile data to disk instead of printing a summary to your
screen. You can then use L<dbiprof|dbiprof> to analyze the data in
a number of interesting ways, or you can roll your own analysis using
L<DBI::ProfileData|DBI::ProfileData>.
B<NOTE:> For Apache/mod_perl applications, use
L<DBI::ProfileDumper::Apache|DBI::ProfileDumper::Apache>.
=head1 USAGE
One way to use this module is just to enable it in your C<$dbh>:
$dbh->{Profile} = "1/DBI::ProfileDumper";
This will write out profile data by statement into a file called
F<dbi.prof>. If you want to modify either of these properties, you
can construct the DBI::ProfileDumper object yourself:
use DBI::ProfileDumper;
$dbh->{Profile} = DBI::ProfileDumper->new(
Path => [ '!Statement' ],
File => 'dbi.prof'
);
The C<Path> option takes the same values as in
L<DBI::Profile>. The C<File> option gives the name of the
file where results will be collected. If it already exists it will be
overwritten.
You can also activate this module by setting the DBI_PROFILE
environment variable:
$ENV{DBI_PROFILE} = "!Statement/DBI::ProfileDumper";
This will cause all DBI handles to share the same profiling object.
=head1 METHODS
The following methods are available to be called using the profile
object. You can get access to the profile object from the Profile key
in any DBI handle:
my $profile = $dbh->{Profile};
=head2 flush_to_disk
$profile->flush_to_disk()
Flushes all collected profile data to disk and empties the Data hash. Returns
the filename written to. If no profile data has been collected then the file is
not written and flush_to_disk() returns undef.
The file is locked while it's being written. A process 'consuming' the files
while they're being written to, should rename the file first, then lock it,
then read it, then close and delete it. The C<DeleteFiles> option to
L<DBI::ProfileData> does the right thing.
This method may be called multiple times during a program run.
=head2 empty
$profile->empty()
Clears the Data hash without writing to disk.
=head2 filename
$filename = $profile->filename();
Get or set the filename.
The filename can be specified as a CODE reference, in which case the referenced
code should return the filename to be used. The code will be called with the
profile object as its first argument.
=head1 DATA FORMAT
The data format written by DBI::ProfileDumper starts with a header
containing the version number of the module used to generate it. Then
a block of variable declarations describes the profile. After two
newlines, the profile data forms the body of the file. For example:
DBI::ProfileDumper 2.003762
Path = [ '!Statement', '!MethodName' ]
Program = t/42profile_data.t
+ 1 SELECT name FROM users WHERE id = ?
+ 2 prepare
= 1 0.0312958955764771 0.000490069389343262 0.000176072120666504 0.00140702724456787 1023115819.83019 1023115819.86576
+ 2 execute
= 1 0.0312958955764771 0.000490069389343262 0.000176072120666504 0.00140702724456787 1023115819.83019 1023115819.86576
+ 2 fetchrow_hashref
= 1 0.0312958955764771 0.000490069389343262 0.000176072120666504 0.00140702724456787 1023115819.83019 1023115819.86576
+ 1 UPDATE users SET name = ? WHERE id = ?
+ 2 prepare
= 1 0.0312958955764771 0.000490069389343262 0.000176072120666504 0.00140702724456787 1023115819.83019 1023115819.86576
+ 2 execute
= 1 0.0312958955764771 0.000490069389343262 0.000176072120666504 0.00140702724456787 1023115819.83019 1023115819.86576
The lines beginning with C<+> signs signify keys. The number after
the C<+> sign shows the nesting level of the key. Lines beginning
with C<=> are the actual profile data, in the same order as
in DBI::Profile.
Note that the same path may be present multiple times in the data file
since C<format()> may be called more than once. When read by
DBI::ProfileData the data points will be merged to produce a single
data set for each distinct path.
The key strings are transformed in three ways. First, all backslashes
are doubled. Then all newlines and carriage-returns are transformed
into C<\n> and C<\r> respectively. Finally, any NULL bytes (C<\0>)
are entirely removed. When DBI::ProfileData reads the file the first
two transformations will be reversed, but NULL bytes will not be
restored.
=head1 AUTHOR
Sam Tregar <sam@tregar.com>
=head1 COPYRIGHT AND LICENSE
Copyright (C) 2002 Sam Tregar
This program is free software; you can redistribute it and/or modify
it under the same terms as Perl 5 itself.
=cut
# inherit from DBI::Profile
use DBI::Profile;
our @ISA = ("DBI::Profile");
our $VERSION = "2.015325";
use Carp qw(croak);
use Fcntl qw(:flock);
use Symbol;
# Decide once whether flock() works here.  The DBI_PROFILE_FLOCK env
# var overrides detection; otherwise probe with a no-op flock on
# STDOUT (the eval fails on platforms where flock is unimplemented).
my $HAS_FLOCK = (defined $ENV{DBI_PROFILE_FLOCK})
    ? $ENV{DBI_PROFILE_FLOCK}
    : do { local $@; eval { flock STDOUT, 0; 1 } };
# cached "Program = ..." header line, computed once per process
my $program_header;
# Constructor: build the profile via DBI::Profile, defaulting LockFile
# to whether flock() is usable on this platform, and File to the
# conventional "dbi.prof".
sub new {
    my $class = shift;
    my $self  = $class->SUPER::new(
        LockFile => $HAS_FLOCK,
        @_,
    );
    # fall back to the standard output file name
    $self->filename("dbi.prof") unless $self->filename;
    if ($self->{Trace} && $self->{Trace} >= 2) {
        DBI->trace_msg("$self: @{[ %$self ]}\n", 0);
    }
    return $self;
}
# Get or set the output filename.  The stored value may be a CODE ref,
# in which case it is invoked with the profile object and its return
# value is used as the filename.
sub filename {
    my $self = shift;
    $self->{File} = shift if @_;
    my $name = $self->{File};
    return ref($name) eq 'CODE' ? $name->($self) : $name;
}
# flush available data to disk
# Writes collected profile data to the File: on the first write the
# file is created (an existing file is kept as "<name>.prev"); on
# later writes with the same name, data is appended.  The in-memory
# Data hash is emptied afterwards via empty().  Returns the filename
# written, or undef when there was nothing to flush.
sub flush_to_disk {
    my $self = shift;
    my $class = ref $self;
    my $filename = $self->filename;
    my $data = $self->{Data};
    if (1) { # make an option
        # skip the write entirely when no data has been collected
        if (not $data or ref $data eq 'HASH' && !%$data) {
            DBI->trace_msg("flush_to_disk skipped for empty profile\n",0) if $self->{Trace};
            return undef;
        }
    }
    my $fh = gensym;
    if (($self->{_wrote_header}||'') eq $filename) {
        # append more data to the file
        # XXX assumes that Path hasn't changed
        open($fh, ">>", $filename)
            or croak("Unable to open '$filename' for $class output: $!");
    } else {
        # create new file (or overwrite existing)
        if (-f $filename) {
            # keep the previous run's output as a .prev backup
            my $bak = $filename.'.prev';
            unlink($bak);
            rename($filename, $bak)
                or warn "Error renaming $filename to $bak: $!\n";
        }
        open($fh, ">", $filename)
            or croak("Unable to open '$filename' for $class output: $!");
    }
    # lock the file (before checking size and writing the header)
    flock($fh, LOCK_EX) if $self->{LockFile};
    # write header if file is empty - typically because we just opened it
    # in '>' mode, or perhaps we used '>>' but the file had been truncated externally.
    if (-s $fh == 0) {
        DBI->trace_msg("flush_to_disk wrote header to $filename\n",0) if $self->{Trace};
        $self->write_header($fh);
        # remember which file got the header so later calls append
        $self->{_wrote_header} = $filename;
    }
    my $lines = $self->write_data($fh, $self->{Data}, 1);
    DBI->trace_msg("flush_to_disk wrote $lines lines to $filename\n",0) if $self->{Trace};
    close($fh)  # unlocks the file
        or croak("Error closing '$filename': $!");
    $self->empty();
    return $filename;
}
# Write the file header to $fh: module name + version, the escaped
# Path declaration, the (cached) "Program = $0 @ARGV" line, and a
# blank separator line before the data body.
sub write_header {
    my ($self, $fh) = @_;
    # isolate us against globals which effect print
    local($\, $,);
    # $self->VERSION can return undef during global destruction
    my $version = $self->VERSION || $VERSION;
    # module name and version number
    print $fh ref($self)." $version\n";
    # print out Path (may contain CODE refs etc)
    my @escaped = map { escape_key($_) } @{ $self->{Path} || [] };
    print $fh "Path = [ ", join(', ', @escaped), " ]\n";
    # build the Program line once per process and reuse it
    # XXX should really quote as well as escape
    $program_header ||= "Program = "
        . join(" ", map { escape_key($_) } $0, @ARGV)
        . "\n";
    print $fh $program_header;
    # blank line terminates the header section
    print $fh "\n";
}
# write data in the prescribed format (see "DATA FORMAT" in the POD):
# "+ <level> <escaped key>" lines for keys, "= <stats>" lines for leaf
# data sets.  Recurses into nested hashes, incrementing the level.
# Returns the number of leaf data lines written.
sub write_data {
    my ($self, $fh, $data, $level) = @_;
    # XXX it's valid for $data to be an ARRAY ref, i.e., Path is empty.
    # produce an empty profile for invalid $data
    return 0 unless $data and UNIVERSAL::isa($data,'HASH');
    # isolate us against globals which affect print
    local ($\, $,);
    my $lines = 0;
    while (my ($key, $value) = each(%$data)) {
        # output a key
        print $fh "+ $level ". escape_key($key). "\n";
        if (UNIVERSAL::isa($value,'ARRAY')) {
            # output a data set for a leaf node
            print $fh "= ".join(' ', @$value)."\n";
            $lines += 1;
        } else {
            # recurse through keys - this could be rewritten to use a
            # stack for some small performance gain
            $lines += $self->write_data($fh, $value, $level + 1);
        }
    }
    return $lines;
}
# Make a key safe for the one-line file format: double every
# backslash, encode newlines/carriage-returns as \n and \r, and strip
# NUL bytes outright (they cannot be represented in the format).
sub escape_key {
    my ($text) = @_;
    $text =~ s{\\}{\\\\}g;   # backslashes first, so the escapes below aren't re-doubled
    $text =~ s{\n}{\\n}g;
    $text =~ s{\r}{\\r}g;
    $text =~ tr/\0//d;       # delete NULs (equivalent to s/\0//g)
    return $text;
}
# flush data to disk when profile object goes out of scope
sub on_destroy {
    my ($self) = @_;
    return $self->flush_to_disk();
}
1;

View File

@@ -0,0 +1,219 @@
package DBI::ProfileDumper::Apache;
use strict;
=head1 NAME
DBI::ProfileDumper::Apache - capture DBI profiling data from Apache/mod_perl
=head1 SYNOPSIS
Add this line to your F<httpd.conf>:
PerlSetEnv DBI_PROFILE 2/DBI::ProfileDumper::Apache
(If you're using mod_perl2, see L</When using mod_perl2> for some additional notes.)
Then restart your server. Access the code you wish to test using a
web browser, then shutdown your server. This will create a set of
F<dbi.prof.*> files in your Apache log directory.
Get a profiling report with L<dbiprof|dbiprof>:
dbiprof /path/to/your/apache/logs/dbi.prof.*
When you're ready to perform another profiling run, delete the old files and start again.
=head1 DESCRIPTION
This module interfaces DBI::ProfileDumper to Apache/mod_perl. Using
this module you can collect profiling data from mod_perl applications.
It works by creating a DBI::ProfileDumper data file for each Apache
process. These files are created in your Apache log directory. You
can then use the dbiprof utility to analyze the profile files.
=head1 USAGE
=head2 LOADING THE MODULE
The easiest way to use this module is just to set the DBI_PROFILE
environment variable in your F<httpd.conf>:
PerlSetEnv DBI_PROFILE 2/DBI::ProfileDumper::Apache
The DBI will look after loading and using the module when the first DBI handle
is created.
It's also possible to use this module by setting the Profile attribute
of any DBI handle:
$dbh->{Profile} = "2/DBI::ProfileDumper::Apache";
See L<DBI::ProfileDumper> for more possibilities, and L<DBI::Profile> for full
details of the DBI's profiling mechanism.
=head2 WRITING PROFILE DATA
The profile data files will be written to your Apache log directory by default.
The user that the httpd processes run as will need write access to the
directory. So, for example, if you're running the child httpds as user 'nobody'
and using chronolog to write to the logs directory, then you'll need to change
the default.
You can change the destination directory either by specifying a C<Dir> value
when creating the profile (like C<File> in the L<DBI::ProfileDumper> docs),
or you can use the C<DBI_PROFILE_APACHE_LOG_DIR> env var to change that. For example:
PerlSetEnv DBI_PROFILE_APACHE_LOG_DIR /server_root/logs
=head3 When using mod_perl2
Under mod_perl2 you'll need to either set the C<DBI_PROFILE_APACHE_LOG_DIR> env var,
or enable the mod_perl2 C<GlobalRequest> option, like this:
PerlOptions +GlobalRequest
to the global config section you're about to test with DBI::ProfileDumper::Apache.
If you don't do one of those then you'll see messages in your error_log similar to:
DBI::ProfileDumper::Apache on_destroy failed: Global $r object is not available. Set:
PerlOptions +GlobalRequest in httpd.conf at ..../DBI/ProfileDumper/Apache.pm line 144
=head3 Naming the files
The default file name is inherited from L<DBI::ProfileDumper> via the
filename() method, but DBI::ProfileDumper::Apache appends the parent pid and
the current pid, separated by dots, to that name.
=head3 Silencing the log
By default a message is written to STDERR (i.e., the apache error_log file)
when flush_to_disk() is called (either explicitly, or implicitly via DESTROY).
That's usually very useful. If you don't want the log message you can silence
it by setting the C<Quiet> attribute true.
PerlSetEnv DBI_PROFILE 2/DBI::ProfileDumper::Apache/Quiet:1
$dbh->{Profile} = "!Statement/DBI::ProfileDumper/Quiet:1";
$dbh->{Profile} = DBI::ProfileDumper->new(
Path => [ '!Statement' ],
Quiet => 1
);
=head2 GATHERING PROFILE DATA
Once you have the module loaded, use your application as you normally
would. Stop the webserver when your tests are complete. Profile data
files will be produced when Apache exits and you'll see something like
this in your error_log:
DBI::ProfileDumper::Apache writing to /usr/local/apache/logs/dbi.prof.2604.2619
Now you can use dbiprof to examine the data:
dbiprof /usr/local/apache/logs/dbi.prof.2604.*
By passing dbiprof a list of all generated files, dbiprof will
automatically merge them into one result set. You can also pass
dbiprof sorting and querying options, see L<dbiprof> for details.
=head2 CLEANING UP
Once you've made some code changes, you're ready to start again.
First, delete the old profile data files:
rm /usr/local/apache/logs/dbi.prof.*
Then restart your server and get back to work.
=head1 OTHER ISSUES
=head2 Memory usage
DBI::Profile can use a lot of memory for very active applications because it
collects profiling data in memory for each distinct query run.
Calling C<flush_to_disk()> will write the current data to disk and free the
memory it's using. For example:
$dbh->{Profile}->flush_to_disk() if $dbh->{Profile};
or, rather than flush every time, you could flush less often:
$dbh->{Profile}->flush_to_disk()
if $dbh->{Profile} and !(++$i % 100);
=head1 AUTHOR
Sam Tregar <sam@tregar.com>
=head1 COPYRIGHT AND LICENSE
Copyright (C) 2002 Sam Tregar
This program is free software; you can redistribute it and/or modify
it under the same terms as Perl 5 itself.
=cut
our $VERSION = "2.014121";
our @ISA = qw(DBI::ProfileDumper);
use DBI::ProfileDumper;
use File::Spec;
# pid of the process that loaded this module (the Apache parent);
# filename() uses it to group files from one server instance
my $initial_pid = $$;
# true when running under mod_perl 2.x (per MOD_PERL_API_VERSION)
use constant MP2 => ($ENV{MOD_PERL_API_VERSION} and $ENV{MOD_PERL_API_VERSION} == 2) ? 1 : 0;
my $server_root_dir;
if (MP2) {
    require Apache2::ServerUtil;
    $server_root_dir = Apache2::ServerUtil::server_root();
}
else {
    require Apache;
    # fall back to /tmp when server_root_relative is unavailable
    $server_root_dir = eval { Apache->server_root_relative('') } || "/tmp";
}
# Resolve (and cache on the object) the output directory, in order of
# precedence: explicit Dir attribute, DBI_PROFILE_APACHE_LOG_DIR env
# var, then the server root's "logs" subdirectory.
sub _dirname {
    my $self = shift;
    $self->{Dir} ||= $ENV{DBI_PROFILE_APACHE_LOG_DIR}
                 ||  File::Spec->catdir($server_root_dir, "logs");
    return $self->{Dir};
}
# Extend DBI::ProfileDumper::filename by appending ".<group_pid>.<pid>"
# and anchoring relative names under the configured log directory.
sub filename {
    my $self = shift;
    my $filename = $self->SUPER::filename(@_);
    return $filename if not $filename; # not set yet
    # to be able to identify groups of profile files from the same set of
    # apache processes, we include the parent pid in the file name
    # as well as the pid.
    # In the parent the group pid is our own pid; children use getppid().
    my $group_pid = ($$ eq $initial_pid) ? $$ : getppid();
    $filename .= ".$group_pid.$$";
    return $filename if File::Spec->file_name_is_absolute($filename);
    return File::Spec->catfile($self->_dirname, $filename);
}
# Flush via the parent class, then (unless Quiet) log the destination
# to STDERR, i.e. the Apache error_log.
sub flush_to_disk {
    my $self = shift;
    my $written = $self->SUPER::flush_to_disk(@_);
    unless ($self->{Quiet}) {
        print STDERR ref($self)." pid$$ written to $written\n" if $written;
    }
    return $written;
}
1;

View File

@@ -0,0 +1,50 @@
package DBI::ProfileSubs;
our $VERSION = "0.009396";
=head1 NAME
DBI::ProfileSubs - Subroutines for dynamic profile Path
=head1 SYNOPSIS
DBI_PROFILE='&norm_std_n3' prog.pl
This is new and still experimental.
=head1 TO DO
Define some kind of naming convention for the subs.
=cut
use strict;
use warnings;
# would be good to refactor these regex into separate subs and find some
# way to compose them in various combinations into multiple subs.
# Perhaps via AUTOLOAD where \&auto_X_Y_Z creates a sub that does X, Y, and Z.
# The final subs always need to be very fast.
#
# Normalize the statement text in $_ (numbers -> <N>, quoted strings
# -> <S>, huge IN-lists abbreviated) and return the normalized copy
# without clobbering the caller's $_.
sub norm_std_n3 {
    # my ($h, $method_name) = @_;
    my $sql = $_;
    $sql =~ s/\b\d+\b/<N>/g;            # 42 -> <N>
    $sql =~ s/\b0x[0-9A-Fa-f]+\b/<N>/g; # 0xFE -> <N>
    $sql =~ s/'.*?'/'<S>'/g;  # single quoted strings (doesn't handle escapes)
    $sql =~ s/".*?"/"<S>"/g;  # double quoted strings (doesn't handle escapes)
    # convert names like log20001231 into log<N>
    $sql =~ s/([a-z_]+)(\d{3,})\b/${1}<N>/ig;
    # abbreviate massive "in (...)" statements and similar
    $sql =~ s!((\s*<[NS]>\s*,\s*){100,})!sprintf("$2,<repeated %d times>",length($1)/2)!eg;
    return $sql;
}
1;

View File

@@ -0,0 +1,897 @@
# $Header: /home/timbo/dbi/lib/DBI/RCS/ProxyServer.pm,v 11.9 2003/05/14 11:08:17 timbo Exp $
# -*- perl -*-
#
# DBI::ProxyServer - a proxy server for DBI drivers
#
# Copyright (c) 1997 Jochen Wiedmann
#
# The DBD::Proxy module is free software; you can redistribute it and/or
# modify it under the same terms as Perl itself. In particular permission
# is granted to Tim Bunce for distributing this as a part of the DBI.
#
#
# Author: Jochen Wiedmann
# Am Eisteich 9
# 72555 Metzingen
# Germany
#
# Email: joe@ispsoft.de
# Phone: +49 7123 14881
#
#
##############################################################################
require 5.004;
use strict;
# RPC::PlServer supplies the generic forking/threading RPC server
# framework this proxy is built on.
use RPC::PlServer 0.2001;
require DBI;
require Config;
package DBI::ProxyServer;
############################################################################
#
# Constants
#
############################################################################
# use vars (rather than our) keeps the declared perl 5.004 minimum working
use vars qw($VERSION @ISA);
$VERSION = "0.3005";
@ISA = qw(RPC::PlServer DBI);
# Most of the options below are set to default values, we note them here
# just for the sake of documentation.
my %DEFAULT_SERVER_OPTIONS;
{
    my $o = \%DEFAULT_SERVER_OPTIONS;
    # Fixed: this assignment previously ended with a comma (the comma
    # operator), which only worked by accident; it is a statement.
    $o->{'chroot'} = undef;     # To be used in the initfile,
                                # after loading the required
                                # DBI drivers.
    # default client ACL: accept every host, no encryption
    $o->{'clients'} =
        [ { 'mask'   => '.*',
            'accept' => 1,
            'cipher' => undef
          }
        ];
    $o->{'configfile'} = '/etc/dbiproxy.conf' if -f '/etc/dbiproxy.conf';
    $o->{'debug'}      = 0;
    $o->{'facility'}   = 'daemon';
    $o->{'group'}      = undef;
    $o->{'localaddr'}  = undef; # Bind to any local IP number
    $o->{'localport'}  = undef; # Must set port number on the
                                # command line.
    $o->{'logfile'}    = undef; # Use syslog or EventLog.
    # XXX don't restrict methods that can be called (trust users once connected)
    $o->{'XXX_methods'} = {
        'DBI::ProxyServer' => {
            'Version'       => 1,
            'NewHandle'     => 1,
            'CallMethod'    => 1,
            'DestroyHandle' => 1
        },
        'DBI::ProxyServer::db' => {
            'prepare'       => 1,
            'commit'        => 1,
            'rollback'      => 1,
            'STORE'         => 1,
            'FETCH'         => 1,
            'func'          => 1,
            'quote'         => 1,
            'type_info_all' => 1,
            'table_info'    => 1,
            'disconnect'    => 1,
        },
        'DBI::ProxyServer::st' => {
            'execute' => 1,
            'STORE'   => 1,
            'FETCH'   => 1,
            'func'    => 1,
            'fetch'   => 1,
            'finish'  => 1
        }
    };
    # Pick the best concurrency mode this perl build supports; the
    # '' fallbacks keep the eq comparisons warnings-clean when the
    # Config entries are undefined.
    if (($Config::Config{'usethreads'} || '') eq 'define') {
        $o->{'mode'} = 'threads';
    } elsif (($Config::Config{'d_fork'} || '') eq 'define') {
        $o->{'mode'} = 'fork';
    } else {
        $o->{'mode'} = 'single';
    }
    # No pidfile by default, configuration must provide one if needed
    $o->{'pidfile'} = 'none';
    $o->{'user'} = undef;
};
############################################################################
#
# Name: Version
#
# Purpose: Return version string
#
# Inputs: $class - This class
#
# Result: Version string; suitable for printing by "--version"
#
############################################################################
# Return the human-readable version banner, suitable for "--version".
sub Version {
    my $v = $DBI::ProxyServer::VERSION;
    return "DBI::ProxyServer $v, Copyright (C) 1998, Jochen Wiedmann";
}
############################################################################
#
# Name: AcceptApplication
#
# Purpose: Verify DBI DSN
#
# Inputs: $self - This instance
# $dsn - DBI dsn
#
# Returns: TRUE for a valid DSN, FALSE otherwise
#
############################################################################
# A valid "application" is any DBI DSN of the form dbi:<driver>:...
# (case-insensitive).  Returns the (boolean) match result.
sub AcceptApplication {
    my ($self, $dsn) = @_;
    return $dsn =~ /^dbi:\w+:/i;
}
############################################################################
#
# Name: AcceptVersion
#
# Purpose: Verify requested DBI version
#
# Inputs: $self - Instance
# $version - DBI version being requested
#
# Returns: TRUE for ok, FALSE otherwise
#
############################################################################
# Accept the connection when our installed DBI is at least the
# version the client requests.  Also (re)initializes the DBI root
# class for this proxy as a side effect.
sub AcceptVersion {
    my ($self, $wanted) = @_;
    require DBI;
    DBI::ProxyServer->init_rootclass();
    return $DBI::VERSION >= $wanted;
}
############################################################################
#
# Name: AcceptUser
#
# Purpose: Verify user and password by connecting to the client and
# creating a database connection
#
# Inputs: $self - Instance
# $user - User name
# $password - Password
#
############################################################################
# Authenticate the user via RPC::PlServer's own check, then verify the
# credentials for real by opening a DBI connection to the requested
# DSN.  On success stores the dbh on the server object and returns
# [1, <handle id>]; on any failure returns 0.
sub AcceptUser {
    my $self = shift; my $user = shift; my $password = shift;
    return 0 if (!$self->SUPER::AcceptUser($user, $password));
    my $dsn = $self->{'application'};
    $self->Debug("Connecting to $dsn as $user");
    # make sure our own connect isn't re-proxied
    local $ENV{DBI_AUTOPROXY} = ''; # :-)
    $self->{'dbh'} = eval {
        DBI::ProxyServer->connect($dsn, $user, $password,
            { 'PrintError' => 0,
              'Warn' => 0,
              'RaiseError' => 1,
              # append err/state details to the error string before
              # letting RaiseError propagate it (return 0 = not handled)
              'HandleError' => sub {
                  my $err = $_[1]->err;
                  my $state = $_[1]->state || '';
                  $_[0] .= " [err=$err,state=$state]";
                  return 0;
              } })
    };
    if ($@) {
        $self->Error("Error while connecting to $dsn as $user: $@");
        return 0;
    }
    [1, $self->StoreHandle($self->{'dbh'}) ];
}
# Dispatch a client method call, temporarily exposing the server
# object to the handle classes via the dbh's private_server attribute.
# Re-throws any error after clearing that attribute.
sub CallMethod {
    my $server = shift;
    my $dbh = $server->{'dbh'};
    # We could store the private_server attribute permanently in
    # $dbh. However, we'd have a reference loop in that case and
    # I would be concerned about garbage collection. :-(
    $dbh->{'private_server'} = $server;
    $server->Debug("CallMethod: => " . do { local $^W; join(",", @_)});
    # copy $@ into $msg before the undef below can run any code that
    # might clobber it
    my @result = eval { $server->SUPER::CallMethod(@_) };
    my $msg = $@;
    undef $dbh->{'private_server'};
    if ($msg) {
        $server->Debug("CallMethod died with: $@");
        die $msg;
    } else {
        $server->Debug("CallMethod: <= " . do { local $^W; join(",", @result) });
    }
    @result;
}
# Entry point: build a server from the compiled-in defaults plus the
# caller's command-line style arguments, then enter the accept loop.
sub main {
    my $proxy = DBI::ProxyServer->new(\%DEFAULT_SERVER_OPTIONS, \@_);
    $proxy->Bind();
}
############################################################################
#
# The DBI part of the proxyserver is implemented as a DBI subclass.
# Thus we can reuse some of the DBI methods and overwrite only
# those that need additional handling.
#
############################################################################
package DBI::ProxyServer::dr;
# driver-handle subclass; no overrides needed beyond plain DBI::dr
@DBI::ProxyServer::dr::ISA = qw(DBI::dr);
package DBI::ProxyServer::db;
# database-handle subclass; overrides prepare() and table_info() below
@DBI::ProxyServer::db::ISA = qw(DBI::db);
# Prepare a statement on behalf of the client.
#
# When the client's config carries an 'sql' map, the incoming text is
# treated as a named-query key and replaced by the configured SQL
# (query-restriction mode); unknown keys die.
#
# Protocol version > 1: store the sth and return just its handle id
# (NAME/TYPE description is shipped later by execute()).
# Protocol version <= 1: combined prepare/execute - run immediately and
# return (handle, field count, param count, NAME, TYPE, execute results).
sub prepare {
    my($dbh, $statement, $attr, $params, $proto_ver) = @_;
    my $server = $dbh->{'private_server'};
    if (my $client = $server->{'client'}) {
        if ($client->{'sql'}) {
            # restricted mode: the first word selects a preconfigured query
            if ($statement =~ /^\s*(\S+)/) {
                my $st = $1;
                if (!($statement = $client->{'sql'}->{$st})) {
                    die "Unknown SQL query: $st";
                }
            } else {
                die "Cannot parse restricted SQL statement: $statement";
            }
        }
    }
    my $sth = $dbh->SUPER::prepare($statement, $attr);
    my $handle = $server->StoreHandle($sth);
    if ( $proto_ver and $proto_ver > 1 ) {
        $sth->{private_proxyserver_described} = 0;
        return $handle;
    } else {
        # The difference between the usual prepare and ours is that we implement
        # a combined prepare/execute. The DBD::Proxy driver doesn't call us for
        # prepare. Only if an execute happens, then we are called with method
        # "prepare". Further execute's are called as "execute".
        my @result = $sth->execute($params);
        my ($NAME, $TYPE);
        my $NUM_OF_FIELDS = $sth->{NUM_OF_FIELDS};
        if ($NUM_OF_FIELDS) { # is a SELECT
            $NAME = $sth->{NAME};
            $TYPE = $sth->{TYPE};
        }
        ($handle, $NUM_OF_FIELDS, $sth->{'NUM_OF_PARAMS'},
         $NAME, $TYPE, @result);
    }
}
# Run table_info on the real dbh and ship the complete result set to
# the client in one go: (field count, names, types, rows...).
sub table_info {
    my $dbh = shift;
    my $sth = $dbh->SUPER::table_info();
    my $numFields = $sth->{'NUM_OF_FIELDS'};
    my $names = $sth->{'NAME'};
    my $types = $sth->{'TYPE'};
    # We wouldn't need to send all the rows at this point, instead we could
    # make use of $rsth->fetch() on the client as usual.
    # The problem is that some drivers (namely DBD::ExampleP, DBD::mysql and
    # DBD::mSQL) are returning foreign sth's here, thus an instance of
    # DBI::st and not DBI::ProxyServer::st. We could fix this by permitting
    # the client to execute method DBI::st, but I don't like this.
    my @rows;
    while (my ($row) = $sth->fetch()) {
        last unless defined $row;
        # copy each row: fetch() reuses its buffer between calls
        push(@rows, [@$row]);
    }
    ($numFields, $names, $types, @rows);
}
package DBI::ProxyServer::st;
# statement-handle subclass; overrides execute() and fetch() below
@DBI::ProxyServer::st::ISA = qw(DBI::st);
# Execute a prepared statement for the client.
#
# $params entries are: a plain scalar (bind_param), an array ref whose
# first element is not a ref (bind_param with attributes), or an array
# ref starting with a scalar ref (bind_param_inout; the ref is
# remembered so the output value can be shipped back).
#
# For protocol version > 1, the first execute after prepare also
# returns the result-set description (field/param counts, NAME, TYPE).
sub execute {
    my $sth = shift; my $params = shift; my $proto_ver = shift;
    my @outParams;
    if ($params) {
        for (my $i = 0; $i < @$params;) {
            my $param = $params->[$i++];
            if (!ref($param)) {
                $sth->bind_param($i, $param);
            }
            else {
                if (!ref(@$param[0])) {#It's not a reference
                    $sth->bind_param($i, @$param);
                }
                else {
                    $sth->bind_param_inout($i, @$param);
                    # keep the scalar ref so we can return the out value
                    my $ref = shift @$param;
                    push(@outParams, $ref);
                }
            }
        }
    }
    my $rows = $sth->SUPER::execute();
    if ( $proto_ver and $proto_ver > 1 and not $sth->{private_proxyserver_described} ) {
        my ($NAME, $TYPE);
        my $NUM_OF_FIELDS = $sth->{NUM_OF_FIELDS};
        if ($NUM_OF_FIELDS) { # is a SELECT
            $NAME = $sth->{NAME};
            $TYPE = $sth->{TYPE};
        }
        $sth->{private_proxyserver_described} = 1;
        # First execution, we ship back description.
        return ($rows, $NUM_OF_FIELDS, $sth->{'NUM_OF_PARAMS'}, $NAME, $TYPE, @outParams);
    }
    ($rows, @outParams);
}
# Fetch up to $numRows rows (default 1) and return them as a list of
# copied array refs; stops early when the result set is exhausted.
sub fetch {
    my $sth = shift;
    my $wanted = shift || 1;
    my @batch;
    while ($wanted-- > 0) {
        my $row = $sth->SUPER::fetch() or last;
        # copy the row: fetch() reuses its buffer between calls
        push @batch, [@$row];
    }
    return @batch;
}
1;
__END__
=head1 NAME
DBI::ProxyServer - a server for the DBD::Proxy driver
=head1 SYNOPSIS
use DBI::ProxyServer;
DBI::ProxyServer::main(@ARGV);
=head1 DESCRIPTION
DBI::Proxy Server is a module for implementing a proxy for the DBI proxy
driver, DBD::Proxy. It allows access to databases over the network if the
DBMS does not offer networked operations. But the proxy server might be
useful for you, even if you have a DBMS with integrated network
functionality: It can be used as a DBI proxy in a firewalled environment.
DBI::ProxyServer runs as a daemon on the machine with the DBMS or on the
firewall. The client connects to the agent using the DBI driver DBD::Proxy,
thus in exactly the same way as using DBD::mysql, DBD::mSQL or any other
DBI driver.
The agent is implemented as a RPC::PlServer application. Thus you have
access to all the possibilities of this module, in particular encryption
and a similar configuration file. DBI::ProxyServer adds the possibility of
query restrictions: You can define a set of queries that a client may
execute and restrict access to those. (Requires a DBI driver that supports
parameter binding.) See L</CONFIGURATION FILE>.
The provided driver script, L<dbiproxy>, may either be used as it is or
used as the basis for a local version modified to meet your needs.
=head1 OPTIONS
When calling the DBI::ProxyServer::main() function, you supply an
array of options. These options are parsed by the Getopt::Long module.
The ProxyServer inherits all of RPC::PlServer's and hence Net::Daemon's
options and option handling, in particular the ability to read
options from either the command line or a config file. See
L<RPC::PlServer>. See L<Net::Daemon>. Available options include
=over 4
=item I<chroot> (B<--chroot=dir>)
(UNIX only) After doing a bind(), change root directory to the given
directory by doing a chroot(). This is useful for security, but it
restricts the environment a lot. For example, you need to load DBI
drivers in the config file or you have to create hard links to Unix
sockets, if your drivers are using them. For example, with MySQL, a
config file might contain the following lines:
my $rootdir = '/var/dbiproxy';
my $unixsockdir = '/tmp';
my $unixsockfile = 'mysql.sock';
foreach $dir ($rootdir, "$rootdir$unixsockdir") {
    mkdir $dir, 0755;
}
link("$unixsockdir/$unixsockfile",
"$rootdir$unixsockdir/$unixsockfile");
require DBD::mysql;
{
'chroot' => $rootdir,
...
}
If you don't know chroot(), think of an FTP server where you can see a
certain directory tree only after logging in. See also the --group and
--user options.
=item I<clients>
An array ref with a list of clients. Clients are hash refs, the attributes
I<accept> (0 for denying access and 1 for permitting) and I<mask>, a Perl
regular expression for the clients IP number or its host name.
=item I<configfile> (B<--configfile=file>)
Config files are assumed to return a single hash ref that overrides the
arguments of the new method. However, command line arguments in turn take
precedence over the config file. See the L<"CONFIGURATION FILE"> section
below for details on the config file.
=item I<debug> (B<--debug>)
Turn debugging mode on. Mainly this asserts that logging messages of
level "debug" are created.
=item I<facility> (B<--facility=mode>)
(UNIX only) Facility to use for L<Sys::Syslog>. The default is
B<daemon>.
=item I<group> (B<--group=gid>)
After doing a bind(), change the real and effective GID to the given.
This is useful, if you want your server to bind to a privileged port
(<1024), but don't want the server to execute as root. See also
the --user option.
GID's can be passed as group names or numeric values.
=item I<localaddr> (B<--localaddr=ip>)
By default a daemon is listening to any IP number that a machine
has. This attribute allows one to restrict the server to the given
IP number.
=item I<localport> (B<--localport=port>)
This attribute sets the port on which the daemon is listening. It
must be given somehow, as there's no default.
=item I<logfile> (B<--logfile=file>)
By default logging messages will be written to the syslog (Unix) or
to the event log (Windows NT). On other operating systems you need to
specify a log file. The special value "STDERR" forces logging to
stderr. See L<Net::Daemon::Log> for details.
=item I<mode> (B<--mode=modename>)
The server can run in three different modes, depending on the environment.
If you are running Perl 5.005 and did compile it for threads, then the
server will create a new thread for each connection. The thread will
execute the server's Run() method and then terminate. This mode is the
default, you can force it with "--mode=threads".
If threads are not available, but you have a working fork(), then the
server will behave similar by creating a new process for each connection.
This mode will be used automatically in the absence of threads or if
you use the "--mode=fork" option.
Finally there's a single-connection mode: If the server has accepted a
connection, he will enter the Run() method. No other connections are
accepted until the Run() method returns (if the client disconnects).
This operation mode is useful if you have neither threads nor fork(),
for example on the Macintosh. For debugging purposes you can force this
mode with "--mode=single".
=item I<pidfile> (B<--pidfile=file>)
(UNIX only) If this option is present, a PID file will be created at the
given location. Default is to not create a pidfile.
=item I<user> (B<--user=uid>)
After doing a bind(), change the real and effective UID to the given.
This is useful, if you want your server to bind to a privileged port
(<1024), but don't want the server to execute as root. See also
the --group and the --chroot options.
UIDs can be passed as user names or numeric values.
=item I<version> (B<--version>)
Suppresses startup of the server; instead the version string will
be printed and the program exits immediately.
=back
=head1 SHUTDOWN
DBI::ProxyServer is built on L<RPC::PlServer> which is, in turn, built on L<Net::Daemon>.
You should refer to L<Net::Daemon> for how to shutdown the server, except that
you can't because it's not currently documented there (as of v0.43).
The bottom-line is that it seems that there's no support for graceful shutdown.
=head1 CONFIGURATION FILE
The configuration file is just that of I<RPC::PlServer> or I<Net::Daemon>
with some additional attributes in the client list.
The config file is a Perl script. At the top of the file you may include
arbitrary Perl source, for example load drivers at the start (useful
to enhance performance), prepare a chroot environment and so on.
The important thing is that you finally return a hash ref of option
name/value pairs. The possible options are listed above.
All possibilities of Net::Daemon and RPC::PlServer apply, in particular
=over 4
=item Host and/or User dependent access control
=item Host and/or User dependent encryption
=item Changing UID and/or GID after binding to the port
=item Running in a chroot() environment
=back
Additionally the server offers you query restrictions. Suggest the
following client list:
'clients' => [
{ 'mask' => '^admin\.company\.com$',
'accept' => 1,
'users' => [ 'root', 'wwwrun' ],
},
{
'mask' => '^admin\.company\.com$',
'accept' => 1,
'users' => [ 'root', 'wwwrun' ],
'sql' => {
'select' => 'SELECT * FROM foo',
'insert' => 'INSERT INTO foo VALUES (?, ?, ?)'
}
        }
    ]
then only the users root and wwwrun may connect from admin.company.com,
executing arbitrary queries, but only wwwrun may connect from other
hosts and is restricted to
$sth->prepare("select");
or
$sth->prepare("insert");
which in fact are "SELECT * FROM foo" or "INSERT INTO foo VALUES (?, ?, ?)".
=head1 Proxyserver Configuration file (bigger example)
This section tells you how to restrict a DBI-Proxy: Not every user from
every workstation shall be able to execute every query.
There is a perl program "dbiproxy" which runs on a machine which is able
to connect to all the databases we wish to reach. All Perl-DBD-drivers must
be installed on this machine. You can also reach databases for which drivers
are not available on the machine where you run the program querying the
database, e.g. ask MS-Access-database from Linux.
Create a configuration file "proxy_oracle.cfg" at the dbproxy-server:
{
# This shall run in a shell or a DOS-window
# facility => 'daemon',
pidfile => 'your_dbiproxy.pid',
logfile => 1,
debug => 0,
mode => 'single',
localport => '12400',
# Access control, the first match in this list wins!
# So the order is important
clients => [
# hint to organize:
# the most specialized rules for single machines/users are 1st
# then the denying rules
# then the rules about whole networks
# rule: internal_webserver
# desc: to get statistical information
{
# this IP-address only is meant
mask => '^10\.95\.81\.243$',
# accept (not defer) connections like this
accept => 1,
# only users from this list
# are allowed to log on
users => [ 'informationdesk' ],
# only this statistical query is allowed
# to get results for a web-query
sql => {
alive => 'select count(*) from dual',
statistic_area => 'select count(*) from e01admin.e01e203 where geb_bezei like ?',
}
},
# rule: internal_bad_guy_1
{
mask => '^10\.95\.81\.1$',
accept => 0,
},
# rule: employee_workplace
# desc: get detailed information
{
# any IP-address is meant here
mask => '^10\.95\.81\.(\d+)$',
# accept (not defer) connections like this
accept => 1,
# only users from this list
# are allowed to log on
users => [ 'informationdesk', 'lippmann' ],
# all these queries are allowed:
sql => {
search_city => 'select ort_nr, plz, ort from e01admin.e01e200 where plz like ?',
search_area => 'select gebiettyp, geb_bezei from e01admin.e01e203 where geb_bezei like ? or geb_bezei like ?',
}
},
# rule: internal_bad_guy_2
# This does NOT work, because rule "employee_workplace" hits
# with its ip-address-mask of the whole network
{
# don't accept connection from this ip-address
mask => '^10\.95\.81\.5$',
accept => 0,
}
]
}
Start the proxyserver like this:
rem well-set Oracle_home needed for Oracle
set ORACLE_HOME=d:\oracle\ora81
dbiproxy --configfile proxy_oracle.cfg
=head2 Testing the connection from a remote machine
Call a program "dbish" from your commandline. I take the machine from rule "internal_webserver"
dbish "dbi:Proxy:hostname=oracle.zdf;port=12400;dsn=dbi:Oracle:e01" informationdesk xxx
There will be a shell-prompt:
informationdesk@dbi...> alive
Current statement buffer (enter '/'...):
alive
informationdesk@dbi...> /
COUNT(*)
'1'
[1 rows of 1 fields returned]
=head2 Testing the connection with a perl-script
Create a perl-script like this:
# file: oratest.pl
# call me like this: perl oratest.pl user password
use strict;
use DBI;
my $user = shift || die "Usage: $0 user password";
my $pass = shift || die "Usage: $0 user password";
my $config = {
dsn_at_proxy => "dbi:Oracle:e01",
proxy => "hostname=oechsle.zdf;port=12400",
};
my $dsn = sprintf "dbi:Proxy:%s;dsn=%s",
$config->{proxy},
$config->{dsn_at_proxy};
my $dbh = DBI->connect( $dsn, $user, $pass )
|| die "connect did not work: $DBI::errstr";
my $sql = "search_city";
printf "%s\n%s\n%s\n", "="x40, $sql, "="x40;
my $cur = $dbh->prepare($sql);
$cur->bind_param(1,'905%');
&show_result ($cur);
my $sql = "search_area";
printf "%s\n%s\n%s\n", "="x40, $sql, "="x40;
my $cur = $dbh->prepare($sql);
$cur->bind_param(1,'Pfarr%');
$cur->bind_param(2,'Bronnamberg%');
&show_result ($cur);
my $sql = "statistic_area";
printf "%s\n%s\n%s\n", "="x40, $sql, "="x40;
my $cur = $dbh->prepare($sql);
$cur->bind_param(1,'Pfarr%');
&show_result ($cur);
$dbh->disconnect;
exit;
sub show_result {
my $cur = shift;
unless ($cur->execute()) {
print "Could not execute\n";
return;
}
my $rownum = 0;
while (my @row = $cur->fetchrow_array()) {
printf "Row is: %s\n", join(", ",@row);
if ($rownum++ > 5) {
print "... and so on\n";
last;
}
}
$cur->finish;
}
The result
C:\>perl oratest.pl informationdesk xxx
========================================
search_city
========================================
Row is: 3322, 9050, Chemnitz
Row is: 3678, 9051, Chemnitz
Row is: 10447, 9051, Chemnitz
Row is: 12128, 9051, Chemnitz
Row is: 10954, 90513, Zirndorf
Row is: 5808, 90513, Zirndorf
Row is: 5715, 90513, Zirndorf
... and so on
========================================
search_area
========================================
Row is: 101, Bronnamberg
Row is: 400, Pfarramt Zirndorf
Row is: 400, Pfarramt Rosstal
Row is: 400, Pfarramt Oberasbach
Row is: 401, Pfarramt Zirndorf
Row is: 401, Pfarramt Rosstal
========================================
statistic_area
========================================
DBD::Proxy::st execute failed: Server returned error: Failed to execute method CallMethod: Unknown SQL query: statistic_area at E:/Perl/site/lib/DBI/ProxyServer.pm line 258.
Could not execute
=head2 How the configuration works
The most important section to control access to your dbi-proxy is "clients =>"
in the file "proxy_oracle.cfg":
Controlling which person at which machine is allowed to access
=over 4
=item * "mask" is a perl regular expression against the plain ip-address of the machine which wishes to connect _or_ the reverse-lookup from a nameserver.
=item * "accept" tells the dbiproxy-server whether IP addresses matching "mask" are allowed to connect or not (0/1)
=item * "users" is a reference to a list of usernames which must be matched, this is NOT a regular expression.
=back
Controlling which SQL-statements are allowed
You can put every SQL-statement you like in simply omitting "sql => ...", but the more important thing is to restrict the connection so that only allowed queries are possible.
If you include an sql-section in your config-file like this:
sql => {
alive => 'select count(*) from dual',
statistic_area => 'select count(*) from e01admin.e01e203 where geb_bezei like ?',
}
The user is allowed to put two queries against the dbi-proxy. The queries are _not_ "select count(*)...", the queries are "alive" and "statistic_area"! These keywords are replaced by the real query. So you can run a query for "alive":
my $sql = "alive";
my $cur = $dbh->prepare($sql);
...
The flexibility is that you can put parameters in the where-part of the query so the query are not static. Simply replace a value in the where-part of the query through a question mark and bind it as a parameter to the query.
my $sql = "statistic_area";
my $cur = $dbh->prepare($sql);
$cur->bind_param(1,'905%');
# A second parameter would be called like this:
# $cur->bind_param(2,'98%');
The result is this query:
select count(*) from e01admin.e01e203
where geb_bezei like '905%'
Don't try to put parameters into the sql-query like this:
# Does not work like you think.
# Only the first word of the query is parsed,
# so it's changed to "statistic_area", the rest is omitted.
# You _have_ to work with $cur->bind_param.
my $sql = "statistic_area 905%";
my $cur = $dbh->prepare($sql);
...
=head2 Problems
=over 4
=item * I don't know how to restrict users to special databases.
=item * I don't know how to pass query-parameters via dbish
=back
=head1 SECURITY WARNING
L<RPC::PlServer> used underneath is not secure due to serializing and
deserializing data with L<Storable> module. Use the proxy driver only in
trusted environment.
=head1 AUTHOR
Copyright (c) 1997 Jochen Wiedmann
Am Eisteich 9
72555 Metzingen
Germany
Email: joe@ispsoft.de
Phone: +49 7123 14881
The DBI::ProxyServer module is free software; you can redistribute it
and/or modify it under the same terms as Perl itself. In particular
permission is granted to Tim Bunce for distributing this as a part of
the DBI.
=head1 SEE ALSO
L<dbiproxy>, L<DBD::Proxy>, L<DBI>, L<RPC::PlServer>,
L<RPC::PlClient>, L<Net::Daemon>, L<Net::Daemon::Log>,
L<Sys::Syslog>, L<Win32::EventLog>, L<syslog>

1285
database/perl/vendor/lib/DBI/PurePerl.pm vendored Normal file

File diff suppressed because it is too large Load Diff

1014
database/perl/vendor/lib/DBI/SQL/Nano.pm vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,117 @@
package DBI::Util::CacheMemory;
# $Id: CacheMemory.pm 10314 2007-11-26 22:25:33Z Tim $
#
# Copyright (c) 2007, Tim Bunce, Ireland
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.
use strict;
use warnings;
=head1 NAME
DBI::Util::CacheMemory - a very fast but very minimal subset of Cache::Memory
=head1 DESCRIPTION
Like Cache::Memory (part of the Cache distribution) but doesn't support any fancy features.
This module aims to be a very fast compatible strict sub-set for simple cases,
such as basic client-side caching for DBD::Gofer.
Like Cache::Memory, and other caches in the Cache and Cache::Cache
distributions, the data will remain in the cache until cleared, it expires,
or the process dies. The cache object simply going out of scope will I<not>
destroy the data.
=head1 METHODS WITH CHANGES
=head2 new
All options except C<namespace> are ignored.
=head2 set
Doesn't support expiry.
=head2 purge
Same as clear() - deletes everything in the namespace.
=head1 METHODS WITHOUT CHANGES
=over
=item clear
=item count
=item exists
=item remove
=back
=head1 UNSUPPORTED METHODS
If it's not listed above, it's not supported.
=cut
our $VERSION = "0.010315";
# Process-wide store shared by every instance: maps namespace name to
# the hash ref holding that namespace's key/value entries.  Data lives
# until cleared/removed or the process exits, as documented above.
my %cache;
# Construct a cache handle.  Only the 'namespace' option is honoured
# (defaulting to 'Default'); all other Cache::Memory options are ignored.
sub new {
    my ($class, %opt) = @_;
    $opt{namespace} ||= 'Default';
    #$opt{_cache} = \%cache; # can be handy for debugging/dumping
    # Make sure the namespace's backing hash exists up front.
    $cache{ $opt{namespace} } ||= {};
    return bless { %opt }, $class;
}
# Store $value under $key in this cache's namespace; returns the value.
# Expiry options are not supported (see POD above).
sub set {
    my ($self, $key, $value) = @_;
    return $cache{ $self->{namespace} }{$key} = $value;
}
# Return the value stored under $key, or undef if it was never set.
sub get {
    my ($self, $key) = @_;
    return $cache{ $self->{namespace} }{$key};
}
# True if $key is present in this namespace (even with an undef value).
sub exists {
    my ($self, $key) = @_;
    return exists $cache{ $self->{namespace} }{$key};
}
# Delete $key from this namespace, returning the value it held.
sub remove {
    my ($self, $key) = @_;
    return delete $cache{ $self->{namespace} }{$key};
}
# purge() is simply an alias for clear(): everything in the namespace goes.
sub purge {
    my $self = shift;
    return $self->clear;
}
# Drop every entry by installing a fresh empty hash for the namespace.
sub clear {
    my $self = shift;
    return $cache{ $self->{namespace} } = {};
}
# Number of entries currently stored in this namespace.
sub count {
    my $self = shift;
    my @keys = keys %{ $cache{ $self->{namespace} } };
    return scalar @keys;
}
# Total size in characters of all keys and values in this namespace.
# undef values contribute 0 — the original code called length() on them,
# which produced "uninitialized value" warnings while still adding 0.
# Reference values are measured by their stringification length, as before.
sub size {
    my $c = $cache{ shift->{namespace} };
    my $size = 0;
    while ( my ($k,$v) = each %$c ) {
        $size += length($k);
        $size += length($v) if defined $v;
    }
    return $size;
}
1;

View File

@@ -0,0 +1,65 @@
package DBI::Util::_accessor;
use strict;
use Carp;
our $VERSION = "0.009479";
# inspired by Class::Accessor::Fast
# Create an instance from a hash ref of field values.  Any field whose
# name doesn't start with an underscore and has no accessor in the class
# triggers a warning, catching typo'd field names early.
sub new {
    my ($proto, $fields) = @_;
    my $class = ref $proto || $proto;
    $fields = {} unless $fields;
    my @dubious = grep { !m/^_/ && !$proto->can($_) } keys %$fields;
    carp "$class doesn't have accessors for fields: @dubious" if @dubious;
    # Shallow copy so the instance doesn't share the caller's hash.
    return bless { %$fields }, $class;
}
# Generate plain read/write accessors for the named fields.
sub mk_accessors {
    my ($self, @fields) = @_;
    return $self->mk_accessors_using('make_accessor', @fields);
}
# Install one accessor per field into the class's symbol table, using
# $maker (a method name or code ref) to build each closure.  Existing
# subs of the same name are left untouched; the maker is still invoked
# for every field, matching the original side-effect behaviour.
sub mk_accessors_using {
    my ($self, $maker, @fields) = @_;
    my $class = ref $self || $self;

    # Resolve the maker once so the loop doesn't repeat the lookup.
    $maker = $self->can($maker) unless ref $maker;

    no strict 'refs';
    for my $field (@fields) {
        my $accessor  = $self->$maker($field);
        my $full_name = $class . '::' . $field;
        *{$full_name} = $accessor unless defined &{$full_name};
    }
    #my $hash_ref = \%{$class."\:\:_accessors_hash};
    #$hash_ref->{$_}++ for @fields;
    # XXX also copy down _accessors_hash of base class(es)
    # so one in this class is complete
    return;
}
# Build a read/write accessor closure over $field: no args reads the
# field, one arg writes it (returning the new value), more than one croaks.
sub make_accessor {
    my ($class, $field) = @_;
    return sub {
        my $self = shift;
        return $self->{$field} if @_ == 0;
        croak "Too many arguments to $field" if @_ > 1;
        return $self->{$field} = shift;
    };
}
# Like make_accessor(), but a read autovivifies the field to an empty
# hash ref when it's false (undef/0/''), so callers can always deref it.
sub make_accessor_autoviv_hashref {
    my ($class, $field) = @_;
    return sub {
        my $self = shift;
        if (!@_) {
            $self->{$field} ||= {};    # autovivify on first read
            return $self->{$field};
        }
        croak "Too many arguments to $field" if @_ > 1;
        return $self->{$field} = shift;
    };
}
1;

181
database/perl/vendor/lib/DBI/W32ODBC.pm vendored Normal file
View File

@@ -0,0 +1,181 @@
package
DBI; # hide this non-DBI package from simple indexers
# $Id: W32ODBC.pm 8696 2007-01-24 23:12:38Z Tim $
#
# Copyright (c) 1997,1999 Tim Bunce
# With many thanks to Patrick Hollins for polishing.
#
# You may distribute under the terms of either the GNU General Public
# License or the Artistic License, as specified in the Perl README file.
=head1 NAME
DBI::W32ODBC - An experimental DBI emulation layer for Win32::ODBC
=head1 SYNOPSIS
use DBI::W32ODBC;
# apart from the line above everything is just the same as with
# the real DBI when using a basic driver with few features.
=head1 DESCRIPTION
This is an experimental pure perl DBI emulation layer for Win32::ODBC
If you can improve this code I'd be interested in hearing about it. If
you are having trouble using it please respect the fact that it's very
experimental. Ideally fix it yourself and send me the details.
=head2 Some Things Not Yet Implemented
Most attributes including PrintError & RaiseError.
type_info and table_info
Volunteers welcome!
=cut
# Set the version through a symbolic reference, split over two lines so
# the PAUSE indexer doesn't register this file as owning $DBI::VERSION.
${'DBI::VERSION'} # hide version from PAUSE indexer
    = "0.01";
my $Revision = sprintf("12.%06d", q$Revision: 8696 $ =~ /(\d+)/o);
# Empty import(): the file is loaded as DBI/W32ODBC.pm but declares
# package DBI, so the inherited import behaviour must be suppressed.
sub DBI::W32ODBC::import { } # must trick here since we're called DBI/W32ODBC.pm
use Carp;
use Win32::ODBC;
# Inherit the low-level ODBC methods; the DBI-style wrappers below delegate.
@ISA = qw(Win32::ODBC);
use strict;
# Debug level taken from the PERL_DBI_DEBUG environment variable.
$DBI::dbi_debug = $ENV{PERL_DBI_DEBUG} || 0;
carp "Loaded (W32ODBC) DBI.pm ${'DBI::VERSION'} (debug $DBI::dbi_debug)"
    if $DBI::dbi_debug;
# DBI-style connect(): fold the user/password into the ODBC connection
# string (UID=/PWD=) and return a Win32::ODBC handle re-blessed into
# this class, or undef (after a warning) on failure.  $module and $attr
# are accepted for interface compatibility but unused.
sub connect {
    my ($class, $dbname, $dbuser, $dbpasswd, $module, $attr) = @_;
    $dbname .= ";UID=$dbuser"   if $dbuser;
    $dbname .= ";PWD=$dbpasswd" if $dbpasswd;
    # Direct method call instead of the original indirect-object
    # syntax ("new Win32::ODBC $dbname"), which is ambiguous to parse.
    my $h = Win32::ODBC->new($dbname);
    warn "Error connecting to $dbname: ".Win32::ODBC::Error()."\n" unless $h;
    bless $h, $class if $h; # rebless into our class
    return $h;
}
# Quote a value for literal inclusion in SQL: undef becomes NULL,
# embedded single quotes are doubled, and CR/LF are spliced in via
# chr$() string concatenation (an MS Access-ism).
sub quote {
    my ($h, $string) = @_;
    return "NULL" unless defined $string;
    $string =~ s/'/''/g;    # ANSI-standard quote doubling
    # This hack seems to be required for Access but probably breaks for
    # other databases when using \r and \n. It would be better if we could
    # use ODBC options to detect that we're actually using Access.
    $string =~ s/\r/' & chr\$(13) & '/g;
    $string =~ s/\n/' & chr\$(10) & '/g;
    return "'$string'";
}
# Emulate $dbh->do(): prepare/execute/finish in one shot, returning the
# affected row count ("0E0" for zero so the result stays true) or undef
# on any failure.
sub do {
    my ($h, $statement, $attribs, @params) = @_;
    Carp::carp "\$h->do() attribs unused" if $attribs;
    my $new_h = $h->prepare($statement) or return undef;
    # prepare() registered $new_h in our list of live sths; this one is
    # short-lived, so take it straight back off before it's destroyed.
    pop @{ $h->{'___sths'} };
    $new_h->execute(@params) or return undef;
    my $rows = $new_h->rows;
    $new_h->finish;
    return ($rows == 0) ? "0E0" : $rows;
}
# ---
# Emulate $dbh->prepare(): Win32::ODBC supports only one active statement
# per connection, so a brand-new connection is opened for every prepare
# to allow multiple concurrent queries.  The SQL text is stashed on the
# new handle until execute() runs it.  Returns undef if the extra
# connection cannot be established.
sub prepare {
    my ($h, $sql) = @_;
    # Direct method call instead of indirect "new Win32::ODBC ..." syntax.
    my $new_h = Win32::ODBC->new($h->{DSN})
        or return undef;                 # bail out if no connection
    bless $new_h, __PACKAGE__;           # explicit two-arg bless (was one-arg);
                                         # this pseudo-sth isn't sub-classable anyway
    $new_h->{'__prepare'} = $sql;        # SQL to run at execute() time
    $new_h->{NAME} = [];                 # column names, filled in by execute()
    $new_h->{NUM_OF_FIELDS} = -1;        # -1 = not yet executed
    push @{ $h->{'___sths'} }, $new_h;   # save sth in parent for mass destruction at disconnect()
    return $new_h;
}
# Run the SQL stored by prepare().  Win32::ODBC's Sql() returns a true
# error code on failure, so undef is returned in that case.  On success
# the column names/count are captured and the handle itself is returned,
# serving as a pseudo statement handle.
sub execute {
    my ($h) = @_;
    my $error = $h->Sql($h->{'__prepare'});
    return undef if $error;
    my @columns = $h->FieldNames;
    $h->{NAME} = \@columns;
    $h->{NUM_OF_FIELDS} = scalar @columns;
    return $h;
}
# DBI-compatible fetchrow_hashref: map the next row's values onto the
# column names.  $NAME picks which name attribute to use (default "NAME").
sub fetchrow_hashref {
    my $h = shift;
    my $NAME = shift || "NAME";
    my $row = $h->fetchrow_arrayref or return undef;
    my @names = @{ $h->{$NAME} };
    my %record;
    @record{@names} = @$row;
    return \%record;
}
# Fetch the next row as a list of field values; empty list at end of data.
sub fetchrow {
    my $h = shift;
    $h->FetchRow() or return;
    return $h->Data(@{ $h->{NAME} });
}
# Fetch the next row as an array ref (undef at end of data); a fresh
# array is built per call, so callers may keep the ref.
sub fetch {
    my @row = shift->fetchrow;
    return @row ? \@row : undef;
}
*fetchrow_arrayref = \&fetch;    ## provide DBI compatibility
*fetchrow_array    = \&fetchrow; ## provide DBI compatibility
# Number of rows affected by the last statement (Win32::ODBC RowCount).
sub rows {
    my $h = shift;
    return $h->RowCount;
}
# Release the per-statement connection opened by prepare().
sub finish {
    my $h = shift;
    return $h->Close;
}
# ---
# Commit the current transaction via the ODBC Transact() call.
sub commit {
    my $h = shift;
    return $h->Transact(ODBC::SQL_COMMIT);
}
# Roll back the current transaction via the ODBC Transact() call.
sub rollback {
    my $h = shift;
    return $h->Transact(ODBC::SQL_ROLLBACK);
}
# Close every statement handle created via prepare() on this connection
# (each owns its own ODBC connection), then close the connection itself.
sub disconnect {
    my ($h) = shift;
    for my $sth (@{ $h->{'___sths'} }) {
        $sth->Close if $sth->{DSN};
    }
    $h->Close;
}
# Native error code: first element of Win32::ODBC's Error() list.
sub err {
    my ($h) = @_;
    return ($h->Error)[0];
}
# Full error message: Win32::ODBC's Error() evaluated in scalar context.
sub errstr {
    my ($h) = @_;
    return scalar( $h->Error );
}
# ---
1;