# Class-level configuration: default cursor class, inherited SQL-dialect
# knobs, component classes, and the simple instance-state accessors used
# throughout this storage driver.
__PACKAGE__->cursor_class('DBIx::Class::Storage::DBI::Cursor');

__PACKAGE__->mk_group_accessors('inherited' => qw/
  sql_limit_dialect sql_quote_char sql_name_sep
/);

__PACKAGE__->mk_group_accessors('component_class' => qw/sql_maker_class datetime_parser_type/);

__PACKAGE__->sql_maker_class('DBIx::Class::SQLMaker');
__PACKAGE__->datetime_parser_type('DateTime::Format::MySQL');
__PACKAGE__->sql_name_sep('.');

__PACKAGE__->mk_group_accessors('simple' => qw/
  _connect_info _dbic_connect_attributes _driver_determined
  _dbh _dbh_details _conn_pid _sql_maker _sql_maker_opts _dbh_autocommit
  _perform_autoinc_retrieval _autoinc_supplied_for_op
/);
# User-settable storage options (also recognized inside connect_info), plus
# the DBMS capability flags that get per-capability accessors below.
my @storage_options = qw/
  on_connect_call on_disconnect_call on_connect_do on_disconnect_do
  disable_sth_caching unsafe auto_savepoint
/;
__PACKAGE__->mk_group_accessors('simple' => @storage_options);

my @capabilities = (qw/
  insert_returning
  insert_returning_bound
  multicolumn_in
  placeholders
  typeless_placeholders
  join_optimizer
/);
# _supports_* reflects what the RDBMS can do; _use_* whether DBIC should use it.
__PACKAGE__->mk_group_accessors( dbms_capability => map { "_supports_$_" } @capabilities );
__PACKAGE__->mk_group_accessors( use_dbms_capability => map { "_use_$_" } (@capabilities) );

__PACKAGE__->_use_join_optimizer (1);
# The join optimizer is assumed usable on every storage unless a subclass
# overrides this capability probe.
sub _determine_supports_join_optimizer { 1 };
# Install lazy driver-determination proxies: each listed method (and each
# storage accessor used as a *getter*) first ensures the concrete driver
# subclass has been determined/reblessed, then re-dispatches to the (possibly
# overridden) implementation. Setter invocations of the accessors skip the
# driver determination entirely.
my $storage_accessor_idx = { map { $_ => 1 } qw(
  sqlt_type
  datetime_parser_type
  sql_maker
  cursor_class
)};
for my $meth (keys %$storage_accessor_idx, qw(
  deployment_statements
  build_datetime_parser
  txn_begin
  insert
  update
  delete
  select
  select_single
  _insert_bulk
  with_deferred_fk_checks
  get_use_dbms_capability
  get_dbms_capability
  _server_info
  _get_server_version
)) {
  my $orig = __PACKAGE__->can($meth)
    or die "$meth is not a ::Storage::DBI method!";

  # accessors only need driver determination when *reading*
  my $is_getter = $storage_accessor_idx->{$meth} ? 0 : 1;

  quote_sub __PACKAGE__ . "::$meth", sprintf( <<'EOC', $is_getter, perlstring $meth ), { '$orig' => \$orig };
if (
# only fire when invoked on an instance, a valid class-based invocation
# would e.g. be setting a default for an inherited accessor
ref $_[0]
and
! $_[0]->{_driver_determined}
and
! $_[0]->{_in_determine_driver}
and
# if this is a known *setter* - just set it, no need to connect
# and determine the driver
( %1$s or @_ <= 1 )
and
# Only try to determine stuff if we have *something* that either is or can
# provide a DSN. Allows for bare $schema's generated with a plain ->connect()
# to still be marginally useful
$_[0]->_dbi_connect_info->[0]
) {
$_[0]->_determine_driver;
goto $_[0]->can(%2$s) unless DBIx::Class::_ENV_::BROKEN_GOTO;
my $cref = $_[0]->can(%2$s);
goto $cref;
}
goto $orig;
EOC
}
# Construct a new storage object, initializing per-instance bookkeeping and
# registering it for fork/thread/END cleanup.
sub new {
  my $new = shift->next::method(@_);

  $new->_sql_maker_opts({});
  $new->_dbh_details({});
  $new->{_in_do_block} = 0;

  $new->_arm_global_destructor;

  $new;
}
{
  # Weak registry of all live storage instances, keyed by refaddr, so that
  # END and CLONE can clean up database handles that must not leak across
  # process exit or thread spawn.
  my %seek_and_destroy;

  sub _arm_global_destructor {

    # quick "garbage collection" pass - drop entries whose weakened values
    # have already gone away, preventing unbounded growth of the registry
    defined $seek_and_destroy{$_} or delete $seek_and_destroy{$_}
      for keys %seek_and_destroy;

    weaken( $seek_and_destroy{ refaddr($_[0]) } = $_[0] );
  }

  END {
    # preserve the exit status - _verify_pid may run code that clobbers $?
    local $?;

    $_->_verify_pid for ( grep { defined $_ } values %seek_and_destroy );
  }

  sub CLONE {
    # a new thread just started - detach every known instance from the
    # (now shared and unusable) parent handles and re-register them
    my @instances = grep { defined $_ } values %seek_and_destroy;
    %seek_and_destroy = ();

    for (@instances) {
      $_->_dbh(undef);
      $_->transaction_depth(0);
      $_->savepoints([]);
      $_->_arm_global_destructor
    }
  }
}
# Object teardown: detach the handle (after a fork-safety check) while
# silencing any warnings DBI may emit during global destruction.
sub DESTROY {
  $_[0]->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;

  local $SIG{__WARN__} = sub {};
  $_[0]->_dbh(undef);

  1;
}
# If this instance was connected in a different process (i.e. we forked),
# orphan the inherited handle without destroying the parent's connection.
sub _verify_pid {

  my $pid = $_[0]->_conn_pid;

  if( defined $pid and $pid != $$ and my $dbh = $_[0]->_dbh ) {
    # InactiveDestroy stops DBI from disconnecting the parent's socket
    $dbh->{InactiveDestroy} = 1;
    $_[0]->_dbh(undef);
    $_[0]->transaction_depth(0);
    $_[0]->savepoints([]);
  }

  return;
}
# Accessor/mutator for the connection specification. On set: normalizes the
# arrayref form, merges default DBI attributes, applies any embedded storage
# and sql_maker options, and stashes the pieces for later _connect calls.
sub connect_info {
  my ($self, $info) = @_;

  return $self->_connect_info if !$info;

  $self->_connect_info($info); # copy for _connect_info

  $info = $self->_normalize_connect_info($info)
    if ref $info eq 'ARRAY';

  my %attrs = (
    %{ $self->_default_dbi_connect_attributes || {} },
    %{ $info->{attributes} || {} },
  );

  my @args = @{ $info->{arguments} };

  if (keys %attrs and ref $args[0] ne 'CODE') {
    carp_unique (
        'You provided explicit AutoCommit => 0 in your connection_info. '
      . 'This is almost universally a bad idea (see the footnotes of '
      . 'DBIx::Class::Storage::DBI for more info). If you still want to '
      . 'do this you can set $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK} to disable '
      . 'this warning.'
    ) if ! $attrs{AutoCommit} and ! $ENV{DBIC_UNSAFE_AUTOCOMMIT_OK};

    push @args, \%attrs if keys %attrs;
  }

  $self->_dbi_connect_info(\@args);

  # invoke the per-option accessors for anything supplied inline
  for my $storage_opt (keys %{ $info->{storage_options} }) {
    my $value = $info->{storage_options}{$storage_opt};
    $self->$storage_opt($value);
  }

  # a new set of options invalidates any cached sql_maker
  $self->_sql_maker(undef);
  $self->_sql_maker_opts({});

  for my $sql_maker_opt (keys %{ $info->{sql_maker_options} }) {
    my $value = $info->{sql_maker_options}{$sql_maker_opt};
    $self->_sql_maker_opts->{$sql_maker_opt} = $value;
  }

  $self->_dbic_connect_attributes (\%attrs);

  return $self->_connect_info;
}
# Raw argument list eventually handed to DBI->connect. As a getter it
# transparently substitutes $ENV{DBI_DSN} for a missing DSN, without
# mutating the stored structure.
sub _dbi_connect_info {
  my $self = shift;

  return $self->{_dbi_connect_info} = $_[0]
    if @_;

  my $conninfo = $self->{_dbi_connect_info} || [];

  if (! defined $conninfo->[0] and $ENV{DBI_DSN}) {
    # return an amended copy - the original stays DSN-less
    my @new_conninfo = @$conninfo;
    $new_conninfo[0] = $ENV{DBI_DSN};
    $conninfo = \@new_conninfo;
  }

  return $conninfo;
}
# Translate any of the supported connect_info arrayref shapes (coderef,
# single hashref, or positional dsn/user/pass[/attrs[/extra_attrs]]) into a
# canonical { arguments, attributes, storage_options, sql_maker_options }
# hashref.
sub _normalize_connect_info {
  my ($self, $info_arg) = @_;

  my %info;

  my @args = @$info_arg;  # take a shallow copy for further mutation

  my %attrs;

  if (ref $args[0] eq 'CODE') {
    # coderef connector: [ sub {...}, \%extra_attributes ]
    %attrs = %{ $args[1] || {} };
    @args = $args[0];
  }
  elsif (ref $args[0] eq 'HASH') {
    # single-hashref form: everything keyed by name
    %attrs = %{ $args[0] };
    @args = ();
    if (my $code = delete $attrs{dbh_maker}) {
      @args = $code;

      # dsn/user/password are meaningless alongside a dbh_maker coderef
      my @ignored = grep { delete $attrs{$_} } (qw/dsn user password/);
      if (@ignored) {
        carp sprintf (
            'Attribute(s) %s in connect_info were ignored, as they can not be applied '
          . "to the result of 'dbh_maker'",
          join (', ', map { "'$_'" } (@ignored) ),
        );
      }
    }
    else {
      @args = delete @attrs{qw/dsn user password/};
    }
  }
  else {
    # positional form: [ dsn, user, pass, \%dbi_attrs, \%dbic_attrs ]
    %attrs = (
      % { $args[3] || {} },
      % { $args[4] || {} },
    );
    @args = @args[0,1,2];
  }

  $info{arguments} = \@args;

  my @storage_opts = grep exists $attrs{$_},
    @storage_options, 'cursor_class';

  @{ $info{storage_options} }{@storage_opts} =
    delete @attrs{@storage_opts} if @storage_opts;

  my @sql_maker_opts = grep exists $attrs{$_},
    qw/limit_dialect quote_char name_sep quote_names/;

  @{ $info{sql_maker_options} }{@sql_maker_opts} =
    delete @attrs{@sql_maker_opts} if @sql_maker_opts;

  $info{attributes} = \%attrs if %attrs;

  return \%info;
}
# Baseline DBI attributes merged underneath any user-supplied ones:
# autocommit on, exceptions instead of printed errors, and the failing
# statement included in error text.
sub _default_dbi_connect_attributes () {
  +{
    AutoCommit => 1,
    PrintError => 0,
    RaiseError => 1,
    ShowErrorStatement => 1,
  };
}
# Normalize an on_connect_do / on_disconnect_do setting (plain SQL string,
# coderef, or arrayref of either) into an arrayref of runnable actions.
# Returns an empty list when the option is unset.
sub _parse_connect_do {
  my ($self, $type) = @_;

  my $val = $self->$type;
  return () if not defined $val;

  my @res;

  if (not ref($val)) {
    push @res, [ 'do_sql', $val ];
  }
  elsif (ref($val) eq 'CODE') {
    push @res, $val;
  }
  elsif (ref($val) eq 'ARRAY') {
    push @res, map { [ 'do_sql', $_ ] } @$val;
  }
  else {
    $self->throw_exception("Invalid type for $type: " . ref($val));
  }

  return \@res;
}
# Run a method/coderef with a guaranteed-live $dbh as its second argument.
# Inside an existing do-block or transaction it runs directly; otherwise it
# goes through BlockRunner, which retries exactly once if the very first
# attempt failed due to a dropped connection.
sub dbh_do {
  my $self = shift;
  my $run_target = shift; # either a coderef or a method name

  # short circuit when we know there is no need for a runner
  #
  # FIXME - assumption may be premature with say clustered spares
  return $self->$run_target($self->_get_dbh, @_)
    if $self->{_in_do_block} or $self->transaction_depth;

  # take a ref instead of a copy, to preserve @_ aliasing
  # semantics within the coderef, but only if needed
  # (pseudoforking doesn't like this trick much)
  my $args = @_ ? \@_ : [];

  DBIx::Class::Storage::BlockRunner->new(
    storage => $self,
    wrap_txn => 0,
    retry_handler => sub {
      $_[0]->failed_attempt_count == 1
        and
      ! $_[0]->storage->connected
    },
  )->run(sub {
    $self->$run_target ($self->_get_dbh, @$args )
  });
}
# Transaction wrapper: make sure we hold a live handle (reconnecting across
# forks if necessary) before delegating to the generic implementation.
sub txn_do {
  $_[0]->_get_dbh; # connects or reconnects on pid change to grab a lock
  shift->next::method(@_);
}
# Orderly disconnect: fire on_disconnect hooks, roll back any implicit
# transaction, drop DBI's cached statement handles and release the $dbh.
sub disconnect {
  if( my $dbh = $_[0]->_dbh ) {

    $_[0]->_do_connection_actions(disconnect_call_ => $_) for (
      ( $_[0]->on_disconnect_call || () ),
      $_[0]->_parse_connect_do ('on_disconnect_do')
    );

    # stops the "implicit rollback on disconnect" warning
    $_[0]->_exec_txn_rollback unless $_[0]->_dbh_autocommit;

    %{ $dbh->{CachedKids} } = ();
    $dbh->disconnect;
    $_[0]->_dbh(undef);
  }
}
# Base implementation: no FK-deferral support, just execute the supplied
# coderef. RDBMS-specific subclasses wrap this with real deferral toggles.
sub with_deferred_fk_checks {
  $_[1]->();
}
# True when the handle both looks active and answers a live ping. The ping
# runs with RaiseError forced on, so driver-level failures surface.
sub connected {
  return 0 unless $_[0]->_seems_connected;

  #be on the safe side
  local $_[0]->_dbh->{RaiseError} = 1;

  return $_[0]->_ping;
}
# Cheap liveness check: after the fork-safety pass, consult the handle's
# 'Active' flag without touching the network.
sub _seems_connected {
  $_[0]->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;

  ($_[0]->_dbh || return 0)->FETCH('Active');
}
# Network-level liveness probe; 0 when no handle is present at all.
sub _ping {
  ($_[0]->_dbh || return 0)->ping;
}
# Reconnect if the current handle fails the connectivity check.
sub ensure_connected {
  $_[0]->connected || ( $_[0]->_populate_dbh && 1 );
}
# Public accessor for a *verified-live* database handle - this always incurs
# a ping, so internal code prefers the cheaper _get_dbh.
sub dbh {
  # maybe save a ping call
  $_[0]->_dbh
    ? ( $_[0]->ensure_connected and $_[0]->_dbh )
    : $_[0]->_populate_dbh
  ;
}
# Internal handle fetch: fork-check plus lazy connect, but no ping.
sub _get_dbh {
  $_[0]->_verify_pid unless DBIx::Class::_ENV_::BROKEN_FORK;

  $_[0]->_dbh || $_[0]->_populate_dbh;
}
# Lazily build (and cache) the SQLMaker instance, resolving limit dialect,
# quoting characters and name separator from options / class defaults, with
# loud fallbacks when neither is available. Getter only.
sub sql_maker {
  my $self = shift;

  $self->throw_exception('sql_maker() is not a setter method') if @_;

  unless ($self->_sql_maker) {
    my $sql_maker_class = $self->sql_maker_class;

    my %opts = %{$self->_sql_maker_opts||{}};

    my $dialect =
      $opts{limit_dialect}
        ||
      $self->sql_limit_dialect
        ||
      do {
        my $s_class = (ref $self) || $self;
        carp_unique (
          "Your storage class ($s_class) does not set sql_limit_dialect and you "
        . 'have not supplied an explicit limit_dialect in your connection_info. '
        . 'DBIC will attempt to use the GenericSubQ dialect, which works on most '
        . 'databases but can be (and often is) painfully slow. '
        . "Please file an RT ticket against '$s_class'"
        ) if $self->_dbi_connect_info->[0];

        'GenericSubQ';
      }
    ;

    my ($quote_char, $name_sep);

    if ($opts{quote_names}) {
      $quote_char = (delete $opts{quote_char}) || $self->sql_quote_char || do {
        my $s_class = (ref $self) || $self;
        carp_unique (
          "You requested 'quote_names' but your storage class ($s_class) does "
        . 'not explicitly define a default sql_quote_char and you have not '
        . 'supplied a quote_char as part of your connection_info. DBIC will '
        . q{default to the ANSI SQL standard quote '"', which works most of }
        . "the time. Please file an RT ticket against '$s_class'."
        );

        '"'; # RV
      };

      $name_sep = (delete $opts{name_sep}) || $self->sql_name_sep;
    }

    $self->_sql_maker($sql_maker_class->new(
      bindtype=>'columns',
      array_datatypes => 1,
      limit_dialect => $dialect,
      ($quote_char ? (quote_char => $quote_char) : ()),
      name_sep => ($name_sep || '.'),
      %opts,
    ));
  }
  return $self->_sql_maker;
}
# No-op hook: driver subclasses override to run setup after reblessing.
sub _rebless {}
# No-op hook: driver subclasses override for post-determination init.
sub _init {}
# (Re)establish the connection: clear stale state, connect, record the
# owning pid, determine the concrete driver and fire connection actions.
# Returns the fresh $dbh.
sub _populate_dbh {

  # reset internal states
  $_[0]->_dbh(undef);
  $_[0]->_dbh_details({});

  $_[0]->_dbh($_[0]->_connect);

  $_[0]->_conn_pid($$) unless DBIx::Class::_ENV_::BROKEN_FORK; # on win32 these are in fact threads

  $_[0]->_determine_driver;

  # in case a handle was implicitly supplied mid-transaction
  $_[0]->{transaction_depth} = $_[0]->_dbh_autocommit ? 0 : 1;

  # nested under _in_determine_driver the actions run at its end instead
  $_[0]->_run_connection_actions
    unless $_[0]->{_in_determine_driver};

  $_[0]->_dbh;
}
# Fire every configured on_connect hook against the fresh connection.
sub _run_connection_actions {

  $_[0]->_do_connection_actions(connect_call_ => $_) for (
    ( $_[0]->on_connect_call || () ),
    $_[0]->_parse_connect_do ('on_connect_do'),
  );
}
# Record a _use_* capability preference via the inherited-accessor store.
sub set_use_dbms_capability {
  $_[0]->set_inherited ($_[1], $_[2]);
}
# Should we *use* a capability? An explicit _use_* setting wins; otherwise
# fall back to whether the RDBMS actually _supports_* it.
sub get_use_dbms_capability {
  my ($self, $capname) = @_;

  my $use = $self->get_inherited ($capname);

  return defined $use
    ? $use
    : do { $capname =~ s/^_use_/_supports_/; $self->get_dbms_capability ($capname) }
  ;
}
# Cache a capability verdict on the per-connection details hash.
sub set_dbms_capability {
  $_[0]->_dbh_details->{capability}{$_[1]} = $_[2];
}
# Fetch a capability flag, probing via a _determine<cap> method on first use
# (default 0 when no prober exists) and caching the boolean result.
sub get_dbms_capability {
  my ($self, $capname) = @_;

  my $cap = $self->_dbh_details->{capability}{$capname};

  unless (defined $cap) {
    if (my $meth = $self->can ("_determine$capname")) {
      $cap = $self->$meth ? 1 : 0;
    }
    else {
      $cap = 0;
    }

    $self->set_dbms_capability ($capname, $cap);
  }

  return $cap;
}
# Gather (and cache on _dbh_details) the server version string plus a
# numeric "normalized_dbms_version" in %d.%03d%03d form for easy comparison.
sub _server_info {
  my $self = shift;

  my $info;
  unless ($info = $self->_dbh_details->{info}) {

    $info = {};

    my $server_version = try {
      $self->_get_server_version
    } catch {
      # driver determination *may* use this codepath but
      # *must not* rethrow - anything else is a real error
      $self->throw_exception($_) if $self->{_in_determine_driver};
      undef;
    };

    if (defined $server_version) {
      $info->{dbms_version} = $server_version;

      my ($numeric_version) = $server_version =~ /^([\d\.]+)/;
      my @verparts = split (/\./, $numeric_version);
      if (
        @verparts
          &&
        $verparts[0] <= 999
      ) {
        # consume up to 3 version parts, well-formed-ness enforced
        my @use_parts;
        while (@verparts && @use_parts < 3) {
          my $p = shift @verparts;
          last if $p > 999;
          push @use_parts, $p;
        }
        push @use_parts, 0 while @use_parts < 3;

        $info->{normalized_dbms_version} = sprintf "%d.%03d%03d", @use_parts;
      }
    }

    $self->_dbh_details->{info} = $info;
  }

  return $info;
}
# Default version probe via the standard SQL_DBMS_VER GetInfo key.
sub _get_server_version {
  shift->_dbh_get_info('SQL_DBMS_VER');
}
# Proxy to $dbh->get_info, accepting either a numeric code or a symbolic
# name resolved through DBI::Const::GetInfoType.
sub _dbh_get_info {
  my ($self, $info) = @_;

  if ($info =~ /[^0-9]/) {
    $info = $DBI::Const::GetInfoType::GetInfoType{$info};
    $self->throw_exception("Info type '$_[1]' not provided by DBI::Const::GetInfoType")
      unless defined $info;
  }

  $self->_get_dbh->get_info($info);
}
# Best-effort diagnostic snapshot of the connection environment (versions,
# driver, and a battery of GetInfo values) for inclusion in bug reports.
# Every probe is wrapped in try{} - failures simply omit the datum.
sub _describe_connection {
  my $self = shift;

  my $drv;
  try {
    $drv = $self->_extract_driver_from_connect_info;
    $self->ensure_connected;
  };

  $drv = "DBD::$drv" if $drv;

  my $res = {
    DBIC_DSN => $self->_dbi_connect_info->[0],
    DBI_VER => DBI->VERSION,
    DBIC_VER => DBIx::Class->VERSION,
    DBIC_DRIVER => ref $self,
    $drv ? (
      DBD => $drv,
      DBD_VER => try { $drv->VERSION },
    ) : (),
  };

  for my $inf (
    #keys %DBI::Const::GetInfoType::GetInfoType,
    qw/
      SQL_CURSOR_COMMIT_BEHAVIOR
      SQL_CURSOR_ROLLBACK_BEHAVIOR
      SQL_CURSOR_SENSITIVITY
      SQL_DATA_SOURCE_NAME
      SQL_DBMS_NAME
      SQL_DBMS_VER
      SQL_DEFAULT_TXN_ISOLATION
      SQL_DM_VER
      SQL_DRIVER_NAME
      SQL_DRIVER_ODBC_VER
      SQL_DRIVER_VER
      SQL_EXPRESSIONS_IN_ORDERBY
      SQL_GROUP_BY
      SQL_IDENTIFIER_CASE
      SQL_IDENTIFIER_QUOTE_CHAR
      SQL_MAX_CATALOG_NAME_LEN
      SQL_MAX_COLUMN_NAME_LEN
      SQL_MAX_IDENTIFIER_LEN
      SQL_MAX_TABLE_NAME_LEN
      SQL_MULTIPLE_ACTIVE_TXN
      SQL_MULT_RESULT_SETS
      SQL_NEED_LONG_DATA_LEN
      SQL_NON_NULLABLE_COLUMNS
      SQL_ODBC_VER
      SQL_QUALIFIER_NAME_SEPARATOR
      SQL_QUOTED_IDENTIFIER_CASE
      SQL_TXN_CAPABLE
      SQL_TXN_ISOLATION_OPTION
    /
  ) {
    my $v = try { $self->_dbh_get_info($inf) };
    next unless defined $v;

    my $expl = DBI::Const::GetInfoReturn::Explain($inf, $v);
    $res->{$inf} = DBI::Const::GetInfoReturn::Format($inf, $v) . ( $expl ? " ($expl)" : '' );
  }

  $res;
}
# Figure out which DBIx::Class::Storage::DBI::<Driver> subclass matches the
# connection, rebless $self into it, and run one-time post-determination
# setup. Guarded against reentry via the _in_determine_driver flag.
sub _determine_driver {
  my ($self) = @_;

  if ((not $self->_driver_determined) && (not $self->{_in_determine_driver})) {
    my $started_connected = 0;
    local $self->{_in_determine_driver} = 1;

    # only attempt the rebless if we are still the generic base class
    if (ref($self) eq __PACKAGE__) {
      my $driver;
      if ($self->_dbh) { # we are connected
        $driver = $self->_dbh->{Driver}{Name};
        $started_connected = 1;
      }
      else {
        $driver = $self->_extract_driver_from_connect_info;
      }

      if ($driver) {
        my $storage_class = "DBIx::Class::Storage::DBI::${driver}";
        if ($self->load_optional_class($storage_class)) {
          mro::set_mro($storage_class, 'c3');
          bless $self, $storage_class;
          $self->_rebless();
        }
        else {
          $self->_warn_undetermined_driver(
            'This version of DBIC does not yet seem to supply a driver for '
          . "your particular RDBMS and/or connection method ('$driver')."
          );
        }
      }
      else {
        $self->_warn_undetermined_driver(
          'Unable to extract a driver name from connect info - this '
        . 'should not have happened.'
        );
      }
    }

    $self->_driver_determined(1);

    Class::C3->reinitialize() if DBIx::Class::_ENV_::OLD_MRO;

    if ($self->can('source_bind_attributes')) {
      $self->throw_exception(
        "Your storage subclass @{[ ref $self ]} provides (or inherits) the method "
      . 'source_bind_attributes() for which support has been removed as of Jan 2013. '
      . 'If you are not sure how to proceed please contact the development team via '
      . DBIx::Class::_ENV_::HELP_URL
      );
    }

    $self->_init; # run driver-specific initializations

    $self->_run_connection_actions
      if !$started_connected && defined $self->_dbh;
  }
}
# Derive the DBD driver name: connect via the coderef connector when one is
# supplied, otherwise parse the DSN (falling back to $ENV{DBI_DRIVER}).
sub _extract_driver_from_connect_info {
  my $self = shift;

  my $drv;

  # if connect_info is a CODEREF, we have no choice but to connect
  if (
    ref $self->_dbi_connect_info->[0]
      and
    reftype $self->_dbi_connect_info->[0] eq 'CODE'
  ) {
    $self->_populate_dbh;
    $drv = $self->_dbh->{Driver}{Name};
  }
  else {
    # try to use dsn to not require being connected, the driver may still
    # force a connection later in _rebless to determine version
    # (dsn may not be supplied at all if all we do is make a mock-schema)
    ($drv) = ($self->_dbi_connect_info->[0] || '') =~ /^dbi:([^:]+):/i;
    $drv ||= $ENV{DBI_DRIVER};
  }

  return $drv;
}
# For pass-through connectors (ODBC/ADO), refine the storage class further
# using the actual backend RDBMS name reported by SQL_DBMS_NAME.
sub _determine_connector_driver {
  my ($self, $conn) = @_;

  my $dbtype = $self->_dbh_get_info('SQL_DBMS_NAME');

  if (not $dbtype) {
    $self->_warn_undetermined_driver(
      'Unable to retrieve RDBMS type (SQL_DBMS_NAME) of the engine behind your '
    . "$conn connector - this should not have happened."
    );
    return;
  }

  $dbtype =~ s/\W/_/gi;

  my $subclass = "DBIx::Class::Storage::DBI::${conn}::${dbtype}";
  return if $self->isa($subclass);

  if ($self->load_optional_class($subclass)) {
    bless $self, $subclass;
    $self->_rebless;
  }
  else {
    $self->_warn_undetermined_driver(
      'This version of DBIC does not yet seem to supply a driver for '
    . "your particular RDBMS and/or connection method ('$conn/$dbtype')."
    );
  }
}
# One-time warning when driver determination fails, with a full connection
# dump appended so users can file a useful bug report.
sub _warn_undetermined_driver {
  my ($self, $msg) = @_;

  carp_once ($msg . ' While we will attempt to continue anyway, the results '
  . 'are likely to be underwhelming. Please upgrade DBIC, and if this message '
  . "does not go away, file a bugreport including the following info:\n"
  . Data::Dumper::Concise::Dumper($self->_describe_connection)
  );
}
# Dispatch a single (dis)connect action. A plain string becomes a method
# call "<prefix><string>", a coderef is invoked as a method, and an arrayref
# is recursed into (flat list of actions, or list of [name => @args] pairs).
# Anything else is a usage error.
#
# Fix: the exception message previously read "conection" - corrected to
# "connection" (message text only, no behavioral change for callers).
sub _do_connection_actions {
  my $self = shift;
  my $method_prefix = shift;
  my $call = shift;

  if (not ref($call)) {
    my $method = $method_prefix . $call;
    $self->$method(@_);
  }
  elsif (ref($call) eq 'CODE') {
    $self->$call(@_);
  }
  elsif (ref($call) eq 'ARRAY') {
    if (ref($call->[0]) ne 'ARRAY') {
      # flat list of action names/coderefs
      $self->_do_connection_actions($method_prefix, $_) for @$call;
    }
    else {
      # list of [ action => @args ] tuples
      $self->_do_connection_actions($method_prefix, @$_) for @$call;
    }
  }
  else {
    $self->throw_exception (sprintf ("Don't know how to process connection actions of type '%s'", ref($call)) );
  }

  return $self;
}
# on_connect_call helper: run raw SQL against the new connection.
sub connect_call_do_sql {
  my $self = shift;
  $self->_do_query(@_);
}
# on_disconnect_call helper: run raw SQL just before disconnecting.
sub disconnect_call_do_sql {
  my $self = shift;
  $self->_do_query(@_);
}
# No-op in the base class; drivers override to set session datetime formats.
sub connect_call_datetime_setup { 1 }
# Execute one action from _parse_connect_do: a coderef yields further
# actions (recursed into); a string or [sql, \%attrs, @bind] arrayref is
# executed via $dbh->do with query tracing around it.
sub _do_query {
  my ($self, $action) = @_;

  if (ref $action eq 'CODE') {
    $action = $action->($self);
    $self->_do_query($_) foreach @$action;
  }
  else {
    # Most debuggers expect ($sql, @bind), so we need to exclude
    # the attribute hash which is the second argument to $dbh->do
    # furthermore the bind values are usually to be presented
    # verbatim
    my @do_args = (ref $action eq 'ARRAY') ? (@$action) : ($action);
    my $sql = shift @do_args;
    my $attrs = shift @do_args;
    my @bind = map { [ undef, $_ ] } @do_args;

    $self->dbh_do(sub {
      $_[0]->_query_start($sql, \@bind);
      $_[1]->do($sql, $attrs, @do_args);
      $_[0]->_query_end($sql, \@bind);
    });
  }

  return $self;
}
# Establish the raw DBI connection (either via coderef connector or
# DBI->connect), validate the returned handle is genuinely active, and -
# unless 'unsafe' was requested - force RaiseError on and install our own
# weak-ref'd HandleError callback that rethrows as DBIC exceptions.
sub _connect {
  my $self = shift;

  my $info = $self->_dbi_connect_info;

  $self->throw_exception("You did not provide any connection_info")
    unless defined $info->[0];

  my ($old_connect_via, $dbh);

  local $DBI::connect_via = 'connect'
    if $INC{'Apache/DBI.pm'} && $ENV{MOD_PERL};

  # this odd anonymous coderef dereference is in fact really
  # necessary to avoid the unwanted effect described in perl5
  # RT#75792 - the weaken-vs-closure leak
  my $dbh_error_handler_installer = sub {
    weaken (my $weak_self = $_[0]);

    # the coderef is blessed so we can distinguish it from externally
    # supplied handles (which must be preserved)
    $_[1]->{HandleError} = bless sub {
      if ($weak_self) {
        $weak_self->throw_exception("DBI Exception: $_[0]");
      }
      else {
        # the handler may be invoked by something totally out of
        # the scope of DBIC
        DBIx::Class::Exception->throw("DBI Exception (unhandled by DBIC, ::Schema GCed): $_[0]");
      }
    }, '__DBIC__DBH__ERROR__HANDLER__';
  };

  try {
    if(ref $info->[0] eq 'CODE') {
      $dbh = $info->[0]->();
    }
    else {
      $dbh = DBI->connect(@$info);
    }

    die $DBI::errstr unless $dbh;

    die sprintf ("%s fresh DBI handle with a *false* 'Active' attribute. "
      . 'This handle is disconnected as far as DBIC is concerned, and we can '
      . 'not continue',
      ref $info->[0] eq 'CODE'
        ? "Connection coderef $info->[0] returned a"
        : 'DBI->connect($schema->storage->connect_info) resulted in a'
    ) unless $dbh->FETCH('Active');

    # sanity checks unless asked otherwise
    unless ($self->unsafe) {

      $self->throw_exception(
        'Refusing clobbering of {HandleError} installed on externally supplied '
      . "DBI handle $dbh. Either remove the handler or use the 'unsafe' attribute."
      ) if $dbh->{HandleError} and ref $dbh->{HandleError} ne '__DBIC__DBH__ERROR__HANDLER__';

      # Default via _default_dbi_connect_attributes is 1, hence it was an
      # explicit false request on the part of the user
      unless ($dbh->{RaiseError}) {
        carp( ref $info->[0] eq 'CODE'

          ? "The 'RaiseError' of the externally supplied DBI handle is set to false. "
           ."DBIx::Class will toggle it back to true, unless the 'unsafe' connect "
           .'attribute has been supplied'

          : 'RaiseError => 0 supplied in your connection_info, without an explicit '
           .'unsafe => 1. Toggling RaiseError back to true'
        );
        $dbh->{RaiseError} = 1;
      }

      $dbh_error_handler_installer->($self, $dbh);
    }
  }
  catch {
    $self->throw_exception("DBI Connection failed: $_")
  };

  $self->_dbh_autocommit($dbh->{AutoCommit});
  return $dbh;
}
# Begin a transaction: on a never-before-connected storage a full (pinging)
# connect is needed so _dbh_autocommit gets populated; otherwise the cheap
# handle fetch suffices.
sub txn_begin {
  if (!defined $_[0]->_dbh_autocommit) {
    $_[0]->ensure_connected;
  }
  else {
    $_[0]->_get_dbh;
  }

  shift->next::method(@_);
}
# Issue the actual BEGIN - directly when already inside a do-block
# (dbh_do would be redundant), via dbh_do otherwise for retry semantics.
sub _exec_txn_begin {
  my $self = shift;

  if ($self->{_in_do_block}) {
    $self->_dbh->begin_work;
  }
  else {
    $self->dbh_do(sub { $_[1]->begin_work });
  }
}
# Commit the current transaction, repairing a desynchronized
# transaction_depth / AutoCommit combination with a warning, and restoring
# depth 1 afterwards when the handle itself runs with AutoCommit off.
sub txn_commit {
  my $self = shift;

  $self->throw_exception("Unable to txn_commit() on a disconnected storage")
    unless $self->_seems_connected;

  # esoteric case for folks using external $dbh handles
  if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) {
    carp "Storage transaction_depth 0 does not match "
        ."false AutoCommit of $self->{_dbh}, attempting COMMIT anyway";
    $self->transaction_depth(1);
  }

  $self->next::method(@_);

  # if AutoCommit is disabled txn_depth never goes to 0
  # as a new txn is started immediately on commit
  $self->transaction_depth(1) if (
    !$self->transaction_depth
      and
    defined $self->_dbh_autocommit
      and
    ! $self->_dbh_autocommit
  );
}
# Low-level COMMIT on the raw handle.
sub _exec_txn_commit {
  shift->_dbh->commit;
}
# Roll back the current transaction; mirrors txn_commit's handling of a
# desynchronized depth/AutoCommit pair and post-rollback depth restoration.
sub txn_rollback {
  my $self = shift;

  $self->throw_exception("Unable to txn_rollback() on a disconnected storage")
    unless $self->_seems_connected;

  # esoteric case for folks using external $dbh handles
  if (! $self->transaction_depth and ! $self->_dbh->FETCH('AutoCommit') ) {
    carp "Storage transaction_depth 0 does not match "
        ."false AutoCommit of $self->{_dbh}, attempting ROLLBACK anyway";
    $self->transaction_depth(1);
  }

  $self->next::method(@_);

  # if AutoCommit is disabled txn_depth never goes to 0
  # as a new txn is started immediately on rollback
  $self->transaction_depth(1) if (
    !$self->transaction_depth
      and
    defined $self->_dbh_autocommit
      and
    ! $self->_dbh_autocommit
  );
}
# Low-level ROLLBACK on the raw handle.
sub _exec_txn_rollback {
  shift->_dbh->rollback;
}
# Generate svp_begin/svp_release/svp_rollback: each refuses to operate on a
# disconnected storage, then delegates to the next implementation in the MRO.
quote_sub __PACKAGE__ . "::$_" => sprintf (<<'EOS', $_) for qw(svp_begin svp_release svp_rollback);
$_[0]->throw_exception('Unable to %s() on a disconnected storage')
unless $_[0]->_seems_connected;
shift->next::method(@_);
EOS
# Public seam kept for subclass overrides; the real work lives in
# _gen_sql_bind.
sub _prep_for_execute {
  #my ($self, $op, $ident, $args) = @_;
  return shift->_gen_sql_bind(@_)
}
# Run the SQLMaker operation to produce ($sql, $bind): prepends any bind
# values from the attrs, resolves bind metadata against column info, and
# warns once when raw DateTime objects sneak into a select's binds.
sub _gen_sql_bind {
  my ($self, $op, $ident, $args) = @_;

  my ($colinfos, $from);
  if ( blessed($ident) ) {
    $from = $ident->from;
    $colinfos = $ident->columns_info;
  }

  my ($sql, $bind);
  ($sql, @$bind) = $self->sql_maker->$op( ($from || $ident), @$args );

  $bind = $self->_resolve_bindattrs(
    $ident, [ @{$args->[2]{bind}||[]}, @$bind ], $colinfos
  );

  if (
    ! $ENV{DBIC_DT_SEARCH_OK}
      and
    $op eq 'select'
      and
    first {
      length ref $_->[1]
        and
      blessed($_->[1])
        and
      $_->[1]->isa('DateTime')
    } @$bind
  ) {
    carp_unique 'DateTime objects passed to search() are not supported '
      . 'properly (InflateColumn::DateTime formats and settings are not '
      . 'respected.) See "Formatting DateTime objects in queries" in '
      . 'DBIx::Class::Manual::Cookbook. To disable this warning for good '
      . 'set $ENV{DBIC_DT_SEARCH_OK} to true'
  }

  return( $sql, $bind );
}
# Canonicalize a list of bind values into [ \%attrs, $value ] pairs,
# back-filling sqlt_datatype/sqlt_size from column info where a dbic_colname
# is known, and refusing non-scalar values that carry no type information.
sub _resolve_bindattrs {
  my ($self, $ident, $bind, $colinfos) = @_;

  # lazily resolve column info only when a dbic_colname actually shows up
  my $resolve_bindinfo = sub {
    #my $infohash = shift;

    $colinfos ||= { %{ $self->_resolve_column_info($ident) } };

    my $ret;
    if (my $col = $_[0]->{dbic_colname}) {
      $ret = { %{$_[0]} };

      $ret->{sqlt_datatype} ||= $colinfos->{$col}{data_type}
        if $colinfos->{$col}{data_type};

      $ret->{sqlt_size} ||= $colinfos->{$col}{size}
        if $colinfos->{$col}{size};
    }

    $ret || $_[0];
  };

  return [ map {
    my $resolved =
      ( ref $_ ne 'ARRAY' or @$_ != 2 ) ? [ {}, $_ ]
    : ( ! defined $_->[0] )             ? [ {}, $_->[1] ]
    : (ref $_->[0] eq 'HASH')           ? [(
                                            ! keys %{$_->[0]}
                                              or
                                            exists $_->[0]{dbd_attrs}
                                              or
                                            $_->[0]{sqlt_datatype}
                                           ) ? $_->[0]
                                             : $resolve_bindinfo->($_->[0])
                                           , $_->[1]
                                          ]
    : (ref $_->[0] eq 'SCALAR')         ? [ { sqlt_datatype => ${$_->[0]} }, $_->[1] ]
    :                                     [ $resolve_bindinfo->(
                                              { dbic_colname => $_->[0] }
                                            ), $_->[1] ]
    ;

    if (
      ! exists $resolved->[0]{dbd_attrs}
        and
      ! $resolved->[0]{sqlt_datatype}
        and
      length ref $resolved->[1]
        and
      ! is_plain_value $resolved->[1]
    ) {
      local $Data::Dumper::Maxdepth = 1;
      local $Data::Dumper::Terse = 1;
      local $Data::Dumper::Useqq = 1;
      local $Data::Dumper::Indent = 0;
      local $Data::Dumper::Pad = ' ';
      $self->throw_exception(
        'You must supply a datatype/bindtype (see DBIx::Class::ResultSet/DBIC BIND VALUES) '
      . 'for non-scalar value '. Data::Dumper::Dumper ($resolved->[1])
      );
    }

    $resolved;

  } @$bind ];
}
# Render a resolved bind list for the query tracer: quoted values, or the
# literal NULL marker for undefs / missing pairs.
sub _format_for_trace {
  #my ($self, $bind) = @_;

  ### Turn @bind from something like this:
  ###   ( [ "artist", 1 ], [ \%attrs, 3 ] )
  ### to this:
  ###   ( "'1'", "'3'" )

  map {
    defined( $_ && $_->[1] )
      ? qq{'$_->[1]'}
      : q{NULL}
  } @{$_[1] || []};
}
# Forward query-start tracing to the debug object when debugging is on.
sub _query_start {
  my ($self, $sql, $bind) = @_;

  $self->debugobj->query_start($sql, $self->_format_for_trace($bind))
    if $self->debug;
}
# Forward query-end tracing to the debug object when debugging is on.
sub _query_end {
  my ($self, $sql, $bind) = @_;

  $self->debugobj->query_end($sql, $self->_format_for_trace($bind))
    if $self->debug;
}
# Map each resolved bind's metadata to the DBI attribute to use at
# bind_param time: explicit dbd_attrs win, untyped binds get undef, and
# datatype lookups are memoized per connection (including undef results).
sub _dbi_attrs_for_bind {
  #my ($self, $ident, $bind) = @_;

  return [ map {

    exists $_->{dbd_attrs}  ?  $_->{dbd_attrs}

  : ! $_->{sqlt_datatype}   ? undef

  :                           do {

    # cache the result in the dbh_details hash, as it (usually) can not
    # change unless we connect to something else
    # FIXME: for the time being Oracle is an exception, pending a rewrite
    # of the LOB storage
    my $cache = $_[0]->_dbh_details->{_datatype_map_cache} ||= {};

    $cache->{$_->{sqlt_datatype}} = $_[0]->bind_attribute_by_data_type($_->{sqlt_datatype})
      if ! exists $cache->{$_->{sqlt_datatype}};

    $cache->{$_->{sqlt_datatype}};

  } } map { $_->[0] } @{$_[2]} ];
}
# Top-level statement execution: generate SQL+binds, ensure a connection
# exists, and hand everything to _dbh_execute under dbh_do retry semantics.
sub _execute {
  my ($self, $op, $ident, @args) = @_;

  my ($sql, $bind) = $self->_prep_for_execute($op, $ident, \@args);

  # not even a PID check - we do not care about the state of the _dbh.
  # All we need is to get the appropriate drivers loaded if they aren't
  # already so that the assumption in ad7c50fc26e holds
  $self->_populate_dbh unless $self->_dbh;

  $self->dbh_do( _dbh_execute =>     # retry over disconnects
    $sql,
    $bind,
    $self->_dbi_attrs_for_bind($ident, $bind),
  );
}
# Prepare, bind, execute and trace a single statement on a concrete $dbh,
# converting a false execute() return into an exception. List context also
# yields the statement handle and resolved binds.
sub _dbh_execute {
  my ($self, $dbh, $sql, $bind, $bind_attrs) = @_;

  $self->_query_start( $sql, $bind );

  my $sth = $self->_bind_sth_params(
    $self->_prepare_sth($dbh, $sql),
    $bind,
    $bind_attrs,
  );

  # Can this fail without throwing an exception anyways???
  my $rv = $sth->execute();
  $self->throw_exception(
    $sth->errstr || $sth->err || 'Unknown error: execute() returned false, but error flags were not set...'
  ) if !$rv;

  $self->_query_end( $sql, $bind );

  return (wantarray ? ($rv, $sth, @$bind) : $rv);
}
# Prepare a statement (cached unless disable_sth_caching is set) and raise a
# descriptive exception if the driver returned nothing, covering drivers
# that fail without populating errstr.
sub _prepare_sth {
  my ($self, $dbh, $sql) = @_;

  # 3 is the if_active parameter which avoids active sth re-use
  my $sth = $self->disable_sth_caching
    ? $dbh->prepare($sql)
    : $dbh->prepare_cached($sql, {}, 3);

  # XXX You would think RaiseError would make this impossible,
  #  but apparently that's not true :(
  $self->throw_exception(
    $dbh->errstr
      ||
    sprintf( "\$dbh->prepare() of '%s' through %s failed *silently* without "
            .'an exception and/or setting $dbh->errstr',
      length ($sql) > 20
        ? substr($sql, 0, 20) . '...'
        : $sql
      ,
      'DBD::' . $dbh->{Driver}{Name},
    )
  ) if !$sth;

  $sth;
}
# Attach every resolved bind to the prepared handle: scalar-refs go through
# bind_param_inout (sized via dbd_size or the column's max byte size),
# everything else through bind_param with objects stringified when they are
# plain overloaded values.
sub _bind_sth_params {
  my ($self, $sth, $bind, $bind_attrs) = @_;

  for my $i (0 .. $#$bind) {
    if (ref $bind->[$i][1] eq 'SCALAR') {  # any scalarrefs are assumed to be bind_inouts
      $sth->bind_param_inout(
        $i + 1, # bind params counts are 1-based
        $bind->[$i][1],
        $bind->[$i][0]{dbd_size} || $self->_max_column_bytesize($bind->[$i][0]), # size
        $bind_attrs->[$i],
      );
    }
    else {
      # FIXME SUBOPTIMAL - DBI needs fixing to always stringify regardless of DBD
      my $v = ( length ref $bind->[$i][1] and is_plain_value $bind->[$i][1] )
        ? "$bind->[$i][1]"
        : $bind->[$i][1]
      ;

      $sth->bind_param(
        $i + 1,
        # The temp-var is CRUCIAL - DO NOT REMOVE IT, breaks older DBD::SQLite RT#79576
        $v,
        $bind_attrs->[$i],
      );
    }
  }

  $sth;
}
# For columns flagged auto_nextval whose value is absent (or literal SQL),
# fetch the next sequence value up front, memoizing the discovered sequence
# name on the column info. Returns a hashref of prefetched values.
sub _prefetch_autovalues {
  my ($self, $source, $colinfo, $to_insert) = @_;

  my %values;
  for my $col (keys %$colinfo) {
    if (
      $colinfo->{$col}{auto_nextval}
        and
      (
        ! exists $to_insert->{$col}
          or
        is_literal_value($to_insert->{$col})
      )
    ) {
      $values{$col} = $self->_sequence_fetch(
        'NEXTVAL',
        ( $colinfo->{$col}{sequence} ||=
            $self->_dbh_get_autoinc_seq($self->_get_dbh, $source, $col)
        ),
      );
    }
  }

  \%values;
}
# Insert one row and return the complete column=>value map, retrieving
# server-generated values via INSERT...RETURNING when supported, otherwise
# via last_insert_id and/or a follow-up PK lookup for retrieve_on_insert
# columns.
sub insert {
  my ($self, $source, $to_insert) = @_;

  my $col_infos = $source->columns_info;

  my $prefetched_values = $self->_prefetch_autovalues($source, $col_infos, $to_insert);

  # fuse the values, but keep a separate list of prefetched_values so that
  # they can be fused once again with the final return
  $to_insert = { %$to_insert, %$prefetched_values };

  my %pcols = map { $_ => 1 } $source->primary_columns;

  my (%retrieve_cols, $autoinc_supplied, $retrieve_autoinc_col);

  for my $col ($source->columns) {
    if ($col_infos->{$col}{is_auto_increment}) {
      $autoinc_supplied ||= 1 if defined $to_insert->{$col};
      $retrieve_autoinc_col ||= $col unless $autoinc_supplied;
    }

    # nothing to retrieve when explicit values are supplied
    next if (
      defined $to_insert->{$col} and ! is_literal_value($to_insert->{$col})
    );

    # the 'scalar keys' is a trick to preserve the ->columns declaration order
    $retrieve_cols{$col} = scalar keys %retrieve_cols if (
      $pcols{$col}
        or
      $col_infos->{$col}{retrieve_on_insert}
    );
  };

  local $self->{_autoinc_supplied_for_op} = $autoinc_supplied;
  local $self->{_perform_autoinc_retrieval} = $retrieve_autoinc_col;

  my ($sqla_opts, @ir_container);
  if (%retrieve_cols and $self->_use_insert_returning) {
    $sqla_opts->{returning_container} = \@ir_container
      if $self->_use_insert_returning_bound;

    $sqla_opts->{returning} = [
      sort { $retrieve_cols{$a} <=> $retrieve_cols{$b} } keys %retrieve_cols
    ];
  }

  my ($rv, $sth) = $self->_execute('insert', $source, $to_insert, $sqla_opts);

  my %returned_cols = %$to_insert;
  if (my $retlist = $sqla_opts->{returning}) {  # if IR is supported - we will get everything in one set

    unless( @ir_container ) {
      @ir_container = try {

        # FIXME - need to investigate why Caelum silenced this in 4d4dc518
        local $SIG{__WARN__} = sub {};

        my @r = $sth->fetchrow_array;
        $sth->finish;

        @r;
      };
    }

    @returned_cols{@$retlist} = @ir_container if @ir_container;
  }
  else {
    # pull in PK if needed and then everything else
    if (my @missing_pri = grep { $pcols{$_} } keys %retrieve_cols) {

      $self->throw_exception( "Missing primary key but Storage doesn't support last_insert_id" )
        unless $self->can('last_insert_id');

      my @pri_values = $self->last_insert_id($source, @missing_pri);

      $self->throw_exception( "Can't get last insert id" )
        unless (@pri_values == @missing_pri);

      @returned_cols{@missing_pri} = @pri_values;
      delete @retrieve_cols{@missing_pri};
    }

    # if there is more left to pull
    if (%retrieve_cols) {
      $self->throw_exception(
        'Unable to retrieve additional columns without a Primary Key on ' . $source->source_name
      ) unless %pcols;

      my @left_to_fetch = sort { $retrieve_cols{$a} <=> $retrieve_cols{$b} } keys %retrieve_cols;

      my $cur = DBIx::Class::ResultSet->new($source, {
        where => { map { $_ => $returned_cols{$_} } (keys %pcols) },
        select => \@left_to_fetch,
      })->cursor;

      @returned_cols{@left_to_fetch} = $cur->next;

      $self->throw_exception('Duplicate row returned for PK-search after fresh insert')
        if scalar $cur->next;
    }
  }

  return { %$prefetched_values, %returned_cols };
}
# Deprecated public alias for _insert_bulk; warns once, short-circuits on an
# empty dataset, then delegates.
#
# Fix: the deprecation warning previously read "depecated" - corrected to
# "deprecated" (message text only, no behavioral change for callers).
sub insert_bulk {
  carp_unique(
    'insert_bulk() should have never been exposed as a public method and '
  . 'calling it is deprecated as of Aug 2014. If you believe having a genuine '
  . 'use for this method please contact the development team via '
  . DBIx::Class::_ENV_::HELP_URL
  );

  return '0E0' unless @{$_[3]||[]};

  shift->_insert_bulk(@_);
}
# Fast-path multi-row INSERT.
#
# Arguments: ($self, $result_source, \@column_names, \@data_rows) where each
# row of @$data_rows is an arrayref positionally matching @$column_names.
#
# Strategy: build a prototype insert from row [0] (literal SQL passes through,
# literal-with-bind \[] values get their binds resolved and tagged with slice
# indexes, plain values become placeholders), prepare it once, then stream the
# remaining rows through execute_for_fetch / repeated execute.
#
# FIX: the bind-attribute comparison for \[ $sql, @bind ] columns had a
# truncated expression - the slice "@{$$val}[1 .. $#$$val]" and the $colinfos
# argument to _resolve_bindattrs were cut off, which could not compile.
# Restored the full call.
sub _insert_bulk {
  my ($self, $source, $cols, $data) = @_;

  $self->throw_exception('Calling _insert_bulk without a dataset to process makes no sense')
    unless @{$data||[]};

  my $colinfos = $source->columns_info($cols);

  # remember (for the duration of this op only) whether the caller supplied
  # values for any auto-increment column
  local $self->{_autoinc_supplied_for_op} =
    (grep { $_->{is_auto_increment} } values %$colinfos)
      ? 1
      : 0
  ;

  my ($proto_data, $serialized_bind_type_by_col_idx);

  for my $col_idx (0..$#$cols) {
    my $colname = $cols->[$col_idx];

    if (ref $data->[0][$col_idx] eq 'SCALAR') {
      # literal SQL scalar-ref - no bind value, no type to track
      $proto_data->{$colname} = $data->[0][$col_idx];
    }
    elsif (
      ref $data->[0][$col_idx] eq 'REF'
        and
      ref ${$data->[0][$col_idx]} eq 'ARRAY'
    ) {
      # \[ $sql, @bind ] - repack so we don't mangle the caller's structure
      my ($sql, @bind) = @${$data->[0][$col_idx]};

      # normalize whatever the user supplied as bind values
      my $resolved_bind = $self->_resolve_bindattrs(
        $source, \@bind, $colinfos,
      );

      # store the value-less (attrs only) bind info - every subsequent row
      # will be sanity-checked against this serialization
      $serialized_bind_type_by_col_idx->{$col_idx} =
        serialize [ map { $_->[0] } @$resolved_bind ];

      # tag each bind with the slice position it must be pulled from later
      $proto_data->{$colname} = \[ $sql, map { [
        {
          %{ $resolved_bind->[$_][0] },
          _bind_data_slice_idx   => $col_idx,
          _literal_bind_subindex => $_+1,
        }
          =>
        $resolved_bind->[$_][1]
      ] } (0 .. $#bind)
      ];
    }
    else {
      # plain value - a regular positional placeholder
      $serialized_bind_type_by_col_idx->{$col_idx} = undef;

      $proto_data->{$colname} = \[ '?', [
        { dbic_colname => $colname, _bind_data_slice_idx => $col_idx }
          =>
        $data->[0][$col_idx]
      ] ];
    }
  }

  my ($sql, $proto_bind) = $self->_prep_for_execute (
    'insert',
    $source,
    [ $proto_data ],
  );

  if (! @$proto_bind and keys %$serialized_bind_type_by_col_idx) {
    # the storage interpolated our binds straight into the SQL (e.g. a
    # NoBindVars-style storage) - a per-row fast path is impossible
    $self->throw_exception(
      'Unable to invoke fast-path insert without storage placeholder support'
    );
  }

  # helper to report a malformed row/column with a dump of the whole slice
  my $bad_slice_report_cref = sub {
    my ($msg, $r_idx, $c_idx) = @_;
    $self->throw_exception(sprintf "%s for column '%s' in populate slice:\n%s",
      $msg,
      $cols->[$c_idx],
      do {
        local $Data::Dumper::Maxdepth = 5;
        Data::Dumper::Concise::Dumper ({
          map { $cols->[$_] =>
            $data->[$r_idx][$_]
          } 0..$#$cols
        }),
      }
    );
  };

  # every subsequent row must be shaped exactly like row [0]
  for my $col_idx (0..$#$cols) {
    my $reference_val = $data->[0][$col_idx];

    # comparing against what we got from [0] above, hence start from 1
    for my $row_idx (1..$#$data) {
      my $val = $data->[$row_idx][$col_idx];

      if (! exists $serialized_bind_type_by_col_idx->{$col_idx}) {
        # column was literal SQL in row [0] - must be identical literal here
        if (ref $val ne 'SCALAR') {
          $bad_slice_report_cref->(
            "Incorrect value (expecting SCALAR-ref \\'$$reference_val')",
            $row_idx,
            $col_idx,
          );
        }
        elsif ($$val ne $$reference_val) {
          $bad_slice_report_cref->(
            "Inconsistent literal SQL value (expecting \\'$$reference_val')",
            $row_idx,
            $col_idx,
          );
        }
      }
      elsif (! defined $serialized_bind_type_by_col_idx->{$col_idx} ) {
        # column was a plain bind in row [0] - literals not allowed here
        if (is_literal_value($val)) {
          $bad_slice_report_cref->(
            "Literal SQL found where a plain bind value is expected",
            $row_idx,
            $col_idx
          );
        }
      }
      else {
        # column was \[ $sql, @bind ] - same SQL and same bind attrs required
        if (ref $val ne 'REF' or ref $$val ne 'ARRAY') {
          $bad_slice_report_cref->(
            "Incorrect value (expecting ARRAYREF-ref \\['${$reference_val}->[0]', ... ])",
            $row_idx,
            $col_idx,
          );
        }
        # drill down only when the refs differ (identical refs are trivially ok)
        elsif (
          $reference_val != $val
            or
          $$reference_val != $$val
        ) {
          if (${$val}->[0] ne ${$reference_val}->[0]) {
            $bad_slice_report_cref->(
              "Inconsistent literal/bind SQL (expecting \\['${$reference_val}->[0]', ... ])",
              $row_idx,
              $col_idx,
            );
          }
          # a bind happens only once for the entire dataset - differing
          # attributes further down would be silently ignored, so reject them
          elsif (
            $serialized_bind_type_by_col_idx->{$col_idx}
              ne
            serialize [
              map
              { $_->[0] }
              @{ $self->_resolve_bindattrs(
                $source, [ @{$$val}[1 .. $#$$val] ], $colinfos,
              )}
            ]
          ) {
            $bad_slice_report_cref->(
              'Differing bind attributes on literal/bind values not supported',
              $row_idx,
              $col_idx,
            );
          }
        }
      }
    }
  }

  # neither execute path below is atomic as far as DBIC is concerned,
  # hence the safety scope guard
  my $guard = $self->txn_scope_guard;

  $self->_query_start( $sql, @$proto_bind ? [[ undef => '__BULK_INSERT__' ]] : () );

  my $sth = $self->_prepare_sth($self->_dbh, $sql);

  my $rv = do {
    if (@$proto_bind) {
      # $proto_bind carries the slice-pull info; $cols is passed along
      # purely for prettier error reporting
      $self->_dbh_execute_for_fetch( $source, $sth, $proto_bind, $cols, $data );
    }
    else {
      # execute_for_fetch does not work without binds
      $self->_dbh_execute_inserts_with_no_binds( $sth, scalar @$data );
    }
  };

  $self->_query_end( $sql, @$proto_bind ? [[ undef => '__BULK_INSERT__' ]] : () );

  $guard->commit;

  return wantarray ? ($rv, $sth, @$proto_bind) : $rv;
}
# Drive DBI's execute_for_fetch() for a prepared multi-row insert.
#
# Arguments: ($self, $source, $sth, $proto_bind, \@cols, \@data).
# Binds type attributes once via the prototype binds (DBD bind types are
# sticky), then feeds row tuples to the DBD on demand. Errors are collected
# from every channel a DBD might use and rethrown with the offending slice.
sub _dbh_execute_for_fetch {
  my ($self, $source, $sth, $proto_bind, $cols, $data) = @_;

  # Bind the proto-bind values solely so the DBD learns the column types;
  # the actual values are never used by the tuple callback below.
  my $bind_attrs = $self->_dbi_attrs_for_bind($source, $proto_bind);

  for my $i (0 .. $#$proto_bind) {
    $sth->bind_param (
      $i+1, # DBI bind indexes are 1-based
      $proto_bind->[$i][1],
      $bind_attrs->[$i],
    ) if defined $bind_attrs->[$i];
  }

  my $fetch_row_idx = -1; # pre-increment makes the loop below saner

  # Callback handed to execute_for_fetch: returns the next row's bind
  # values (in proto-bind order) or undef when the dataset is exhausted.
  my $fetch_tuple = sub {
    return undef if ++$fetch_row_idx > $#$data;

    return [ map {
      my $v = ! defined $_->{_literal_bind_subindex}

        # plain bind - pull straight out of the data slice
        ? $data->[ $fetch_row_idx ]->[ $_->{_bind_data_slice_idx} ]

        # bind buried inside a \[ $sql, @bind ] literal - attrs were already
        # resolved at proto-bind time, but we still push the value through
        # the resolver to sanity-check what the user supplied
        : $self->_resolve_bindattrs (
            undef,  # a fake rsrc
            [ ${ $data->[ $fetch_row_idx ]->[ $_->{_bind_data_slice_idx} ]}
                ->[ $_->{_literal_bind_subindex} ] ],
            {},     # a fake column_info bag
          )->[0][1]
      ;

      # Forcibly stringify anything stringifiable - some DBDs mishandle
      # objects with string overloads otherwise.
      (length ref $v and is_plain_value $v)
        ? "$v"
        : $v
      ;
    } map { $_->[0] } @$proto_bind ];
  };

  my $tuple_status = [];
  my ($rv, $err);

  try {
    $rv = $sth->execute_for_fetch(
      $fetch_tuple,
      $tuple_status,
    );
  }
  catch {
    $err = shift;
  };

  # DBDs signal failure inconsistently: exception, false $rv, or $sth->err.
  # Normalize all of them into $err.
  $err = ($sth->errstr || 'UNKNOWN ERROR ($sth->errstr is unset)') if (
    ! defined $err
      and
    ( ! defined $rv or $sth->err )
  );

  # The statement must be finished even when the execute blew up.
  try {
    $sth->finish
  }
  catch {
    $err = shift unless defined $err
  };

  if (defined $err) {
    # locate the first per-tuple error record (a ref) in the status array
    my $i = 0;
    ++$i while $i <= $#$tuple_status && ! ref $tuple_status->[$i];

    $self->throw_exception("Unexpected populate error: $err")
      if ($i > $#$tuple_status);

    $self->throw_exception(sprintf "execute_for_fetch() aborted with '%s' at populate slice:\n%s",
      ($tuple_status->[$i][1] || $err),
      Data::Dumper::Concise::Dumper( {
        map { $cols->[$_] => $data->[$i][$_] } (0 .. $#$cols)
      } ),
    );
  }

  return $rv;
}
# Execute a prepared, bind-free INSERT statement $count times.
# Used by _insert_bulk when the storage interpolated everything into the SQL.
# Always finishes the statement handle; rethrows the first error seen.
sub _dbh_execute_inserts_with_no_binds {
  my ($self, $sth, $count) = @_;

  my $err;
  try {
    my $dbh = $self->_get_dbh;

    # force exceptions (and silence duplicate warnings) for the duration
    local $dbh->{RaiseError} = 1;
    local $dbh->{PrintError} = 0;

    $sth->execute foreach 1..$count;
  }
  catch {
    $err = shift;
  };

  # finish the handle regardless of success - keep the earlier error if any
  try {
    $sth->finish
  }
  catch {
    $err = shift unless defined $err;
  };

  $self->throw_exception($err) if defined $err;

  return $count;
}
# UPDATE entry point - delegates straight to the generic _execute() machinery.
sub update {
  shift->_execute('update', @_);
}
# DELETE entry point - delegates straight to the generic _execute() machinery.
sub delete {
  shift->_execute('delete', @_);
}
# Internal SELECT: normalize the arguments via _select_args() and execute.
sub _select {
  my $self = shift;
  $self->_execute($self->_select_args(@_));
}
# Render a select into a \[ "($sql)", @bind ] structure suitable for
# embedding as a subquery. Refuses to run when software_limit is combined
# with rows/offset, since the limit could not be represented in SQL.
sub _select_args_to_query {
  my $self = shift;

  $self->throw_exception(
    "Unable to generate limited query representation with 'software_limit' enabled"
  ) if ( $_[3]->{software_limit} and ( $_[3]->{offset} or $_[3]->{rows} ) );

  my ($op, $ident, @args) = $self->_select_args(@_);

  my ($sql, $bind) = $self->_gen_sql_bind( $op, $ident, \@args );

  # reuse the bind arrayref: prepend the parenthesized SQL and return a
  # reference to the whole thing
  unshift @{$bind}, "($sql)";

  \$bind;
}
# Assemble the argument list for a SELECT: validates limit attributes,
# decides whether complex prefetch requires wrapping in a subquery, prunes
# unused joins, and returns ('select', $from, $select, $where, $attrs, @limit).
sub _select_args {
  my ($self, $ident, $select, $where, $orig_attrs) = @_;

  my $sql_maker = $self->sql_maker;

  # work on a shallow copy so $orig_attrs is only touched deliberately below
  my $attrs = {
    %$orig_attrs,
    select => $select,
    from   => $ident,
    where  => $where,
  };

  # limits must be pre-validated before anything else happens
  if (defined $attrs->{offset}) {
    $self->throw_exception('A supplied offset attribute must be a non-negative integer')
      if ( $attrs->{offset} =~ /\D/ or $attrs->{offset} < 0 );
  }

  if (defined $attrs->{rows}) {
    $self->throw_exception("The rows attribute must be a positive integer if present")
      if ( $attrs->{rows} =~ /\D/ or $attrs->{rows} <= 0 );
  }
  elsif ($attrs->{offset}) {
    # offset without rows: emit the dialect's largest representable rowcount
    $attrs->{rows} = $sql_maker->__max_int;
  }

  my ($prefetch_needs_subquery, @limit_args);

  if ( $attrs->{_grouped_by_distinct} and $attrs->{collapse} ) {
    # the group_by was generated by us and is known to cover only non-multi
    # pieces - no need to analyze it, a subquery is required
    $prefetch_needs_subquery = 1;
  }
  elsif (
    # even without collapse, a grouped select may need wrapping so that
    # engines enforcing select/group_by equivalence stay happy; but only
    # when the group_by references no aliases other than our own
    ( $prefetch_needs_subquery or ! $attrs->{_simple_passthrough_construction} )
      and
    $attrs->{group_by}
      and
    @{$attrs->{group_by}}
      and
    my $grp_aliases = try {
      # try{} because $attrs->{from} may well be unanalyzable
      $self->_resolve_aliastypes_from_select_args({
        from => $attrs->{from}, group_by => $attrs->{group_by}
      })
    }
  ) {
    # subquery only when every grouping alias is our own
    $prefetch_needs_subquery = ! scalar grep
      { $_ ne $attrs->{alias} }
      keys %{ $grp_aliases->{grouping} || {} };
  }
  elsif ( $attrs->{rows} && $attrs->{collapse} ) {
    # a limit with active collapse always requires the subquery treatment
    $prefetch_needs_subquery = 1;
  }

  if ($prefetch_needs_subquery) {
    $attrs = $self->_adjust_select_args_for_complex_prefetch ($attrs);
  }
  elsif (! $attrs->{software_limit} ) {
    push @limit_args, (
      $attrs->{rows}   || (),
      $attrs->{offset} || (),
    );
  }

  # try to simplify the joinmap further (the prefetch adjustment above
  # already pruned for us when it ran)
  if (
    ! $prefetch_needs_subquery
      and
    ref $attrs->{from}
      and
    reftype $attrs->{from} eq 'ARRAY'
      and
    @{$attrs->{from}} != 1
  ) {
    ( $attrs->{from}, $attrs->{_aliastypes} ) = $self->_prune_unused_joins ($attrs);
  }

  # the result-inflation stage needs to know the aliastype map exactly as
  # the SQL maker saw it - stash it back on the caller-owned attrs
  $orig_attrs->{_last_sqlmaker_alias_map} = $attrs->{_aliastypes};

  return ( 'select', @{$attrs}{qw(from select where)}, $attrs, @limit_args );
}
# Select-list used for count-like operations: a plain COUNT(*).
sub _count_select {
  return { count => '*' };
}
# SELECT entry point: hands the full (unmodified) argument list to a new
# cursor instance, which performs the actual fetch lazily.
sub select {
  my $self = shift;
  my ($ident, $select, $condition, $attrs) = @_;

  return $self->cursor_class->new($self, \@_, $attrs);
}
# Run a select and return exactly one row as a list. Warns (deprecation)
# when the query produced more than one row. Always finishes the handle
# to work around broken DBDs.
sub select_single {
  my $self = shift;

  my ($rv, $sth, @bind) = $self->_select(@_);

  my @row = $sth->fetchrow_array;

  # NOTE(review): "my ... if" is kept verbatim from the original - only
  # probe for a second row when the first fetch produced one
  my @nextrow = $sth->fetchrow_array if @row;

  if (@row && @nextrow) {
    carp "Query returned more than one row. SQL that returns multiple rows is DEPRECATED for ->find and ->single";
  }

  # finish() is required to appease some DBDs
  $sth->finish();

  return @row;
}
# Introspect column metadata for $table.
#
# Preferred path: the DBD's own column_info() (skipped under the
# STRESSTEST_COLUMN_INFO_UNAWARE_STORAGE env knob). Fallback: prepare a
# never-matching select and read metadata off the statement handle, then
# best-effort translate numeric type codes via type_info().
#
# FIX: the legacy "shrink text size by 4" guard was written as
# ( $inf->{size}||0 > 4 ), which parses as $inf->{size} || (0 > 4) - i.e.
# "size is truthy" - letting sizes of 4 or less go negative. Parenthesized
# to the clearly intended ( ($inf->{size}||0) > 4 ).
sub _dbh_columns_info_for {
  my ($self, $dbh, $table) = @_;

  my %result;

  if (! DBIx::Class::_ENV_::STRESSTEST_COLUMN_INFO_UNAWARE_STORAGE and $dbh->can('column_info')) {
    try {
      # split an optional schema qualifier off the table name
      my ($schema, $tab) = $table =~ /^(.+?)\.(.+)$/ ? ($1, $2) : (undef, $table);

      my $sth = $dbh->column_info( undef, $schema, $tab, '%' );
      $sth->execute();

      while ( my $info = $sth->fetchrow_hashref() ) {
        my %column_info;
        $column_info{data_type}     = $info->{TYPE_NAME};
        $column_info{size}          = $info->{COLUMN_SIZE};
        $column_info{is_nullable}   = $info->{NULLABLE} ? 1 : 0;
        $column_info{default_value} = $info->{COLUMN_DEF};

        # some DBDs return the column name pre-quoted
        my $col_name = $info->{COLUMN_NAME};
        $col_name =~ s/^\"(.*)\"$/$1/;

        $result{$col_name} = \%column_info;
      }
    }
    catch {
      # column_info() blew up - fall through to the select-based fallback
      %result = ();
    };

    return \%result if keys %result;
  }

  # fallback: a select that can never match still populates $sth metadata
  my $sth = $dbh->prepare($self->sql_maker->select($table, undef, \'1 = 0'));
  $sth->execute;

  my ($columns, $seen_lcs);

  # index every column by name, remembering how often each lowercased
  # name was seen
  ++$seen_lcs->{lc($_)} and $columns->{$_} = {
    idx     => scalar keys %$columns,
    name    => $_,
    lc_name => lc($_),
  } for @{$sth->{NAME}};

  # when a lowercased name is unambiguous, prefer it as the result key
  $seen_lcs->{$_->{lc_name}} == 1
    and
  $_->{name} = $_->{lc_name}
    for values %$columns;

  for ( values %$columns ) {
    my $inf = {
      data_type   => $sth->{TYPE}->[$_->{idx}],
      size        => $sth->{PRECISION}->[$_->{idx}],
      is_nullable => $sth->{NULLABLE}->[$_->{idx}] ? 1 : 0,
    };

    # split "type(size)" declarations apart
    if ($inf->{data_type} =~ m/^(.*?)\((.*?)\)$/) {
      @{$inf}{qw( data_type size )} = ($1, $2);
    }

    $result{$_->{name}} = $inf;
  }

  $sth->finish;

  if ($dbh->can('type_info')) {
    for my $inf (values %result) {
      next if ! defined $inf->{data_type};

      # map the numeric type code to a name, skipping the column on any
      # missing piece (the nested "|| next" mirrors the original behavior)
      $inf->{data_type} = (
        (
          (
            $dbh->type_info( $inf->{data_type} )
              ||
            next
          )
            ||
          next
        )->{TYPE_NAME}
          ||
        next
      );

      # legacy adjustment - presumably an artifact of an old DBD;
      # FIXED precedence: compare (size||0) against 4, not size||(0>4)
      $inf->{size} -= 4 if (
        ( ($inf->{size}||0) > 4 )
          and
        $inf->{data_type} =~ qr/^text$/i
      );
    }
  }

  return \%result;
}
# Public column-introspection wrapper: ensures a live $dbh and delegates.
sub columns_info_for {
  my ($self, $table) = @_;
  $self->_dbh_columns_info_for( $self->_get_dbh, $table );
}
# Generic last-insert-id retrieval via DBI's last_insert_id(); storages with
# driver-specific mechanisms override this. Throws when DBI can't help.
sub _dbh_last_insert_id {
  my ($self, $dbh, $source, $col) = @_;

  # try{} - many DBDs simply die here instead of returning undef
  my $id = try { $dbh->last_insert_id (undef, undef, $source->name, $col) };

  return $id if defined $id;

  my $class = ref $self;
  $self->throw_exception ("No storage specific _dbh_last_insert_id() method implemented in $class, and the generic DBI::last_insert_id() failed");
}
# Public wrapper: forwards to _dbh_last_insert_id with the current $dbh.
sub last_insert_id {
  my $self = shift;
  $self->_dbh_last_insert_id( $self->_dbh, @_ );
}
# No generic mapping from DBIC types to native driver types - subclasses
# with driver knowledge override this.
sub _native_data_type {
  return undef
}
# Probe whether the driver accepts bind placeholders at all by attempting
# a trivial parameterized statement. Returns 1 on success, 0 on any error.
sub _determine_supports_placeholders {
  my $self = shift;
  my $dbh  = $self->_get_dbh;

  # some drivers advertise this via $dbh attributes, but those flags are
  # unreliable - an actual probe is authoritative
  return try {
    local $dbh->{PrintError} = 0;
    local $dbh->{RaiseError} = 1;
    $dbh->do('select ?', {}, 1);
    1;
  }
  catch {
    0;
  };
}
# Probe whether the driver can use placeholders without explicit type
# information (some engines require typed binds). 1 on success, 0 otherwise.
sub _determine_supports_typeless_placeholders {
  my $self = shift;
  my $dbh  = $self->_get_dbh;

  return try {
    local $dbh->{PrintError} = 0;
    local $dbh->{RaiseError} = 1;
    # this specific query requires the bind to be typeless-capable
    $dbh->do('select 1 where 1 = ?', {}, 1);
    1;
  }
  catch {
    0;
  };
}
# SQL::Translator producer type - defaults to the DBI driver name.
sub sqlt_type {
  shift->_get_dbh->{Driver}->{Name};
}
# No DBI bind attributes by default; driver storages override this to map
# a sqlt data type to a DBI type attribute.
sub bind_attribute_by_data_type {
  return;
}
# True when the supplied data type name ($_[1]) denotes a numeric SQL type.
# Returns 0 for a missing/empty type; otherwise the result of the match.
sub is_datatype_numeric {
  return 0 unless $_[1];

  # /x: whitespace in the pattern is insignificant
  $_[1] =~ /^ (?:
    numeric | int (?:eger)? | (?:tiny|small|medium|big) int
    | dec(?:imal)? | real | float | double (?: \s+ precision)? | (?:big)?serial
  ) $/ix;
}
# Dump DDL files (and optional diffs against $preversion) for each database
# type in @$databases into $dir, using SQL::Translator.
#
# Arguments: ($self, $schema, \@databases, $version, $dir, $preversion, \%sqltargs)
# Side effects: creates $dir if missing, writes/overwrites DDL and diff files,
# carps on every non-fatal problem and skips to the next database.
#
# FIX: both file writes used insecure 2-arg open with the mode interpolated
# into the filename ('>$filename' / '>$difffile'); converted to 3-arg open.
sub create_ddl_dir {
  my ($self, $schema, $databases, $version, $dir, $preversion, $sqltargs) = @_;

  unless ($dir) {
    carp "No directory given, using ./\n";
    $dir = './';
  }
  else {
    -d $dir
      or
    ( require File::Path and File::Path::mkpath (["$dir"]) )
      or
    $self->throw_exception(
      "Failed to create '$dir': " . ($! || $@ || 'error unknown')
    );
  }

  $self->throw_exception ("Directory '$dir' does not exist\n") unless (-d $dir);

  $databases ||= ['MySQL', 'SQLite', 'PostgreSQL'];
  $databases = [ $databases ] if (ref($databases) ne 'ARRAY');

  my $schema_version = $schema->schema_version || '1.x';
  $version ||= $schema_version;

  # caller-supplied args win over these defaults
  $sqltargs = {
    add_drop_table          => 1,
    ignore_constraint_names => 1,
    ignore_index_names      => 1,
    quote_identifiers       => $self->sql_maker->_quoting_enabled,
    %{$sqltargs || {}}
  };

  if (my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('deploy')) {
    $self->throw_exception("Can't create a ddl file without $missing");
  }

  my $sqlt = SQL::Translator->new( $sqltargs );

  $sqlt->parser('SQL::Translator::Parser::DBIx::Class');
  my $sqlt_schema = $sqlt->translate({ data => $schema })
    or $self->throw_exception ($sqlt->error);

  foreach my $db (@$databases) {
    $sqlt->reset();
    $sqlt->{schema} = $sqlt_schema;
    $sqlt->producer($db);

    my $file;
    my $filename = $schema->ddl_filename($db, $version, $dir);
    if (-e $filename && ( $version eq $schema_version )) {
      # dumping the current version - overwrite the existing DDL
      carp "Overwriting existing DDL file - $filename";
      unlink($filename);
    }

    my $output = $sqlt->translate;
    if (! $output) {
      carp("Failed to translate to $db, skipping. (" . $sqlt->error . ")");
      next;
    }

    # 3-arg open (was: open($file, ">$filename"))
    if (! open($file, '>', $filename)) {
      $self->throw_exception("Can't open $filename for writing ($!)");
      next;
    }
    print $file $output;
    close($file);

    next unless ($preversion);

    my $prefilename = $schema->ddl_filename($db, $preversion, $dir);
    if (! -e $prefilename) {
      carp("No previous schema file found ($prefilename)");
      next;
    }

    my $difffile = $schema->ddl_filename($db, $version, $dir, $preversion);
    if (-e $difffile) {
      carp("Overwriting existing diff file - $difffile");
      unlink($difffile);
    }

    # parse the previous version's DDL back into an SQLT schema
    my $source_schema;
    {
      my $t = SQL::Translator->new($sqltargs);
      $t->debug( 0 );
      $t->trace( 0 );

      $t->parser( $db )
        or $self->throw_exception ($t->error);

      my $out = $t->translate( $prefilename )
        or $self->throw_exception ($t->error);

      $source_schema = $t->schema;

      $source_schema->name( $prefilename )
        unless ( $source_schema->name );
    }

    # "new style" producers can diff a file against a DBIC->SQLT schema
    # directly; old-style ones need parsed-SQL vs parsed-SQL
    my $dest_schema = $sqlt_schema;

    unless ( "SQL::Translator::Producer::$db"->can('preprocess_schema') ) {
      my $t = SQL::Translator->new($sqltargs);
      $t->debug( 0 );
      $t->trace( 0 );

      $t->parser( $db )
        or $self->throw_exception ($t->error);

      my $out = $t->translate( $filename )
        or $self->throw_exception ($t->error);

      $dest_schema = $t->schema;

      $dest_schema->name( $filename )
        unless $dest_schema->name;
    }

    my $diff = do {
      # NOTE(review): deliberate "local ... if ..." hack - temporarily
      # undefines the SQLite producer's NO_QUOTES flag only when it is set;
      # kept as-is from the original
      local $SQL::Translator::Producer::SQLite::NO_QUOTES
        if $SQL::Translator::Producer::SQLite::NO_QUOTES;

      SQL::Translator::Diff::schema_diff(
        $source_schema, $db,
        $dest_schema,   $db,
        $sqltargs
      );
    };

    # 3-arg open (was: open $file, ">$difffile")
    if (! open $file, '>', $difffile) {
      $self->throw_exception("Can't write to $difffile ($!)");
      next;
    }
    print $file $diff;
    close($file);
  }
}
# Return deployment DDL for $schema as a single string.
#
# Prefers a pregenerated DDL file (via $schema->ddl_filename); otherwise
# generates statements on the fly with SQL::Translator, throwing when the
# 'deploy' optional dependencies are missing or translation fails.
#
# FIX: reading the pregenerated file used insecure 2-arg open with the mode
# interpolated into the filename ('<$filename'); converted to 3-arg open.
sub deployment_statements {
  my ($self, $schema, $type, $version, $dir, $sqltargs) = @_;

  $type    ||= $self->sqlt_type;
  $version ||= $schema->schema_version || '1.x';
  $dir     ||= './';

  my $filename = $schema->ddl_filename($type, $version, $dir);

  if (-f $filename) {
    my $file;
    # 3-arg open (was: open($file, "<$filename"))
    open($file, '<', $filename)
      or $self->throw_exception("Can't open $filename ($!)");
    my @rows = <$file>;
    close($file);
    return join('', @rows);
  }

  if (my $missing = DBIx::Class::Optional::Dependencies->req_missing_for ('deploy') ) {
    $self->throw_exception("Can't deploy without a pregenerated 'ddl_dir' directory or $missing");
  }

  # 'sources' belongs in parser_args, but accept it at top level for
  # convenience
  $sqltargs->{parser_args}{sources} = delete $sqltargs->{sources}
    if exists $sqltargs->{sources};

  # quoting follows the storage's SQL maker unless explicitly overridden
  $sqltargs->{quote_identifiers} = $self->sql_maker->_quoting_enabled
    unless exists $sqltargs->{quote_identifiers};

  my $tr = SQL::Translator->new(
    producer => "SQL::Translator::Producer::${type}",
    %$sqltargs,
    parser => 'SQL::Translator::Parser::DBIx::Class',
    data   => $schema,
  );

  # preserve list/scalar context of translate() while still throwing on
  # an undef result
  return preserve_context {
    $tr->translate
  } after => sub {
    $self->throw_exception( 'Unable to produce deployment statements: ' . $tr->error)
      unless defined $_[0];
  };
}
# Execute the schema's deployment statements against this storage.
# Comment lines, transaction wrappers, and blank statements are skipped;
# per-statement errors are carped (not fatal) so deployment continues.
sub deploy {
  my ($self, $schema, $type, $sqltargs, $dir) = @_;

  # run a single statement, filtering out anything non-executable
  my $deploy = sub {
    my $line = shift;
    return if (! $line);
    return if ($line =~ /^--/);
    return if ($line =~ /^BEGIN TRANSACTION/m);
    return if ($line =~ /^COMMIT/m);
    return if $line =~ /^\s+$/; # skip whitespace-only lines

    $self->_query_start($line);
    try {
      # a dbh_do cycle gives us proper error trapping, even though any
      # error is deliberately downgraded to a warning below
      $self->dbh_do (sub { $_[1]->do($line) });
    }
    catch {
      carp qq{$_ (running "${line}")};
    };
    $self->_query_end($line);
  };

  my @statements = $schema->deployment_statements(
    $type, undef, $dir, { %{ $sqltargs || {} }, no_comments => 1 }
  );

  if (@statements > 1) {
    # already split into individual statements
    foreach my $statement (@statements) {
      $deploy->( $statement );
    }
  }
  elsif (@statements == 1) {
    # a single blob - split on single-line comments and statement ends
    foreach my $line ( split(/\s*--.*\n|;\n/, $statements[0]) ) {
      $deploy->( $line );
    }
  }
}
# Memoized accessor for the datetime parser object/class; built on first use.
sub datetime_parser {
  my $self = shift;
  return $self->{datetime_parser} ||= do {
    $self->build_datetime_parser(@_);
  };
}
# Default parser builder: simply returns the configured parser type
# (class name). Driver storages override this to load/instantiate one.
sub build_datetime_parser {
  my $self = shift;
  my $type = $self->datetime_parser_type(@_);
  return $type;
}
# A plain DBI storage is never replicated - returns empty/undef.
sub is_replicating {
  return;
}
# No replication lag concept for a plain DBI storage - returns empty/undef.
sub lag_behind_master {
  return;
}
# Derive a table alias from a relationship name. When the same rel is
# joined more than once, the join count is appended ("rel_2", "rel_3", ...).
sub relname_to_table_alias {
  my ($self, $relname, $join_count) = @_;

  my $alias = (
    $join_count && $join_count > 1
      ? join('_', $relname, $join_count)
      : $relname
  );

  return $alias;
}
# Estimate the maximum byte size of a column from its sqlt attributes,
# falling back to the connection's LongReadLen and finally 8000.
# Char/binary types use the declared size directly; national/unicode
# types assume up to 4 bytes per character; other sized non-LOB types
# get a flat 100-byte guess.
sub _max_column_bytesize {
  my ($self, $attr) = @_;

  my $max_size;

  if ($attr->{sqlt_datatype}) {
    my $data_type = lc($attr->{sqlt_datatype});

    if ($attr->{sqlt_size}) {

      # byte-sized character / binary types - size is already in bytes
      if ($data_type =~ /^(?:
          l? (?:var)? char(?:acter)? (?:\s*varying)?
            |
          (?:var)? binary (?:\s*varying)?
            |
          raw
        )\b/x
      ) {
        $max_size = $attr->{sqlt_size};
      }
      # national/unicode character types - assume up to 4 bytes per char
      elsif ($data_type =~ /^(?:
          national \s* character (?:\s*varying)?
            |
          nchar
            |
          univarchar
            |
          nvarchar
        )\b/x
      ) {
        $max_size = $attr->{sqlt_size} * 4;
      }
    }

    # anything else that is not a LOB gets a conservative flat guess
    if (! $max_size and ! $self->_is_lob_type($data_type)) {
      $max_size = 100
    }
  }

  # implicit return: first truthy of computed size / connect attr /
  # live handle attr / hard default
  $max_size
    ||
  $self->_dbic_connect_attributes->{LongReadLen}
    ||
  $self->_get_dbh->{LongReadLen}
    ||
  8000;
}
# True when $data_type names any large-object type (binary or text LOBs,
# including the various LONG... spellings).
sub _is_lob_type {
  my ($self, $data_type) = @_;
  $data_type && ($data_type =~ /lob|bfile|text|image|bytea|memo/i
    || $data_type =~ /^long(?:\s+(?:raw|bit\s*varying|varbit|binary
                          |varchar|character\s*varying|nvarchar
                          |national\s*character\s*varying))?\z/xi);
}
# True when $data_type names a *binary* large-object type specifically.
sub _is_binary_lob_type {
  my ($self, $data_type) = @_;
  $data_type && ($data_type =~ /blob|bfile|image|bytea/i
    || $data_type =~ /^long(?:\s+(?:raw|bit\s*varying|varbit|binary))?\z/xi);
}
# True when $data_type names a *textual* large-object type specifically
# (clob/memo or a LONG character variant).
sub _is_text_lob_type {
  my ($self, $data_type) = @_;
  $data_type && ($data_type =~ /^(?:clob|memo)\z/i
    || $data_type =~ /^long(?:\s+(?:varchar|character\s*varying|nvarchar
                          |national\s*character\s*varying))\z/xi);
}
# True when $data_type is any binary type: a binary LOB, or one of the
# fixed/varying binary / bit / graphic families.
sub _is_binary_type {
  my ($self, $data_type) = @_;
  $data_type && ($self->_is_binary_lob_type($data_type)
    || $data_type =~ /(?:var)?(?:binary|bit|graphic)(?:\s*varying)?/i);
}
1; # a require()'d module must end with a true value
=head1 USAGE NOTES
=head2 DBIx::Class and AutoCommit
DBIx::Class can do some wonderful magic with handling exceptions,
disconnections, and transactions when you use C<< AutoCommit => 1 >>
(the default) combined with L<txn_do|DBIx::Class::Storage/txn_do> for
transaction support.
If you set C<< AutoCommit => 0 >> in your connect info, then you are always
in an assumed transaction between commits, and you're telling us you'd
like to manage that manually. A lot of the magic protections offered by
this module will go away. We can't protect you from exceptions due to database
disconnects because we don't know anything about how to restart your
transactions. You're on your own for handling all sorts of exceptional
cases if you choose the C<< AutoCommit => 0 >> path, just as you would
be with raw DBI.
=head1 FURTHER QUESTIONS?
Check the list of L<additional DBIC resources|DBIx::Class/GETTING HELP/SUPPORT>.
=head1 COPYRIGHT AND LICENSE
This module is free software L<copyright|DBIx::Class/COPYRIGHT AND LICENSE>
by the L<DBIx::Class (DBIC) authors|DBIx::Class/AUTHORS>. You can
redistribute it and/or modify it under the same terms as the
L<DBIx::Class library|DBIx::Class/COPYRIGHT AND LICENSE>.