@@ -2,12 +2,17 @@
DBI::Changes - List of significant changes to the DBI
-(As of $Date: 2007-05-13 22:54:00 +0100 (Sun, 13 May 2007) $ $Revision: 9564 $)
+(As of $Date: 2007-06-18 15:19:45 +0100 (Mon, 18 Jun 2007) $ $Revision: 9659 $)
=cut
Assorted TODO notes:
+Protect trace_msg from SIGPIPE?
+
+Add count of identical frozen_request (plus sum(results size)) to Gofer status
+Highlight those seen before.
+
Move post-request cleanup into separate method and enable hooks so
it can be done after the response has been sent
@@ -23,7 +28,6 @@ Policy for dbh attr FETCH (ie example_driver_path)
or piggyback on skip_connect_check
could also remember which attr have been returned to us
so not bother FETCHing them (unless pedantic)
-Refactor http transport like the others re timeout
Call method on transport failure so transport can cleanup/reset it it wants
prepare(...,{ Err=>\my $isolated_err, ...})
@@ -33,7 +37,56 @@ Or call _new_child and move to DBI::common?
Add trace modules that just records the last N trace messages into an array
and prepends them to any error message.
-=head2 Changes in DBI 1.56 (svn rev 9561), 13th May 2007
+=head2 Changes in DBI 1.57 (svn rev 9639), 13th June 2007
+
+ Note: this release includes a change to the DBI::hash() function which will
+ now produce different values than before *if* your perl was built with 64-bit
+ 'int' type (i.e. "perl -V:intsize" says intsize='8'). It's relatively rare
+ for perl to be configured that way, even on 64-bit systems.
+
+ Fixed XS versions of select*_*() methods to call execute()
+ fetch() etc., with inner handle instead of outer.
+ Fixed execute_for_fetch() to not cache errstr values
+ thanks to Bart Degryse.
+ Fixed unused var compiler warning thanks to JDHEDDEN.
+ Fixed t/86gofer_fail tests to be less likely to fail falsely.
+
+ Changed DBI::hash to return 'I32' type instead of 'int' so results are
+ portable/consistent regardless of size of the int type.
+ Corrected timeout example in docs thanks to Egmont Koblinger.
+ Changed t/01basic.t to warn instead of failing when it detects
+ a problem with Math::BigInt (some recent versions had problems).
+
+ Added support for !Time and !Time~N to DBI::Profile Path. See docs.
+ Added extra trace info to connect_cached thanks to Walery Studennikov.
+ Added non-random (deterministic) mode to DBI_GOFER_RANDOM mechanism.
+ Added DBIXS_REVISION macro that drivers can use.
+ Added more docs for private_attribute_info() method.
+
+ DBI::Profile changes:
+ dbi_profile() now returns ref to relevant leaf node.
+ Don't profile DESTROY during global destruction.
+ Added as_node_path_list() and as_text() methods.
+ DBI::ProfileDumper changes:
+ Don't write file if there's no profile data.
+ Uses full natural precision when saving data (was using %.6f)
+ Optimized flush_to_disk().
+ Locks the data file while writing.
+ Enabled filename to be a code ref for dynamic names.
+ DBI::ProfileDumper::Apache changes:
+ Added Quiet=>1 to avoid write to STDERR in flush_to_disk().
+ Added Dir=>... to specify a writable destination directory.
+ Enabled DBI_PROFILE_APACHE_LOG_DIR for mod_perl 1 as well as 2.
+ Added parent pid to default data file name.
+ DBI::ProfileData changes:
+ Added DeleteFiles option to rename & delete files once read.
+ Locks the data files while reading.
+ Added ability to sort by Path elements.
+ dbiprof changes:
+ Added --dumpnodes and --delete options.
+ Added/updated docs for both DBI::ProfileDumper && ::Apache.
+
+=head2 Changes in DBI 1.56 (svn rev 9561), 13th May 2007
Fixed printf arg warnings thanks to JDHEDDEN.
Fixed returning driver-private sth attributes via gofer.
@@ -44,6 +97,8 @@ and prepends them to any error message.
Changed tests to workaround Math::BigInt broken versions.
Changed dbi_profile_merge() to dbi_profile_merge_nodes()
old name still works as an alias for the new one.
+ Removed old DBI internal sanity check that's no longer valid
+ causing "panic: DESTROY (dbih_clearcom)" when tracing enabled
Added DBI_GOFER_RANDOM env var that can be use to trigger random
failures and delays when executing gofer requests. Designed to help
@@ -1,4 +1,4 @@
-# $Id: DBI.pm 9564 2007-05-13 21:54:00Z timbo $
+# $Id: DBI.pm 9655 2007-06-15 11:40:30Z timbo $
# vim: ts=8:sw=4
#
# Copyright (c) 1994-2007 Tim Bunce Ireland
@@ -9,7 +9,7 @@
require 5.006_00;
BEGIN {
-$DBI::VERSION = "1.56"; # ==> ALSO update the version in the pod text below!
+$DBI::VERSION = "1.57"; # ==> ALSO update the version in the pod text below!
}
=head1 NAME
@@ -120,8 +120,8 @@ Tim he's very likely to just forward it to the mailing list.
=head2 NOTES
-This is the DBI specification that corresponds to the DBI version 1.56
-($Revision: 9564 $).
+This is the DBI specification that corresponds to the DBI version 1.57
+($Revision: 9655 $).
The DBI is evolving at a steady pace, so it's good to check that
you have the latest copy.
@@ -1426,9 +1426,11 @@ sub _new_sth { # called by DBD::<drivername>::db::prepare)
my @attr_keys = $attr ? sort keys %$attr : ();
my $key = do { local $^W; # silence undef warnings
- join "~~", $dsn, $user||'', $auth||'', $attr ? (@attr_keys,@{$attr}{@attr_keys}) : ()
+ join "~~", $dsn, $user, $auth, $attr ? (@attr_keys,@{$attr}{@attr_keys}) : ()
};
my $dbh = $cache->{$key};
+ $drh->trace_msg(sprintf(" connect_cached: key %s, cached dbh %s\n", DBI::neat($key), DBI::neat($dbh)))
+ if $DBI::dbi_debug >= 4;
my $cb = $attr->{Callbacks}; # take care not to autovivify
if ($dbh && $dbh->FETCH('Active') && eval { $dbh->ping }) {
# If the caller has provided a callback then call it
@@ -1919,7 +1921,7 @@ sub _new_sth { # called by DBD::<drivername>::db::prepare)
($tuple_status) ? @$tuple_status = () : $tuple_status = [];
my $rc_total = 0;
- my ($err_count, %errstr_cache);
+ my $err_count;
while ( my $tuple = &$fetch_tuple_sub() ) {
if ( my $rc = $sth->execute(@$tuple) ) {
push @$tuple_status, $rc;
@@ -1927,10 +1929,9 @@ sub _new_sth { # called by DBD::<drivername>::db::prepare)
}
else {
$err_count++;
- my $err = $sth->err;
- push @$tuple_status, [ $err, $errstr_cache{$err} ||= $sth->errstr, $sth->state ];
+ push @$tuple_status, [ $sth->err, $sth->errstr, $sth->state ];
# XXX drivers implementing execute_for_fetch could opt to "last;" here
- # if the know the error code means no further executes will work.
+ # if they know the error code means no further executes will work.
}
}
my $tuples = @$tuple_status;
@@ -3233,8 +3234,27 @@ The parse_trace_flag() method was added in DBI 1.42.
$hash_ref = $h->private_attribute_info();
Returns a reference to a hash whose keys are the names of driver-private
-attributes available for that kind of handle (driver, database, statement).
-(The values should be undef. Meanings may be assigned to particular values in future.)
+attributes available for the kind of handle (driver, database, statement)
+that the method was called on.
+
+For example, the return value when called with a DBD::Sybase $dbh could look like this:
+
+ {
+ syb_dynamic_supported => undef,
+ syb_oc_version => undef,
+ syb_server_version => undef,
+ syb_server_version_string => undef,
+ }
+
+and when called with a DBD::Sybase $sth it could look like this:
+
+ {
+ syb_types => undef,
+ syb_proc_status => undef,
+ syb_result_type => undef,
+ }
+
+The values should be undef. Meanings may be assigned to particular values in future.
=head3 C<swap_inner_handle>
@@ -6731,7 +6751,7 @@ to be delivered $seconds in the future. For example:
alarm(0); # cancel alarm (if code ran fast)
};
alarm(0); # cancel alarm (if eval failed)
- if ( $@ eq "TIMEOUT" ) { ... }
+ if ( $@ eq "TIMEOUT\n" ) { ... }
Unfortunately, as described above, this won't always work as expected,
depending on your perl version and the underlying database code.
@@ -1,6 +1,6 @@
/* vim: ts=8:sw=4
*
- * $Id: DBI.xs 9564 2007-05-13 21:54:00Z timbo $
+ * $Id: DBI.xs 9659 2007-06-18 14:19:45Z timbo $
*
* Copyright (c) 1994-2003 Tim Bunce Ireland.
*
@@ -78,7 +78,7 @@ static int dbih_sth_bind_col _((SV *sth, SV *col, SV *ref, SV *attribs));
static int set_err_char _((SV *h, imp_xxh_t *imp_xxh, const char *err_c, IV err_i, const char *errstr, const char *state, const char *method));
static int set_err_sv _((SV *h, imp_xxh_t *imp_xxh, SV *err, SV *errstr, SV *state, SV *method));
static int quote_type _((int sql_type, int p, int s, int *base_type, void *v));
-static int dbi_hash _((const char *string, long i));
+static I32 dbi_hash _((const char *string, long i));
static void dbih_dumphandle _((pTHX_ SV *h, const char *msg, int level));
static int dbih_dumpcom _((pTHX_ imp_xxh_t *imp_xxh, const char *msg, int level));
char *neatsvpv _((SV *sv, STRLEN maxlen));
@@ -571,8 +571,10 @@ mkvname(pTHX_ HV *stash, const char *item, int uplevel) /* construct a variable
return SvPV_nolen(sv);
}
+/* 32 bit magic FNV-0 and FNV-1 prime */
+#define FNV_32_PRIME ((UV)0x01000193)
-static int
+static I32
dbi_hash(const char *key, long type)
{
if (type == 0) {
@@ -582,14 +584,14 @@ dbi_hash(const char *key, long type)
hash = hash * 33 + *key++;
hash &= 0x7FFFFFFF; /* limit to 31 bits */
hash |= 0x40000000; /* set bit 31 */
- return -(int)hash; /* return negative int */
+ return -(I32)hash; /* return negative int */
}
else if (type == 1) { /* Fowler/Noll/Vo hash */
/* see http://www.isthe.com/chongo/tech/comp/fnv/ */
U32 hash = 0x811c9dc5;
const unsigned char *s = (unsigned char *)key; /* unsigned string */
while (*s) {
- /* multiply by the 32 bit FNV magic prime mod 2^64 */
+ /* multiply by the 32 bit FNV magic prime mod 2^32 */
hash *= FNV_32_PRIME;
/* xor the bottom with the current octet */
hash ^= (U32)*s++;
@@ -2295,7 +2297,7 @@ _profile_next_node(SV *node, const char *name)
}
-static void
+static SV*
dbi_profile(SV *h, imp_xxh_t *imp_xxh, SV *statement_sv, SV *method, NV t1, NV t2)
{
#define DBIprof_MAX_PATH_ELEM 100
@@ -2313,6 +2315,7 @@ dbi_profile(SV *h, imp_xxh_t *imp_xxh, SV *statement_sv, SV *method, NV t1, NV t
HV *dbh_outer_hv = NULL;
HV *dbh_inner_hv = NULL;
char *statement_pv;
+ char *method_pv;
SV *profile;
SV *tmp;
SV *dest_node;
@@ -2323,10 +2326,18 @@ dbi_profile(SV *h, imp_xxh_t *imp_xxh, SV *statement_sv, SV *method, NV t1, NV t
const int parent_call_depth = DBIc_PARENT_COM(imp_xxh) ? DBIc_CALL_DEPTH(DBIc_PARENT_COM(imp_xxh)) : 0;
/* Only count calls originating from the application code */
if (call_depth > 1 || parent_call_depth > 0)
- return;
+ return &sv_undef;
if (!DBIc_has(imp_xxh, DBIcf_Profile))
- return;
+ return &sv_undef;
+
+ method_pv = (SvTYPE(method)==SVt_PVCV)
+ ? GvNAME(CvGV(method))
+ : (isGV(method) ? GvNAME(method) : SvPV_nolen(method));
+
+ /* we don't profile DESTROY during global destruction */
+ if (dirty && instr(method_pv, "DESTROY"))
+ return &sv_undef;
h_hv = (HV*)SvRV(dbih_inner(aTHX_ h, "dbi_profile"));
@@ -2335,9 +2346,9 @@ dbi_profile(SV *h, imp_xxh_t *imp_xxh, SV *statement_sv, SV *method, NV t1, NV t
mg_get(profile); /* FETCH */
if (!profile || !SvROK(profile)) {
DBIc_set(imp_xxh, DBIcf_Profile, 0); /* disable */
- if (!dirty)
+ if (SvOK(profile) && !dirty)
warn("Profile attribute isn't a hash ref (%s,%ld)", neatsvpv(profile,0), (long)SvTYPE(profile));
- return;
+ return &sv_undef;
}
/* statement_sv: undef = use $h->{Statement}, "" (&sv_no) = use empty string */
@@ -2349,9 +2360,8 @@ dbi_profile(SV *h, imp_xxh_t *imp_xxh, SV *statement_sv, SV *method, NV t1, NV t
statement_pv = SvPV_nolen(statement_sv);
if (DBIc_DBISTATE(imp_xxh)->debug >= 4)
- PerlIO_printf(DBIc_LOGPIO(imp_xxh), " dbi_profile %s %fs %s\n",
- neatsvpv((SvTYPE(method)==SVt_PVCV) ? (SV*)CvGV(method) : method, 0),
- ti, neatsvpv(statement_sv,0));
+ PerlIO_printf(DBIc_LOGPIO(imp_xxh), " dbi_profile +%fs %s %s\n",
+ ti, method_pv, neatsvpv(statement_sv,0));
dest_node = _profile_next_node(profile, "Data");
@@ -2372,9 +2382,6 @@ dbi_profile(SV *h, imp_xxh_t *imp_xxh, SV *statement_sv, SV *method, NV t1, NV t
SV *code_sv = SvRV(pathsv);
I32 items;
I32 item_idx;
- char *method_pv = (SvTYPE(method)==SVt_PVCV)
- ? GvNAME(CvGV(method))
- : (isGV(method) ? GvNAME(method) : SvPV_nolen(method));
EXTEND(SP, 4);
PUSHMARK(SP);
PUSHs(h); /* push inner handle, then others params */
@@ -2399,7 +2406,7 @@ dbi_profile(SV *h, imp_xxh_t *imp_xxh, SV *statement_sv, SV *method, NV t1, NV t
}
PUTBACK;
if (items == -2) /* this profile data was vetoed */
- return;
+ return &sv_undef;
}
else if (SvROK(pathsv)) {
/* only meant for refs to scalars currently */
@@ -2414,10 +2421,7 @@ dbi_profile(SV *h, imp_xxh_t *imp_xxh, SV *statement_sv, SV *method, NV t1, NV t
dest_node = _profile_next_node(dest_node, statement_pv);
}
else if (p[1] == 'M' && strEQ(p, "!MethodName")) {
- p = (SvTYPE(method)==SVt_PVCV)
- ? GvNAME(CvGV(method))
- : (isGV(method) ? GvNAME(method) : SvPV_nolen(method));
- dest_node = _profile_next_node(dest_node, p);
+ dest_node = _profile_next_node(dest_node, method_pv);
}
else if (p[1] == 'M' && strEQ(p, "!MethodClass")) {
if (SvTYPE(method) == SVt_PVCV) {
@@ -2436,7 +2440,7 @@ dbi_profile(SV *h, imp_xxh_t *imp_xxh, SV *statement_sv, SV *method, NV t1, NV t
if (*p == '*') ++p; /* skip past leading '*' glob sigil */
}
else {
- p = SvPV_nolen(method);
+ p = method_pv;
}
dest_node = _profile_next_node(dest_node, p);
}
@@ -2452,6 +2456,17 @@ dbi_profile(SV *h, imp_xxh_t *imp_xxh, SV *statement_sv, SV *method, NV t1, NV t
else if (p[1] == 'C' && strEQ(p, "!Caller2")) {
dest_node = _profile_next_node(dest_node, log_where(0, 0, "", "", 1, 1, 0));
}
+ else if (p[1] == 'T' && (strEQ(p, "!Time") || strnEQ(p, "!Time~", 6))) {
+ char timebuf[20];
+ int factor = 1;
+ if (p[5] == '~') {
+ factor = atoi(&p[6]);
+ if (factor == 0) /* sanity check to avoid div by zero error */
+ factor = 3600;
+ }
+ sprintf(timebuf, "%ld", ((long)(dbi_time()/factor))*factor);
+ dest_node = _profile_next_node(dest_node, timebuf);
+ }
else {
warn("Unknown ! element in DBI::Profile Path: %s", p);
dest_node = _profile_next_node(dest_node, p);
@@ -2463,10 +2478,10 @@ dbi_profile(SV *h, imp_xxh_t *imp_xxh, SV *statement_sv, SV *method, NV t1, NV t
imp_dbh_t *imp_dbh = (DBIc_TYPE(imp_xxh) <= DBIt_DB) ? (imp_dbh_t*)imp_xxh : (imp_dbh_t*)DBIc_PARENT_COM(imp_xxh);
dbh_outer_hv = DBIc_MY_H(imp_dbh);
if (SvTYPE(dbh_outer_hv) != SVt_PVHV)
- return; /* presumably global destruction - bail */
+ return &sv_undef; /* presumably global destruction - bail */
dbh_inner_hv = (HV*)SvRV(dbih_inner(aTHX_ (SV*)dbh_outer_hv, "profile"));
if (SvTYPE(dbh_inner_hv) != SVt_PVHV)
- return; /* presumably global destruction - bail */
+ return &sv_undef; /* presumably global destruction - bail */
}
/* fetch from inner first, then outer if key doesn't exist */
/* (yes, this is an evil premature optimization) */
@@ -2501,11 +2516,9 @@ dbi_profile(SV *h, imp_xxh_t *imp_xxh, SV *statement_sv, SV *method, NV t1, NV t
}
- /* this walk-down-the-tree code should be merged into the loop above */
- tmp = dest_node;
- if (!SvOK(tmp)) {
+ if (!SvOK(dest_node)) {
av = newAV();
- sv_setsv(tmp, newRV_noinc((SV*)av));
+ sv_setsv(dest_node, newRV_noinc((SV*)av));
av_store(av, DBIprof_COUNT, newSViv(1));
av_store(av, DBIprof_TOTAL_TIME, newSVnv(ti));
av_store(av, DBIprof_FIRST_TIME, newSVnv(ti));
@@ -2515,6 +2528,7 @@ dbi_profile(SV *h, imp_xxh_t *imp_xxh, SV *statement_sv, SV *method, NV t1, NV t
av_store(av, DBIprof_LAST_CALLED, newSVnv(t1));
}
else {
+ tmp = dest_node;
if (SvROK(tmp))
tmp = SvRV(tmp);
if (SvTYPE(tmp) != SVt_PVAV)
@@ -2530,9 +2544,10 @@ dbi_profile(SV *h, imp_xxh_t *imp_xxh, SV *statement_sv, SV *method, NV t1, NV t
if (ti > SvNV(tmp)) sv_setnv(tmp, ti);
sv_setnv( *av_fetch(av, DBIprof_LAST_CALLED, 1), t1);
}
- return;
+ return dest_node; /* use with caution - copy first, ie sv_mortalcopy() */
}
+
static void
dbi_profile_merge_nodes(SV *dest, SV *increment)
{
@@ -3986,7 +4001,7 @@ neat(sv, maxlen=0)
(void)cv;
-int
+I32
hash(key, type=0)
const char *key
long type
@@ -4146,7 +4161,7 @@ NV
dbi_time()
-SV *
+void
dbi_profile(h, statement, method, t1, t2)
SV *h
SV *statement
@@ -4155,14 +4170,19 @@ dbi_profile(h, statement, method, t1, t2)
NV t2
CODE:
D_imp_xxh(h);
- (void)cv;
- dbi_profile(h, imp_xxh, statement,
+ SV *leaf = dbi_profile(h, imp_xxh, statement,
SvROK(method) ? SvRV(method) : method,
t1, t2
);
- RETVAL = &sv_undef;
- OUTPUT:
- RETVAL
+ if (DBIc_TRACE_LEVEL(imp_xxh) >= 9)
+ warn("dbi_profile(%s, %s, %f, %f) =%s, gimme=%d",
+ neatsvpv(statement,0), neatsvpv(method,0), t1, t2,
+ neatsvpv(leaf,0), GIMME_V);
+ (void)cv; /* avoid unused var warnings */
+ if (GIMME_V == G_VOID)
+ ST(0) = &sv_undef; /* skip sv_mortalcopy if not needed */
+ else
+ ST(0) = sv_mortalcopy(leaf);
SV *
@@ -4175,7 +4195,8 @@ dbi_profile_merge_nodes(dest, ...)
if (!SvROK(dest) || SvTYPE(SvRV(dest)) != SVt_PVAV)
croak("dbi_profile_merge_nodes(%s,...) not an array reference", neatsvpv(dest,0));
if (items <= 1) {
- (void)cv;
+ (void)cv; /* avoid unused var warnings */
+ (void)ix;
RETVAL = 0;
}
else {
@@ -4381,7 +4402,7 @@ take_imp_data(h)
dbih_getcom2(aTHX_ h, &mg); /* get the MAGIC so we can change it */
imp_xxh_sv = mg->mg_obj; /* take local copy of the imp_data pointer */
mg->mg_obj = Nullsv; /* sever the link from handle to imp_xxh */
- if (DBIc_TRACE_LEVEL(imp_xxh))
+ if (DBIc_TRACE_LEVEL(imp_xxh) >= 9)
sv_dump(imp_xxh_sv);
/* --- housekeeping */
DBIc_ACTIVE_off(imp_xxh); /* silence warning from dbih_clearcom */
@@ -1,4 +1,4 @@
-/* $Id: DBIXS.h 9461 2007-04-26 23:28:15Z timbo $
+/* $Id: DBIXS.h 9659 2007-06-18 14:19:45Z timbo $
*
* Copyright (c) 1994-2002 Tim Bunce Ireland
*
@@ -27,6 +27,9 @@
#undef std
#endif
+/* define DBIXS_REVISION */
+#include "dbixs_rev.h"
+
/* Perl backwards compatibility definitions */
#include "dbipport.h"
@@ -41,6 +44,7 @@
* and learns from the needs of various drivers. See also the
* DBISTATE_VERSION macro below. You can think of DBIXS_VERSION as
* being a compile time check and DBISTATE_VERSION as a runtime check.
+ * By contrast, DBIXS_REVISION is a driver source compatibility tool.
*/
#define DBIXS_VERSION 93
@@ -410,7 +414,7 @@ struct dbistate_st {
AV * (*get_fbav) _((imp_sth_t *imp_sth));
SV * (*make_fdsv) _((SV *sth, const char *imp_class, STRLEN imp_size, const char *col_name));
int (*bind_as_num) _((int sql_type, int p, int s, int *t, void *v));
- int (*hash) _((const char *string, long i));
+ I32 (*hash) _((const char *string, long i));
SV * (*preparse) _((SV *sth, char *statement, IV ps_return, IV ps_accept, void *foo));
SV *neatsvpvlen; /* only show dbgpvlen chars when debugging pv's */
@@ -1,4 +1,4 @@
-# $Id: Driver.xst 9454 2007-04-26 10:26:47Z timbo $
+# $Id: Driver.xst 9631 2007-06-07 12:56:24Z timbo $
# Copyright (c) 1997-2002 Tim Bunce Ireland
# Copyright (c) 2002 Jonathan Leffler
#
@@ -124,6 +124,8 @@ selectall_arrayref(...)
SPAGAIN; SP -= items; /* because stack might have been realloc'd */
if (!SvROK(sth))
XSRETURN_UNDEF;
+ /* switch to inner handle */
+ sth = mg_find(SvRV(sth),'P')->mg_obj;
}
imp_sth = (imp_sth_t*)(DBIh_COM(sth));
/* --- bind_param --- */
@@ -162,6 +164,8 @@ selectrow_arrayref(...)
if (!SvROK(sth)) {
if (is_selectrow_array) { XSRETURN_EMPTY; } else { XSRETURN_UNDEF; }
}
+ /* switch to inner handle */
+ sth = mg_find(SvRV(sth),'P')->mg_obj;
}
imp_sth = (imp_sth_t*)(DBIh_COM(sth));
/* --- bind_param --- */
@@ -18,6 +18,8 @@ dbilogstrip.PL Utility to normalise DBI logs so they can be com
dbiprof.PL
dbiproxy.PL Frontend for DBI::ProxyServer
dbivport.h DBI version portability macros (for drivers to copy)
+dbixs_rev.h Defines DBIXS_REVISION macro holding DBIXS.h subversion revision number
+dbixs_rev.pl Utility to write dbixs_rev.h
ex/perl_dbi_nulls_test.pl A test script for forms of IS NULL qualification in SQL
ex/profile.pl A test script for DBI::Profile
lib/Bundle/DBI.pm A bundle for automatic installation via CPAN.
@@ -1,7 +1,7 @@
# http://module-build.sourceforge.net/META-spec.html
#XXXXXXX This is a prototype!!! It will change in the future!!! XXXXX#
name: DBI
-version: 1.56
+version: 1.57
version_from: DBI.pm
installdirs: site
requires:
@@ -11,4 +11,4 @@ requires:
Test::Simple: 0.4
distribution_type: module
-generated_by: ExtUtils::MakeMaker version 6.30
+generated_by: ExtUtils::MakeMaker version 6.17
@@ -1,6 +1,6 @@
# -*- perl -*-
#
-# $Id: Makefile.PL 9530 2007-05-09 13:05:23Z timbo $
+# $Id: Makefile.PL 9637 2007-06-11 12:49:27Z timbo $
#
# Copyright (c) 1994-2006 Tim Bunce Ireland
#
@@ -297,6 +297,9 @@ sub post_constants {
my $xst = main::dbd_postamble();
$xst =~ s/\$\(BASEEXT\)/Perl/g;
$xst .= '
+dbixs_rev.h: DBIXS.h Driver_xst.h dbipport.h dbivport.h dbixs_rev.pl
+ $(PERL) dbixs_rev.pl
+
DBI.c: Perl$(OBJ_EXT)
# make Changes file available as installed pod docs "perldoc DBI::Changes"
@@ -7,8 +7,9 @@ my $script = <<'SCRIPT';
use strict;
-my $VERSION = sprintf("1.%06d", q$Revision: 9418 $ =~ /(\d+)/o);
+my $VERSION = sprintf("1.%06d", q$Revision: 9628 $ =~ /(\d+)/o);
+use Data::Dumper;
use DBI::ProfileData;
use Getopt::Long;
@@ -26,6 +27,7 @@ GetOptions(
'help' => sub { exit usage() },
'number=i' => \$number,
'sort=s' => \$sort,
+ 'dumpnodes!' => \my $dumpnodes,
'reverse' => \$reverse,
'match=s' => \%match,
'exclude=s' => \%exclude,
@@ -36,7 +38,7 @@ sub usage {
print <<EOS;
dbiprof [options] [files]
-Merges DBI profile data contain in files and prints a summary.
+Reads and merges DBI profile data from files and prints a summary.
files: defaults to $filename
@@ -48,6 +50,7 @@ options:
-match=K=V for filtering, see docs
-exclude=K=V for filtering, see docs
-case_sensitive for -match and -exclude
+ -delete rename files before reading then delete afterwards
-version print version number and exit
-help print this help
@@ -60,8 +63,12 @@ my @files = @ARGV ? @ARGV : ('dbi.prof');
# instantiate ProfileData object
-my $prof;
-eval { $prof = DBI::ProfileData->new(Files => \@files) };
+my $prof = eval {
+ DBI::ProfileData->new(
+ Files => \@files,
+ DeleteFiles => $opt_delete,
+ );
+};
die "Unable to load profile data: $@\n" if $@;
if (%match) { # handle matches
@@ -86,7 +93,16 @@ if (%exclude) { # handle excludes
$prof->sort(field => $sort, reverse => $reverse);
# all done, print it out
-print $prof->report(number => $number);
+if ($dumpnodes) {
+ $Data::Dumper::Indent = 1;
+ $Data::Dumper::Terse = 1;
+ $Data::Dumper::Useqq = 1;
+ $Data::Dumper::Deparse = 0;
+ print Dumper($prof->nodes);
+}
+else {
+ print $prof->report(number => $number);
+}
exit 0;
__END__
@@ -130,7 +146,8 @@ Produce this many items in the report. Defaults to 10. If set to
=item --sort field
-Sort results by the given field. The available sort fields are:
+Sort results by the given field. Sorting by multiple fields isn't currently
+supported (patches welcome). The available sort fields are:
=over 4
@@ -155,6 +172,11 @@ Sorts by the time taken in the first run.
Sorts by the shortest single run.
+=item key1
+
+Sorts by the value of the first element in the Path, which should be numeric.
+You can also sort by C<key2> and C<key3>.
+
=back
=item --reverse
@@ -209,6 +231,16 @@ this can be changed with the --case-sensitive option.
Using this option causes --match and --exclude to work
case-sensitively. Defaults to off.
+=item --delete
+
+Sets the C<DeleteFiles> option to L<DBI::ProfileData> which causes the
+files to be deleted after reading. See L<DBI::ProfileData> for more details.
+
+=item --dumpnodes
+
+Print the list of nodes in the form of a perl data structure.
+Use the C<-sort> option if you want the list sorted.
+
=item --version
Print the dbiprof version number and exit.
@@ -0,0 +1 @@
+#define DBIXS_REVISION 9659
@@ -0,0 +1,42 @@
+#!perl -w
+use strict;
+
+my $file = "dbixs_rev.h";
+my $svnversion = `svnversion -n`;
+my $is_make_dist;
+
+if ($svnversion eq 'exported') {
+ $svnversion = `svnversion -n ..`;
+ if (-f "../MANIFEST.SKIP") {
+ # presumably we're in a subdirectory because the user is doing a 'make dist'
+ $is_make_dist = 1;
+ }
+ else {
+ # presumably we're being run by an end-user because their file timestamps
+ # got messed up
+ print "Skipping regeneration of $file\n";
+ utime(time(), time(), $file); # update modification time
+ exit 0;
+ }
+}
+
+my @warn;
+die "Neither current directory nor parent directory are an svn working copy\n"
+ unless $svnversion and $svnversion =~ m/^\d+/;
+push @warn, "Mixed revision working copy"
+ if $svnversion =~ s/:\d+//;
+push @warn, "Code modified since last checkin"
+ if $svnversion =~ s/[MS]+$//;
+warn "$file warning: $_\n" for @warn;
+die "$0 failed\n" if $is_make_dist && @warn;
+
+write_header($file, DBIXS_REVISION => $svnversion, \@warn);
+
+sub write_header {
+ my ($file, $macro, $version, $comments_ref) = @_;
+ open my $fh, ">$file" or die "Can't open $file: $!\n";
+ print $fh "/* $_ */\n" for @$comments_ref;
+ print $fh "#define $macro $version\n";
+ close $fh or die "Error closing $file: $!\n";
+ print "Wrote $macro $version to $file\n";
+}
@@ -1,6 +1,6 @@
package DBI::Gofer::Execute;
-# $Id: Execute.pm 9563 2007-05-13 21:17:43Z timbo $
+# $Id: Execute.pm 9632 2007-06-07 16:46:08Z timbo $
#
# Copyright (c) 2007, Tim Bunce, Ireland
#
@@ -16,7 +16,7 @@ use DBI::Gofer::Response;
use base qw(DBI::Util::_accessor);
-our $VERSION = sprintf("0.%06d", q$Revision: 9563 $ =~ /(\d+)/o);
+our $VERSION = sprintf("0.%06d", q$Revision: 9632 $ =~ /(\d+)/o);
our @all_dbh_methods = sort map { keys %$_ } $DBI::DBI_methods{db}, $DBI::DBI_methods{common};
our %all_dbh_methods = map { $_ => DBD::_::db->can($_) } @all_dbh_methods;
@@ -273,7 +273,7 @@ sub execute_request {
my $response = eval {
if (my $check_request_sub = $self->check_request_sub) {
- $request = $check_request_sub->($request)
+ $request = $check_request_sub->($request, $self)
or die "check_request_sub failed";
}
@@ -563,11 +563,11 @@ sub _install_rand_callbacks {
my ($fail_percent, $delay_percent, $delay_duration);
my @specs = split /,/, $dbi_gofer_random;
for my $spec (@specs) {
- if ($spec =~ m/^fail=([.\d]+)%?$/) {
+ if ($spec =~ m/^fail=(-?[.\d]+)%?$/) {
$fail_percent = $1;
next;
}
- if ($spec =~ m/^delay([.\d]+)=([.\d]+)%?$/) {
+ if ($spec =~ m/^delay([.\d]+)=(-?[.\d]+)%?$/) {
$delay_duration = $1;
$delay_percent = $2;
next;
@@ -592,20 +592,30 @@ sub _install_rand_callbacks {
$dbh->{private_gofer_rand_fail_callbacks} = $callbacks;
}
+my %_mk_rand_callback_seqn;
sub _mk_rand_callback {
my ($self, $method, $fail_percent, $delay_percent, $delay_duration) = @_;
+ $fail_percent ||= 0; my $fail_modrate = $fail_percent ? int(1/(-$fail_percent )*100) : 0;
+ $delay_percent ||= 0; my $delay_modrate = $delay_percent ? int(1/(-$delay_percent)*100) : 0;
# note that $method may be "*"
return sub {
my ($h) = @_;
- if ($delay_percent && rand(100) < $delay_percent) {
+ my $seqn = ++$_mk_rand_callback_seqn{$method};
+ my $delay = ($delay_percent > 0) ? rand(100) < $delay_percent :
+ ($delay_percent < 0) ? !($seqn % $delay_modrate): 0;
+ my $fail = ($fail_percent > 0) ? rand(100) < $fail_percent :
+ ($fail_percent < 0) ? !($seqn % $fail_modrate) : 0;
+ #no warnings 'uninitialized';
+ #warn "_mk_rand_callback($fail_percent:$fail_modrate, $delay_percent:$delay_modrate): seqn=$seqn fail=$fail delay=$delay";
+ if ($delay) {
my $msg = "DBI_GOFER_RANDOM delaying execution of $method by $delay_duration seconds\n";
# Note what's happening in a trace message. If the delay percent is an odd
# number then use warn() so it's sent back to the client
($delay_percent % 2 == 0) ? $h->trace_msg($msg) : warn($msg);
select undef, undef, undef, $delay_duration; # allows floating point value
}
- if ($fail_percent && rand(100) < $fail_percent) {
+ if ($fail) {
undef $_; # tell DBI to not call the method
return $h->set_err(1, "fake error induced by DBI_GOFER_RANDOM env var");
}
@@ -667,6 +677,7 @@ Examples include: L<DBI::Gofer::Transport::stream> and L<DBI::Gofer::Transport::
=head2 check_request_sub
If defined, it must be a reference to a subroutine that will 'check' the request.
+It is passed the request object and the executor as its only arguments.
The subroutine can either return the original request object or die with a
suitable error message (which will be turned into a Gofer response).
@@ -766,13 +777,15 @@ The tokens can be one of three types:
=item fail=R%
-Set the current random failure rate to R where R is a percentage. The value R can be floating point, e.g., C<fail=0.05%>.
+Set the current failure rate to R where R is a percentage.
+The value R can be floating point, e.g., C<fail=0.05%>.
+Negative values for R have special meaning, see below.
=item delayN=R%
Set the current random delay rate to R where R is a percentage, and set the
current delay duration to N seconds. The values of R and N can be floating point,
-e.g., C<delay120=0.1%>.
+e.g., C<delay120=0.1%>. Negative values for R have special meaning, see below.
=item methodname
@@ -790,6 +803,11 @@ For example:
will cause the do() method to fail for 0.01% of calls, and the execute() method to
fail 0.01% of calls and be delayed by 60 seconds on 1% of calls.
+If the percentage value (C<R>) is negative then instead of the failures being
+triggered randomly (via the rand() function) they are triggered via a sequence
+number. In other words "C<fail=-20%>" will mean every fifth call will fail.
+Each method has a distinct sequence number.
+
=head1 AUTHOR
Tim Bunce, L<http://www.linkedin.com/in/timbunce>
@@ -220,11 +220,14 @@ A reference to a hash containing the collected profile data.
The Path value is a reference to an array. Each element controls the
value to use at the corresponding level of the profile Data tree.
-The elements of Path array can be one of the following types:
+If the value of Path is anything other than an array reference,
+it is treated as if it was:
-=over 4
+ [ '!Statement' ]
-=item Special Constant
+The elements of Path array can be one of the following types:
+
+=head3 Special Constant
B<!Statement>
@@ -283,7 +286,22 @@ B<!File2>
Same as !Caller2 above except that only the filenames are included, not the line number.
-=item Code Reference
+B<!Time>
+
+Use the current value of time(). Rarely used. See the more useful C<!Time~N> below.
+
+B<!Time~N>
+
+Where C<N> is an integer. Use the current value of time() but with reduced precision.
+The value used is determined in this way:
+
+ int( time() / N ) * N
+
+This is a useful way to segregate a profile into time slots. For example:
+
+ [ '!Time~60', '!Statement' ]
+
+=head3 Code Reference
The subroutine is passed the handle it was called on and the DBI method name.
The current Statement is in $_. The statement string should not be modified,
@@ -295,7 +313,7 @@ The sub can 'veto' (reject) a profile sample by including a reference to undef
in the returned list. That can be useful when you want to only profile
statements that match a certain pattern, or only profile certain methods.
-=item Subroutine Specifier
+=head3 Subroutine Specifier
A Path element that begins with 'C<&>' is treated as the name of a subroutine
in the DBI::ProfileSubs namespace and replaced with the corresponding code reference.
@@ -307,33 +325,24 @@ Also, currently, the only subroutine in the DBI::ProfileSubs namespace is
C<'&norm_std_n3'>. That's a very handy subroutine when profiling code that
doesn't use placeholders. See L<DBI::ProfileSubs> for more information.
-=item Attribute Specifier
+=head3 Attribute Specifier
A string enclosed in braces, such as 'C<{Username}>', specifies that the current
value of the corresponding database handle attribute should be used at that
point in the Path.
-=item Reference to a Scalar
+=head3 Reference to a Scalar
Specifies that the current value of the referenced scalar be used at that point
in the Path. This provides an efficient way to get 'contextual' values into
your profile.
-=item Other Values
+=head3 Other Values
Any other values are stringified and used literally.
(References, and values that begin with punctuation characters are reserved.)
-=back
-
-Only the first 100 elements in Path are used.
-
-If the value of Path is anything other than an array reference,
-it is treated as if it was:
-
- [ DBI::Profile::!Statement ]
-
=head1 REPORTING
@@ -431,6 +440,84 @@ then by default the statement handles created from it all contribute
to the same merged profile data tree.
+=head1 PROFILE OBJECT METHODS
+
+=head2 format
+
+See L</REPORTING>.
+
+=head2 as_node_path_list
+
+ @ary = $dbh->{Profile}->as_node_path_list();
+ @ary = $dbh->{Profile}->as_node_path_list($node, $path);
+
+Returns the collected data ($dbh->{Profile}{Data}) restructured into a list of
+array refs, one for each leaf node in the Data tree. This 'flat' structure is
+often much simpler for applications to work with.
+
+The first element of each array ref is a reference to the leaf node.
+The remaining elements are the 'path' through the data tree to that node.
+
+For example, given a data tree like this:
+
+ {key1a}{key2a}[node1]
+ {key1a}{key2b}[node2]
+ {key1b}{key2a}{key3a}[node3]
+
+The as_node_path_list() method will return this list:
+
+ [ [node1], 'key1a', 'key2a' ]
+ [ [node2], 'key1a', 'key2b' ]
+ [ [node3], 'key1b', 'key2a', 'key3a' ]
+
+The nodes are ordered by key, depth-first.
+
+The $node argument can be used to focus on a sub-tree.
+If not specified it defaults to $dbh->{Profile}{Data}.
+
+The $path argument can be used to specify a list of path elements that will be
+added to each element of the returned list. If not specified it defaults to a
+ref to an empty array.
+
+=head2 as_text
+
+ @txt = $dbh->{Profile}->as_text();
+ $txt = $dbh->{Profile}->as_text({
+ node => undef,
+ path => [],
+ separator => " > ",
+      format    => '%1$s: %11$fs / %10$d = %2$fs avg (first %12$fs, min %13$fs, max %14$fs)'."\n",
+      sortsub   => sub { ... },
+  });
+
+Returns the collected data ($dbh->{Profile}{Data}) reformatted into a list of formatted strings.
+In scalar context the list is returned as a single concatenated string.
+
+A hashref can be used to pass in arguments, the default values are shown in the example above.
+
+The C<node> and C<path> arguments are passed to as_node_path_list().
+
+The C<separator> argument is used to join the elements of the path for each leaf node.
+
+The C<sortsub> argument is used to pass in a ref to a sub that will order the list.
+The subroutine will be passed a reference to the array returned by as_node_path_list().
+
+The C<format> argument is a C<sprintf> format string that specifies the format
+to use for each leaf node. It uses the explicit format parameter index
+mechanism to specify which of the arguments should appear where in the string.
+The arguments to sprintf are:
+
+ 1: path to node, joined with the separator
+ 2: average duration (total/count)
+ (3 thru 9 are currently unused)
+ 10: count
+ 11: total duration
+ 12: first_duration
+ 13: smallest duration
+ 14: largest duration
+ 15: time of first call
+    16: time of last call
+
=head1 CUSTOM DATA MANIPULATION
Recall that C<$h->{Profile}->{Data}> is a reference to the collected data.
@@ -587,7 +674,7 @@ use Carp;
use DBI qw(dbi_time dbi_profile dbi_profile_merge_nodes dbi_profile_merge);
-$VERSION = sprintf("2.%06d", q$Revision: 9564 $ =~ /(\d+)/o);
+$VERSION = sprintf("2.%06d", q$Revision: 9656 $ =~ /(\d+)/o);
@ISA = qw(Exporter);
@@ -670,6 +757,81 @@ sub _auto_new {
}
+sub empty { # empty out profile data
+ my $self = shift;
+ DBI->trace_msg("profile data discarded\n",0) if $self->{Trace};
+ $self->{Data} = undef;
+}
+
+sub filename { # baseclass method, see DBI::ProfileDumper
+ return undef;
+}
+
+sub flush_to_disk { # baseclass method, see DBI::ProfileDumper
+ return undef;
+}
+
+
+sub as_node_path_list {
+ my ($self, $node, $path) = @_;
+ # convert the tree into an array of arrays
+ # from
+ # {key1a}{key2a}[node1]
+ # {key1a}{key2b}[node2]
+ # {key1b}{key2a}{key3a}[node3]
+ # to
+ # [ [node1], 'key1a', 'key2a' ]
+ # [ [node2], 'key1a', 'key2b' ]
+ # [ [node3], 'key1b', 'key2a', 'key3a' ]
+
+ $node ||= $self->{Data} or return;
+ $path ||= [];
+ if (ref $node eq 'HASH') { # recurse
+ $path = [ @$path, undef ];
+ return map {
+ $path->[-1] = $_;
+ ($node->{$_}) ? $self->as_node_path_list($node->{$_}, $path) : ()
+ } sort keys %$node;
+ }
+ return [ $node, @$path ];
+}
+
+
+sub as_text {
+ my ($self, $args_ref) = @_;
+ my $separator = $args_ref->{separator} || " > ";
+ my $format_path_element = $args_ref->{format_path_element}
+ || "%s"; # or e.g., " key%2$d='%s'"
+ my $format = $args_ref->{format}
+ || '%1$s: %11$fs / %10$d = %2$fs avg (first %12$fs, min %13$fs, max %14$fs)'."\n";
+
+ my @node_path_list = $self->as_node_path_list(undef, $args_ref->{path});
+
+ $args_ref->{sortsub}->(\@node_path_list) if $args_ref->{sortsub};
+
+ my $eval = "qr/".quotemeta($separator)."/";
+ my $separator_re = eval($eval) || quotemeta($separator);
+ #warn "[$eval] = [$separator_re]";
+ my @text;
+ for my $node_path (@node_path_list) {
+ my ($node, @path) = @$node_path;
+ my $idx = 0;
+ for (@path) {
+ s/[\r\n]+/ /g;
+ s/$separator_re/ /g;
+ $_ = sprintf $format_path_element, $_, ++$idx;
+ }
+ push @text, sprintf $format,
+ join($separator, @path), # 1=path
+ ($node->[0] ? $node->[4]/$node->[0] : 0), # 2=avg
+ (undef) x 7, # spare slots
+ @$node; # 10=count, 11=dur, 12=first_dur, 13=min, 14=max, 15=first_called, 16=last_called
+ }
+ return @text if wantarray;
+ return join "", @text;
+}
+
+
sub format {
my $self = shift;
my $class = ref($self) || $self;
@@ -709,10 +871,6 @@ sub format_profile_leaf {
unless UNIVERSAL::isa($thingy,'ARRAY');
push @$leaves, $thingy if $leaves;
- if (0) {
- use Data::Dumper;
- return Dumper($thingy);
- }
my ($count, $total_time, $first_time, $min, $max, $first_called, $last_called) = @$thingy;
return sprintf "%s%fs\n", ($pad x $depth), $total_time
if $count <= 1;
@@ -765,6 +923,7 @@ sub DESTROY {
local $@;
eval { $self->on_destroy };
if ($@) {
+ chomp $@;
my $class = ref($self) || $self;
DBI->trace_msg("$class on_destroy failed: $@", 0);
}
@@ -17,7 +17,7 @@ This module can also be used to roll your own profile analysis:
# load data from dbi.prof
$prof = DBI::ProfileData->new(File => "dbi.prof");
- # get a count of the records in the data set
+ # get a count of the records (unique paths) in the data set
$count = $prof->count();
# sort by longest overall time
@@ -67,15 +67,14 @@ record have the same number of keys.
The following methods are supported by DBI::ProfileData objects.
-=over 4
-
=cut
-our $VERSION = sprintf("2.%06d", q$Revision: 8696 $ =~ /(\d+)/o);
+our $VERSION = sprintf("2.%06d", q$Revision: 9632 $ =~ /(\d+)/o);
use Carp qw(croak);
use Symbol;
+use Fcntl qw(:flock);
use DBI::Profile qw(dbi_profile_merge);
@@ -89,17 +88,37 @@ sub FIRST_AT () { 5 };
sub LAST_AT () { 6 };
sub PATH () { 7 };
-=item $prof = DBI::ProfileData->new(File => "dbi.prof")
+=head2 $prof = DBI::ProfileData->new(File => "dbi.prof")
-=item $prof = DBI::ProfileData->new(File => "dbi.prof", Filter => sub { ... })
+=head2 $prof = DBI::ProfileData->new(File => "dbi.prof", Filter => sub { ... })
-=item $prof = DBI::ProfileData->new(Files => [ "dbi.prof.1", "dbi.prof.2" ])
+=head2 $prof = DBI::ProfileData->new(Files => [ "dbi.prof.1", "dbi.prof.2" ])
Creates a a new DBI::ProfileData object. Takes either a single file
through the File option or a list of Files in an array ref. If
multiple files are specified then the header data from the first file
is used.
+=head3 Files
+
+Reference to an array of file names to read.
+
+=head3 File
+
+Name of file to read. Takes precedence over C<Files>.
+
+=head3 DeleteFiles
+
+If true, the files are deleted after being read.
+
+Actually the files are renamed with a C<.deleteme> suffix before being read,
+and then, after reading all the files, they're all deleted together.
+
+The files are locked while being read which, combined with the rename, makes it
+safe to 'consume' files that are still being generated by L<DBI::ProfileDumper>.
+
+=head3 Filter
+
The C<Filter> parameter can be used to supply a code reference that can
manipulate the profile data as it is being read. This is most useful for
editing SQL statements so that slightly different statements in the raw data
@@ -141,14 +160,16 @@ sub new {
my $self = {
Files => [ "dbi.prof" ],
Filter => undef,
+ DeleteFiles => 0,
_header => {},
_nodes => [],
_node_lookup => {},
+ _sort => 'none',
@_
};
bless $self, $pkg;
- # File overrides Files
+ # File (singular) overrides Files (plural)
$self->{Files} = [ $self->{File} ] if exists $self->{File};
$self->_read_files();
@@ -158,19 +179,40 @@ sub new {
# read files into _header and _nodes
sub _read_files {
my $self = shift;
- my $files = $self->{Files};
+ my $files = $self->{Files};
my $read_header = 0;
-
- foreach my $filename (@$files) {
- my $fh = gensym;
- open($fh, $filename)
+ my @files_to_delete;
+
+ my $fh = gensym;
+ foreach (@$files) {
+ my $filename = $_;
+
+ if ($self->{DeleteFiles}) {
+ my $newfilename = $filename . ".deleteme";
+ # will clobber an existing $newfilename
+ rename($filename, $newfilename)
+ or croak "Can't rename($filename, $newfilename): $!";
+ $filename = $newfilename;
+ }
+
+ open($fh, "<", $filename)
or croak("Unable to read profile file '$filename': $!");
+
+ # lock the file in case it's still being written to
+        # (we'll be forced to wait till the write is complete)
+ flock($fh, LOCK_SH);
+
+ if (-s $fh) { # not empty
+ $self->_read_header($fh, $filename, $read_header ? 0 : 1);
+ $read_header = 1;
+ $self->_read_body($fh, $filename);
+ }
+ close($fh); # and release lock
- $self->_read_header($fh, $filename, $read_header ? 0 : 1);
- $read_header = 1;
- $self->_read_body($fh, $filename);
- close($fh);
+ push @files_to_delete, $filename
+ if $self->{DeleteFiles};
}
+ unlink $_ or warn "Can't delete '$_': $!" for @files_to_delete;
# discard node_lookup now that all files are read
delete $self->{_node_lookup};
@@ -192,6 +234,8 @@ sub _read_header {
last unless length $_;
/^(\S+)\s*=\s*(.*)/
or croak("Syntax error in header in $filename line $.: $_");
+ # XXX should compare new with existing (from previous file)
+        # and warn if they differ (different program or path)
$self->{_header}{$1} = $2 if $keep;
}
}
@@ -221,22 +265,19 @@ sub _read_body {
$path[$index] = $key; # place new key at end
}
- elsif (/^=/) {
+ elsif (s/^=\s+//) {
+            # it's data - fill in the node array with the path in index 0
# (the optional minus is to make it more robust against systems
# with unstable high-res clocks - typically due to poor NTP config
# of kernel SMP behaviour, i.e. min time may be -0.000008))
- @data = /^=\s+(\d+)
- \s+(-?\d+\.?\d*)
- \s+(-?\d+\.?\d*)
- \s+(-?\d+\.?\d*)
- \s+(-?\d+\.?\d*)
- \s+(\d+\.?\d*)
- \s+(\d+\.?\d*)
- \s*$/x;
+
+ @data = split / /, $_;
# corrupt data?
- croak("Invalid data syntax format in $filename line $.: $_") unless @data;
+ croak("Invalid number of fields in $filename line $.: $_")
+ unless @data == 7;
+ croak("Invalid leaf node characters $filename line $.: $_")
+ unless m/^[-+ 0-9eE\.]+$/;
# hook to enable pre-processing of the data - such as mangling SQL
# so that slightly different statements get treated as the same
@@ -270,7 +311,7 @@ sub _read_body {
-=item $copy = $prof->clone();
+=head2 $copy = $prof->clone();
Clone a profile data set creating a new object.
@@ -291,7 +332,7 @@ sub clone {
return $clone;
}
-=item $header = $prof->header();
+=head2 $header = $prof->header();
Returns a reference to a hash of header values. These are the key
value pairs included in the header section of the DBI::ProfileDumper
@@ -310,7 +351,7 @@ inside the profile object.
sub header { shift->{_header} }
-=item $nodes = $prof->nodes()
+=head2 $nodes = $prof->nodes()
Returns a reference the sorted nodes array. Each element in the array
is a single record in the data set. The first seven elements are the
@@ -340,7 +381,8 @@ the profile object.
sub nodes { shift->{_nodes} }
-=item $count = $prof->count()
+
+=head2 $count = $prof->count()
Returns the number of items in the profile data set.
@@ -348,9 +390,10 @@ Returns the number of items in the profile data set.
sub count { scalar @{shift->{_nodes}} }
-=item $prof->sort(field => "field")
-=item $prof->sort(field => "field", reverse => 1)
+=head2 $prof->sort(field => "field")
+
+=head2 $prof->sort(field => "field", reverse => 1)
Sorts data by the given field. Available fields are:
@@ -373,6 +416,9 @@ the dbiprof frontend.
total => TOTAL,
count => COUNT,
shortest => SHORTEST,
+ key1 => PATH+0,
+ key2 => PATH+1,
+ key3 => PATH+2,
);
sub sort {
my $self = shift;
@@ -404,11 +450,12 @@ the dbiprof frontend.
}
}
-=item $count = $prof->exclude(key2 => "disconnect")
-=item $count = $prof->exclude(key2 => "disconnect", case_sensitive => 1)
+=head2 $count = $prof->exclude(key2 => "disconnect")
+
+=head2 $count = $prof->exclude(key2 => "disconnect", case_sensitive => 1)
-=item $count = $prof->exclude(key1 => qr/^SELECT/i)
+=head2 $count = $prof->exclude(key1 => qr/^SELECT/i)
Removes records from the data set that match the given string or
regular expression. This method modifies the data in a permanent
@@ -455,11 +502,11 @@ sub exclude {
}
-=item $count = $prof->match(key2 => "disconnect")
+=head2 $count = $prof->match(key2 => "disconnect")
-=item $count = $prof->match(key2 => "disconnect", case_sensitive => 1)
+=head2 $count = $prof->match(key2 => "disconnect", case_sensitive => 1)
-=item $count = $prof->match(key1 => qr/^SELECT/i)
+=head2 $count = $prof->match(key1 => qr/^SELECT/i)
Removes records from the data set that do not match the given string
or regular expression. This method modifies the data in a permanent
@@ -505,7 +552,8 @@ sub match {
return scalar @$nodes;
}
-=item $Data = $prof->Data()
+
+=head2 $Data = $prof->Data()
Returns the same Data hash structure as seen in DBI::Profile. This
structure is not sorted. The nodes() structure probably makes more
@@ -532,7 +580,8 @@ sub Data {
return \%Data;
}
-=item $text = $prof->format($nodes->[0])
+
+=head2 $text = $prof->format($nodes->[0])
Formats a single node into a human-readable block of text.
@@ -582,7 +631,8 @@ END
}
}
-=item $text = $prof->report(number => 10)
+
+=head2 $text = $prof->report(number => 10)
Produces a report with the given number of items.
@@ -647,8 +697,6 @@ END
__END__
-=back
-
=head1 AUTHOR
Sam Tregar <sam@tregar.com>
@@ -1,5 +1,7 @@
package DBI::ProfileDumper::Apache;
+use strict;
+
=head1 NAME
DBI::ProfileDumper::Apache - capture DBI profiling data from Apache/mod_perl
@@ -10,33 +12,17 @@ Add this line to your F<httpd.conf>:
PerlSetEnv DBI_PROFILE 2/DBI::ProfileDumper::Apache
-Under mod_perl2 RC5+ you'll need to also add:
-
- PerlSetEnv DBI_PROFILE_APACHE_LOG_DIR /server_root/logs
-
-OR add
-
- PerlOptions +GlobalRequest
-
-to the gobal config section you're about test with DBI::ProfileDumper::Apache.
-If you don't do this, you'll see messages in your error_log similar to:
-
- DBI::ProfileDumper::Apache on_destroy failed: Global $r object is not available. Set:
- PerlOptions +GlobalRequest in httpd.conf at ..../DBI/ProfileDumper/Apache.pm line 144
+(If you're using mod_perl2, see L</When using mod_perl2> for some additional notes.)
Then restart your server. Access the code you wish to test using a
web browser, then shutdown your server. This will create a set of
-F<dbi.prof.*> files in your Apache log directory. Get a profiling
-report with L<dbiprof|dbiprof>:
+F<dbi.prof.*> files in your Apache log directory.
- dbiprof /usr/local/apache/logs/dbi.prof.*
+Get a profiling report with L<dbiprof|dbiprof>:
-When you're ready to perform another profiling run, delete the old
-files
-
- rm /usr/local/apache/logs/dbi.prof.*
+ dbiprof /path/to/your/apache/logs/dbi.prof.*
-and start again.
+When you're ready to perform another profiling run, delete the old files and start again.
=head1 DESCRIPTION
@@ -44,7 +30,7 @@ This module interfaces DBI::ProfileDumper to Apache/mod_perl. Using
this module you can collect profiling data from mod_perl applications.
It works by creating a DBI::ProfileDumper data file for each Apache
process. These files are created in your Apache log directory. You
-can then use dbiprof to analyze the profile files.
+can then use the dbiprof utility to analyze the profile files.
=head1 USAGE
@@ -55,17 +41,68 @@ environment variable in your F<httpd.conf>:
PerlSetEnv DBI_PROFILE 2/DBI::ProfileDumper::Apache
-If you want to use one of DBI::Profile's other Path settings, you can
-use a string like:
-
- PerlSetEnv DBI_PROFILE 2/DBI::ProfileDumper::Apache
+The DBI will look after loading and using the module when the first DBI handle
+is created.
It's also possible to use this module by setting the Profile attribute
of any DBI handle:
$dbh->{Profile} = "2/DBI::ProfileDumper::Apache";
-See L<DBI::ProfileDumper> for more possibilities.
+See L<DBI::ProfileDumper> for more possibilities, and L<DBI::Profile> for full
+details of the DBI's profiling mechanism.
+
+=head2 WRITING PROFILE DATA
+
+The profile data files will be written to your Apache log directory by default.
+
+The user that the httpd processes run as will need write access to the
+directory. So, for example, if you're running the child httpds as user 'nobody'
+and using chronolog to write to the logs directory, then you'll need to change
+the default.
+
+You can change the destination directory either by specifying a C<Dir> value
+when creating the profile (like C<File> in the L<DBI::ProfileDumper> docs),
+or you can use the C<DBI_PROFILE_APACHE_LOG_DIR> env var to change that. For example:
+
+ PerlSetEnv DBI_PROFILE_APACHE_LOG_DIR /server_root/logs
+
+=head3 When using mod_perl2
+
+Under mod_perl2 you'll need to either set the C<DBI_PROFILE_APACHE_LOG_DIR> env var,
+or enable the mod_perl2 C<GlobalRequest> option, like this:
+
+ PerlOptions +GlobalRequest
+
+to the global config section you're about to test with DBI::ProfileDumper::Apache.
+If you don't do one of those then you'll see messages in your error_log similar to:
+
+ DBI::ProfileDumper::Apache on_destroy failed: Global $r object is not available. Set:
+ PerlOptions +GlobalRequest in httpd.conf at ..../DBI/ProfileDumper/Apache.pm line 144
+
+=head3 Naming the files
+
+The default file name is inherited from L<DBI::ProfileDumper> via the
+filename() method, but DBI::ProfileDumper::Apache appends the parent pid and
+the current pid, separated by dots, to that name.
+
+=head3 Silencing the log
+
+By default a message is written to STDERR (i.e., the apache error_log file)
+when flush_to_disk() is called (either explicitly, or implicitly via DESTROY).
+
+That's usually very useful. If you don't want the log message you can silence
+it by setting the C<Quiet> attribute true.
+
+ PerlSetEnv DBI_PROFILE 2/DBI::ProfileDumper::Apache/Quiet:1
+
+ $dbh->{Profile} = "!Statement/DBI::ProfileDumper/Quiet:1";
+
+ $dbh->{Profile} = DBI::ProfileDumper->new(
+      Path => [ '!Statement' ],
+ Quiet => 1
+ );
+
=head2 GATHERING PROFILE DATA
@@ -74,11 +111,11 @@ would. Stop the webserver when your tests are complete. Profile data
files will be produced when Apache exits and you'll see something like
this in your error_log:
- DBI::ProfileDumper::Apache writing to /usr/local/apache/logs/dbi.prof.2619
+ DBI::ProfileDumper::Apache writing to /usr/local/apache/logs/dbi.prof.2604.2619
Now you can use dbiprof to examine the data:
- dbiprof /usr/local/apache/logs/dbi.prof.*
+ dbiprof /usr/local/apache/logs/dbi.prof.2604.*
By passing dbiprof a list of all generated files, dbiprof will
automatically merge them into one result set. You can also pass
@@ -89,22 +126,25 @@ dbiprof sorting and querying options, see L<dbiprof> for details.
Once you've made some code changes, you're ready to start again.
First, delete the old profile data files:
- rm /usr/local/apache/logs/dbi.prof.*
+ rm /usr/local/apache/logs/dbi.prof.*
Then restart your server and get back to work.
-=head1 MEMORY USAGE
+=head1 OTHER ISSUES
+
+=head2 Memory usage
-DBI::Profile can use a lot of memory for very active applications. It
-collects profiling data in memory for each distinct query your
-application runs. You can avoid this problem with a call like this:
+DBI::Profile can use a lot of memory for very active applications because it
+collects profiling data in memory for each distinct query run.
+Calling C<flush_to_disk()> will write the current data to disk and free the
+memory it's using. For example:
$dbh->{Profile}->flush_to_disk() if $dbh->{Profile};
-Calling C<flush_to_disk()> will clear out the profile data and write
-it to disk. Put this someplace where it will run on every request,
-like a CleanupHandler, and your memory troubles should go away. Well,
-at least the ones caused by DBI::Profile anyway.
+or, rather than flush every time, you could flush less often:
+
+ $dbh->{Profile}->flush_to_disk()
+    if $dbh->{Profile} and ++$i % 100 == 0;
=head1 AUTHOR
@@ -119,47 +159,72 @@ it under the same terms as Perl 5 itself.
=cut
-our $VERSION = sprintf("2.%06d", q$Revision: 8696 $ =~ /(\d+)/o);
+our $VERSION = sprintf("2.%06d", q$Revision: 9618 $ =~ /(\d+)/o);
our @ISA = qw(DBI::ProfileDumper);
use DBI::ProfileDumper;
use File::Spec;
+my $parent_pid = $$; # init to pid because we are currently the parent of the children-to-be
+
use constant MP2 => ($ENV{MOD_PERL_API_VERSION} and $ENV{MOD_PERL_API_VERSION} == 2) ? 1 : 0;
-# Override flush_to_disk() to setup File just in time for output.
-# Overriding new() would work unless the user creates a DBI handle
-# during server startup, in which case all the children would try to
-# write to the same file.
+my $apache_server;
+my $server_root_dir;
+
+if (MP2) {
+ require Apache2::Const;
+ Apache2::Const->import(-compile => qw(OK DECLINED));
+ require Apache2::ServerUtil;
+ $apache_server = Apache2::ServerUtil->server;
+ $server_root_dir = Apache2::ServerUtil::server_root();
+}
+else {
+ require Apache;
+ require Apache::Constants;
+ Apache::Constants->import(qw(OK DECLINED));
+ $apache_server = "Apache";
+ $server_root_dir = eval { Apache->server_root_relative('') } || "/tmp";
+}
+
+
+if (UNIVERSAL::can($apache_server, "push_handlers")) {
+ $apache_server->push_handlers(PerlChildInitHandler => sub {
+ $parent_pid = getppid();
+ #warn "PerlChildInitHandler pid$$ has ppid $parent_pid";
+ OK();
+ });
+}
+
+sub dirname {
+ my $self = shift;
+ return $self->{Dir} if $self->{Dir};
+ $self->{Dir} ||= $ENV{DBI_PROFILE_APACHE_LOG_DIR};
+ return $self->{Dir} || File::Spec->catdir($server_root_dir, "logs");
+}
+
+sub filename {
+ my $self = shift;
+ my $filename = $self->SUPER::filename(@_);
+ # to be able to identify groups of profile files from the same set of
+ # apache processes, we include the parent pid in the file name
+ # as well as the pid.
+ $filename .= ".$parent_pid.$$";
+ return $filename if File::Spec->file_name_is_absolute($filename);
+ return File::Spec->catfile($self->dirname, $filename);
+}
+
+
sub flush_to_disk {
my $self = shift;
-
- # setup File per process
- my $path;
- if (MP2) {
- if ($ENV{DBI_PROFILE_APACHE_LOG_DIR}) {
- $path = $ENV{DBI_PROFILE_APACHE_LOG_DIR};
- }
- else {
- require Apache2::RequestUtil;
- require Apache2::ServerUtil;
- $path = Apache2::ServerUtil::server_root_relative(Apache2::RequestUtil->request()->pool, "logs/")
- }
- }
- else {
- require Apache;
- $path = Apache->server_root_relative("logs/");
- }
- my $old_file = $self->{File};
- $self->{File} = File::Spec->catfile($path, "$old_file.$$");
-
- # write out to disk
- print STDERR "DBI::ProfileDumper::Apache writing to $self->{File}\n";
- $self->SUPER::flush_to_disk(@_);
-
- # reset File to previous setting
- $self->{File} = $old_file;
+
+ my $filename = $self->SUPER::flush_to_disk(@_);
+
+ print STDERR ref($self)." pid$$ written to $filename\n"
+ if $filename && not $self->{Quiet};
+
+ return $filename;
}
1;
@@ -22,20 +22,22 @@ You can also activate DBI::ProfileDumper from within your code:
use DBI;
# profile with default path (2) and output file (dbi.prof)
- $dbh->{Profile} = "2/DBI::ProfileDumper";
+ $dbh->{Profile} = "!Statement/DBI::ProfileDumper";
# same thing, spelled out
- $dbh->{Profile} = "2/DBI::ProfileDumper/File:dbi.prof";
+ $dbh->{Profile} = "!Statement/DBI::ProfileDumper/File:dbi.prof";
# another way to say it
- use DBI::Profile;
+ use DBI::ProfileDumper;
$dbh->{Profile} = DBI::ProfileDumper->new(
Path => [ '!Statement' ]
File => 'dbi.prof' );
# using a custom path
- $dbh->{Profile} = DBI::ProfileDumper->new( Path => [ "foo", "bar" ],
- File => 'dbi.prof' );
+ $dbh->{Profile} = DBI::ProfileDumper->new(
+ Path => [ "foo", "bar" ],
+ File => 'dbi.prof',
+ );
=head1 DESCRIPTION
@@ -59,10 +61,11 @@ This will write out profile data by statement into a file called
F<dbi.prof>. If you want to modify either of these properties, you
can construct the DBI::ProfileDumper object yourself:
- use DBI::Profile;
+ use DBI::ProfileDumper;
$dbh->{Profile} = DBI::ProfileDumper->new(
- Path => [ '!Statement' ]
- File => 'dbi.prof' );
+ Path => [ '!Statement' ],
+ File => 'dbi.prof'
+ );
The C<Path> option takes the same values as in
L<DBI::Profile>. The C<File> option gives the name of the
@@ -84,18 +87,36 @@ in any DBI handle:
my $profile = $dbh->{Profile};
-=over 4
+=head2 flush_to_disk
+
+ $profile->flush_to_disk()
+
+Flushes all collected profile data to disk and empties the Data hash. Returns
+the filename writen to. If no profile data has been collected then the file is
+not written and flush_to_disk() returns undef.
-=item $profile->flush_to_disk()
+The file is locked while it's being written. A process 'consuming' the files
+while they're being written to, should rename the file first, then lock it,
+then read it, then close and delete it. The C<DeleteFiles> option to
+L<DBI::ProfileData> does the right thing.
-Flushes all collected profile data to disk and empties the Data hash.
This method may be called multiple times during a program run.
-=item $profile->empty()
+=head2 empty
+
+ $profile->empty()
Clears the Data hash without writing to disk.
-=back
+=head2 filename
+
+ $filename = $profile->filename();
+
+Get or set the filename.
+
+The filename can be specified as a CODE reference, in which case the referenced
+code should return the filename to be used. The code will be called with the
+profile object as its first argument.
=head1 DATA FORMAT
@@ -104,7 +125,7 @@ containing the version number of the module used to generate it. Then
a block of variable declarations describes the profile. After two
newlines, the profile data forms the body of the file. For example:
- DBI::ProfileDumper 1.0
+ DBI::ProfileDumper 2.003762
Path = [ '!Statement', '!MethodName' ]
Program = t/42profile_data.t
@@ -154,61 +175,84 @@ it under the same terms as Perl 5 itself.
# inherit from DBI::Profile
use DBI::Profile;
-our $VERSION = sprintf("2.%06d", q$Revision: 9395 $ =~ /(\d+)/o);
-
our @ISA = ("DBI::Profile");
+our $VERSION = sprintf("2.%06d", q$Revision: 9658 $ =~ /(\d+)/o);
+
use Carp qw(croak);
+use Fcntl qw(:flock);
use Symbol;
+my $program_header;
+
+
# validate params and setup default
sub new {
my $pkg = shift;
my $self = $pkg->SUPER::new(@_);
- # File defaults to dbi.prof
- $self->{File} = "dbi.prof" unless exists $self->{File};
+ # provide a default filename
+ $self->filename("dbi.prof") unless $self->filename;
return $self;
}
+
+# get/set filename to use
+sub filename {
+ my $self = shift;
+ $self->{File} = shift if @_;
+ my $filename = $self->{File};
+ $filename = $filename->($self) if ref($filename) eq 'CODE';
+ return $filename;
+}
+
+
# flush available data to disk
sub flush_to_disk {
my $self = shift;
+ my $class = ref $self;
+ my $filename = $self->filename;
my $data = $self->{Data};
+ if (1) { # make an option
+ if (not $data or ref $data eq 'HASH' && !%$data) {
+ DBI->trace_msg("flush_to_disk skipped for empty profile\n",0) if $self->{Trace};
+ return undef;
+ }
+ }
+
my $fh = gensym;
- if ($self->{_wrote_header}) {
+ if (($self->{_wrote_header}||'') eq $filename) {
# append more data to the file
- open($fh, ">>$self->{File}")
- or croak("Unable to open '$self->{File}' for profile output: $!");
+ # XXX assumes that Path hasn't changed
+ open($fh, ">>", $filename)
+ or croak("Unable to open '$filename' for $class output: $!");
} else {
- # create new file and write the header
- open($fh, ">$self->{File}")
- or croak("Unable to open '$self->{File}' for profile output: $!");
+ # create new file (or overwrite existing)
+ open($fh, ">", $filename)
+ or croak("Unable to open '$filename' for $class output: $!");
+ }
+ # lock the file (before checking size and writing the header)
+ flock($fh, LOCK_EX);
+ # write header if file is empty - typically because we just opened it
+ # in '>' mode, or perhaps we used '>>' but the file had been truncated externally.
+ if (-s $fh == 0) {
+ DBI->trace_msg("flush_to_disk wrote header to $filename\n",0) if $self->{Trace};
$self->write_header($fh);
- $self->{_wrote_header} = 1;
+ $self->{_wrote_header} = $filename;
}
- $self->write_data($fh, $self->{Data}, 1);
+ my $lines = $self->write_data($fh, $self->{Data}, 1);
+ DBI->trace_msg("flush_to_disk wrote $lines lines to $filename\n",0) if $self->{Trace};
- close($fh) or croak("Error closing '$self->{File}': $!");
+ close($fh) # unlocks the file
+ or croak("Error closing '$filename': $!");
$self->empty();
-}
-# empty out profile data
-sub empty {
- shift->{Data} = {};
-}
-sub _print {
- my($fh) = shift;
-
- # isolate us against globals which effect print
- local($\, $,);
-
- print $fh @_;
+ return $filename;
}
@@ -216,50 +260,64 @@ sub _print {
sub write_header {
my ($self, $fh) = @_;
+ # isolate us against globals which effect print
+ local($\, $,);
+
+ # $self->VERSION can return undef during global destruction
+ my $version = $self->VERSION || $VERSION;
+
# module name and version number
- _print $fh, ref($self), " ", $self->VERSION, "\n";
+ print $fh ref($self)." $version\n";
- # print out Path
- my @path_words;
- if ($self->{Path}) {
- foreach (@{$self->{Path}}) {
- push @path_words, $_;
- }
- }
- _print $fh, "Path = [ ", join(', ', @path_words), " ]\n";
+ # print out Path (may contain CODE refs etc)
+ my @path_words = map { escape_key($_) } @{ $self->{Path} || [] };
+ print $fh "Path = [ ", join(', ', @path_words), " ]\n";
# print out $0 and @ARGV
- _print $fh, "Program = $0";
- _print $fh, " ", join(", ", @ARGV) if @ARGV;
- _print $fh, "\n";
+ if (!$program_header) {
+ # XXX should really quote as well as escape
+ $program_header = "Program = "
+ . join(" ", map { escape_key($_) } $0, @ARGV)
+ . "\n";
+ }
+ print $fh $program_header;
# all done
- _print $fh, "\n";
+ print $fh "\n";
}
+
# write data in the proscribed format
sub write_data {
my ($self, $fh, $data, $level) = @_;
+ # XXX it's valid for $data to be an ARRAY ref, i.e., Path is empty.
# produce an empty profile for invalid $data
- return unless $data and UNIVERSAL::isa($data,'HASH');
+ return 0 unless $data and UNIVERSAL::isa($data,'HASH');
+ # isolate us against globals which affect print
+ local ($\, $,);
+
+ my $lines = 0;
while (my ($key, $value) = each(%$data)) {
# output a key
- _print $fh, "+ ", $level, " ", quote_key($key), "\n";
+ print $fh "+ $level ". escape_key($key). "\n";
if (UNIVERSAL::isa($value,'ARRAY')) {
# output a data set for a leaf node
- _print $fh, sprintf "= %4d %.6f %.6f %.6f %.6f %.6f %.6f\n", @$value;
+ print $fh "= ".join(' ', @$value)."\n";
+ $lines += 1;
} else {
# recurse through keys - this could be rewritten to use a
# stack for some small performance gain
- $self->write_data($fh, $value, $level + 1);
+ $lines += $self->write_data($fh, $value, $level + 1);
}
}
+ return $lines;
}
-# quote a key for output
-sub quote_key {
+
+# escape a key for output
+sub escape_key {
my $key = shift;
$key =~ s!\\!\\\\!g;
$key =~ s!\n!\\n!g;
@@ -268,6 +326,7 @@ sub quote_key {
return $key;
}
+
# flush data to disk when profile object goes out of scope
sub on_destroy {
shift->flush_to_disk();
@@ -233,8 +233,19 @@ SKIP: {
if $DBI::PurePerl && !eval { require Math::BigInt; require_version Math::BigInt 1.56 };
skip("Math::BigInt $Math::BigInt::VERSION broken",2)
if $DBI::PurePerl && $Math::BigInt::VERSION =~ /^1\.8[45]/;
-cmp_ok(DBI::hash("foo1",1), '==', -1263462440, '... should be -1263462440');
-cmp_ok(DBI::hash("foo2",1), '==', -1263462437, '... should be -1263462437');
+ my $bigint_vers = $Math::BigInt::VERSION || "";
+ if (!$DBI::PurePerl) {
+ cmp_ok(DBI::hash("foo1",1), '==', -1263462440);
+ cmp_ok(DBI::hash("foo2",1), '==', -1263462437);
+ }
+ else {
+        # for PurePerl we use Math::BigInt but that has often caused test failures that
+ # aren't DBI's fault. So we just warn (via a skip) if it's not working right.
+ skip("Seems like your Math::BigInt $Math::BigInt::VERSION has a bug",2)
+ unless (DBI::hash("foo1X",1) == -1263462440) && (DBI::hash("foo2",1) == -1263462437);
+ ok(1, "Math::BigInt $Math::BigInt::VERSION worked okay");
+ ok(1);
+ }
}
is(data_string_desc(""), "UTF8 off, ASCII, 0 characters 0 bytes");
@@ -24,8 +24,9 @@ plan tests => 3 + 4 * $threads;
# Something about DBD::Gofer causes a problem. Older versions didn't leak. It
# started at some point in development but I didn't track it down at the time
# so the exact change that made it start is now lost in the mists of time.
+# XXX doesn't seem to be happening any more
warn " You can ignore the $threads 'Scalars leaked' messages you may see here (or send me a patch to fix the underlying problem)\n"
- if $ENV{DBI_AUTOPROXY} && not $ENV{DBI_PUREPERL};
+ if 0 && $ENV{DBI_AUTOPROXY} && not $ENV{DBI_PUREPERL};
{
package threads_sub;
@@ -106,10 +106,12 @@ sub CLOSE {
package MyFancyLogger;
+use Symbol qw(gensym);
+
sub new
{
my $self = {};
- my $fh;
+ my $fh = gensym();
open $fh, '>', 'fancylog.log';
$self->{_fh} = $fh;
$self->{_buf} = '';
@@ -22,7 +22,7 @@ BEGIN {
# tie methods (STORE/FETCH etc) get called different number of times
plan skip_all => "test results assume perl >= 5.8.2"
if $] <= 5.008001;
- plan tests => 51;
+ plan tests => 58;
}
$Data::Dumper::Indent = 1;
@@ -97,13 +97,19 @@ print "dbi_profile\n";
my $t1 = DBI::dbi_time() . "";
my $dummy_statement = "Hi mom";
my $dummy_methname = "my_method_name";
-dbi_profile($dbh, $dummy_statement, $dummy_methname, $t1, $t1 + 1);
+my $leaf = dbi_profile($dbh, $dummy_statement, $dummy_methname, $t1, $t1 + 1);
print Dumper($dbh->{Profile});
cmp_ok(keys %{ $dbh->{Profile}{Data} }, '==', 2);
cmp_ok(keys %{ $dbh->{Profile}{Data}{$dummy_statement} }, '==', 1);
-ok( ref $dbh->{Profile}{Data}{$dummy_statement}{$dummy_methname} );
+is( ref($dbh->{Profile}{Data}{$dummy_statement}{$dummy_methname}), 'ARRAY');
+
+ok $leaf, "should return ref to leaf node";
+is ref $leaf, 'ARRAY', "should return ref to leaf node";
my $mine = $dbh->{Profile}{Data}{$dummy_statement}{$dummy_methname};
+
+is $leaf, $mine, "should return ref to correct leaf node";
+
print "@$mine\n";
is_deeply $mine, [ 1, 1, 1, 1, 1, $t1, $t1 ];
@@ -214,23 +220,41 @@ is_deeply $tmp, bless {
},
'usrnam' => {
'' => {
- 'foo' => { },
+ 'foo' => { },
},
'select name from .' => {
- 'foo' => {
- 'execute' => [ 1, 0, 0, 0, 0, 0, 0 ],
- 'fetchrow_hashref' => [ 1, 0, 0, 0, 0, 0, 0 ],
- 'prepare' => [ 1, 0, 0, 0, 0, 0, 0 ],
- },
- 'bar' => {
- 'DESTROY' => [ 1, 0, 0, 0, 0, 0, 0 ],
- 'finish' => [ 1, 0, 0, 0, 0, 0, 0 ],
- },
+ 'foo' => {
+ 'execute' => [ 1, 0, 0, 0, 0, 0, 0 ],
+ 'fetchrow_hashref' => [ 1, 0, 0, 0, 0, 0, 0 ],
+ 'prepare' => [ 1, 0, 0, 0, 0, 0, 0 ],
+ },
+ 'bar' => {
+ 'DESTROY' => [ 1, 0, 0, 0, 0, 0, 0 ],
+ 'finish' => [ 1, 0, 0, 0, 0, 0, 0 ],
+ },
},
},
},
} => 'DBI::Profile';
+$tmp = [ $dbh->{Profile}->as_node_path_list() ];
+is @$tmp, 9, 'should have 9 nodes';
+sanitize_profile_data_nodes($_->[0]) for @$tmp;
+#warn Dumper($dbh->{Profile}->{Data});
+is_deeply $tmp, [
+ [ [ 3, 0, 0, 0, 0, 0, 0 ], '', '', 'foo', 'STORE' ],
+ [ [ 1, 0, 0, 0, 0, 0, 0 ], 'usrnam', '', 'foo', 'FETCH' ],
+ [ [ 2, 0, 0, 0, 0, 0, 0 ], 'usrnam', '', 'foo', 'STORE' ],
+ [ [ 1, 0, 0, 0, 0, 0, 0 ], 'usrnam', '', 'foo', 'connected' ],
+ [ [ 1, 0, 0, 0, 0, 0, 0 ], 'usrnam', 'select name from .', 'bar', 'DESTROY' ],
+ [ [ 1, 0, 0, 0, 0, 0, 0 ], 'usrnam', 'select name from .', 'bar', 'finish' ],
+ [ [ 1, 0, 0, 0, 0, 0, 0 ], 'usrnam', 'select name from .', 'foo', 'execute' ],
+ [ [ 1, 0, 0, 0, 0, 0, 0 ], 'usrnam', 'select name from .', 'foo', 'fetchrow_hashref' ],
+ [ [ 1, 0, 0, 0, 0, 0, 0 ], 'usrnam', 'select name from .', 'foo', 'prepare' ]
+];
+
+
+print "testing '!File', '!Caller' and their variants in Path\n";
$dbh->{Profile}->{Path} = [ '!File', '!File2', '!Caller', '!Caller2' ];
$dbh->{Profile}->{Data} = undef;
@@ -255,6 +279,26 @@ is_deeply $tmp, {
};
+print "testing '!Time' and variants in Path\n";
+
+undef $sth;
+my $factor = 100_000; # ~27 hours
+$dbh->{Profile}->{Path} = [ '!Time', "!Time~$factor", '!MethodName' ];
+$dbh->{Profile}->{Data} = undef;
+
+$t1 = time()+1; 1 while time() < $t1; # spin till new second starts
+$sth = $dbh->prepare("select name from .");
+$t2 = int($t1/$factor)*$factor;
+
+$tmp = sanitize_profile_data_nodes($dbh->{Profile}{Data});
+#warn Dumper($tmp);
+is_deeply $tmp, {
+ $t1 => { $t2 => { prepare => [ 1, 0, 0, 0, 0, 0, 0 ] }}
+}, "!Time and !Time~$factor should work";
+
+
+print "testing &norm_std_n3 in Path\n";
+
$dbh->{Profile} = '&norm_std_n3'; # assign as string to get magic
is_deeply $dbh->{Profile}{Path}, [
\&DBI::ProfileSubs::norm_std_n3
@@ -266,7 +310,7 @@ $tmp = $dbh->{Profile}{Data};
#warn Dumper($tmp);
is_deeply $tmp, {
'insert into foo<N> (a,b) values (<N>,"<S>")' => [ 1, '2', '2', '2', '2', '100000000', '100000000' ]
-};
+}, '&norm_std_n3 should normalize statement';
# -----------------------------------------------------------------------------------
@@ -285,7 +329,9 @@ sub run_test1 {
$sth->fetchrow_hashref;
$sth->finish;
undef $sth; # DESTROY
- return sanitize_profile_data_nodes($dbh->{Profile}{Data});
+ my $data = sanitize_profile_data_nodes($dbh->{Profile}{Data}, 1);
+ return ($data, $dbh) if wantarray;
+ return $data;
}
$tmp = run_test1( { Path => [ 'foo', sub { 'bar' }, 'baz' ] });
@@ -330,6 +376,24 @@ ok(-s $LOG_FILE, 'output should go to log file');
# -----------------------------------------------------------------------------------
+print "testing as_text\n";
+
+($tmp, $dbh) = run_test1( { Path => [ 'foo', '!MethodName', 'baz' ] });
+my $as_text = $dbh->{Profile}->as_text();
+$as_text =~ s/\.00+/.0/g;
+#warn "[$as_text]";
+is $as_text, q{foo > DESTROY > baz: 0.0s / 1 = 0.0s avg (first 0.0s, min 0.0s, max 0.0s)
+foo > FETCH > baz: 0.0s / 1 = 0.0s avg (first 0.0s, min 0.0s, max 0.0s)
+foo > STORE > baz: 0.0s / 5 = 0.0s avg (first 0.0s, min 0.0s, max 0.0s)
+foo > connected > baz: 0.0s / 1 = 0.0s avg (first 0.0s, min 0.0s, max 0.0s)
+foo > execute > baz: 0.0s / 1 = 0.0s avg (first 0.0s, min 0.0s, max 0.0s)
+foo > fetchrow_hashref > baz: 0.0s / 1 = 0.0s avg (first 0.0s, min 0.0s, max 0.0s)
+foo > finish > baz: 0.0s / 1 = 0.0s avg (first 0.0s, min 0.0s, max 0.0s)
+foo > prepare > baz: 0.0s / 1 = 0.0s avg (first 0.0s, min 0.0s, max 0.0s)
+};
+
+# -----------------------------------------------------------------------------------
+
print "dbi_profile_merge_nodes\n";
my $total_time = dbi_profile_merge_nodes(
my $totals=[],
@@ -355,8 +419,9 @@ exit 0;
sub sanitize_tree {
my $data = shift;
+ my $skip_clone = shift;
return $data unless ref $data;
- $data = dclone($data);
+ $data = dclone($data) unless $skip_clone;
sanitize_profile_data_nodes($data->{Data}) if $data->{Data};
return $data;
}
@@ -14,12 +14,12 @@ use DBI;
use Test::More;
BEGIN {
- if ($DBI::PurePerl) {
- plan skip_all => 'profiling not supported for DBI::PurePerl';
- }
- else {
- plan tests => 15;
- }
+ if ($DBI::PurePerl) {
+ plan skip_all => 'profiling not supported for DBI::PurePerl';
+ }
+ else {
+ plan tests => 16;
+ }
}
BEGIN {
@@ -63,13 +63,15 @@ open(PROF, "dbi.prof") or die $!;
my @prof = <PROF>;
close PROF;
+print @prof;
+
# has a header?
-ok( $prof[0] =~ /^DBI::ProfileDumper\s+([\d.]+)/, 'Found a version number' );
-# Can't use like() because we need $1
+like( $prof[0], '/^DBI::ProfileDumper\s+([\d.]+)/', 'Found a version number' );
# version matches VERSION? (DBI::ProfileDumper uses $self->VERSION so
# it's a stringified version object that looks like N.N.N)
-is( $1, DBI::ProfileDumper->VERSION, 'Version numbers match' );
+$prof[0] =~ /^DBI::ProfileDumper\s+([\d.]+)/;
+is( $1, DBI::ProfileDumper->VERSION, "Version numbers match in $prof[0]" );
like( $prof[1], qr{^Path\s+=\s+\[\s+\]}, 'Found the Path');
ok( $prof[2] =~ m{^Program\s+=\s+(\S+)}, 'Found the Program');
@@ -81,4 +83,14 @@ like(join('', @prof), qr/\+\s+1\s+\Q$sql\E/m);
# unlink("dbi.prof"); # now done by 'make clean'
+# should be able to load DBI::ProfileDumper::Apache outside apache
+# this also naturally checks for syntax errors etc.
+SKIP: {
+ skip "developer-only test", 1
+ unless -d ".svn" && -f "MANIFEST.SKIP";
+ skip "Apache module not installed", 1
+ unless eval { require Apache };
+ require_ok('DBI::ProfileDumper::Apache')
+}
+
1;
@@ -8,12 +8,12 @@ use DBI;
use Test::More;
BEGIN {
- if ($DBI::PurePerl) {
- plan skip_all => 'profiling not supported for DBI::PurePerl';
- }
- else {
- plan tests => 30;
- }
+ if ($DBI::PurePerl) {
+ plan skip_all => 'profiling not supported for DBI::PurePerl';
+ }
+ else {
+ plan tests => 31;
+ }
}
BEGIN {
@@ -46,10 +46,12 @@ undef $dbh;
ok(-s "dbi.prof", "Profile written to disk, non-zero size" );
# load up
-my $prof = DBI::ProfileData->new( Filter => sub {
- my ($path_ref, $data_ref) = @_;
- $path_ref->[0] =~ s/set dummy=\d/set dummy=N/;
-});
+my $prof = DBI::ProfileData->new(
+ Filter => sub {
+ my ($path_ref, $data_ref) = @_;
+ $path_ref->[0] =~ s/set dummy=\d/set dummy=N/;
+ },
+);
isa_ok( $prof, 'DBI::ProfileData' );
cmp_ok( $prof->count, '>=', 3, 'At least 3 profile data items' );
@@ -119,15 +121,15 @@ $dbh->disconnect;
undef $dbh;
# load dbi.prof
-$prof = DBI::ProfileData->new();
+$prof = DBI::ProfileData->new( DeleteFiles => 1 );
isa_ok( $prof, 'DBI::ProfileData' );
+ok(not(-e "dbi.prof"), "file should be deleted when DeleteFiles set" );
+
+
# make sure the keys didn't get garbled
$Data = $prof->Data;
ok(exists $Data->{$sql2});
ok(exists $Data->{$sql3});
-# cleanup
-# unlink("dbi.prof"); # now done by 'make clean'
-
1;
@@ -6,7 +6,7 @@ use strict;
use DBI;
-require Config;
+use Config;
require VMS::Filespec if $^O eq 'VMS';
require Cwd;
@@ -90,7 +90,7 @@ else {
# local $ENV{PERLDB_OPTS} = "AutoTrace NonStop=1 LineInfo=dbiproxy.dbg";
# pass our @INC to children (e.g., so -Mblib passes through)
- $ENV{PERL5LIB} = join(':', @INC);
+ $ENV{PERL5LIB} = join($Config{path_sep}, @INC);
my $dbi_trace_level = DBI->trace(0);
my @child_args = (
@@ -6,6 +6,7 @@ use strict;
use warnings;
use Cwd;
+use Config;
use Data::Dumper;
use Test::More;
@@ -52,7 +53,7 @@ if ($ENV{DBI_AUTOPROXY}) {
}
# ensure subprocess (for pipeone and stream transport) will use the same modules as us, ie ./blib
-local $ENV{PERL5LIB} = join ":", @INC;
+local $ENV{PERL5LIB} = join $Config{path_sep}, @INC;
my $getcwd = getcwd();
@@ -28,6 +28,7 @@ if (my $ap = $ENV{DBI_AUTOPROXY}) { # limit the insanity
plan 'no_plan';
my $tmp;
+my $dbh;
my $fails;
# we'll use the null transport for simplicity and speed
@@ -39,45 +40,43 @@ $SIG{__WARN__} = sub { ("@_" =~ /^DBI_GOFER_RANDOM/) ? push(@warns, @_) : warn @
# --- 100% failure rate
-$ENV{DBI_GOFER_RANDOM} = "fail=100%,do"; # total failure
-my $dbh_100 = DBI->connect("dbi:Gofer:transport=null;policy=rush;dsn=dbi:ExampleP:", 0, 0, {
- RaiseError => 1, PrintError => 0,
-});
-ok $dbh_100;
-
-ok !eval { $dbh_100->do("set foo=1") }, 'do method should fail';
-ok $dbh_100->errstr, 'errstr should be set';
-ok $@, '$@ should be set';
+($fails, $dbh) = trial_impact("fail=100%,do", 10, "", sub { $_->do("set foo=1") });
+is $fails, 100, 'should fail 100% of the time';
+ok $@, '$@ should be set';
like $@, '/fake error induced by DBI_GOFER_RANDOM/';
-like $dbh_100->errstr, '/DBI_GOFER_RANDOM/', 'errstr should contain DBI_GOFER_RANDOM';
-
-ok !$dbh_100->{go_response}->executed_flag_set, 'go_response executed flag should be false';
+ok $dbh->errstr, 'errstr should be set';
+like $dbh->errstr, '/DBI_GOFER_RANDOM/', 'errstr should contain DBI_GOFER_RANDOM';
+ok !$dbh->{go_response}->executed_flag_set, 'go_response executed flag should be false';
-is precentage_exceptions(200, sub { $dbh_100->do("set foo=1") }), 100;
# XXX randomness can't be predicted, so it's just possible these will fail
+srand(42); # try to limit occasional failures (effect will vary by platform etc)
+
+sub trial_impact {
+ my ($spec, $count, $dsn_attr, $code) = @_;
+ local $ENV{DBI_GOFER_RANDOM} = $spec;
+ my $dbh = dbi_connect("policy=rush;$dsn_attr");
+ local $_ = $dbh;
+ my $fail_percent = percentage_exceptions(200, $code);
+ return $fail_percent unless wantarray;
+ return ($fail_percent, $dbh);
+}
# --- 50% failure rate, with no retries
-$ENV{DBI_GOFER_RANDOM} = "fail=50%,do"; # 50% failure (almost)
-ok my $dbh_50r0 = dbi_connect("policy=rush;retry_limit=0");
-$fails = precentage_exceptions(200, sub { $dbh_50r0->do("set foo=1") });
+$fails = trial_impact("fail=50%,do", 200, "retry_limit=0", sub { $_->do("set foo=1") });
print "target approx 50% random failures, got $fails%\n";
between_ok $fails, 10, 90, "should fail about 50% of the time, but at least between 10% and 90%";
# --- 50% failure rate, with many retries (should yield low failure rate)
-$ENV{DBI_GOFER_RANDOM} = "fail=50%,prepare"; # 50% failure (almost)
-ok my $dbh_50r5 = dbi_connect("policy=rush;retry_limit=5");
-$fails = precentage_exceptions(200, sub { $dbh_50r5->prepare("set foo=1") });
-print "target approx 5% random failures, got $fails%\n";
+$fails = trial_impact("fail=50%,prepare", 200, "retry_limit=5", sub { $_->prepare("set foo=1") });
+print "target less than 20% effective random failures (ideally 0), got $fails%\n";
cmp_ok $fails, '<', 20, 'should fail < 20%';
# --- 10% failure rate, with many retries (should yield zero failure rate)
-$ENV{DBI_GOFER_RANDOM} = "fail=10,do"; # without the % this time
-ok my $dbh_1r10 = dbi_connect("policy=rush;retry_limit=10");
-$fails = precentage_exceptions(200, sub { $dbh_1r10->do("set foo=1") });
+$fails = trial_impact("fail=10,do", 200, "retry_limit=10", sub { $_->do("set foo=1") });
cmp_ok $fails, '<', 1, 'should fail < 1%';
# --- 50% failure rate, test is_idempotent
@@ -89,35 +88,53 @@ ok my $dbh_50r1ro = dbi_connect("policy=rush;retry_limit=1", {
go_retry_hook => sub { return ($_[0]->is_idempotent) ? 1 : 0 },
ReadOnly => 1,
} );
-between_ok precentage_exceptions(100, sub { $dbh_50r1ro->do("set foo=1") }),
- 15, 35, 'should fail ~25% (ie 50% with one retry)';
+between_ok percentage_exceptions(100, sub { $dbh_50r1ro->do("set foo=1") }),
+ 10, 40, 'should fail ~25% (ie 50% with one retry)';
between_ok $dbh_50r1ro->{go_transport}->meta->{request_retry_count},
- 35, 65, 'transport request_retry_count should be around 50';
+ 20, 80, 'transport request_retry_count should be around 50';
# test as above but with ReadOnly => 0
ok my $dbh_50r1rw = dbi_connect("policy=rush;retry_limit=1", {
go_retry_hook => sub { return ($_[0]->is_idempotent) ? 1 : 0 },
ReadOnly => 0,
} );
-between_ok precentage_exceptions(100, sub { $dbh_50r1rw->do("set foo=1") }),
- 35, 65, 'should fail ~50%, ie no retries';
+between_ok percentage_exceptions(100, sub { $dbh_50r1rw->do("set foo=1") }),
+ 20, 80, 'should fail ~50%, ie no retries';
ok !$dbh_50r1rw->{go_transport}->meta->{request_retry_count},
'transport request_retry_count should be zero or undef';
+# --- check random is random and non-random is non-random
+
+my %fail_percents;
+for (1..5) {
+ $fails = trial_impact("fail=50%,do", 10, "", sub { $_->do("set foo=1") });
+ ++$fail_percents{$fails};
+}
+cmp_ok scalar keys %fail_percents, '>=', 2, 'positive percentage should fail randomly';
+
+%fail_percents = ();
+for (1..5) {
+ $fails = trial_impact("fail=-50%,do", 10, "", sub { $_->do("set foo=1") });
+ ++$fail_percents{$fails};
+}
+is scalar keys %fail_percents, 1, 'negative percentage should fail non-randomly';
+
# ---
print "Testing random delay\n";
$ENV{DBI_GOFER_RANDOM} = "delay0.1=51%,do"; # odd percentage to force warn()s
@warns = ();
-ok my $dbh = dbi_connect("policy=rush;retry_limit=0");
-is precentage_exceptions(10, sub { $dbh->do("set foo=1") }),
+ok $dbh = dbi_connect("policy=rush;retry_limit=0");
+is percentage_exceptions(20, sub { $dbh->do("set foo=1") }),
0, "should not fail for DBI_GOFER_RANDOM='$ENV{DBI_GOFER_RANDOM}'";
my $delays = grep { m/delaying execution/ } @warns;
-between_ok $delays, 2, 9, 'should be delayed around 5 times';
+between_ok $delays, 1, 19, 'should be delayed around 5 times';
exit 0;
+# --- subs ---
+#
sub between_ok {
my ($got, $min, $max, $label) = @_;
local $Test::Builder::Level = 2;
@@ -132,7 +149,7 @@ sub dbi_connect {
});
}
-sub precentage_exceptions {
+sub percentage_exceptions {
my ($count, $sub) = @_;
my $i = $count;
my $exceptions = 0;