Initial Commit

This commit is contained in:
Riley Schneider
2025-12-03 16:38:10 +01:00
parent c5e26bf594
commit b732d8d4b5
17680 changed files with 5977495 additions and 2 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,107 @@
package Test::Builder::Formatter;
use strict;
use warnings;
our $VERSION = '1.302183';
BEGIN { require Test2::Formatter::TAP; our @ISA = qw(Test2::Formatter::TAP) }
use Test2::Util::HashBase qw/no_header no_diag/;
# Compute the output-handle slot indexes.  OUT_STD/OUT_ERR are aliased from
# Test2::Formatter::TAP; OUT_TODO is a brand-new slot one past OUT_ERR, used
# by redirect() below to send TODO diagnostics to the STDOUT handle.
BEGIN {
*OUT_STD = Test2::Formatter::TAP->can('OUT_STD');
*OUT_ERR = Test2::Formatter::TAP->can('OUT_ERR');
my $todo = OUT_ERR() + 1;
# Empty prototype so OUT_TODO behaves like a constant.
*OUT_TODO = sub() { $todo };
}
# Initialize via the parent TAP formatter, then make the OUT_TODO slot an
# alias of the OUT_STD handle so TODO output lands on regular output.
sub init {
    my $self = shift;
    $self->SUPER::init(@_);
    my $handles = $self->{+HANDLES};
    $handles->[OUT_TODO] = $handles->[OUT_STD];
}
# Emit the TAP plan line unless the no_header attribute suppresses it.
sub plan_tap {
    my ($self, $facets) = @_;
    return if $self->{+NO_HEADER};
    return $self->SUPER::plan_tap($facets);
}
# Produce debug (diag) TAP output unless no_diag is set.  Output originating
# from Test::Builder::TodoDiag is redirected to the TODO handle.
sub debug_tap {
    my ($self, $facets, $num) = @_;
    return if $self->{+NO_DIAG};
    my @output = $self->SUPER::debug_tap($facets, $num);
    my $about = $facets->{about};
    if (@output && ref $about && defined $about->{package}
        && $about->{package} eq 'Test::Builder::TodoDiag') {
        $self->redirect(\@output);
    }
    return @output;
}
# Produce info (note) TAP output unless no_diag is set.  Output originating
# from Test::Builder::TodoDiag is redirected to the TODO handle.
sub info_tap {
    my ($self, $facets) = @_;
    return if $self->{+NO_DIAG};
    my @output = $self->SUPER::info_tap($facets);
    my $about = $facets->{about};
    if (@output && ref $about && defined $about->{package}
        && $about->{package} eq 'Test::Builder::TodoDiag') {
        $self->redirect(\@output);
    }
    return @output;
}
# Point every [handle_index, text] pair in the output list at the TODO slot.
sub redirect {
    my ($self, $out) = @_;
    for my $pair (@$out) {
        $pair->[0] = OUT_TODO;
    }
}
# Test::Builder-compatible formatting: no extra space inside subtest output.
sub no_subtest_space { return 1 }
1;
__END__
=pod
=encoding UTF-8
=head1 NAME
Test::Builder::Formatter - Test::Builder subclass of Test2::Formatter::TAP
=head1 DESCRIPTION
This is what takes events and turns them into TAP.
=head1 SYNOPSIS
use Test::Builder; # Loads Test::Builder::Formatter for you
=head1 SOURCE
The source code repository for Test2 can be found at
F<http://github.com/Test-More/test-more/>.
=head1 MAINTAINERS
=over 4
=item Chad Granum E<lt>exodist@cpan.orgE<gt>
=back
=head1 AUTHORS
=over 4
=item Chad Granum E<lt>exodist@cpan.orgE<gt>
=back
=head1 COPYRIGHT
Copyright 2020 Chad Granum E<lt>exodist@cpan.orgE<gt>.
This program is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.
See F<http://dev.perl.org/licenses/>
=cut

View File

@@ -0,0 +1,659 @@
package Test::Builder::IO::Scalar;
=head1 NAME
Test::Builder::IO::Scalar - A copy of IO::Scalar for Test::Builder
=head1 DESCRIPTION
This is a copy of L<IO::Scalar> which ships with L<Test::Builder> to
support scalar references as filehandles on Perl 5.6. Newer
versions of Perl simply use C<open()>'s built in support.
L<Test::Builder> can not have dependencies on other modules without
careful consideration, so it's simply been copied into the distribution.
=head1 COPYRIGHT and LICENSE
This file came from the "IO-stringy" Perl5 toolkit.
Copyright (c) 1996 by Eryq. All rights reserved.
Copyright (c) 1999,2001 by ZeeGee Software Inc. All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.
=cut
# This is copied code, I don't care.
##no critic
use Carp;
use strict;
use vars qw($VERSION @ISA);
use IO::Handle;
use 5.005;
### The package version, both in 1.23 style *and* usable by MakeMaker:
$VERSION = "2.114";
### Inheritance:
@ISA = qw(IO::Handle);
#==============================
=head2 Construction
=over 4
=cut
#------------------------------
=item new [ARGS...]
I<Class method.>
Return a new, unattached scalar handle.
If any arguments are given, they're sent to open().
=cut
# Construct a new scalar handle.  The object is a blessed glob-ref whose
# per-instance data lives in the glob's hash slot (*$self->{...}).
sub new {
my $proto = shift;
my $class = ref($proto) || $proto;
# `local *FH` yields a fresh anonymous glob for every call.
my $self = bless \do { local *FH }, $class;
# Tie the glob to this same class (passing $self) so <$self>, print $self,
# etc. route through the TIEHANDLE methods at the bottom of this file.
tie *$self, $class, $self;
$self->open(@_); ### open on anonymous by default
$self;
}
# Close the handle automatically when the object is garbage-collected.
sub DESTROY {
    my $self = shift;
    $self->close;
}
#------------------------------
=item open [SCALARREF]
I<Instance method.>
Open the scalar handle on a new scalar, pointed to by SCALARREF.
If no SCALARREF is given, a "private" scalar is created to hold
the file data.
Returns the self object on success, undefined on error.
=cut
# Attach the handle to SCALARREF (a private empty scalar when none is given),
# resetting the seek position to 0.  Returns $self; croaks on a non-scalar ref.
sub open {
    my ($self, $sref) = @_;
    unless (defined $sref) {
        my $private = '';
        $sref = \$private;
    }
    croak "open() needs a ref to a scalar" unless ref($sref) eq "SCALAR";
    *$self->{Pos} = 0;    ### seek position
    *$self->{SR}  = $sref; ### scalar reference
    return $self;
}
#------------------------------
=item opened
I<Instance method.>
Is the scalar handle opened on something?
=cut
# True (the scalar ref) when the handle is attached to a scalar, false otherwise.
sub opened {
    my $self = shift;
    return *$self->{SR};
}
#------------------------------
=item close
I<Instance method.>
Disassociate the scalar handle from its underlying scalar.
Done automatically on destroy.
=cut
# Detach from the underlying scalar by wiping the glob's hash slot.
# Called automatically from DESTROY.
sub close {
    my $self = shift;
    %{*$self} = ();
    return 1;
}
=back
=cut
#==============================
=head2 Input and output
=over 4
=cut
#------------------------------
=item flush
I<Instance method.>
No-op, provided for OO compatibility.
=cut
# No-op for OO compatibility; "0 but true" is numerically 0 yet truthy.
sub flush { return "0 but true" }
#------------------------------
=item getc
I<Instance method.>
Return the next character, or undef if none remain.
=cut
# Return the next single character and advance Pos, or undef at EOF.
sub getc {
    my $self = shift;
    return undef if $self->eof;
    my $at = *$self->{Pos}++;
    return substr(${*$self->{SR}}, $at, 1);
}
#------------------------------
=item getline
I<Instance method.>
Return the next line, or undef on end of string.
Can safely be called in an array context.
Currently, lines are delimited by "\n".
=cut
# Return the next "line" from the scalar, honoring the current value of $/:
# undef = slurp everything, "\n" = ordinary lines, \$n = fixed-size records,
# "" = paragraph mode, anything else = literal separator.  Advances Pos past
# the returned text and returns undef at EOF.
sub getline {
my $self = shift;
### Return undef right away if at EOF:
return undef if $self->eof;
### Get next line:
my $sr = *$self->{SR};
my $i = *$self->{Pos}; ### Start matching at this point.
### Minimal impact implementation!
### We do the fast fast thing (no regexps) if using the
### classic input record separator.
### Case 1: $/ is undef: slurp all...
if (!defined($/)) {
*$self->{Pos} = length $$sr;
return substr($$sr, $i);
}
### Case 2: $/ is "\n": zoom zoom zoom...
elsif ($/ eq "\012") {
### Seek ahead for "\n"... yes, this really is faster than regexps.
my $len = length($$sr);
for (; $i < $len; ++$i) {
last if ord (substr ($$sr, $i, 1)) == 10;
}
### Extract the line:
my $line;
if ($i < $len) { ### We found a "\n":
# Include the newline itself in the returned line.
$line = substr ($$sr, *$self->{Pos}, $i - *$self->{Pos} + 1);
*$self->{Pos} = $i+1; ### Remember where we finished up.
}
else { ### No "\n"; slurp the remainder:
$line = substr ($$sr, *$self->{Pos}, $i - *$self->{Pos});
*$self->{Pos} = $len;
}
return $line;
}
### Case 3: $/ is ref to int. Do fixed-size records.
### (Thanks to Dominique Quatravaux.)
elsif (ref($/)) {
my $len = length($$sr);
# NOTE: this lexical $i deliberately shadows the outer $i; here it is the
# record size taken from ${$/}.
my $i = ${$/} + 0;
my $line = substr ($$sr, *$self->{Pos}, $i);
*$self->{Pos} += $i;
# Clamp Pos so it never runs past the end of the string.
*$self->{Pos} = $len if (*$self->{Pos} > $len);
return $line;
}
### Case 4: $/ is either "" (paragraphs) or something weird...
### This is Graham's general-purpose stuff, which might be
### a tad slower than Case 2 for typical data, because
### of the regexps.
else {
pos($$sr) = $i;
### If in paragraph mode, skip leading lines (and update i!):
length($/) or
(($$sr =~ m/\G\n*/g) and ($i = pos($$sr)));
### If we see the separator in the buffer ahead...
if (length($/)
? $$sr =~ m,\Q$/\E,g ### (ordinary sep) TBD: precomp!
: $$sr =~ m,\n\n,g ### (a paragraph)
) {
*$self->{Pos} = pos $$sr;
return substr($$sr, $i, *$self->{Pos}-$i);
}
### Else if no separator remains, just slurp the rest:
else {
*$self->{Pos} = length $$sr;
return substr($$sr, $i);
}
}
}
#------------------------------
=item getlines
I<Instance method.>
Get all remaining lines.
It will croak() if accidentally called in a scalar context.
=cut
# Return all remaining lines at once; refuses to run in scalar context so the
# list is never silently collapsed to a count.
sub getlines {
    my $self = shift;
    croak("can't call getlines in scalar context!") unless wantarray;
    my @lines;
    while (defined(my $line = $self->getline)) {
        push @lines, $line;
    }
    return @lines;
}
#------------------------------
=item print ARGS...
I<Instance method.>
Print ARGS to the underlying scalar.
B<Warning:> this continues to always cause a seek to the end
of the string, but if you perform seek()s and tell()s, it is
still safer to explicitly seek-to-end before subsequent print()s.
=cut
# Append ARGS (plus $\, when set) to the underlying scalar, then move Pos to
# the new end of string -- print always seeks to the end.
sub print {
    my $self = shift;
    my $ors = defined($\) ? $\ : "";
    ${*$self->{SR}} .= join('', @_) . $ors;
    *$self->{Pos} = length ${*$self->{SR}};
    1;
}
# "Unsafe" append variant: unlike print(), Pos is advanced by the appended
# length rather than recomputed from the string end, so it can drift if the
# caller has seek()ed elsewhere.
# Fix: guard $\ with defined() (exactly as print() above does) instead of
# concatenating it blindly -- the old code raised an "uninitialized value"
# warning and appended undef whenever no output record separator was set.
sub _unsafe_print {
    my $self = shift;
    my $append = join('', @_) . (defined($\) ? $\ : "");
    ${*$self->{SR}} .= $append;
    *$self->{Pos} += length($append);
    1;
}
# Historical implementation of print(): append, then recompute Pos as the new
# total string length.
# Fix: guard $\ with defined() (matching print() above) -- the old code
# raised an "uninitialized value" warning and concatenated undef whenever no
# output record separator was set.
sub _old_print {
    my $self = shift;
    ${*$self->{SR}} .= join('', @_) . (defined($\) ? $\ : "");
    *$self->{Pos} = length(${*$self->{SR}});
    1;
}
#------------------------------
=item read BUF, NBYTES, [OFFSET]
I<Instance method.>
Read some bytes from the scalar.
Returns the number of bytes actually read, 0 on end-of-file, undef on error.
=cut
# Read up to NBYTES from the current position into the caller's buffer
# ($_[1]), optionally at OFFSET.  Returns the number of bytes actually read
# (0 at EOF).  Arguments are accessed positionally because $_[1] must stay an
# alias to the caller's variable so it can be written in place.
sub read {
my $self = $_[0];
my $n = $_[2];
my $off = $_[3] || 0;
my $read = substr(${*$self->{SR}}, *$self->{Pos}, $n);
$n = length($read); ### may be fewer bytes than requested near EOF
*$self->{Pos} += $n;
# Assign through the alias: either splice in at OFFSET or replace outright.
($off ? substr($_[1], $off) : $_[1]) = $read;
return $n;
}
#------------------------------
=item write BUF, NBYTES, [OFFSET]
I<Instance method.>
Write some bytes to the scalar.
=cut
# Write NBYTES of BUF (from OFFSET, default 0) to the scalar via print().
# Returns the number of bytes actually written.
sub write {
    my $self = $_[0];
    my $count = $_[2];
    my $offset = $_[3] || 0;
    my $data = substr($_[1], $offset, $count);
    $count = length($data);
    $self->print($data);
    return $count;
}
#------------------------------
=item sysread BUF, LEN, [OFFSET]
I<Instance method.>
Read some bytes from the scalar.
Returns the number of bytes actually read, 0 on end-of-file, undef on error.
=cut
# sysread is identical to read for an in-memory scalar; delegate directly.
sub sysread {
    my $self = shift;
    return $self->read(@_);
}
#------------------------------
=item syswrite BUF, NBYTES, [OFFSET]
I<Instance method.>
Write some bytes to the scalar.
=cut
# syswrite is identical to write for an in-memory scalar; delegate directly.
sub syswrite {
    my $self = shift;
    return $self->write(@_);
}
=back
=cut
#==============================
=head2 Seeking/telling and other attributes
=over 4
=cut
#------------------------------
=item autoflush
I<Instance method.>
No-op, provided for OO compatibility.
=cut
# No-op, provided only for IO::Handle API compatibility.
sub autoflush { return }
#------------------------------
=item binmode
I<Instance method.>
No-op, provided for OO compatibility.
=cut
# No-op, provided only for IO::Handle API compatibility.
sub binmode { return }
#------------------------------
=item clearerr
I<Instance method.> Clear the error and EOF flags. A no-op.
=cut
# There are no error/EOF flags to clear on a scalar handle; report success.
sub clearerr { return 1 }
#------------------------------
=item eof
I<Instance method.> Are we at end of file?
=cut
# At end-of-file when the position has reached (or passed) the string length.
sub eof {
    my $self = shift;
    return( *$self->{Pos} >= length ${*$self->{SR}} );
}
#------------------------------
=item seek OFFSET, WHENCE
I<Instance method.> Seek to a given position in the stream.
=cut
# Move the position per WHENCE (0 = SEEK_SET, 1 = SEEK_CUR, 2 = SEEK_END),
# then clamp the result into [0, end-of-string].  Always returns 1.
sub seek {
    my ($self, $offset, $whence) = @_;
    my $end = length ${*$self->{SR}};
    if    ($whence == 0) { *$self->{Pos} = $offset }
    elsif ($whence == 1) { *$self->{Pos} += $offset }
    elsif ($whence == 2) { *$self->{Pos} = $end + $offset }
    else                 { croak "bad seek whence ($whence)" }
    *$self->{Pos} = 0    if *$self->{Pos} < 0;
    *$self->{Pos} = $end if *$self->{Pos} > $end;
    return 1;
}
#------------------------------
=item sysseek OFFSET, WHENCE
I<Instance method.> Identical to C<seek OFFSET, WHENCE>, I<q.v.>
=cut
# Identical to seek(); provided for IO::Handle API compatibility.
sub sysseek {
    my $self = shift;
    return $self->seek(@_);
}
#------------------------------
=item tell
I<Instance method.>
Return the current position in the stream, as a numeric offset.
=cut
# Current position in the string, as a plain numeric offset.
sub tell {
    my $self = shift;
    return *$self->{Pos};
}
#------------------------------
=item use_RS [YESNO]
I<Instance method.>
B<Deprecated and ignored.>
Obey the current setting of $/, like IO::Handle does?
Default is false in 1.x, but cold-welded true in 2.x and later.
=cut
# Deprecated no-op kept for API compatibility: $/ is always consulted now,
# so this only warns callers who still invoke it.  $yesno is ignored.
sub use_RS {
my ($self, $yesno) = @_;
carp "use_RS is deprecated and ignored; \$/ is always consulted\n";
}
#------------------------------
=item setpos POS
I<Instance method.>
Set the current position, using the opaque value returned by C<getpos()>.
=cut
# Restore a position previously captured with getpos(); the "opaque" value is
# simply a numeric offset, so this is an absolute seek.
sub setpos {
    my $self = shift;
    return $self->seek($_[0], 0);
}
#------------------------------
=item getpos
I<Instance method.>
Return the current position in the string, as an opaque object.
=cut
# getpos() is an alias for tell(): the "opaque" position is just the offset.
*getpos = \&tell;
#------------------------------
=item sref
I<Instance method.>
Return a reference to the underlying scalar.
=cut
# Expose the reference to the underlying scalar buffer.
sub sref {
    my $self = shift;
    return *$self->{SR};
}
#------------------------------
# Tied handle methods...
#------------------------------
# Conventional tiehandle interface:
# Conventional tiehandle constructor.  When new() ties the glob it passes the
# already-built object as the extra argument, which is returned as-is; an
# ordinary `tie` with other arguments builds a fresh handle instead.
sub TIEHANDLE {
    my $existing = $_[1];
    return $existing if defined($existing) && UNIVERSAL::isa($existing, __PACKAGE__);
    return shift->new(@_);
}
# Tied-handle dispatch: each tie API call simply delegates to the matching
# lowercase OO method above.  FILENO reports -1 since there is no real file
# descriptor behind a scalar handle.
sub GETC { shift->getc(@_) }
sub PRINT { shift->print(@_) }
sub PRINTF { shift->print(sprintf(shift, @_)) }
sub READ { shift->read(@_) }
sub READLINE { wantarray ? shift->getlines(@_) : shift->getline(@_) }
sub WRITE { shift->write(@_); }
sub CLOSE { shift->close(@_); }
sub SEEK { shift->seek(@_); }
sub TELL { shift->tell(@_); }
sub EOF { shift->eof(@_); }
sub FILENO { -1 }
#------------------------------------------------------------
1;
__END__
=back
=cut
=head1 WARNINGS
Perl's TIEHANDLE spec was incomplete prior to 5.005_57;
it was missing support for C<seek()>, C<tell()>, and C<eof()>.
Attempting to use these functions with an IO::Scalar will not work
prior to 5.005_57. IO::Scalar will not have the relevant methods
invoked; and even worse, this kind of bug can lie dormant for a while.
If you turn warnings on (via C<$^W> or C<perl -w>),
and you see something like this...
attempt to seek on unopened filehandle
...then you are probably trying to use one of these functions
on an IO::Scalar with an old Perl. The remedy is to simply
use the OO version; e.g.:
$SH->seek(0,0); ### GOOD: will work on any 5.005
seek($SH,0,0); ### WARNING: will only work on 5.005_57 and beyond
=head1 VERSION
$Id: Scalar.pm,v 1.6 2005/02/10 21:21:53 dfs Exp $
=head1 AUTHORS
=head2 Primary Maintainer
David F. Skoll (F<dfs@roaringpenguin.com>).
=head2 Principal author
Eryq (F<eryq@zeegee.com>).
President, ZeeGee Software Inc (F<http://www.zeegee.com>).
=head2 Other contributors
The full set of contributors always includes the folks mentioned
in L<IO::Stringy/"CHANGE LOG">. But just the same, special
thanks to the following individuals for their invaluable contributions
(if I've forgotten or misspelled your name, please email me!):
I<Andy Glew,>
for contributing C<getc()>.
I<Brandon Browning,>
for suggesting C<opened()>.
I<David Richter,>
for finding and fixing the bug in C<PRINTF()>.
I<Eric L. Brine,>
for his offset-using read() and write() implementations.
I<Richard Jones,>
for his patches to massively improve the performance of C<getline()>
and add C<sysread> and C<syswrite>.
I<B. K. Oxley (binkley),>
for stringification and inheritance improvements,
and sundry good ideas.
I<Doug Wilson,>
for the IO::Handle inheritance and automatic tie-ing.
=head1 SEE ALSO
L<IO::String>, which is quite similar but which was designed
more-recently and with an IO::Handle-like interface in mind,
so you could mix OO- and native-filehandle usage without using tied().
I<Note:> as of version 2.x, these classes all work like
their IO::Handle counterparts, so we have comparable
functionality to IO::String.
=cut

View File

@@ -0,0 +1,182 @@
package Test::Builder::Module;
use strict;
use Test::Builder;
require Exporter;
our @ISA = qw(Exporter);
our $VERSION = '1.302183';
=head1 NAME
Test::Builder::Module - Base class for test modules
=head1 SYNOPSIS
# Emulates Test::Simple
package Your::Module;
my $CLASS = __PACKAGE__;
use parent 'Test::Builder::Module';
@EXPORT = qw(ok);
sub ok ($;$) {
my $tb = $CLASS->builder;
return $tb->ok(@_);
}
1;
=head1 DESCRIPTION
This is a superclass for L<Test::Builder>-based modules. It provides a
handful of common functionality and a method of getting at the underlying
L<Test::Builder> object.
=head2 Importing
Test::Builder::Module is a subclass of L<Exporter> which means your
module is also a subclass of Exporter. @EXPORT, @EXPORT_OK, etc...
all act normally.
A few methods are provided to do the C<< use Your::Module tests => 23 >> part
for you.
=head3 import
Test::Builder::Module provides an C<import()> method which acts in the
same basic way as L<Test::More>'s, setting the plan and controlling
exporting of functions and variables. This allows your module to set
the plan independent of L<Test::More>.
All arguments passed to C<import()> are passed onto
C<< Your::Module->builder->plan() >> with the exception of
C<< import =>[qw(things to import)] >>.
use Your::Module import => [qw(this that)], tests => 23;
says to import the functions C<this()> and C<that()> as well as set the plan
to be 23 tests.
C<import()> also sets the C<exported_to()> attribute of your builder to be
the caller of the C<import()> function.
Additional behaviors can be added to your C<import()> method by overriding
C<import_extra()>.
=cut
# Exporter-style import that also sets the caller's test plan.
sub import {
my($class) = shift;
# Make sure Test2 is fully loaded (no-op while in preload).
# NOTE(review): relies on Test2::API already being loaded via Test::Builder
# (use'd at the top of this file) -- confirm before reordering imports.
Test2::API::test2_load() unless Test2::API::test2_in_preload();
# Don't run all this when loading ourself.
return 1 if $class eq 'Test::Builder::Module';
my $test = $class->builder;
my $caller = caller;
# Record who imported us so diagnostics point at the right package.
$test->exported_to($caller);
# Let subclasses strip/react to their own custom arguments first.
$class->import_extra( \@_ );
# Pull out any "import => [...]" lists; whatever remains is the plan.
my(@imports) = $class->_strip_imports( \@_ );
$test->plan(@_);
# Bump ExportLevel so symbols land in our caller, not in this module.
local $Exporter::ExportLevel = $Exporter::ExportLevel + 1;
$class->Exporter::import(@imports);
}
# Remove every "import => [...]" pair from the argument list in place,
# returning the flattened list of symbols to import.  Everything else is
# left in @$list (in order) to be passed on to plan().
sub _strip_imports {
    my ($class, $list) = @_;
    my (@imports, @keep);
    my $i = 0;
    while ($i <= $#$list) {
        my $item = $list->[$i];
        if (defined $item and $item eq 'import') {
            # The next element is an arrayref of symbol names; consume both.
            push @imports, @{ $list->[$i + 1] };
            $i += 2;
        }
        else {
            push @keep, $item;
            $i++;
        }
    }
    @$list = @keep;
    return @imports;
}
=head3 import_extra
Your::Module->import_extra(\@import_args);
C<import_extra()> is called by C<import()>. It provides an opportunity for you
to add behaviors to your module based on its import list.
Any extra arguments which shouldn't be passed on to C<plan()> should be
stripped off by this method.
See L<Test::More> for an example of its use.
B<NOTE> This mechanism is I<VERY ALPHA AND LIKELY TO CHANGE> as it
feels like a bit of an ugly hack in its current form.
=cut
sub import_extra { }
=head2 Builder
Test::Builder::Module provides some methods of getting at the underlying
Test::Builder object.
=head3 builder
my $builder = Your::Class->builder;
This method returns the L<Test::Builder> object associated with Your::Class.
It is not a constructor so you can call it as often as you like.
This is the preferred way to get the L<Test::Builder> object. You should
I<not> get it via C<< Test::Builder->new >> as was previously
recommended.
The object returned by C<builder()> may change at runtime so you should
call C<builder()> inside each function rather than store it in a global.
sub ok {
my $builder = Your::Class->builder;
return $builder->ok(@_);
}
=cut
# Accessor, not a constructor: Test::Builder->new hands back the shared
# Test::Builder object (see the POD above), so calling this repeatedly is fine.
sub builder {
    return Test::Builder->new;
}
=head1 SEE ALSO
L<< Test2::Manual::Tooling::TestBuilder >> describes the improved
options for writing testing modules provided by L<< Test2 >>.
=cut
1;

View File

@@ -0,0 +1,675 @@
package Test::Builder::Tester;
use strict;
our $VERSION = '1.302183';
use Test::Builder;
use Symbol;
use Carp;
=head1 NAME
Test::Builder::Tester - test testsuites that have been built with
Test::Builder
=head1 SYNOPSIS
use Test::Builder::Tester tests => 1;
use Test::More;
test_out("not ok 1 - foo");
test_fail(+1);
fail("foo");
test_test("fail works");
=head1 DESCRIPTION
A module that helps you test testing modules that are built with
L<Test::Builder>.
The testing system is designed to be used by performing a three step
process for each test you wish to test. This process starts with using
C<test_out> and C<test_err> in advance to declare what the testsuite you
are testing will output with L<Test::Builder> to stdout and stderr.
You then can run the test(s) from your test suite that call
L<Test::Builder>. At this point the output of L<Test::Builder> is
safely captured by L<Test::Builder::Tester> rather than being
interpreted as real test output.
The final stage is to call C<test_test> that will simply compare what you
predeclared to what L<Test::Builder> actually outputted, and report the
results back with a "ok" or "not ok" (with debugging) to the normal
output.
=cut
####
# set up testing
####
# The Test::Builder object through which this module emits its own results
# and whose output handles get swapped during capture.
my $t = Test::Builder->new;
###
# make us an exporter
###
use Exporter;
our @ISA = qw(Exporter);
our @EXPORT = qw(test_out test_err test_fail test_diag test_test line_num);
# Custom import: sets the caller's plan from the use-line arguments, then
# exports either the default symbol set or an explicit "import => [...]" list.
sub import {
my $class = shift;
my(@plan) = @_;
my $caller = caller;
# Attribute exports/diagnostics to the importing package and set its plan.
$t->exported_to($caller);
$t->plan(@plan);
# Scan for an explicit import list; the first one found wins.
my @imports = ();
foreach my $idx ( 0 .. $#plan ) {
if( $plan[$idx] eq 'import' ) {
@imports = @{ $plan[ $idx + 1 ] };
last;
}
}
# Export one level up so symbols land in the real caller.
__PACKAGE__->export_to_level( 1, __PACKAGE__, @imports );
}
###
# set up file handles
###
# create some private file handles
my $output_handle = gensym;
my $error_handle = gensym;
# and tie them to this package; the tie objects ($out/$err) accumulate
# everything printed through the handles so it can be compared later
my $out = tie *$output_handle, "Test::Builder::Tester::Tie", "STDOUT";
my $err = tie *$error_handle, "Test::Builder::Tester::Tie", "STDERR";
####
# exported functions
####
# for remembering that we're testing and where we're testing at
my $testing = 0;
my $testing_num;          # saved current_test count while capturing
my $original_is_passing;  # saved is_passing flag while capturing
# remembering where the file handles were originally connected
my $original_output_handle;
my $original_failure_handle;
my $original_todo_handle;
my $original_formatter;   # hub formatter in place before capture started
my $original_harness_env; # saved $ENV{HARNESS_ACTIVE} (0 when it was unset)
# function that starts testing and redirects the filehandles for now;
# everything saved here is restored by test_test().
sub _start_testing {
# Hack for things that conditioned on Test-Stream being loaded
$INC{'Test/Stream.pm'} ||= 'fake' if $INC{'Test/Moose/More.pm'};
# even if we're running under Test::Harness pretend we're not
# for now. This is needed so Test::Builder doesn't add extra spaces
$original_harness_env = $ENV{HARNESS_ACTIVE} || 0;
$ENV{HARNESS_ACTIVE} = 0;
# Find the current hub and make sure a Test::Builder::Formatter is driving
# it, so captured output matches what the expectations assume.
my $hub = $t->{Hub} || ($t->{Stack} ? $t->{Stack}->top : Test2::API::test2_stack->top);
$original_formatter = $hub->format;
unless ($original_formatter && $original_formatter->isa('Test::Builder::Formatter')) {
my $fmt = Test::Builder::Formatter->new;
$hub->format($fmt);
}
# remember what the handles were set to
$original_output_handle = $t->output();
$original_failure_handle = $t->failure_output();
$original_todo_handle = $t->todo_output();
# switch out to our own handles
$t->output($output_handle);
$t->failure_output($error_handle);
$t->todo_output($output_handle);
# clear the expected list
$out->reset();
$err->reset();
# remember that we're testing
$testing = 1;
$testing_num = $t->current_test;
$t->current_test(0);
$original_is_passing = $t->is_passing;
$t->is_passing(1);
# look, we shouldn't do the ending stuff
$t->no_ending(1);
}
=head2 Functions
These are the six methods that are exported as default.
=over 4
=item test_out
=item test_err
Procedures for predeclaring the output that your test suite is
expected to produce until C<test_test> is called. These procedures
automatically assume that each line terminates with "\n". So
test_out("ok 1","ok 2");
is the same as
test_out("ok 1\nok 2");
which is even the same as
test_out("ok 1");
test_out("ok 2");
Once C<test_out> or C<test_err> (or C<test_fail> or C<test_diag>) have
been called, all further output from L<Test::Builder> will be
captured by L<Test::Builder::Tester>. This means that you will not
be able to perform further tests to the normal output in the normal way
until you call C<test_test> (well, unless you manually meddle with the
output filehandles)
=cut
# Declare one or more lines expected on the (captured) output stream.
# Capturing starts lazily on the first expectation call.
sub test_out {
    _start_testing() unless $testing;
    $out->expect(@_);
}
# Declare one or more lines expected on the (captured) error stream.
# Capturing starts lazily on the first expectation call.
sub test_err {
    _start_testing() unless $testing;
    $err->expect(@_);
}
=item test_fail
Because the standard failure message that L<Test::Builder> produces
whenever a test fails will be a common occurrence in your test error
output, and because it has changed between Test::Builder versions, rather
than forcing you to call C<test_err> with the string all the time like
so
test_err("# Failed test ($0 at line ".line_num(+1).")");
C<test_fail> exists as a convenience function that can be called
instead. It takes one argument, the offset from the current line that
the line that causes the fail is on.
test_fail(+1);
This means that the example in the synopsis could be rewritten
more simply as:
test_out("not ok 1 - foo");
test_fail(+1);
fail("foo");
test_test("fail works");
=cut
# Expect the standard Test::Builder failure diagnostic on stderr.  The single
# argument is a line offset relative to the caller of test_fail().
sub test_fail {
    _start_testing() unless $testing;
    # Work out which line the failing test will report.
    my (undef, $filename, $line) = caller;
    $line += ( shift() || 0 ); # prevent warnings
    $err->expect("# Failed test ($filename at line $line)");
}
=item test_diag
As most of the remaining expected output to the error stream will be
created by L<Test::Builder>'s C<diag> function, L<Test::Builder::Tester>
provides a convenience function C<test_diag> that you can use instead of
C<test_err>.
The C<test_diag> function prepends comment hashes and spacing to the
start and newlines to the end of the expected output passed to it and
adds it to the list of expected error output. So, instead of writing
test_err("# Couldn't open file");
you can write
test_diag("Couldn't open file");
Remember that L<Test::Builder>'s diag function will not add newlines to
the end of output and test_diag will. So to check
Test::Builder->new->diag("foo\n","bar\n");
You would do
test_diag("foo","bar")
without the newlines.
=cut
# Expect diag()-style output on stderr: each argument is prefixed with "# "
# (the trailing newline is supplied by expect()).
sub test_diag {
    _start_testing() unless $testing;
    local $_;
    my @prefixed = map { "# $_" } @_;
    $err->expect(@prefixed);
}
=item test_test
Actually performs the output check testing the tests, comparing the
data (with C<eq>) that we have captured from L<Test::Builder> against
what was declared with C<test_out> and C<test_err>.
This takes name/value pairs that effect how the test is run.
=over
=item title (synonym 'name', 'label')
The name of the test that will be displayed after the C<ok> or C<not
ok>.
=item skip_out
Setting this to a true value will cause the test to ignore if the
output sent by the test to the output stream does not match that
declared with C<test_out>.
=item skip_err
Setting this to a true value will cause the test to ignore if the
output sent by the test to the error stream does not match that
declared with C<test_err>.
=back
As a convenience, if only one argument is passed then this argument
is assumed to be the name of the test (as in the above examples.)
Once C<test_test> has been run test output will be redirected back to
the original filehandles that L<Test::Builder> was connected to
(probably STDOUT and STDERR,) meaning any further tests you run
will function normally and cause success/errors for L<Test::Harness>.
=cut
# Compare the captured output against the declared expectations, emit one
# ok/not-ok for the whole comparison, and restore everything saved by
# _start_testing() (handles, formatter, test counter, HARNESS_ACTIVE).
sub test_test {
# END the hack
delete $INC{'Test/Stream.pm'} if $INC{'Test/Stream.pm'} && $INC{'Test/Stream.pm'} eq 'fake';
# decode the arguments as described in the pod
my $mess;
my %args;
if( @_ == 1 ) {
$mess = shift
}
else {
%args = @_;
# 'name', 'title' and 'label' are synonyms; label wins over title over name.
$mess = $args{name} if exists( $args{name} );
$mess = $args{title} if exists( $args{title} );
$mess = $args{label} if exists( $args{label} );
}
# er, are we testing?
croak "Not testing. You must declare output with a test function first."
unless $testing;
# Put the original formatter back on the hub before emitting our own result.
my $hub = $t->{Hub} || Test2::API::test2_stack->top;
$hub->format($original_formatter);
# okay, reconnect the test suite back to the saved handles
$t->output($original_output_handle);
$t->failure_output($original_failure_handle);
$t->todo_output($original_todo_handle);
# restore the test no, etc, back to the original point
$t->current_test($testing_num);
$testing = 0;
$t->is_passing($original_is_passing);
# re-enable the original setting of the harness
$ENV{HARNESS_ACTIVE} = $original_harness_env;
# check the output we've stashed
unless( $t->ok( ( $args{skip_out} || $out->check ) &&
( $args{skip_err} || $err->check ), $mess )
)
{
# print out the diagnostic information about why this
# test failed
local $_;
$t->diag( map { "$_\n" } $out->complaint )
unless $args{skip_out} || $out->check;
$t->diag( map { "$_\n" } $err->complaint )
unless $args{skip_err} || $err->check;
}
}
=item line_num
A utility function that returns the line number that the function was
called on. You can pass it an offset which will be added to the
result. This is very useful for working out the correct text of
diagnostic functions that contain line numbers.
Essentially this is the same as the C<__LINE__> macro, but the
C<line_num(+3)> idiom is arguably nicer.
=cut
# Return the caller's line number plus an optional offset -- a friendlier
# spelling of __LINE__ + N for building expected diagnostics.
sub line_num {
    my @frame = caller;
    my $offset = shift() || 0; # prevent warnings
    return $frame[2] + $offset;
}
=back
In addition to the six exported functions there exists one
function that can only be accessed with a fully qualified function
call.
=over 4
=item color
When C<test_test> is called and the output that your tests generate
does not match that which you declared, C<test_test> will print out
debug information showing the two conflicting versions. As this
output itself is debug information it can be confusing which part of
the output is from C<test_test> and which was the original output from
your original tests. Also, it may be hard to spot things like
extraneous whitespace at the end of lines that may cause your test to
fail even though the output looks similar.
To assist you C<test_test> can colour the background of the debug
information to disambiguate the different types of output. The debug
output will have its background coloured green and red. The green
part represents the text which is the same between the executed and
actual output, the red shows which part differs.
The C<color> function determines if colouring should occur or not.
Passing it a true or false value will enable or disable colouring
respectively, and the function called with no argument will return the
current setting.
To enable colouring from the command line, you can use the
L<Test::Builder::Tester::Color> module like so:
perl -MTest::Builder::Tester::Color test.t
Or by including the L<Test::Builder::Tester::Color> module directly in
the PERL5LIB.
=cut
# Package-private colouring flag for the debug output.
my $color;
# Get/set accessor: with an argument, store the new setting; always return
# the current one.
sub color {
    ($color) = @_ if @_;
    return $color;
}
=back
=head1 BUGS
Test::Builder::Tester does not handle plans well. It has never done anything
special with plans. This means that plans from outside Test::Builder::Tester
will affect Test::Builder::Tester; worse, plans made while using Test::Builder::Tester
will affect overall testing. At this point there are no plans to fix this bug
as people have come to depend on it, and Test::Builder::Tester is now
discouraged in favor of C<Test2::API::intercept()>. See
L<https://github.com/Test-More/test-more/issues/667>
Calls C<< Test::Builder->no_ending >> turning off the ending tests.
This is needed as otherwise it will trip out because we've run more
tests than we strictly should have and it'll register any failures we
had that we were testing for as real failures.
The color function doesn't work unless L<Term::ANSIColor> is
compatible with your terminal. Additionally, L<Win32::Console::ANSI>
must be installed on windows platforms for color output.
Bugs (and requests for new features) can be reported to the author
though GitHub:
L<https://github.com/Test-More/test-more/issues>
=head1 AUTHOR
Copyright Mark Fowler E<lt>mark@twoshortplanks.comE<gt> 2002, 2004.
Some code taken from L<Test::More> and L<Test::Catch>, written by
Michael G Schwern E<lt>schwern@pobox.comE<gt>. Hence, those parts
Copyright Michael G Schwern 2001. Used and distributed with
permission.
This program is free software; you can redistribute it
and/or modify it under the same terms as Perl itself.
=head1 MAINTAINERS
=over 4
=item Chad Granum E<lt>exodist@cpan.orgE<gt>
=back
=head1 NOTES
Thanks to Richard Clamp E<lt>richardc@unixbeard.netE<gt> for letting
me use his testing system to try this module out on.
=head1 SEE ALSO
L<Test::Builder>, L<Test::Builder::Tester::Color>, L<Test::More>.
=cut
1;
####################################################################
# Helper class that is used to remember expected and received data
package Test::Builder::Tester::Tie;
##
# Queue one or more lines (or regexp refs) that we expect to capture.
# Each plain string is indented for the current subtest nesting level,
# run through the "Failed test" translation, and stored with a trailing
# newline; references (precompiled regexps) are stored untouched.
sub expect {
    my ($self, @lines) = @_;
    for my $line (@lines) {
        $line = $self->_account_for_subtest($line);
        $line = $self->_translate_Failed_check($line);
        push @{ $self->{wanted} }, ref($line) ? $line : $line . "\n";
    }
}
# Prefix a plain-string check with the indentation the current subtest
# nesting level would add to real TAP output; regexp refs are returned
# unchanged.  Relies on the file-level $t (the Test::Builder singleton)
# to locate the current hub.
sub _account_for_subtest {
    my ($self, $check) = @_;
    return $check if ref $check;
    my $hub   = $t->{Stack}->top;
    my $depth = $hub->isa('Test2::Hub::Subtest') ? $hub->nested : 0;
    return (' ' x $depth) . $check;
}
# Convert a literal "# Failed test (file at line N)" check into a
# regexp string that tolerates the slightly different diagnostics newer
# Test::Builder versions emit (extra text before the "at file line N"
# part).  Non-matching checks pass through unchanged.
sub _translate_Failed_check {
    my ($self, $check) = @_;
    my $failed_re = qr/\A(.*)# (Failed .*test) \((.*?) at line (\d+)\)\Z(?!\n)/;
    if (my ($pre, $msg, $file, $line) = $check =~ $failed_re) {
        $check = "/\Q$pre\E#\\s+\Q$msg\E.*?\\n?.*?\Qat $file\E line \Q$line\E.*\\n?/";
    }
    return $check;
}
##
# Did the captured output match everything we were told to expect?
# Each expected entry is either a literal (quotemeta'd), a "/.../"
# string, or a regexp ref; entries are stripped off the front of the
# captured text in order, and we succeed only if nothing is left over.
sub check {
    my $self = shift;

    # Entries may be undef; silence the resulting warnings.
    local $^W = 0;

    # Work on copies so the stored expectations are not clobbered.
    my @pending   = @{ $self->{wanted} };
    my $remaining = $self->{got};

    for my $want (@pending) {
        unless (ref $want or $want =~ s,^/(.*)/$,$1,) {
            $want = "\Q$want\E";
        }
        return 0 unless $remaining =~ s/^$want//;
    }

    return length($remaining) == 0;
}
##
# a complaint message about the inputs not matching (to be
# used for debugging messages)
#
# Builds a human-readable description of how the captured output
# ($self->{got}) differs from the expected lines ($self->{wanted}).
# When colour mode is on and Term::ANSIColor loads, the common prefix
# is shown on green and the diverging tails on red.  Differing lines
# are flagged with a leading "> ", and trailing whitespace on them is
# spelled out after a " |> " marker ('_' per space, '\t' per tab) so
# invisible differences become visible.
sub complaint {
    my $self = shift;
    my $type = $self->type;
    my $got = $self->got;
    my $wanted = join '', @{ $self->wanted };

    # are we running in colour mode?
    if(Test::Builder::Tester::color) {
        # get color
        eval { require Term::ANSIColor };
        unless($@) {
            eval { require Win32::Console::ANSI } if 'MSWin32' eq $^O; # support color on windows platforms

            # colours
            my $green = Term::ANSIColor::color("black") . Term::ANSIColor::color("on_green");
            my $red = Term::ANSIColor::color("black") . Term::ANSIColor::color("on_red");
            my $reset = Term::ANSIColor::color("reset");

            # work out where the two strings start to differ
            my $char = 0;
            $char++ while substr( $got, $char, 1 ) eq substr( $wanted, $char, 1 );

            # get the start string and the two end strings
            my $start = $green . substr( $wanted, 0, $char );
            my $gotend = $red . substr( $got, $char ) . $reset;
            my $wantedend = $red . substr( $wanted, $char ) . $reset;

            # make the start turn green on and off
            $start =~ s/\n/$reset\n$green/g;

            # make the ends turn red on and off
            $gotend =~ s/\n/$reset\n$red/g;
            $wantedend =~ s/\n/$reset\n$red/g;

            # rebuild the strings
            $got = $start . $gotend;
            $wanted = $start . $wantedend;
        }
    }

    # Compare line by line, marking the lines that differ.
    my @got = split "\n", $got;
    my @wanted = split "\n", $wanted;

    $got = "";
    $wanted = "";

    while (@got || @wanted) {
        # NOTE(review): "|| ''" also turns a literal "0" line into "" —
        # preserved from the original; confirm before changing.
        my $g = shift @got || "";
        my $w = shift @wanted || "";
        if ($g ne $w) {
            # Make trailing whitespace visible on differing lines.
            if($g =~ s/(\s+)$/ |> /g) {
                $g .= ($_ eq ' ' ? '_' : '\t') for split '', $1;
            }
            if($w =~ s/(\s+)$/ |> /g) {
                $w .= ($_ eq ' ' ? '_' : '\t') for split '', $1;
            }
            $g = "> $g";
            $w = "> $w";
        }
        else {
            $g = " $g";
            $w = " $w";
        }
        $got = $got ? "$got\n$g" : $g;
        $wanted = $wanted ? "$wanted\n$w" : $w;
    }

    return "$type is:\n" . "$got\nnot:\n$wanted\nas expected";
}
##
# forget all expected and got data
#
# Reinitialise this capture object: keep its type (STDOUT/STDERR) but
# clear the captured text and the queue of expected lines.
sub reset {
    my $self = shift;
    my $type = $self->{type};
    %$self = (
        type   => $type,
        got    => '',
        wanted => [],
    );
}
# Read-only accessor: the raw text captured so far.
sub got {
    my ($self) = @_;
    return $self->{got};
}

# Read-only accessor: arrayref of lines/regexps we expect to see.
sub wanted {
    my ($self) = @_;
    return $self->{wanted};
}

# Read-only accessor: which handle this object shadows (out/err).
sub type {
    my ($self) = @_;
    return $self->{type};
}
###
# tie interface
###

# Called for every print to the tied handle: accumulate the text.
sub PRINT {
    my $self = shift;
    $self->{got} .= join('', @_);
}

# Tie constructor: remember which handle we shadow and start clean.
sub TIEHANDLE {
    my ($class, $type) = @_;
    my $self = bless { type => $type }, $class;
    $self->reset;
    return $self;
}
# Remaining tie hooks: reads from the shadowed handles are meaningless
# here, so these are deliberate no-ops (return undef / empty list).
sub READ { }
sub READLINE { }
sub GETC { }
sub FILENO { }
1;

View File

@@ -0,0 +1,51 @@
package Test::Builder::Tester::Color;
use strict;
our $VERSION = '1.302183';
require Test::Builder::Tester;
=head1 NAME
Test::Builder::Tester::Color - turn on colour in Test::Builder::Tester
=head1 SYNOPSIS
When running a test script
perl -MTest::Builder::Tester::Color test.t
=head1 DESCRIPTION
Importing this module causes the subroutine color in Test::Builder::Tester
to be called with a true value causing colour highlighting to be turned
on in debug output.
The sole purpose of this module is to enable colour highlighting
from the command line.
=cut
# Importing this module (via "use") switches Test::Builder::Tester's
# global colour flag on; no symbols are exported.
sub import {
    Test::Builder::Tester::color(1);
}
=head1 AUTHOR
Copyright Mark Fowler E<lt>mark@twoshortplanks.comE<gt> 2002.
This program is free software; you can redistribute it
and/or modify it under the same terms as Perl itself.
=head1 BUGS
This module will have no effect unless Term::ANSIColor is installed.
=head1 SEE ALSO
L<Test::Builder::Tester>, L<Term::ANSIColor>
=cut
1;

View File

@@ -0,0 +1,68 @@
package Test::Builder::TodoDiag;
use strict;
use warnings;

our $VERSION = '1.302183';

# Subclass Test2::Event::Diag (require at compile time, set @ISA).
BEGIN { require Test2::Event::Diag; our @ISA = qw(Test2::Event::Diag) }

# TODO diagnostics are informational only; never count as real
# diagnostics output.
sub diagnostics { 0 }

# Same facets as a normal Diag event, except the first info facet is
# downgraded from debug so it is not routed to STDERR.
sub facet_data {
    my $self = shift;

    my $facets = $self->SUPER::facet_data();
    $facets->{info}[0]{debug} = 0;

    return $facets;
}

1;
__END__
=pod
=encoding UTF-8
=head1 NAME
Test::Builder::TodoDiag - Test::Builder subclass of Test2::Event::Diag
=head1 DESCRIPTION
This is used to encapsulate diag messages created inside TODO.
=head1 SYNOPSIS
You do not need to use this directly.
=head1 SOURCE
The source code repository for Test2 can be found at
F<http://github.com/Test-More/test-more/>.
=head1 MAINTAINERS
=over 4
=item Chad Granum E<lt>exodist@cpan.orgE<gt>
=back
=head1 AUTHORS
=over 4
=item Chad Granum E<lt>exodist@cpan.orgE<gt>
=back
=head1 COPYRIGHT
Copyright 2020 Chad Granum E<lt>exodist@cpan.orgE<gt>.
This program is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.
See F<http://dev.perl.org/licenses/>
=cut

View File

@@ -0,0 +1,211 @@
package Test::CPAN::Meta;
use warnings;
use strict;
use vars qw($VERSION);
$VERSION = '0.25';
#----------------------------------------------------------------------------
=head1 NAME
Test::CPAN::Meta - Validate your CPAN META.yml files.
=head1 SYNOPSIS
There are two forms in which this module can be used.
The first is a standalone test of your distribution's META.yml file:
use Test::More;
eval "use Test::CPAN::Meta";
plan skip_all => "Test::CPAN::Meta required for testing META.yml" if $@;
meta_yaml_ok();
Note that you may provide an optional label/comment/message/etc to the
function, or one will be created automatically.
The second form allows you to test other META.yml files, or specify a specific
version you wish to test against:
use Test::More tests => 6;
use Test::CPAN::Meta;
# specify a file and specification version
meta_spec_ok('META.yml','1.3',$msg);
# specify the specification version to validate the local META.yml
meta_spec_ok(undef,'1.3',$msg);
# specify a file, where the specification version is deduced
# from the file itself
meta_spec_ok('META.yml',undef,$msg);
Note that this form requires you to specify the number of tests you will be
running in your test script. Also note that each 'meta_spec_ok' is actually 2
tests under the hood.
=head1 DESCRIPTION
This distribution was written to ensure that a META.yml file, provided with a
standard distribution uploaded to CPAN, meets the specifications that are
slowly being introduced to module uploads, via the use of package makers and
installers such as L<ExtUtils::MakeMaker>, L<Module::Build> and
L<Module::Install>.
See L<CPAN::Meta> for further details of the CPAN Meta Specification.
=cut
#----------------------------------------------------------------------------
#############################################################################
#Library Modules #
#############################################################################
use Parse::CPAN::Meta;
use Test::Builder;
use Test::CPAN::Meta::Version;
#----------------------------------------------------------------------------
my $Test = Test::Builder->new();
# Export meta_yaml_ok() and meta_spec_ok() into the caller's namespace
# and hand any remaining import arguments to Test::Builder's plan().
sub import {
    my ($self, @plan) = @_;
    my $caller = caller;

    no strict 'refs';
    *{"${caller}::meta_yaml_ok"} = \&meta_yaml_ok;
    *{"${caller}::meta_spec_ok"} = \&meta_spec_ok;

    $Test->exported_to($caller);
    $Test->plan(@plan);
}
#############################################################################
#Interface Functions #
#############################################################################
=head1 FUNCTIONS
=over
=item * meta_yaml_ok([$msg])
Basic META.yml wrapper around meta_spec_ok.
Returns a hash reference to the contents of the parsed META.yml
=cut
# Basic META.yml wrapper around meta_spec_ok(): plans exactly the two
# tests meta_spec_ok() will run, then validates the default META.yml.
# Returns the parsed data structure (nothing on YAML parse failure).
sub meta_yaml_ok {
    $Test->plan( tests => 2 );
    return meta_spec_ok(undef,undef,@_);
}
=item * meta_spec_ok($file, $version [,$msg])
Validates the named file against the given specification version. Both $file
and $version can be undefined.
Returns a hash reference to the contents of the given file, after it has been
parsed.
=back
=cut
# Validate $file (default 'META.yml') against CPAN Meta spec $vers
# (deduced from the file when undef).  Emits two test results: one for
# "contains valid YAML" and one for the spec check.  Returns the parsed
# data structure on successful parsing, nothing when the YAML is bad.
sub meta_spec_ok {
    my ($file, $vers, $msg) = @_;
    $file ||= 'META.yml';

    # Build a default label when the caller did not supply one.
    unless ($msg) {
        $msg = "$file meets the designated specification";
        $msg .= " ($vers)" if $vers;
    }

    # First test: the file must parse as YAML at all.
    my ($data) = eval { Parse::CPAN::Meta::LoadFile($file) };
    if ($@) {
        $Test->ok(0, "$file contains valid YAML");
        $Test->ok(0, $msg);
        $Test->diag(" ERR: $@");
        return;
    }
    $Test->ok(1, "$file contains valid YAML");

    # Second test: the parsed structure must satisfy the spec.
    my %args = (data => $data);
    $args{spec} = $vers if $vers;

    my $validator = Test::CPAN::Meta::Version->new(%args);
    if ($validator->parse()) {
        $Test->ok(0, $msg);
        $Test->diag(" ERR: $_") for $validator->errors;
    }
    else {
        $Test->ok(1, $msg);
    }

    return $data;
}
q( "Before software can be reusable it first has to be usable." - Ralph Johnson );
__END__
#----------------------------------------------------------------------------
=head1 TESTING META FILES
There are currently 3 distributions to test META files:
Test-CPAN-Meta
Test-CPAN-Meta-JSON
Test-CPAN-Meta-YAML
All three have slightly different requirements and are intended to be used in
slightly different environments.
Test-CPAN-Meta-YAML requires a YAML parser, and currently looks for the YAML or
YAML::Syck modules. This is the original variant of the 3 and was intended to
provide a more complete YAML validation of a META.yml.
Test-CPAN-Meta requires the Parse::CPAN::Meta module, which is now part of Perl
Core as of perl-5.10.1. This version is intended to be used by those only
wishing to rely on core modules to test their META.yml files.
Test-CPAN-Meta-JSON is the most recent addition to the family, and is specifically
aimed at those distributions that use a META.json Meta file. The distribution
requires the JSON module to parse the Meta file.
=head1 BUGS, PATCHES & FIXES
There are no known bugs at the time of this release. However, if you spot a
bug or are experiencing difficulties that are not explained within the POD
documentation, please send an email to barbie@cpan.org or submit a bug to the
RT system (http://rt.cpan.org/Public/Dist/Display.html?Name=Test-CPAN-Meta).
However, it would help greatly if you are able to pinpoint problems or even
supply a patch.
Fixes are dependent upon their severity and my availability. Should a fix not
be forthcoming, please feel free to (politely) remind me.
=head1 SEE ALSO
Test::YAML::Valid
=head1 AUTHOR
Barbie, <barbie@cpan.org>
for Miss Barbell Productions, L<http://www.missbarbell.co.uk>
=head1 COPYRIGHT AND LICENSE
Copyright (C) 2007-2015 Barbie for Miss Barbell Productions
This distribution is free software; you can redistribute it and/or
modify it under the Artistic Licence v2.
=cut

View File

@@ -0,0 +1,792 @@
package Test::CPAN::Meta::Version;
use warnings;
use strict;
use vars qw($VERSION);
$VERSION = '0.25';
#----------------------------------------------------------------------------
=head1 NAME
Test::CPAN::Meta::Version - Validate CPAN META data against the specification
=head1 SYNOPSIS
use Test::CPAN::Meta::Version;
=head1 DESCRIPTION
This distribution was written to ensure that a META.yml file, provided with a
standard distribution uploaded to CPAN, meets the specifications that are
slowly being introduced to module uploads, via the use of package makers and
installers such as L<ExtUtils::MakeMaker>, L<Module::Build> and
L<Module::Install>.
This module is meant to be used together with L<Test::CPAN::Meta>, however
the code is self contained enough that you can access it directly.
See L<CPAN::Meta> for further details of the CPAN Meta Specification.
=cut
#----------------------------------------------------------------------------
#############################################################################
#Specification Definitions #
#############################################################################
# Known CPAN META spec versions and the canonical URL for each.
my %known_specs = (
    '1.4' => 'http://module-build.sourceforge.net/META-spec-v1.4.html',
    '1.3' => 'http://module-build.sourceforge.net/META-spec-v1.3.html',
    '1.2' => 'http://module-build.sourceforge.net/META-spec-v1.2.html',
    '1.1' => 'http://module-build.sourceforge.net/META-spec-v1.1.html',
    '1.0' => 'http://module-build.sourceforge.net/META-spec-v1.0.html'
);
# Reverse lookup: spec URL back to its version number.
my %known_urls = map {$known_specs{$_} => $_} keys %known_specs;

# Prerequisite maps: module name => version.  map1 allows comparison
# lists such as ">= 1.2, != 1.5" (exversion); map2 allows only a single
# version value.
my $module_map1 = { 'map' => { ':key' => { name => \&module, value => \&exversion } } };
my $module_map2 = { 'map' => { ':key' => { name => \&module, value => \&version } } };

# no_index blocks per spec generation: 1.3+ uses "directory", 1.2 used
# "dir", and 1.1 allowed arbitrary keyword-named string lists.
my $no_index_1_3 = {
    'map' => { file => { list => { value => \&string } },
               directory => { list => { value => \&string } },
               'package' => { list => { value => \&string } },
               namespace => { list => { value => \&string } },
    }
};
my $no_index_1_2 = {
    'map' => { file => { list => { value => \&string } },
               dir => { list => { value => \&string } },
               'package' => { list => { value => \&string } },
               namespace => { list => { value => \&string } },
    }
};
my $no_index_1_1 = {
    'map' => { ':key' => { name => \&keyword, list => { value => \&string } },
    }
};
my %definitions = (
'1.4' => {
# 'header' => { mandatory => 1, value => \&header },
'meta-spec' => { mandatory => 1, 'map' => { version => { mandatory => 1, value => \&version},
url => { mandatory => 1, value => \&urlspec } } },
'name' => { mandatory => 1, value => \&string },
'version' => { mandatory => 1, value => \&version },
'abstract' => { mandatory => 1, value => \&string },
'author' => { mandatory => 1, list => { value => \&string } },
'license' => { mandatory => 1, value => \&license },
'generated_by' => { mandatory => 1, value => \&string },
'distribution_type' => { value => \&string },
'dynamic_config' => { value => \&boolean },
'requires' => $module_map1,
'recommends' => $module_map1,
'build_requires' => $module_map1,
'configure_requires' => $module_map1,
'conflicts' => $module_map2,
'optional_features' => {
'map' => {
':key' => { name => \&identifier,
'map' => { description => { value => \&string },
requires_packages => { value => \&string },
requires_os => { value => \&string },
excludes_os => { value => \&string },
requires => $module_map1,
recommends => $module_map1,
build_requires => $module_map1,
conflicts => $module_map2,
}
}
}
},
'provides' => {
'map' => { ':key' => { name => \&module,
'map' => { file => { mandatory => 1, value => \&file },
version => { value => \&version } } } }
},
'no_index' => $no_index_1_3,
'private' => $no_index_1_3,
'keywords' => { list => { value => \&string } },
'resources' => {
'map' => { license => { value => \&url },
homepage => { value => \&url },
bugtracker => { value => \&url },
repository => { value => \&url },
':key' => { value => \&string, name => \&resource },
}
},
# additional user defined key/value pairs
# note we can only validate the key name, as the structure is user defined
':key' => { name => \&keyword, value => \&anything },
},
'1.3' => {
# 'header' => { mandatory => 1, value => \&header },
'meta-spec' => { mandatory => 1, 'map' => { version => { mandatory => 1, value => \&version},
url => { mandatory => 1, value => \&urlspec } } },
'name' => { mandatory => 1, value => \&string },
'version' => { mandatory => 1, value => \&version },
'abstract' => { mandatory => 1, value => \&string },
'author' => { mandatory => 1, list => { value => \&string } },
'license' => { mandatory => 1, value => \&license },
'generated_by' => { mandatory => 1, value => \&string },
'distribution_type' => { value => \&string },
'dynamic_config' => { value => \&boolean },
'requires' => $module_map1,
'recommends' => $module_map1,
'build_requires' => $module_map1,
'conflicts' => $module_map2,
'optional_features' => {
'map' => {
':key' => { name => \&identifier,
'map' => { description => { value => \&string },
requires_packages => { value => \&string },
requires_os => { value => \&string },
excludes_os => { value => \&string },
requires => $module_map1,
recommends => $module_map1,
build_requires => $module_map1,
conflicts => $module_map2,
}
}
}
},
'provides' => {
'map' => { ':key' => { name => \&module,
'map' => { file => { mandatory => 1, value => \&file },
version => { value => \&version } } } }
},
'no_index' => $no_index_1_3,
'private' => $no_index_1_3,
'keywords' => { list => { value => \&string } },
'resources' => {
'map' => { license => { value => \&url },
homepage => { value => \&url },
bugtracker => { value => \&url },
repository => { value => \&url },
':key' => { value => \&string, name => \&resource },
}
},
# additional user defined key/value pairs
# note we can only validate the key name, as the structure is user defined
':key' => { name => \&keyword, value => \&anything },
},
# v1.2 is misleading: it seems to assume that a number of fields were created
# within v1.1, when they were actually created within v1.2. This may have been
# an original mistake, such that a v1.1 was retrofitted into the timeline when
# v1.2 was originally slated as v1.1. But I could be wrong ;)
'1.2' => {
    # 'header' => { mandatory => 1, value => \&header },
    'meta-spec' => { mandatory => 1, 'map' => { version => { mandatory => 1, value => \&version},
                                                url => { mandatory => 1, value => \&urlspec } } },
    'name' => { mandatory => 1, value => \&string },
    'version' => { mandatory => 1, value => \&version },
    'license' => { mandatory => 1, value => \&license },
    'generated_by' => { mandatory => 1, value => \&string },
    'author' => { mandatory => 1, list => { value => \&string } },
    'abstract' => { mandatory => 1, value => \&string },
    'distribution_type' => { value => \&string },
    'dynamic_config' => { value => \&boolean },
    'keywords' => { list => { value => \&string } },
    'private' => $no_index_1_2,
    # BUG FIX: this key was previously written as the literal string
    # '$no_index' (with a dollar sign), so a legitimate "no_index"
    # block in a v1.2 META.yml was reported as an unknown key.  It must
    # be the plain key name, as in the other spec versions.
    'no_index' => $no_index_1_2,
    'requires' => $module_map1,
    'recommends' => $module_map1,
    'build_requires' => $module_map1,
    'conflicts' => $module_map2,
    'optional_features' => {
        'map' => {
            ':key' => { name => \&identifier,
                        'map' => { description => { value => \&string },
                                   requires_packages => { value => \&string },
                                   requires_os => { value => \&string },
                                   excludes_os => { value => \&string },
                                   requires => $module_map1,
                                   recommends => $module_map1,
                                   build_requires => $module_map1,
                                   conflicts => $module_map2,
                        }
            }
        }
    },
    'provides' => {
        'map' => { ':key' => { name => \&module,
                               'map' => { file => { mandatory => 1, value => \&file },
                                          version => { value => \&version } } } }
    },
    'resources' => {
        'map' => { license => { value => \&url },
                   homepage => { value => \&url },
                   bugtracker => { value => \&url },
                   repository => { value => \&url },
                   ':key' => { value => \&string, name => \&resource },
        }
    },
    # additional user defined key/value pairs
    # note we can only validate the key name, as the structure is user defined
    ':key' => { name => \&keyword, value => \&anything },
},
# note that the 1.1 spec doesn't specify optional or mandatory fields, what
# appears below is assumed from later specifications.
'1.1' => {
# 'header' => { mandatory => 1, value => \&header },
'name' => { mandatory => 1, value => \&string },
'version' => { mandatory => 1, value => \&version },
'license' => { mandatory => 1, value => \&license },
'license_uri' => { mandatory => 0, value => \&url },
'generated_by' => { mandatory => 1, value => \&string },
'distribution_type' => { value => \&string },
'dynamic_config' => { value => \&boolean },
'private' => $no_index_1_1,
'requires' => $module_map1,
'recommends' => $module_map1,
'build_requires' => $module_map1,
'conflicts' => $module_map2,
# additional user defined key/value pairs
# note we can only validate the key name, as the structure is user defined
':key' => { name => \&keyword, value => \&anything },
},
# note that the 1.0 spec doesn't specify optional or mandatory fields, what
# appears below is assumed from later specifications.
'1.0' => {
# 'header' => { mandatory => 1, value => \&header },
'name' => { mandatory => 1, value => \&string },
'version' => { mandatory => 1, value => \&version },
'license' => { mandatory => 1, value => \&license },
'generated_by' => { mandatory => 1, value => \&string },
'distribution_type' => { value => \&string },
'dynamic_config' => { value => \&boolean },
'requires' => $module_map1,
'recommends' => $module_map1,
'build_requires' => $module_map1,
'conflicts' => $module_map2,
# additional user defined key/value pairs
# note we can only validate the key name, as the structure is user defined
':key' => { name => \&keyword, value => \&anything },
},
);
#############################################################################
#Code #
#############################################################################
=head1 CLASS CONSTRUCTOR
=over
=item * new( data => $data [, spec => $version] )
The constructor must be passed a valid data structure.
Optionally you may also provide a specification version. This version is then
use to ensure that the given data structure meets the respective
specification definition. If no version is provided the module will attempt to
deduce the appropriate specification version from the data structure itself.
=back
=cut
# Construct a validator.  Accepts:
#   data => parsed META data structure (required)
#   spec => spec version to validate against (optional; deduced by
#           parse() from the meta-spec block when omitted)
sub new {
    my ($class, %args) = @_;

    my $self = {
        spec => $args{spec},
        data => $args{data},
    };

    return bless $self, $class;
}
=head1 METHODS
=head2 Main Methods
=over
=item * parse()
Using the given data structure provided with the constructor, attempts to
parse and validate according to the appropriate specification definition.
Returns 1 if any errors found, otherwise returns 0.
=item * errors()
Returns a list of the errors found during parsing.
=back
=cut
# Validate the data structure supplied to the constructor.  If no spec
# version was given, deduce it from the data's meta-spec block
# (defaulting to '1.0').  Returns 1 when any errors were recorded,
# 0 otherwise.
sub parse {
    my $self = shift;
    my $data = $self->{data};

    unless ($self->{spec}) {
        my $meta = $data->{'meta-spec'};
        $self->{spec} = ($meta && $meta->{'version'}) ? $meta->{'version'} : '1.0';
    }

    $self->check_map($definitions{$self->{spec}}, $data);

    return defined $self->{errors} ? 1 : 0;
}
# Return the list of validation errors collected so far (the empty
# list when none have been recorded).
sub errors {
    my $self = shift;
    my $list = $self->{errors} or return ();
    return @$list;
}
=head2 Check Methods
=over
=item * check_map($spec,$data)
Checks whether a map (or hash) part of the data structure conforms to the
appropriate specification definition.
=item * check_list($spec,$data)
Checks whether a list (or array) part of the data structure conforms to
the appropriate specification definition.
=back
=cut
# Validate one map (hash) level of the data structure against $spec.
# Records an error for every missing mandatory key, then walks each key
# present in the data: named keys dispatch to their value/map/list
# checker, a ':key' spec validates user-defined keys, and anything else
# is reported as unknown.  $self->{stack} tracks the current path so
# error messages can point at the offending node.
sub check_map {
    my ($self,$spec,$data) = @_;

    if(ref($spec) ne 'HASH') {
        $self->_error( "Unknown META.yml specification, cannot validate." );
        return;
    }

    if(ref($data) ne 'HASH') {
        $self->_error( "Expected a map structure from data string or file." );
        return;
    }

    # First pass: every mandatory key must be present.
    for my $key (keys %$spec) {
        next unless($spec->{$key}->{mandatory});
        next if(defined $data->{$key});
        push @{$self->{stack}}, $key;
        $self->_error( "Missing mandatory field, '$key'" );
        pop @{$self->{stack}};
    }

    # Second pass: validate every key actually present in the data.
    for my $key (keys %$data) {
        push @{$self->{stack}}, $key;
        if($spec->{$key}) {
            if($spec->{$key}{value}) {
                $spec->{$key}{value}->($self,$key,$data->{$key});
            } elsif($spec->{$key}{'map'}) {
                $self->check_map($spec->{$key}{'map'},$data->{$key});
            } elsif($spec->{$key}{'list'}) {
                $self->check_list($spec->{$key}{'list'},$data->{$key});
            }
        } elsif ($spec->{':key'}) {
            # User-defined key: validate the name, then its value shape.
            $spec->{':key'}{name}->($self,$key,$key);
            if($spec->{':key'}{value}) {
                $spec->{':key'}{value}->($self,$key,$data->{$key});
            } elsif($spec->{':key'}{'map'}) {
                $self->check_map($spec->{':key'}{'map'},$data->{$key});
            } elsif($spec->{':key'}{'list'}) {
                $self->check_list($spec->{':key'}{'list'},$data->{$key});
            }
        } else {
            $self->_error( "Unknown key, '$key', found in map structure" );
        }
        pop @{$self->{stack}};
    }
}
# Validate one list (array) level of the data structure against $spec.
# Reports an error for a mandatory-but-empty list, then dispatches each
# element to its value/map/list checker; a ':key' spec means the
# elements are themselves maps with user-defined keys.
sub check_list {
    my ($self,$spec,$data) = @_;

    if(ref($data) ne 'ARRAY') {
        $self->_error( "Expected a list structure" );
        return;
    }

    if(defined $spec->{mandatory}) {
        if(!defined $data->[0]) {
            $self->_error( "Missing entries from mandatory list" );
        }
    }

    for my $value (@$data) {
        push @{$self->{stack}}, $value;
        if(defined $spec->{value}) {
            $spec->{value}->($self,'list',$value);
        } elsif(defined $spec->{'map'}) {
            $self->check_map($spec->{'map'},$value);
        } elsif(defined $spec->{'list'}) {
            $self->check_list($spec->{'list'},$value);
        } elsif ($spec->{':key'}) {
            $self->check_map($spec,$value);
        } else {
            $self->_error( "Unknown value type, '$value', found in list structure" );
        }
        pop @{$self->{stack}};
    }
}
=head2 Validator Methods
=over
=item * header($self,$key,$value)
Validates that the YAML header is valid.
Note: No longer used as we now read the YAML data structure, not the file.
=item * url($self,$key,$value)
Validates that a given value is in an acceptable URL format
=item * urlspec($self,$key,$value)
Validates that the URL to a META.yml specification is a known one.
=item * string_or_undef($self,$key,$value)
Validates that the value is either a string or an undef value. Bit of a
catchall function for parts of the data structure that are completely user
defined.
=item * string($self,$key,$value)
Validates that a string exists for the given key.
=item * file($self,$key,$value)
Validate that a file is passed for the given key. This may be made more
thorough in the future. For now it acts like \&string.
=item * exversion($self,$key,$value)
Validates a list of versions, e.g. '<= 5, >=2, ==3, !=4, >1, <6, 0'.
=item * version($self,$key,$value)
Validates a single version string. Versions of the type '5.8.8' and '0.00_00'
are both valid. A leading 'v' like 'v1.2.3' is also valid.
=item * boolean($self,$key,$value)
Validates for a boolean value. Currently these values are '1', '0', 'true',
'false', however the latter 2 may be removed.
=item * license($self,$key,$value)
Validates that a value is given for the license. Returns 1 if an known license
type, or 2 if a value is given but the license type is not a recommended one.
=item * resource($self,$key,$value)
Validates that the given key is in CamelCase, to indicate a user defined
keyword.
=item * keyword($self,$key,$value)
Validates that key is in an acceptable format for the META.yml specification,
i.e. any in the character class [-_a-z].
For user defined keys, although not explicitly stated in the specifications
(v1.0 - v1.4), the convention is to precede the key with a pattern matching
qr{\Ax_}i. Following this any character from the character class [-_a-zA-Z]
can be used. This clarification has been added to v2.0 of the specification.
=item * identifier($self,$key,$value)
Validates that key is in an acceptable format for the META.yml specification,
for an identifier, i.e. any that matches the regular expression
qr/[a-z][a-z_]/i.
=item * module($self,$key,$value)
Validates that a given key is in an acceptable module name format, e.g.
'Test::CPAN::Meta::Version'.
=item * anything($self,$key,$value)
Usually reserved for user defined structures, allowing them to be considered
valid without a need for a specification definition for the structure.
=back
=cut
# Historic check that the raw file began with a valid YAML 1.0 header.
# (No longer used: the parsed structure is validated, not the file.)
sub header {
    my ($self, $key, $value) = @_;
    return 1 if defined $value && $value && $value =~ /^--- #YAML:1.0/;
    $self->_error( "file does not have a valid YAML header." );
    return 0;
}
# Split a URI into (scheme, authority, path, query, fragment) using the
# generic-URI regex (cf. RFC 3986 Appendix B, delimiters dropped from
# the captures).
sub _uri_split {
    my ($uri) = @_;
    return $uri =~ m,(?:([^:/?#]+):)?(?://([^/?#]*))?([^?#]*)(?:\?([^#]*))?(?:#(.*))?,;
}
# Validate that $value looks like an absolute URL: it must be a true
# value carrying both a scheme and an authority part.
sub url {
    my ($self, $key, $value) = @_;

    if ($value) {
        my ($scheme, $auth) = _uri_split($value);
        if (!$scheme) {
            $self->_error( "'$value' for '$key' does not have a URL scheme" );
            return 0;
        }
        if (!$auth) {
            $self->_error( "'$value' for '$key' does not have a URL authority" );
            return 0;
        }
        return 1;
    }

    # Undef (and any other false value) is reported as '<undef>'.
    $value = '<undef>';
    $self->_error( "'$value' for '$key' is not a valid URL." );
    return 0;
}
# Validate the meta-spec URL: it must be the canonical URL for the spec
# version being validated; a known-but-different spec URL triggers a
# version-mismatch error, anything else is simply unknown.
sub urlspec {
    my ($self, $key, $value) = @_;

    if (defined $value && $value) {
        return 1 if $known_specs{$self->{spec}} eq $value;
        if ($known_urls{$value}) {
            $self->_error( 'META.yml specification URL does not match version' );
            return 0;
        }
    }
    $self->_error( 'Unknown META.yml specification' );
    return 0;
}
# A string value must be defined and either true or the literal "0".
sub string {
    my ($self, $key, $value) = @_;
    if (defined $value) {
        return 1 if $value || $value =~ /^0$/;
    }
    $self->_error( "value is an undefined string" );
    return 0;
}
# Like string(), but an undefined value is explicitly acceptable
# (catch-all for user-defined parts of the structure).
sub string_or_undef {
    my ($self, $key, $value) = @_;
    return 1 if !defined $value;
    return 1 if $value || $value =~ /^0$/;
    $self->_error( "No string defined for '$key'" );
    return 0;
}
# Any defined value is accepted as a file name for now; this may grow
# stricter checks in the future.
sub file {
    my ($self, $key, $value) = @_;
    return 1 if defined $value;
    $self->_error( "No file defined for '$key'" );
    return 0;
}
# Validate a comma-separated list of version comparisons, e.g.
# ">= 1.2, != 1.5, < 2.0".  Every element must itself be a valid
# version; returns 1 only when all parts pass.
sub exversion {
    my ($self, $key, $value) = @_;

    if (defined $value && ($value || $value =~ /0/)) {
        my $all_ok = 1;
        for my $part (split(",", $value)) {
            $self->version($key, $part) or $all_ok = 0;
        }
        return $all_ok;
    }

    $value = '<undef>' unless defined $value;
    $self->_error( "'$value' for '$key' is not a valid version." );
    return 0;
}
# Validate a single version string, optionally prefixed by a comparison
# operator ("<", "<=", ">=", ">", "!=", "==") and/or a leading "v".
# Forms such as "5.8.8" and "0.00_00" are accepted.
sub version {
    my ($self, $key, $value) = @_;

    if (defined $value) {
        return 0 unless $value || $value =~ /0/;
        return 1 if $value =~ /^\s*((<|<=|>=|>|!=|==)\s*)?v?\d+((\.\d+((_|\.)\d+)?)?)/;
    }
    else {
        $value = '<undef>';
    }
    $self->_error( "'$value' for '$key' is not a valid version." );
    return 0;
}
# A boolean must be one of: 0, 1, "true", "false" (the word forms may
# be dropped in a future spec revision).
sub boolean {
    my ($self, $key, $value) = @_;

    if (defined $value) {
        return 1 if $value =~ /^(0|1|true|false)$/;
    }
    else {
        $value = '<undef>';
    }
    $self->_error( "'$value' for '$key' is not a boolean value." );
    return 0;
}
# Licence types blessed by the spec, mapped to their canonical URLs
# (undef where the type has no single URL).  NOTE(review): 'gpl'
# appears twice with the same value; harmless, kept as-is.
my %licenses = (
    'perl'         => 'http://dev.perl.org/licenses/',
    'gpl'          => 'http://www.opensource.org/licenses/gpl-license.php',
    'apache'       => 'http://apache.org/licenses/LICENSE-2.0',
    'artistic'     => 'http://opensource.org/licenses/artistic-license.php',
    'artistic2'    => 'http://opensource.org/licenses/artistic-license-2.0.php',
    'artistic-2.0' => 'http://opensource.org/licenses/artistic-license-2.0.php',
    'lgpl'         => 'http://www.opensource.org/licenses/lgpl-license.phpt',
    'bsd'          => 'http://www.opensource.org/licenses/bsd-license.php',
    'gpl'          => 'http://www.opensource.org/licenses/gpl-license.php',
    'mit'          => 'http://opensource.org/licenses/mit-license.php',
    'mozilla'      => 'http://opensource.org/licenses/mozilla1.1.php',
    'open_source'  => undef,
    'unrestricted' => undef,
    'restrictive'  => undef,
    'unknown'      => undef,
);

# Returns 1 for a known licence type, 2 for an unknown-but-present
# value, and 0 (with an error) when no licence is given at all.
sub license {
    my ($self, $key, $value) = @_;

    if (defined $value) {
        return 1 if $value && exists $licenses{$value};
        return 2 if $value;
    }
    else {
        $value = '<undef>';
    }
    $self->_error( "License '$value' is unknown" );
    return 0;
}
# A user-defined resource key must be purely alphabetic and contain at
# least one capital letter (the CamelCase convention for user keys).
sub resource {
    my ($self, $key) = @_;

    if (defined $key) {
        return 1 if $key && $key =~ /^[a-z]+$/i && $key =~ /[A-Z]/;
    }
    else {
        $key = '<undef>';
    }
    $self->_error( "Resource '$key' must be in CamelCase." );
    return 0;
}
# Spec-defined keys are lower-case within [-_a-z]; user-defined keys
# carry an "x_" prefix and may use mixed case (clarified in spec 2.0).
sub keyword {
    my ($self, $key) = @_;

    if (defined $key) {
        return 1 if $key && $key =~ /^([a-z][-_a-z]*)$/;       # spec defined
        return 1 if $key && $key =~ /^x_([a-z][-_a-z]*)$/i;    # user defined
    }
    else {
        $key = '<undef>';
    }
    $self->_error( "Key '$key' is not a legal keyword." );
    return 0;
}
# An identifier is alphabetic/underscore, at least two characters, any
# case (per spec 2.0).
sub identifier {
    my ($self, $key) = @_;

    if (defined $key) {
        return 1 if $key && $key =~ /^([a-z][_a-z]+)$/i;    # spec 2.0 defined
    }
    else {
        $key = '<undef>';
    }
    $self->_error( "Key '$key' is not a legal identifier." );
    return 0;
}
# A module name is one or more word-character segments joined by '::',
# e.g. 'Test::CPAN::Meta::Version'.
sub module {
    my ($self, $key) = @_;

    if (defined $key) {
        return 1 if $key && $key =~ /^[A-Za-z0-9_]+(::[A-Za-z0-9_]+)*$/;
    }
    else {
        $key = '<undef>';
    }
    $self->_error( "Key '$key' is not a legal module name." );
    return 0;
}
# Accept any structure at all (used for user-defined sections).
sub anything { return 1 }

# Record a validation error, annotated with the current data-structure
# path (when a stack exists) and the spec version in force.
sub _error {
    my ($self, $message) = @_;
    $message .= ' (' . join(' -> ', @{ $self->{stack} }) . ')' if $self->{stack};
    $message .= " [Validation: $self->{spec}]";
    push @{ $self->{errors} }, $message;
}
q( "Before software can be reusable it first has to be usable." - Ralph Johnson );
__END__
#----------------------------------------------------------------------------
=head1 BUGS, PATCHES & FIXES
There are no known bugs at the time of this release. However, if you spot a
bug or are experiencing difficulties that are not explained within the POD
documentation, please send an email to barbie@cpan.org or submit a bug to the
RT system (http://rt.cpan.org/Public/Dist/Display.html?Name=Test-CPAN-Meta).
However, it would help greatly if you are able to pinpoint problems or even
supply a patch.
Fixes are dependent upon their severity and my availability. Should a fix not
be forthcoming, please feel free to (politely) remind me.
=head1 AUTHOR
Barbie, <barbie@cpan.org>
for Miss Barbell Productions, L<http://www.missbarbell.co.uk>
=head1 COPYRIGHT AND LICENSE
Copyright (C) 2007-2015 Barbie for Miss Barbell Productions
This distribution is free software; you can redistribute it and/or
modify it under the Artistic Licence v2.
=cut

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,54 @@
use strict;
use warnings;

package Test::Deep::All;

use Scalar::Util ();
use Test::Deep::Cmp;

# Conjunction comparator: succeeds only when the value satisfies every
# expectation in the list.  Nested ::All objects are flattened at
# construction time so all(all(a, b), c) behaves like all(a, b, c).

sub init
{
    my ($self, @expects) = @_;

    my @flat;
    for my $e (@expects) {
        if (Scalar::Util::blessed($e) && $e->isa('Test::Deep::All')) {
            push @flat, @{ $e->{val} };   # absorb a nested All's parts
        }
        else {
            push @flat, $e;
        }
    }

    $self->{val} = \@flat;
}

# Try each expectation in turn, recording the (1-based) part number so
# diagnostics can point at the failing clause.
sub descend
{
    my ($self, $got) = @_;

    my $data = $self->data;
    my $part = 0;
    for my $cmp (@{ $self->{val} }) {
        $data->{index} = ++$part;
        return 0 unless Test::Deep::descend($got, $cmp);
    }

    return 1;
}

sub render_stack
{
    my ($self, $var, $data) = @_;

    my $total = scalar @{ $self->{val} };
    return "(Part $data->{index} of $total in $var)";
}

1;

View File

@@ -0,0 +1,63 @@
use strict;
use warnings;

package Test::Deep::Any;

use Scalar::Util ();
use Test::Deep::Cmp;

# Disjunction comparator: matches when the value satisfies at least one
# of the stored expectations.

# Store the expectations, flattening nested ::Any objects so that
# any(any(a, b), c) behaves like any(a, b, c).
sub init
{
  my $self = shift;

  my @list = map {
    (Scalar::Util::blessed($_) && $_->isa('Test::Deep::Any'))
      ? @{ $_->{val} }
      : $_
  } @_;

  $self->{val} = \@list;
}

# Succeed as soon as any expectation matches (uses the comparison cache).
sub descend
{
  my $self = shift;
  my $got = shift;

  foreach my $cmp (@{$self->{val}})
  {
    return 1 if Test::Deep::eq_deeply_cache($got, $cmp);
  }

  return 0;
}

# Describe the expected value for diagnostics, e.g. "Any of ( 1, 2 )".
sub renderExp
{
  my $self = shift;

  my @expect = map {; Test::Deep::wrap($_) } @{ $self->{val} };
  my $things = join(", ", map {$_->renderExp} @expect);

  return "Any of ( $things )";
}

sub diagnostics
{
  my $self = shift;
  my ($where, $last) = @_;

  my $got = $self->renderGot($last->{got});
  my $exp = $self->renderExp;

  my $diag = <<EOM;
Comparing $where with Any
got : $got
expected : $exp
EOM

  $diag =~ s/\n+$/\n/;
  return $diag;
}

# FIX: module previously ended with "4;"; any true value loads, but use
# the conventional "1;" like every other module in this distribution.
1;

View File

@@ -0,0 +1,36 @@
use strict;
use warnings;

package Test::Deep::Array;

use Test::Deep::Ref;

# Comparator for a plain array reference: checks length, then class
# (only under "snobby" mode), then the individual elements.

sub init
{
    my ($self, $expected) = @_;
    $self->{val} = $expected;
}

sub descend
{
    my ($self, $got) = @_;

    my $expected = $self->{val};

    Test::Deep::descend($got, Test::Deep::arraylength(scalar @$expected))
        or return 0;
    $self->test_class($got)
        or return 0;

    return Test::Deep::descend($got, Test::Deep::arrayelementsonly($expected));
}

# Element rendering handles its own arrow state, so never reset it here.
sub reset_arrow
{
    return 0;
}

1;

View File

@@ -0,0 +1,37 @@
use strict;
use warnings;

package Test::Deep::ArrayEach;

use Test::Deep::Cmp;
use Scalar::Util ();

# Comparator that applies a single expectation to every element of an
# array reference (Test::Deep's array_each()).

sub init
{
  my $self = shift;
  my $val = shift;

  $self->{val} = $val;
}

sub descend
{
  my $self = shift;
  my $got = shift;

  # Not an ARRAY ref at all: fail with a bare return (undef/empty list),
  # preserving the original's result in either context.
  return unless ref $got && Scalar::Util::reftype($got) eq 'ARRAY';

  # Build a same-length expectation array, each slot holding the shared
  # per-element expectation, and compare as a normal array.
  my $exp = [ ($self->{val}) x @$got ];

  return Test::Deep::descend($got, $exp);
}

sub renderExp
{
  my $self = shift;

  # FIX: removed an unused "my $exp = shift;" - this method only renders
  # the stored per-element expectation.
  return '[ ' . $self->SUPER::renderExp($self->{val}) . ', ... ]';
}

1;

View File

@@ -0,0 +1,54 @@
use strict;
use warnings;

package Test::Deep::ArrayElementsOnly;

use Test::Deep::Ref;

# Pairwise comparison of array elements; assumes lengths were already
# checked by Test::Deep::ArrayLength.

sub init
{
    my ($self, $expected) = @_;
    $self->{val} = $expected;
}

sub descend
{
    my ($self, $got) = @_;

    my $expected = $self->{val};
    my $data     = $self->data;

    my $i = 0;
    for my $want (@$expected) {
        $data->{index} = $i;   # remember position for diagnostics
        Test::Deep::descend($got->[$i], $want)
            or return 0;
        $i++;
    }

    return 1;
}

# Render "...->[index]", eliding "->" after the first subscript.
sub render_stack
{
    my ($self, $var, $data) = @_;

    $var .= "->" unless $Test::Deep::Stack->incArrow;
    $var .= "[$data->{index}]";

    return $var;
}

sub reset_arrow
{
    return 0;
}

1;

View File

@@ -0,0 +1,29 @@
use strict;
use warnings;

package Test::Deep::ArrayLength;

use Test::Deep::Ref;

# Checks that a value is an ARRAY ref with exactly the expected length.

sub init
{
    my ($self, $len) = @_;
    $self->{val} = $len;
}

sub descend
{
    my ($self, $got) = @_;

    return 0 unless $self->test_reftype($got, "ARRAY");
    return Test::Deep::descend($got, Test::Deep::arraylengthonly($self->{val}));
}

1;

View File

@@ -0,0 +1,60 @@
use strict;
use warnings;

package Test::Deep::ArrayLengthOnly;

use Test::Deep::Ref;

# Bare length comparison for an array ref (reftype verified upstream).

sub init
{
    my ($self, $len) = @_;
    $self->{val} = $len;
}

sub descend
{
    my ($self, $got) = @_;
    return @$got == $self->{val};
}

sub render_stack
{
    my ($self, $var, $data) = @_;
    return "array length of $var";
}

# Shared phrasing for both sides of a diagnostic message.
sub renderVal
{
    my ($self, $count) = @_;
    return "array with $count element(s)"
}

sub renderGot
{
    my ($self, $got) = @_;
    return $self->renderVal(scalar @$got);
}

sub renderExp
{
    my $self = shift;
    return $self->renderVal($self->{val});
}

1;

View File

@@ -0,0 +1,47 @@
use strict;
use warnings;

package Test::Deep::Blessed;

use Test::Deep::Cmp;
use Scalar::Util qw( blessed );

# Compares the class a value is blessed into against an expected class
# name, as a shallow comparison (unblessed values yield undef).

sub init
{
    my ($self, $class) = @_;
    $self->{val} = $class;
}

sub descend
{
    my ($self, $got) = @_;
    return Test::Deep::descend(blessed($got), Test::Deep::shallow($self->{val}));
}

sub render_stack
{
    my ($self, $var) = @_;
    return "blessed($var)"
}

sub renderGot
{
    my ($self, $got) = @_;
    $self->SUPER::renderGot(blessed($got));
}

1;

View File

@@ -0,0 +1,46 @@
use strict;
use warnings;

package Test::Deep::Boolean;

use Test::Deep::Cmp;

# Compares values by truthiness only; the expectation is normalised to
# 1/0 at construction time.

sub init
{
    my $self = shift;
    $self->{val} = shift() ? 1 : 0;
}

sub descend
{
    my ($self, $got) = @_;
    # True exactly when both sides agree in truthiness.
    return !( $got xor $self->{val} );
}

sub diag_message
{
    my ($self, $where) = @_;
    return "Comparing $where as a boolean";
}

sub renderExp
{
    my $self = shift;
    $self->renderGot($self->{val});
}

sub renderGot
{
    my ($self, $val) = @_;
    return ($val ? "true" : "false")." (".Test::Deep::render_val($val).")";
}

1;

View File

@@ -0,0 +1,78 @@
use strict;
use warnings;

package Test::Deep::Cache;

use Test::Deep::Cache::Simple;

# Two parallel stacks of comparison caches ("expects" and "normal"),
# each topped by a local layer that finish() can keep or discard.

sub new
{
    my $pkg = shift;

    my $self = bless {}, $pkg;
    $self->{$_} = [Test::Deep::Cache::Simple->new] for qw( expects normal );

    $self->local;   # open the first local layer

    return $self;
}

# Record a compared pair in the top layer of the current type.
sub add
{
    my $self = shift;
    $self->{ $self->type }->[-1]->add(@_);
}

# True if any layer of the current type already knows this pair.
sub cmp
{
    # go through all the caches to see if we know this one
    my $self = shift;

    my $type = $self->type;
    for my $cache (@{ $self->{$type} }) {
        return 1 if $cache->cmp(@_);
    }

    return 0
}

# Open a fresh layer on both stacks.
sub local
{
    my $self = shift;
    push(@{ $self->{$_} }, Test::Deep::Cache::Simple->new) for qw( expects normal );
}

# Close the top layer; merge its entries into the parent when $keep.
sub finish
{
    my ($self, $keep) = @_;

    for my $type (qw( expects normal )) {
        my $stack = $self->{$type};
        my $top   = pop @$stack;
        $stack->[-1]->absorb($top) if $keep;
    }
}

# Which cache family applies right now (flag set by Test::Deep).
sub type
{
    return $Test::Deep::Expects ? "expects" : "normal";
}

1;

View File

@@ -0,0 +1,83 @@
use strict;
use warnings;

package Test::Deep::Cache::Simple;

use Carp qw( confess );
use Scalar::Util qw( refaddr );

# A single-layer cache of "already compared" ref pairs, keyed by the
# sorted refaddrs of the two operands.

BEGIN
{
    # On perls whose Scalar::Util cannot export weaken (no weak-ref
    # support), install a no-op instead of importing it.
    if (grep /^weaken$/, @Scalar::Util::EXPORT_FAIL)
    {
        # we're running on a version of perl that has no weak refs, so we
        # just install a no-op sub for weaken instead of importing it
        *weaken = sub {};
    }
    else
    {
        Scalar::Util->import('weaken');
    }
}

sub new
{
    my $pkg = shift;

    my $self = bless {}, $pkg;

    return $self;
}

# Record a pair as compared.  Our copies of the refs are weakened so the
# cache does not keep the operands alive.
sub add
{
    my $self = shift;

    my ($d1, $d2) = @_;
    {
        local $SIG{__DIE__};

        local $@;

        # cannot weaken read only refs, no harm if we can't as they never
        # disappear
        eval{weaken($d1)};
        eval{weaken($d2)};
    }

    $self->{fn_get_key(@_)} = [$d1, $d2];
}

# True if the pair is cached AND both weak refs are still live; a stale
# entry (an operand was garbage-collected) is removed and reported as a
# miss.
sub cmp
{
    my $self = shift;

    my $key = fn_get_key(@_);
    my $pair = $self->{$key};

    # are both weakened refs still valid, if not delete this entry
    if (ref($pair->[0]) and ref($pair->[1]))
    {
        return 1;
    }
    else
    {
        delete $self->{$key};
        return 0;
    }
}

# Merge another layer's entries into this one (used by Cache::finish).
sub absorb
{
    my $self = shift;
    my $other = shift;

    @{$self}{keys %$other} = values %$other;
}

# Order-independent key built from the refaddrs of the operands.
sub fn_get_key
{
    return join(",", sort (map {refaddr($_)} @_));
}

1;

View File

@@ -0,0 +1,29 @@
use strict;
use warnings;

package Test::Deep::Class;

use Test::Deep::Cmp;

# Wrapper that toggles "snobby" class checking for its inner comparison.

sub init
{
    my ($self, $snobby, $expected) = @_;
    $self->{snobby} = $snobby;
    $self->{val}    = $expected;
}

sub descend
{
    my ($self, $got) = @_;

    # Localised so only this subtree sees the altered class policy.
    local $Test::Deep::Snobby = $self->{snobby};

    Test::Deep::wrap($self->{val})->descend($got);
}

1;

View File

@@ -0,0 +1,106 @@
use strict;
use warnings;

package Test::Deep::Cmp;

# Base class for all Test::Deep comparators.  Overloads & and | so
# comparators compose into all()/any() expressions, and "" so
# stringification is the stable overload::StrVal form.
use overload
    '&' => \&make_all,
    '|' => \&make_any,
    '""' => \&string,
    fallback => 1,
;

use Scalar::Util ();

# Importing this module from any Test::Deep::* package makes that
# package a subclass of Test::Deep::Cmp - a homegrown "use parent".
sub import
{
    my $pkg = shift;

    my $callpkg = caller();
    if ($callpkg =~ /^Test::Deep::/)
    {
        no strict 'refs';
        push @{$callpkg."::ISA"}, $pkg;
    }
}

# Constructor: bless an empty hash, delegate setup to init().
sub new
{
    my $pkg = shift;

    my $self = bless {}, $pkg;
    $self->init(@_);

    return $self;
}

# Default no-op initialiser; subclasses override.
sub init
{
}

sub make_all
{
    my ($e1, $e2) = @_;

    return Test::Deep::all($e1, $e2);
}

sub make_any
{
    my ($e1, $e2) = @_;

    return Test::Deep::any($e1, $e2);
}

# Ordering by stringified identity (used e.g. when sorting Set members).
sub cmp
{
    my ($a1, $a2, $rev) = @_;

    ($a1, $a2) = ($a2, $a1) if $rev;

    return (overload::StrVal($a1) cmp overload::StrVal($a2));
}

sub string
{
    my $self = shift;

    return overload::StrVal($self);
}

# Default stack rendering: pass the variable expression through as-is.
sub render_stack
{
    my $self = shift;
    my $var = shift;

    return $var;
}

sub renderExp
{
    my $self = shift;

    return $self->renderGot($self->{val});
}

sub renderGot
{
    my $self = shift;

    return Test::Deep::render_val(@_);
}

# By default a comparator resets the "->"-elision counter.
sub reset_arrow
{
    return 1;
}

# The data record for the current level of the comparison stack.
sub data
{
    my $self = shift;

    return $Test::Deep::Stack->getLast;
}

1;

View File

@@ -0,0 +1,58 @@
use strict;
use warnings;

package Test::Deep::Code;

use Test::Deep::Cmp;

# Comparator that delegates to a user-supplied coderef.  The coderef
# gets the "got" value and returns (bool, optional diagnostic string).

sub init
{
    my $self = shift;

    my $code = shift || die "No coderef supplied";

    $self->{code} = $code;
}

sub descend
{
    my $self = shift;
    my $got = shift;

    # Call the coderef and stash its diagnostic for later reporting.
    my ($ok, $diag) = &{$self->{code}}($got);

    $self->data->{diag} = $diag;

    return $ok;
}

sub diagnostics
{
    my $self = shift;
    my ($where, $last) = @_;

    my $error = $last->{diag};

    my $data = Test::Deep::render_val($last->{got});
    my $diag = <<EOM;
Ran coderef at $where on
$data
EOM
    # Include the coderef's own explanation when it gave one.
    if (defined($error))
    {
        $diag .= <<EOM;
and it said
$error
EOM
    }
    else
    {
        $diag .= <<EOM;
it failed but it didn't say why.
EOM
    }

    return $diag;
}

1;

View File

@@ -0,0 +1,104 @@
use strict;
use warnings;

package Test::Deep::Hash;

use Test::Deep::Ref;

# Comparator for a plain hash reference: key set first, then class
# (under "snobby" mode), then the values behind the keys.

sub init
{
  my $self = shift;

  my $val = shift;

  $self->{val} = $val;
}

sub descend
{
  my $self = shift;
  my $got = shift;

  my $exp = $self->{val};

  # FIX: removed an unused "my $data = $self->data;" - descend never
  # referenced it.
  return 0 unless Test::Deep::descend($got, $self->hash_keys($exp));

  return 0 unless $self->test_class($got);

  return Test::Deep::descend($got, $self->hash_elements($exp));
}

# Factory hooks, overridden by the Super/Sub subclasses below.
sub hash_elements
{
  require Test::Deep::HashElements;

  my $self = shift;

  return Test::Deep::HashElements->new(@_);
}

sub hash_keys
{
  require Test::Deep::HashKeys;

  my $self = shift;
  my $exp = shift;

  return Test::Deep::HashKeys->new(keys %$exp);
}

sub reset_arrow
{
  return 0;
}

# superhashof(): got must hold all expected keys but may have extras.
package Test::Deep::SuperHash;

use base 'Test::Deep::Hash';

sub hash_elements
{
  require Test::Deep::HashElements;

  my $self = shift;

  return Test::Deep::SuperHashElements->new(@_);
}

sub hash_keys
{
  require Test::Deep::HashKeys;

  my $self = shift;
  my $exp = shift;

  return Test::Deep::SuperHashKeys->new(keys %$exp);
}

# subhashof(): got may omit expected keys but must not add new ones.
package Test::Deep::SubHash;

use base 'Test::Deep::Hash';

sub hash_elements
{
  require Test::Deep::HashElements;

  my $self = shift;

  return Test::Deep::SubHashElements->new(@_);
}

sub hash_keys
{
  require Test::Deep::HashKeys;

  my $self = shift;
  my $exp = shift;

  return Test::Deep::SubHashKeys->new(keys %$exp);
}

1;

View File

@@ -0,0 +1,29 @@
use strict;
use warnings;

package Test::Deep::HashEach;

use Test::Deep::Cmp;

# Applies a single expectation to every value of a hash ref
# (Test::Deep's hash_each()).

sub init
{
    my ($self, $expected) = @_;
    $self->{val} = $expected;
}

sub descend
{
    my ($self, $got) = @_;

    # Expect the shared per-value comparator under every key of $got.
    my %exp = map { $_ => $self->{val} } keys %$got;

    return Test::Deep::descend($got, \%exp);
}

1;

View File

@@ -0,0 +1,94 @@
use strict;
use warnings;

package Test::Deep::HashElements;

use Test::Deep::Ref;

# Pairwise comparison of hash values.  A key present on only one side
# is compared against the $Test::Deep::DNE ("does not exist") marker.

sub init
{
    my $self = shift;

    my $val = shift;

    $self->{val} = $val;
}

sub descend
{
    my $self = shift;
    my $got = shift;

    my $exp = $self->{val};
    my $data = $self->data;

    # Iterate the keys of whichever side the subclass designates master.
    my $master = $self->getMaster($got, $exp);

    foreach my $key (keys %$master)
    {
        $data->{index} = $key;
        my $got_elem = exists $got->{$key} ? $got->{$key} : $Test::Deep::DNE;
        my $exp_elem = exists $exp->{$key} ? $exp->{$key} : $Test::Deep::DNE;
        next if Test::Deep::descend($got_elem, $exp_elem);

        return 0;
    }

    return 1;
}

# Default master: the hash with more keys, so no key escapes checking.
sub getMaster
{
    my $self = shift;

    my ($got, $exp) = @_;

    return keys %$got > keys %$exp ? $got : $exp;
}

# Render '...->{"key"}', eliding "->" after the first subscript.
sub render_stack
{
    my $self = shift;
    my ($var, $data) = @_;
    $var .= "->" unless $Test::Deep::Stack->incArrow;
    $var .= '{"'.quotemeta($data->{index}).'"}';

    return $var;
}

sub reset_arrow
{
    return 0;
}

# Superhash: only the expected keys are checked (extras in got ignored).
package Test::Deep::SuperHashElements;

use base 'Test::Deep::HashElements';

sub getMaster
{
    my $self = shift;

    my ($got, $exp) = @_;

    return $exp;
}

# Subhash: only the keys actually present in got are checked.
package Test::Deep::SubHashElements;

use base 'Test::Deep::HashElements';

sub getMaster
{
    my $self = shift;

    my ($got, $exp) = @_;

    return $got;
}

1;

View File

@@ -0,0 +1,68 @@
use strict;
use warnings;

package Test::Deep::HashKeys;

use Test::Deep::Ref;

# Checks that got is a HASH ref whose key set matches the expected keys.

sub init
{
    my $self = shift;

    my %keys;
    @keys{@_} = ();
    $self->{val} = \%keys;       # expected keys as a lookup hash
    $self->{keys} = [sort @_];   # sorted list, kept for stable output
    # NOTE(review): {keys} is not read again in this file - presumably
    # consumed by code elsewhere; confirm before removing.
}

sub descend
{
    my $self = shift;
    my $got = shift;

    my $exp = $self->{val};

    return 0 unless $self->test_reftype($got, "HASH");
    return Test::Deep::descend($got, $self->hashkeysonly($exp));
}

# Factory hook, overridden by the Super/Sub subclasses below.
sub hashkeysonly
{
    require Test::Deep::HashKeysOnly;

    my $self = shift;
    my $exp = shift;

    return Test::Deep::HashKeysOnly->new(keys %$exp)
}

# superhashof(): extra keys in got are tolerated.
package Test::Deep::SuperHashKeys;

use base 'Test::Deep::HashKeys';

sub hashkeysonly
{
    require Test::Deep::HashKeysOnly;

    my $self = shift;
    my $exp = shift;

    return Test::Deep::SuperHashKeysOnly->new(keys %$exp)
}

# subhashof(): missing expected keys are tolerated.
package Test::Deep::SubHashKeys;

use base 'Test::Deep::HashKeys';

sub hashkeysonly
{
    require Test::Deep::HashKeysOnly;

    my $self = shift;
    my $exp = shift;

    return Test::Deep::SubHashKeysOnly->new(keys %$exp)
}

1;

View File

@@ -0,0 +1,126 @@
use strict;
use warnings;

package Test::Deep::HashKeysOnly;

use Test::Deep::Ref;

# Compares the key sets of two hashes, reporting "Missing" and "Extra"
# keys.  Subclasses relax one direction for super/sub hash semantics.

sub init
{
  my $self = shift;

  my %keys;
  @keys{@_} = ();
  $self->{val} = \%keys;
  $self->{keys} = [sort @_];
}

sub descend
{
  my $self = shift;
  my $hash = shift;

  my $data = $self->data;
  my $exp = $self->{val};
  my %got;
  @got{keys %$hash} = ();

  my @missing;

  # FIX: iterate keys directly instead of each() with an unused $value,
  # and drop the never-used "my @extra;" declaration.
  foreach my $key (keys %$exp)
  {
    if (exists $got{$key})
    {
      delete $got{$key};   # accounted for; leftovers in %got are extras
    }
    else
    {
      push(@missing, $key);
    }
  }

  my @diags;
  if (@missing and (not $self->ignoreMissing))
  {
    push(@diags, "Missing: ".nice_list(\@missing));
  }

  if (%got and (not $self->ignoreExtra))
  {
    push(@diags, "Extra: ".nice_list([keys %got]));
  }

  if (@diags)
  {
    $data->{diag} = join("\n", @diags);
    return 0;
  }

  return 1;
}

sub diagnostics
{
  my $self = shift;
  my ($where, $last) = @_;

  # FIX: removed an unused Set/Bag $type computed here (apparently
  # copied from Test::Deep::Set); the message only needs the location.
  my $error = $last->{diag};
  my $diag = <<EOM;
Comparing hash keys of $where
$error
EOM

  return $diag;
}

# Quote and sort the keys for predictable diagnostics.
sub nice_list
{
  my $list = shift;

  return join(", ",
    (map {"'$_'"} sort @$list),
  );
}

sub ignoreMissing
{
  return 0;
}

sub ignoreExtra
{
  return 0;
}

# Superhash keys: got must contain every expected key; extras are fine.
package Test::Deep::SuperHashKeysOnly;

use base 'Test::Deep::HashKeysOnly';

sub ignoreMissing
{
  return 0;
}

sub ignoreExtra
{
  return 1;
}

# Subhash keys: got may omit expected keys but must not add new ones.
package Test::Deep::SubHashKeysOnly;

use base 'Test::Deep::HashKeysOnly';

sub ignoreMissing
{
  return 1;
}

sub ignoreExtra
{
  return 0;
}

1;

View File

@@ -0,0 +1,20 @@
use strict;
use warnings;

package Test::Deep::Ignore;

use Test::Deep::Cmp;

# Comparator that matches anything.  It carries no state, so a single
# shared instance is handed out by every call to new().
my $Singleton = __PACKAGE__->SUPER::new;

sub new { $Singleton }

sub descend { 1 }

1;

View File

@@ -0,0 +1,43 @@
use strict;
use warnings;

package Test::Deep::Isa;

use Test::Deep::Cmp;
use Scalar::Util;

# isa() check: blessed values use their isa() method, plain references
# are matched on ref() equality.

sub init
{
    my ($self, $class) = @_;
    $self->{val} = $class;
}

sub descend
{
    my ($self, $got) = @_;

    return Scalar::Util::blessed($got)
        ? $got->isa($self->{val})
        : ref($got) eq $self->{val};
}

sub diag_message
{
    my ($self, $where) = @_;
    return "Checking class of $where with isa()";
}

sub renderExp
{
    my $self = shift;
    return "blessed into or ref of type '$self->{val}'";
}

1;

View File

@@ -0,0 +1,24 @@
use strict;
use warnings;

package Test::Deep::ListMethods;

use base 'Test::Deep::Methods';

# Variant of Test::Deep::Methods that invokes each method in list
# context and wraps the results in an array ref for comparison.

sub call_method
{
    my $self = shift;
    my @results = $self->SUPER::call_method(@_);
    return \@results;
}

sub render_stack
{
    my $self = shift;
    my $inner = $self->SUPER::render_stack(@_);
    return "[$inner]";
}

1;

View File

@@ -0,0 +1,64 @@
use strict;
use warnings;

package Test::Deep::MM;

# FIX: init() calls confess() but Carp was never loaded, so a missing
# attribute name died with "Undefined subroutine ... confess" instead
# of a useful stack trace.
use Carp qw( confess );

# Minimal accessor generator: importing with a list of attribute names
# installs get$Name/set$Name accessors in the caller's ::Methods
# package and makes the caller inherit from it.  Lower-case entries are
# aliased verbatim from this package (e.g. new, init).
sub import
{
  my $self = shift;
  my ($pkg) = caller();
  my $mpkg = $pkg."::Methods";
  foreach my $attr (@_)
  {
    if ($attr =~ /^[a-z]/)
    {
      # Alias one of this package's own subs into the methods package.
      no strict 'refs';
      *{$mpkg."::$attr"} = \&{$attr};
    }
    else
    {
      my $get_name = $mpkg."::get$attr";
      my $set_name = $mpkg."::set$attr";
      my $get_sub = sub {
        return $_[0]->{$attr};
      };
      my $set_sub = sub {
        return $_[0]->{$attr} = $_[1];
      };
      {
        no strict 'refs';
        *$get_name = $get_sub;
        *$set_name = $set_sub;
        push(@{$pkg."::ISA"}, $mpkg);
      }
    }
  }
}

sub new
{
  my $pkg = shift;
  my $self = bless {}, $pkg;
  $self->init(@_);
  return $self;
}

# init consumes (Name => value) pairs, dispatching to the setters.
sub init
{
  my $self = shift;
  while (@_)
  {
    my $name = shift || confess("No name");
    my $method = "set$name";
    $self->$method(shift);
  }
}

1;

View File

@@ -0,0 +1,83 @@
use strict;
use warnings;

package Test::Deep::Methods;

use Test::Deep::Cmp;
use Scalar::Util;

# Comparator for methods(): calls a list of methods on the got object
# and deep-compares each result against its expectation.

sub init
{
    my $self = shift;

    # get them all into [$name,@args] => $value format
    my @methods;
    while (@_)
    {
        my $name = shift;
        my $value = shift;
        push(@methods,
            [
                ref($name) ? $name : [ $name ],
                $value
            ]
        );
    }

    $self->{methods} = \@methods;
}

sub descend
{
    my $self = shift;
    my $got = shift;

    my $data = $self->data;

    foreach my $method (@{$self->{methods}})
    {
        $data->{method} = $method;

        my ($call, $exp_res) = @$method;
        my ($name, @args) = @$call;

        local $@;

        my $got_res;
        # A missing method is treated as "does not exist" rather than a
        # fatal error; any other exception is re-thrown.
        if (! eval { $got_res = $self->call_method($got, $call); 1 }) {
            die $@ unless $@ =~ /\ACan't locate object method "\Q$name"/;
            $got_res = $Test::Deep::DNE;
        }

        next if Test::Deep::descend($got_res, $exp_res);

        return 0;
    }

    return 1;
}

# Scalar-context call; ListMethods overrides this for list context.
sub call_method
{
    my $self = shift;
    my ($got, $call) = @_;
    my ($name, @args) = @$call;

    return $got->$name(@args);
}

# Render "...->method(args)" for failure diagnostics.
sub render_stack
{
    my $self = shift;
    my ($var, $data) = @_;

    my $method = $data->{method};
    my ($call, $expect) = @$method;
    my ($name, @args) = @$call;

    my $args = @args ? "(".join(", ", @args).")" : "";
    $var .= "->$name$args";

    return $var;
}

1;

View File

@@ -0,0 +1,42 @@
use strict;
use warnings;

# this is for people who don't want Test::Builder to be loaded but want to
# use eq_deeply. It's a bit hacky...

package Test::Deep::NoTest;

# Visible to Test::Deep while it is being required; tells it to skip
# its Test::Builder integration.
our $NoTest;

{
    local $NoTest = 1;
    require Test::Deep;
}

# Delegate to Test::Deep's import with the caller rewritten and a
# trailing '_notest' flag so only the non-test interface is exported.
sub import {
    my $import = Test::Deep->can("import");
    # make the stack look like it should for use Test::Deep
    my $pkg = shift;
    unshift(@_, "Test::Deep");
    push @_, '_notest';
    goto &$import;
}

1;
=head1 NAME
Test::Deep::NoTest - Use Test::Deep outside of the testing framework
=head1 SYNOPSIS
use Test::Deep::NoTest;
if (eq_deeply($a, $b)) {
print "they were deeply equal\n";
}
=head1 DESCRIPTION
This exports all the same things as Test::Deep but it does not load
Test::Builder so it can be used in ordinary non-test situations.

View File

@@ -0,0 +1,62 @@
use strict;
use warnings;

package Test::Deep::None;

use Test::Deep::Cmp;

# Negated disjunction: matches only when the value matches none of the
# stored expectations.

sub init
{
    my ($self, @expects) = @_;

    # Flatten nested None objects; eval{} guards isa() against values
    # that are not blessed (plain scalars, unblessed refs).
    my @flat;
    for my $e (@expects) {
        if (eval { $e->isa('Test::Deep::None') }) {
            push @flat, @{ $e->{val} };
        }
        else {
            push @flat, $e;
        }
    }

    $self->{val} = \@flat;
}

sub descend
{
    my ($self, $got) = @_;

    for my $cmp (@{ $self->{val} }) {
        return 0 if Test::Deep::eq_deeply_cache($got, $cmp);
    }

    return 1;
}

sub renderExp
{
    my $self = shift;

    my @wrapped = map {; Test::Deep::wrap($_) } @{ $self->{val} };
    my $parts = join(", ", map { $_->renderExp } @wrapped);

    return "None of ( $parts )";
}

sub diagnostics
{
    my ($self, $where, $last) = @_;

    my $got = $self->renderGot($last->{got});
    my $exp = $self->renderExp;

    my $diag = <<EOM;
Comparing $where with None
got : $got
expected : $exp
EOM

    $diag =~ s/\n+$/\n/;
    return $diag;
}

1;

View File

@@ -0,0 +1,80 @@
use strict;
use warnings;

package Test::Deep::Number;

use Test::Deep::Cmp;

use Scalar::Util;

# Numeric comparison, optionally within a +/- tolerance.  The raw
# (pre-numification) string is kept for diagnostics.

sub init
{
    my $self = shift;

    $self->{val} = shift(@_) + 0;   # force the expectation numeric
    $self->{tolerance} = shift;     # undef means exact ==
}

sub descend
{
    my $self = shift;
    my $got = shift;

    # Remember the raw value, then numify quietly.
    $self->data->{got_string} = $got;
    {
        no warnings 'numeric';
        $got += 0;
    }

    $self->data->{got} = $got;

    if (defined(my $tolerance = $self->{tolerance}))
    {
        return abs($got - $self->{val}) <= $tolerance;
    }
    else
    {
        return $got == $self->{val};
    }
}

sub diag_message
{
    my $self = shift;

    my $where = shift;

    return "Comparing $where as a number";
}

# Show "number (original string)" when numification changed the value.
sub renderGot
{
    my $self = shift;

    my $val = shift;

    my $got_string = $self->data->{got_string};
    if ("$val" ne "$got_string")
    {
        $got_string = $self->SUPER::renderGot($got_string);
        return "$val ($got_string)"
    }
    else
    {
        return $val;
    }
}

sub renderExp
{
    my $self = shift;

    my $exp = $self->{val};
    if (defined(my $tolerance = $self->{tolerance}))
    {
        return "$exp +/- $tolerance";
    }
    else
    {
        return $exp;
    }
}

1;

View File

@@ -0,0 +1,42 @@
use strict;
use warnings;

package Test::Deep::Obj;

use Test::Deep::Cmp;
use Scalar::Util;

# obj_isa(): the value must be blessed and must isa() the expected class.

sub init
{
    my ($self, $class) = @_;
    $self->{val} = $class;
}

sub descend
{
    my ($self, $got) = @_;

    return Scalar::Util::blessed($got)
        && $got->isa($self->{val});
}

sub diag_message
{
    my ($self, $where) = @_;
    return "Checking class of $where with isa()";
}

sub renderExp
{
    my $self = shift;
    return "blessed into '$self->{val}' or subclass of '$self->{val}'";
}

1;

View File

@@ -0,0 +1,36 @@
use strict;
use warnings;

package Test::Deep::Ref;

use Test::Deep::Cmp;
use Scalar::Util qw( blessed );

# Shared helpers for reference-based comparators.

# Under "snobby" mode, got's class must equal the class of the expected
# reference; otherwise class differences are ignored entirely.
sub test_class
{
    my ($self, $got) = @_;

    return 1 unless $Test::Deep::Snobby;

    my $exp = $self->{val};
    return Test::Deep::descend($got, Test::Deep::blessed(blessed($exp)));
}

# Check got's underlying reftype against the one supplied.
sub test_reftype
{
    my ($self, $got, $reftype) = @_;
    return Test::Deep::descend($got, Test::Deep::reftype($reftype));
}

1;

View File

@@ -0,0 +1,46 @@
use strict;
use warnings;

package Test::Deep::RefType;

use Test::Deep::Cmp;
use Scalar::Util qw( reftype );

# Shallow comparison of Scalar::Util::reftype($got) with an expectation.

sub init
{
    my ($self, $expected) = @_;
    $self->{val} = $expected;
}

sub descend
{
    my ($self, $got) = @_;
    return Test::Deep::descend(reftype($got), Test::Deep::shallow($self->{val}));
}

sub render_stack
{
    my ($self, $var) = @_;
    return "reftype($var)";
}

sub renderGot
{
    my ($self, $got) = @_;
    $self->SUPER::renderGot(reftype($got));
}

1;

View File

@@ -0,0 +1,102 @@
use strict;
use warnings;

package Test::Deep::Regexp;

use Test::Deep::Cmp;
use Test::Deep::RegexpMatches;

# Comparator for re(): matches the value against a pattern and can
# additionally compare the captured groups against an expectation.

sub init
{
    my $self = shift;

    my $val = shift;

    # Promote a plain string to a compiled pattern.
    $val = ref $val ? $val : qr/$val/;

    $self->{val} = $val;

    if (my $matches = shift)
    {
        $self->{matches} = Test::Deep::regexpmatches($matches, $val);
        # Optional flags string; only "g" is recognised in descend().
        $self->{flags} = shift || "";
    }
}

sub descend
{
    my $self = shift;
    my $got = shift;

    my $re = $self->{val};
    if (my $match_exp = $self->{matches})
    {
        my $flags = $self->{flags};

        # Collect captures (globally for /g), then compare them as an
        # array against the stored match expectation.
        my @match_got;
        if ($flags eq "g")
        {
            @match_got = $got =~ /$re/g;
        }
        else
        {
            @match_got = $got =~ /$re/;
        }

        if (@match_got)
        {
            return Test::Deep::descend(\@match_got, $match_exp);
        }
        else
        {
            return 0;
        }
    }
    else
    {
        return ($got =~ $re) ? 1 : 0;
    }
}

sub diag_message
{
    my $self = shift;

    my $where = shift;

    return "Using Regexp on $where";
}

# NOTE(review): reads $self->{regex}, which is never assigned in this
# package (RegexpMatches sets its own {regex}) - looks vestigial;
# confirm before relying on it.
sub render_stack1
{
    my $self = shift;

    my $stack = shift;

    return "($stack =~ $self->{regex})";
}

sub renderExp
{
    my $self = shift;

    return "$self->{val}";
}

# Prefer an overloaded object's own stringification in diagnostics.
sub renderGot
{
    my $self = shift;

    my $got = shift;

    if (defined (my $class = Scalar::Util::blessed($got)))
    {
        my $ostr = qq{$got};
        if ($ostr ne overload::StrVal($got))
        {
            return qq{'$ostr' (instance of $class)};
        }
    }

    return Test::Deep::render_val($got);
}

1;

View File

@@ -0,0 +1,51 @@
use strict;
use warnings;

package Test::Deep::RegexpMatches;

use Test::Deep::Array;

use base 'Test::Deep::Array';

use Scalar::Util qw( blessed );

# Compares the list captured by a regexp match against an expectation,
# wrapping it as an array comparator when it is not already one.

sub init
{
    my $self = shift;

    my $val = shift;

    $val = Test::Deep::array($val) unless
        blessed($val) and $val->isa("Test::Deep::Cmp");

    $self->{val} = $val;

    # Kept only so render_stack can show which pattern produced the list.
    $self->{regex} = shift;
}

sub descend
{
    my $self = shift;

    my $got = shift;

    return Test::Deep::descend($got, $self->{val});
}

sub render_stack
{
    my $self = shift;
    my $stack = shift;

    $stack = "[$stack =~ $self->{regex}]";

    return $stack;

    # return $self->SUPER::render_stack($stack);
}

sub reset_arrow
{
    return 1;
}

1;

View File

@@ -0,0 +1,47 @@
use strict;
use warnings;

package Test::Deep::RegexpOnly;

use Test::Deep::Cmp;
use Scalar::Util qw( blessed );

# Bare pattern-match comparator: does the got value match the stored
# regex?  No capture comparison, no reftype checks.

sub init
{
  my $self = shift;
  my $val = shift;

  # Promote a plain string to a compiled pattern up front.
  $val = ref $val ? $val : qr/$val/;

  $self->{val} = $val;
}

sub descend
{
  my $self = shift;
  my $got = shift;

  # FIX: removed an unused "my $re = $self->{val};" - the match reads
  # the attribute directly.
  return $got =~ $self->{val} ? 1 : 0;
}

sub diag_message
{
  my $self = shift;

  my $where = shift;

  return "Using Regexp on $where";
}

sub renderExp
{
  my $self = shift;

  return "$self->{val}";
}

1;

View File

@@ -0,0 +1,43 @@
use strict;
use warnings;

package Test::Deep::RegexpRef;

use Test::Deep::Ref;

use Test::Deep::RegexpVersion;

# Comparator for a qr// reference: verifies got really is a Regexp ref
# (handling old and new perls differently), then compares the
# stringified patterns.

sub init
{
    my $self = shift;

    my $val = shift;

    $self->{val} = $val;
}

sub descend
{
    my $self = shift;
    my $got = shift;

    my $exp = $self->{val};

    if ($Test::Deep::RegexpVersion::OldStyle) {
        # Pre-5.11: qr refs are SCALAR refs blessed into "Regexp".
        return 0 unless $self->test_class($got, "Regexp");
        return 0 unless $self->test_reftype($got, "SCALAR");
    } else {
        # 5.11+: qr refs have their own REGEXP reftype.
        return 0 unless $self->test_reftype($got, "REGEXP");
    }

    return Test::Deep::descend($got, Test::Deep::regexprefonly($exp));
}

# Stringify the pattern for diagnostics.
sub renderGot
{
    my $self = shift;

    return shift()."";
}

1;

View File

@@ -0,0 +1,43 @@
use strict;
use warnings;

package Test::Deep::RegexpRefOnly;

use Test::Deep::Ref;

# Compares two Regexp refs by string equality of their stringified
# forms (pattern plus flags).

sub init
{
    my ($self, $expected) = @_;
    $self->{val} = $expected;
}

sub descend
{
    my ($self, $got) = @_;
    return $got eq $self->{val};
}

sub render_stack
{
    my ($self, $var, $data) = @_;
    return "m/$var/";
}

# Force the pattern into its string form for diagnostics.
sub renderGot
{
    my $self = shift;
    my $got  = shift;
    return $got . "";
}

1;

View File

@@ -0,0 +1,11 @@
use strict;
use warnings;

package Test::Deep::RegexpVersion;

# Older versions of Perl treated Regexp refs as opaque scalars blessed
# into the "Regexp" class. Several bits of code need this so we
# centralise the test for that kind of version.
# True for perls before 5.11 (see Test::Deep::RegexpRef, which checks
# SCALAR+class on old perls and REGEXP reftype on new ones).
our $OldStyle = ($] < 5.011);

1;

View File

@@ -0,0 +1,29 @@
use strict;
use warnings;

package Test::Deep::ScalarRef;

use Test::Deep::Ref;

# Comparator for scalar references: class, reftype, then the referent.

sub init
{
    my ($self, $expected) = @_;
    $self->{val} = $expected;
}

sub descend
{
    my ($self, $got) = @_;

    my $exp = $self->{val};

    $self->test_class($got) or return 0;
    $self->test_reftype($got, Scalar::Util::reftype($exp)) or return 0;

    return Test::Deep::descend($got, Test::Deep::scalarrefonly($exp));
}

1;

View File

@@ -0,0 +1,36 @@
use strict;
use warnings;

package Test::Deep::ScalarRefOnly;

use Test::Deep::Cmp;

# Compares the values behind two scalar references.

sub init
{
    my ($self, $expected) = @_;
    $self->{val} = $expected;
}

sub descend
{
    my ($self, $got) = @_;

    my $exp = $self->{val};
    return Test::Deep::descend($$got, $$exp);
}

# Render the dereference as "${var}".
sub render_stack
{
    my ($self, $var, $data) = @_;
    return "\${$var}";
}

1;

View File

@@ -0,0 +1,193 @@
use strict;
use warnings;

package Test::Deep::Set;

use Test::Deep::Cmp;

# Bag/set comparison.  IgnoreDupes selects set (true) vs bag (false)
# semantics; SubSup ("sub", "sup", "none" or "") relaxes which of the
# missing/extra conditions count as failures.

sub init
{
    my $self = shift;

    $self->{IgnoreDupes} = shift;
    $self->{SubSup} = shift;

    $self->{val} = [];

    $self->add(@_);
}

sub descend
{
    my $self = shift;
    my $d1 = shift;

    my $d2 = $self->{val};

    my $IgnoreDupes = $self->{IgnoreDupes};

    my $data = $self->data;

    my $SubSup = $self->{SubSup};

    my $type = $IgnoreDupes ? "Set" : "Bag";

    my $diag;

    if (ref $d1 ne 'ARRAY')
    {
        my $got = Test::Deep::render_val($d1);
        $diag = <<EOM;
got : $got
expect : An array to use as a $type
EOM
    }

    if (not $diag)
    {
        # Greedy matching: for each expectation scan the remaining got
        # elements backwards (so splice is index-safe) and remove the
        # ones that match.
        my @got = @$d1;
        my @found;
        my @missing;
        foreach my $expect (@$d2)
        {
            my $found = 0;

            for (my $i = $#got; $i >= 0; $i--)
            {
                if (Test::Deep::eq_deeply_cache($got[$i], $expect))
                {
                    $found = 1;
                    push(@found, $expect);
                    splice(@got, $i, 1);

                    # Bag: one expectation consumes one element.
                    # Set: keep going, soaking up every duplicate.
                    last unless $IgnoreDupes;
                }
            }

            push(@missing, $expect) unless $found;
        }

        my @diags;
        if (@missing and $SubSup ne "sub" && $SubSup ne "none")
        {
            push(@diags, "Missing: ".nice_list(\@missing));
        }

        if (@got and $SubSup ne "sup" && $SubSup ne "none")
        {
            my $got = __PACKAGE__->new($IgnoreDupes, "", @got);
            push(@diags, "Extra: ".nice_list($got->{val}));
        }

        if (@found and $SubSup eq "none")
        {
            my $found = __PACKAGE__->new($IgnoreDupes, "", @found);
            push(@diags, "Extra: ".nice_list($found->{val}));
        }

        $diag = join("\n", @diags);
    }

    if ($diag)
    {
        $data->{diag} = $diag;

        return 0;
    }
    else
    {
        return 1;
    }
}

sub diagnostics
{
    my $self = shift;
    my ($where, $last) = @_;

    # Reconstruct the human-readable comparison name from the flags.
    my $type = $self->{IgnoreDupes} ? "Set" : "Bag";
    $type = "Sub$type" if $self->{SubSup} eq "sub";
    $type = "Super$type" if $self->{SubSup} eq "sup";
    $type = "NoneOf" if $self->{SubSup} eq "none";

    my $error = $last->{diag};
    my $diag = <<EOM;
Comparing $where as a $type
$error
EOM

    return $diag;
}

sub add
{
    # this takes an array.
    # For each element A of the array, it looks for an element, B, already in
    # the set which are deeply equal to A. If no matching B is found then A is
    # added to the set. If a B is found and IgnoreDupes is true, then A will
    # be discarded, if IgnoreDupes is false, then B will be added to the set
    # again.

    my $self = shift;

    my @array = @_;

    my $IgnoreDupes = $self->{IgnoreDupes};

    my $already = $self->{val};

    local $Test::Deep::Expects = 1;
    foreach my $new_elem (@array)
    {
        my $want_push = 1;
        my $push_this = $new_elem;

        foreach my $old_elem (@$already)
        {
            if (Test::Deep::eq_deeply($new_elem, $old_elem))
            {
                $push_this = $old_elem;
                $want_push = ! $IgnoreDupes;
                last;
            }
        }
        push(@$already, $push_this) if $want_push;
    }

    # so we can compare 2 Test::Deep::Set objects using array comparison
    @$already = sort {(defined $a ? $a : "") cmp (defined $b ? $b : "")} @$already;
}

# Human-readable rendering: scalars listed individually, references
# summarised by count ("N references").
sub nice_list
{
    my $list = shift;

    my @scalars = grep ! ref $_, @$list;
    my $refs = grep ref $_, @$list;

    my @ref_string = "$refs reference" if $refs;
    $ref_string[0] .= "s" if $refs > 1;

    # sort them so we can predict the diagnostic output
    return join(", ",
        (map {Test::Deep::render_val($_)} sort {(defined $a ? $a : "") cmp (defined $b ? $b : "")} @scalars),
        @ref_string
    );
}

# Compare two Set objects with matching dupe policy.
sub compare
{
    my $self = shift;

    my $other = shift;

    return 0 if $self->{IgnoreDupes} != $other->{IgnoreDupes};

    # this works (kind of) because the arrays are sorted
    return Test::Deep::descend($self->{val}, $other->{val});
}

1;

View File

@@ -0,0 +1,51 @@
use strict;
use warnings;

package Test::Deep::Shallow;

use Test::Deep::Cmp;

use Scalar::Util qw( refaddr );

# Shallow (non-recursive) equality: undef only matches undef,
# references must be the very same referent, plain scalars compare
# with string eq.

sub init
{
    my ($self, $expected) = @_;
    $self->{val} = $expected;
}

sub descend
{
    my ($self, $got) = @_;

    my $exp = $self->{val};

    return 1 if !defined $got and !defined $exp;          # both undef
    return 0 if defined $got xor defined $exp;            # one-sided undef
    return refaddr($got) == refaddr($exp) if ref $got and ref $exp;
    return 0 if ref $got xor ref $exp;                    # ref vs plain
    return $got eq $exp;
}

1;

View File

@@ -0,0 +1,85 @@
use strict;
use warnings;

package Test::Deep::Stack;

use Carp qw( confess );
use Scalar::Util;

# The comparison stack: one data record per level of descent, used to
# rebuild a variable expression (e.g. hash/array subscripts) describing
# where a comparison failed.  new/init and the getStack/setStack and
# getArrow/setArrow accessors are generated by Test::Deep::MM.
use Test::Deep::MM qw( new init Stack Arrow );

sub init
{
    my $self = shift;

    $self->SUPER::init(@_);

    $self->setStack([]) unless $self->getStack;
}

sub push
{
    my $self = shift;

    push(@{$self->getStack}, @_);
}

sub pop
{
    my $self = shift;

    return pop @{$self->getStack};
}

# Walk the stack outside-in, letting each comparator render its own
# piece of the expression onto $var.
sub render
{
    my $self = shift;
    my $var = shift;

    my $stack = $self->getStack;

    # Arrow counts consecutive subscripts so renderers can elide the
    # "->" between them (Perl allows $h->{a}{b} after the first arrow).
    $self->setArrow(0);

    foreach my $data (@$stack)
    {
        my $exp = $data->{exp};
        if (Scalar::Util::blessed($exp) and $exp->isa("Test::Deep::Cmp"))
        {
            $var = $exp->render_stack($var, $data);

            $self->setArrow(0) if $exp->reset_arrow;
        }
        else
        {
            confess "Don't know how to render '$exp'";
        }
    }

    return $var;
}

sub getLast
{
    my $self = shift;

    return $self->getStack->[-1];
}

# Post-increment the arrow counter, returning the pre-increment value.
sub incArrow
{
    my $self = shift;

    my $a = $self->getArrow;
    $self->setArrow($a + 1);

    return $a;
}

# Number of levels currently on the stack.
sub length
{
    my $self = shift;

    return @{$self->getStack} + 0;
}

1;

View File

@@ -0,0 +1,34 @@
use strict;
use warnings;

package Test::Deep::String;

use Test::Deep::Cmp;

# String-context comparison: the got value is stringified, stored for
# diagnostics, then compared with eq.

sub init
{
    my $self = shift;
    $self->{val} = shift;
}

sub descend
{
    my $self = shift;
    my $got  = shift() . "";   # force string context

    $self->data->{got} = $got;

    return $got eq $self->{val};
}

sub diag_message
{
    my ($self, $where) = @_;
    return "Comparing $where as a string";
}

1;

View File

@@ -0,0 +1,216 @@
use 5.008001;
use strict;
use warnings;

package Test::FailWarnings;
# ABSTRACT: Add test failures if warnings are caught

our $VERSION = '0.008'; # VERSION

use Test::More 0.86;
use Cwd qw/getcwd/;
use File::Spec;
use Carp;

# Package-level configuration, set from import() arguments.
our $ALLOW_DEPS = 0;    # when true, only warnings from t/xt/lib/blib fail
our @ALLOW_FROM = ();   # packages whose warnings are let through

my $ORIG_DIR = getcwd(); # cache in case handler runs after a chdir

# import(-allow_deps => bool, -allow_from => $pkg | \@pkgs)
# Validates the options and installs the global __WARN__ handler.
sub import {
    my ( $class, @args ) = @_;
    croak("import arguments must be key/value pairs")
      unless @args % 2 == 0;
    my %opts = @args;
    $ALLOW_DEPS = $opts{'-allow_deps'};
    if ( $opts{'-allow_from'} ) {
        @ALLOW_FROM =
          ref $opts{'-allow_from'} ? @{ $opts{'-allow_from'} } : $opts{'-allow_from'};
    }
    $SIG{__WARN__} = \&handler;
}

# The __WARN__ handler: turns a warning into a failing Test::More ok()
# plus a diag() carrying the warning text and its origin.
sub handler {
    my $msg = shift;
    $msg = '' unless defined $msg;
    chomp $msg;
    my ( $package, $filename, $line ) = _find_source();

    # shortcut if ignoring dependencies and warning did not
    # come from something local
    if ($ALLOW_DEPS) {
        $filename = File::Spec->abs2rel( $filename, $ORIG_DIR )
          if File::Spec->file_name_is_absolute($filename);
        return if $filename !~ /^(?:t|xt|lib|blib)/;
    }

    return if grep { $package eq $_ } @ALLOW_FROM;

    # Append a location only when perl did not already include one.
    if ( $msg !~ m/at .*? line \d/ ) {
        chomp $msg;
        $msg = "'$msg' at $filename line $line.";
    }
    else {
        $msg = "'$msg'";
    }
    my $builder = Test::More->builder;
    $builder->ok( 0, "Test::FailWarnings should catch no warnings" )
      or $builder->diag("Warning was $msg");
}

# Walk up the call stack past Carp/warnings frames to find where the
# warning really originated; falls back to the last real frame when
# the stack runs out.
sub _find_source {
    my $i = 1;
    while (1) {
        my ( $pkg, $filename, $line ) = caller( $i++ );
        return caller( $i - 2 ) unless defined $pkg;
        next if $pkg =~ /^(?:Carp|warnings)/;
        return ( $pkg, $filename, $line );
    }
}

1;
# vim: ts=4 sts=4 sw=4 et:
__END__
=pod
=encoding utf-8
=head1 NAME
Test::FailWarnings - Add test failures if warnings are caught
=head1 VERSION
version 0.008
=head1 SYNOPSIS
Test file:
use strict;
use warnings;
use Test::More;
use Test::FailWarnings;
ok( 1, "first test" );
ok( 1 + "lkadjaks", "add non-numeric" );
done_testing;
Output:
ok 1 - first test
not ok 2 - Test::FailWarnings should catch no warnings
# Failed test 'Test::FailWarnings should catch no warnings'
# at t/bin/main-warn.pl line 7.
# Warning was 'Argument "lkadjaks" isn't numeric in addition (+) at t/bin/main-warn.pl line 7.'
ok 3 - add non-numeric
1..3
# Looks like you failed 1 test of 3.
=head1 DESCRIPTION
This module hooks C<$SIG{__WARN__}> and converts warnings to L<Test::More>
C<fail()> calls. It is designed to be used with C<done_testing>, when you
don't need to know the test count in advance.
Just as with L<Test::NoWarnings>, this does not catch warnings if other things
localize C<$SIG{__WARN__}>, as this is designed to catch I<unhandled> warnings.
=for Pod::Coverage handler
=head1 USAGE
=head2 Overriding C<$SIG{__WARN__}>
On C<import>, C<$SIG{__WARN__}> is replaced with
C<Test::FailWarnings::handler>.
use Test::FailWarnings; # global
If you don't want global replacement, require the module instead and localize
in whatever scope you want.
require Test::FailWarnings;
{
local $SIG{__WARN__} = \&Test::FailWarnings::handler;
# ... warnings will issue fail() here
}
When the handler reports on the source of the warning, it will look past
any calling packages starting with C<Carp> or C<warnings> to try to detect
the real origin of the warning.
=head2 Allowing warnings from dependencies
If you want to ignore failures from outside your own code, you can set
C<$Test::FailWarnings::ALLOW_DEPS> to a true value. You can
do that on the C<use> line with C<< -allow_deps >>.
use Test::FailWarnings -allow_deps => 1;
When true, warnings will only be thrown if they appear to originate from a filename
matching C<< qr/^(?:t|xt|lib|blib)/ >>
=head2 Allowing warnings from specific modules
If you want to white-list specific modules only, you can add their package
names to C<@Test::FailWarnings::ALLOW_FROM>. You can do that on the C<use> line
with C<< -allow_from >>.
use Test::FailWarnings -allow_from => [ qw/Annoying::Module/ ];
=head1 SEE ALSO
=over 4
=item *
L<Test::NoWarnings> -- catches warnings and reports in an C<END> block. Not (yet) friendly with C<done_testing>.
=item *
L<Test::Warnings> -- a replacement for Test::NoWarnings that works with done_testing
=item *
L<Test::Warn> -- test for warnings without triggering failures from this module
=back
=for :stopwords cpan testmatrix url annocpan anno bugtracker rt cpants kwalitee diff irc mailto metadata placeholders metacpan
=head1 SUPPORT
=head2 Bugs / Feature Requests
Please report any bugs or feature requests through the issue tracker
at L<https://github.com/dagolden/Test-FailWarnings/issues>.
You will be notified automatically of any progress on your issue.
=head2 Source Code
This is open source software. The code repository is available for
public review and contribution under the terms of the license.
L<https://github.com/dagolden/Test-FailWarnings>
git clone https://github.com/dagolden/Test-FailWarnings.git
=head1 AUTHOR
David Golden <dagolden@cpan.org>
=head1 COPYRIGHT AND LICENSE
This software is Copyright (c) 2013 by David Golden.
This is free software, licensed under:
The Apache License, Version 2.0, January 2004
=cut

View File

@@ -0,0 +1,618 @@
package Test::Harness;
use 5.006;
use strict;
use warnings;
use constant IS_WIN32 => ( $^O =~ /^(MS)?Win32$/ );
use constant IS_VMS => ( $^O eq 'VMS' );
use TAP::Harness ();
use TAP::Parser::Aggregator ();
use TAP::Parser::Source ();
use TAP::Parser::SourceHandler::Perl ();
use Text::ParseWords qw(shellwords);
use Config;
use base 'Exporter';
# $ML $Last_ML_Print
BEGIN {
eval q{use Time::HiRes 'time'};
our $has_time_hires = !$@;
}
=head1 NAME
Test::Harness - Run Perl standard test scripts with statistics
=head1 VERSION
Version 3.42
=cut
our $VERSION = '3.42';
# Backwards compatibility for exportable variable names.
*verbose = *Verbose;
*switches = *Switches;
*debug = *Debug;
$ENV{HARNESS_ACTIVE} = 1;
$ENV{HARNESS_VERSION} = $VERSION;
END {
# For VMS.
delete $ENV{HARNESS_ACTIVE};
delete $ENV{HARNESS_VERSION};
}
our @EXPORT = qw(&runtests);
our @EXPORT_OK = qw(&execute_tests $verbose $switches);
our $Verbose = $ENV{HARNESS_VERBOSE} || 0;
our $Debug = $ENV{HARNESS_DEBUG} || 0;
our $Switches = '-w';
our $Columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
$Columns--; # Some shells have trouble with a full line of text.
our $Timer = $ENV{HARNESS_TIMER} || 0;
our $Color = $ENV{HARNESS_COLOR} || 0;
our $IgnoreExit = $ENV{HARNESS_IGNORE_EXIT} || 0;
=head1 SYNOPSIS
use Test::Harness;
runtests(@test_files);
=head1 DESCRIPTION
Although, for historical reasons, the L<Test::Harness> distribution
takes its name from this module it now exists only to provide
L<TAP::Harness> with an interface that is somewhat backwards compatible
with L<Test::Harness> 2.xx. If you're writing new code consider using
L<TAP::Harness> directly instead.
Emulation is provided for C<runtests> and C<execute_tests> but the
pluggable 'Straps' interface that previous versions of L<Test::Harness>
supported is not reproduced here. Straps is now available as a stand
alone module: L<Test::Harness::Straps>.
See L<TAP::Parser>, L<TAP::Harness> for the main documentation for this
distribution.
=head1 FUNCTIONS
The following functions are available.
=head2 runtests( @test_files )
This runs all the given I<@test_files> and divines whether they passed
or failed based on their output to STDOUT (details above). It prints
out each individual test which failed along with a summary report and
how long it all took.
It returns true if everything was ok. Otherwise it will C<die()> with
one of the messages in the DIAGNOSTICS section.
=cut
# Does the test script's shebang line ask for taint mode?
sub _has_taint {
    my ($test) = @_;
    my $shebang = TAP::Parser::Source->shebang($test);
    return TAP::Parser::SourceHandler::Perl->get_taint($shebang);
}
# _aggregate( $harness, $aggregate, @tests )
# Run @tests through $harness into $aggregate, first masking
# HARNESS_OPTIONS (so it doesn't affect child perl processes) and
# arranging for local @INC additions to be passed down as -I switches.
sub _aggregate {
    my ( $harness, $aggregate, @tests ) = @_;

    # Don't propagate to our children
    local $ENV{HARNESS_OPTIONS};

    _apply_extra_INC($harness);
    _aggregate_tests( $harness, $aggregate, @tests );
}
# Make sure the child sees all the extra junk in @INC
# Make sure the child sees all the extra junk in @INC
# Installs a parser_args callback that appends a -I switch for every
# non-stock @INC directory (see _filtered_inc) to each test's switches.
sub _apply_extra_INC {
    my $harness = shift;

    $harness->callback(
        parser_args => sub {
            my ( $args, $test ) = @_;
            push @{ $args->{switches} }, map {"-I$_"} _filtered_inc();
        }
    );
}
# _aggregate_tests( $harness, $aggregate, @tests )
# Bracket the harness run with the aggregator's start/stop so elapsed
# time is recorded for the whole run.
sub _aggregate_tests {
    my ( $harness, $aggregate, @tests ) = @_;
    $aggregate->start();
    $harness->aggregate_tests( $aggregate, @tests );
    $aggregate->stop();
}
# runtests( @test_files )
# Classic Test::Harness entry point: run every file, print the summary,
# return true if all tests passed, die with a failure summary otherwise.
sub runtests {
    my @tests = @_;

    # shield against -l
    local ( $\, $, );

    my $harness = _new_harness();
    my $aggregate = TAP::Parser::Aggregator->new();

    # NOTE(review): presumably preserves legacy '.'-in-@INC behaviour for
    # the child test processes on newer perls -- confirm against perldelta.
    local $ENV{PERL_USE_UNSAFE_INC} = 1 if not exists $ENV{PERL_USE_UNSAFE_INC};

    _aggregate( $harness, $aggregate, @tests );

    $harness->formatter->summary($aggregate);

    my $total = $aggregate->total;
    my $passed = $aggregate->passed;
    my $failed = $aggregate->failed;

    my @parsers = $aggregate->parsers;

    # count the test *files* that had problems
    my $num_bad = 0;
    for my $parser (@parsers) {
        $num_bad++ if $parser->has_problems;
    }

    die(sprintf(
            "Failed %d/%d test programs. %d/%d subtests failed.\n",
            $num_bad, scalar @parsers, $failed, $total
        )
    ) if $num_bad;

    # success requires at least one test run and every one of them passing
    return $total && $total == $passed;
}
# _canon( @test_numbers )
# Collapse a list of test numbers into a canonical, space-separated string
# of single numbers and dash ranges, e.g. (1,2,3,5,7,8) -> "1-3 5 7-8".
sub _canon {
    my @sorted = sort { $a <=> $b } @_;
    my @ranges;

    my $i = 0;
    while ( $i < @sorted ) {

        # Extend the run while the next number is consecutive (or repeated).
        my $j = $i + 1;
        $j++ while $j < @sorted && $sorted[$j] <= $sorted[ $j - 1 ] + 1;

        push @ranges,
          ( $j == $i + 1 )
          ? $sorted[$i]
          : join( '-', $sorted[$i], $sorted[ $j - 1 ] );

        $i = $j;
    }

    return join ' ', @ranges;
}
# _new_harness( \%sub_args? )
# Construct the harness object (TAP::Harness, the HARNESS_SUBCLASS class,
# or TAP::Harness::Archive) configured from package globals ($Switches,
# $Timer, $Color, ...) and the HARNESS_* environment variables.
sub _new_harness {
    my $sub_args = shift || {};

    my ( @lib, @switches );

    # Split $Switches and HARNESS_PERL_SWITCHES shell-style, then peel off
    # -I paths into @lib; everything else is a plain perl switch.
    my @opt = map { shellwords($_) } grep { defined } $Switches, $ENV{HARNESS_PERL_SWITCHES};
    while ( my $opt = shift @opt ) {
        if ( $opt =~ /^ -I (.*) $ /x ) {
            # "-Ipath" keeps the attached path; bare "-I" consumes the next arg
            push @lib, length($1) ? $1 : shift @opt;
        }
        else {
            push @switches, $opt;
        }
    }

    # Do things the old way on VMS...
    push @lib, _filtered_inc() if IS_VMS;

    # If $Verbose isn't numeric default to 1. This helps core.
    my $verbosity = ( $Verbose ? ( $Verbose !~ /\d/ ) ? 1 : $Verbose : 0 );

    my $args = {
        timer => $Timer,
        directives => our $Directives,
        lib => \@lib,
        switches => \@switches,
        color => $Color,
        verbosity => $verbosity,
        ignore_exit => $IgnoreExit,
    };

    $args->{stdout} = $sub_args->{out}
      if exists $sub_args->{out};

    my $class = $ENV{HARNESS_SUBCLASS} || 'TAP::Harness';

    # HARNESS_OPTIONS is a colon-separated list: j<n> (jobs), c (color),
    # f<Formatter-Class> ('-' stands in for '::'), a<archive.tgz>.
    if ( defined( my $env_opt = $ENV{HARNESS_OPTIONS} ) ) {
        for my $opt ( split /:/, $env_opt ) {
            if ( $opt =~ /^j(\d*)$/ ) {
                $args->{jobs} = $1 || 9;
            }
            elsif ( $opt eq 'c' ) {
                $args->{color} = 1;
            }
            elsif ( $opt =~ m/^f(.*)$/ ) {
                my $fmt = $1;
                $fmt =~ s/-/::/g;
                $args->{formatter_class} = $fmt;
            }
            elsif ( $opt =~ m/^a(.*)$/ ) {
                my $archive = $1;
                $class = "TAP::Harness::Archive";
                $args->{archive} = $archive;
            }
            else {
                die "Unknown HARNESS_OPTIONS item: $opt\n";
            }
        }
    }

    return TAP::Harness->_construct( $class, $args );
}
# Get the parts of @INC which are changed from the stock list AND
# preserve reordering of stock directories.
# _filtered_inc()
# Return the @INC entries that are NOT part of this perl's stock @INC
# (as reported by _default_inc), preserving order and skipping duplicates.
# Code refs and other hooks in @INC are dropped up front.
sub _filtered_inc {
    my @inc = grep { !ref } @INC;    #28567

    if (IS_VMS) {

        # VMS has a 255-byte limit on the length of %ENV entries, so
        # toss the ones that involve perl_root, the install location
        @inc = grep !/perl_root/i, @inc;

    }
    elsif (IS_WIN32) {

        # Lose any trailing backslashes in the Win32 paths
        s/[\\\/]+$// for @inc;
    }

    my @default_inc = _default_inc();

    my @new_inc;
    my %seen;
    for my $dir (@inc) {
        next if $seen{$dir}++;

        # A dir matching the head of the remaining stock list is stock;
        # anything else is an addition we must pass to children.
        if ( $dir eq ( $default_inc[0] || '' ) ) {
            shift @default_inc;
        }
        else {
            push @new_inc, $dir;
        }

        # Drop stock entries we've already classified so reordered stock
        # dirs don't get misreported as additions.
        shift @default_inc while @default_inc and $seen{ $default_inc[0] };
    }

    return @new_inc;
}
{

    # Cache this to avoid repeatedly shelling out to Perl.
    my @inc;

    # _default_inc()
    # The stock @INC of the perl under test, obtained by running it with
    # PERL5LIB/PERLLIB cleared so only compiled-in paths are reported.
    sub _default_inc {
        return @inc if @inc;
        local $ENV{PERL5LIB};
        local $ENV{PERLLIB};
        my $perl = $ENV{HARNESS_PERL} || $^X;

        # Avoid using -l for the benefit of Perl 6
        chomp( @inc = `"$perl" -e "print join qq[\\n], \@INC, q[]"` );
        return @inc;
    }
}
# _check_sequence( @numbers )
# True when the (positive) test numbers are strictly increasing.  Returns
# an empty list/undef on the first repeat or decrease.  Scanning stops at
# the first false element, matching the original shift-until-false loop.
sub _check_sequence {
    my @numbers = @_;

    my $previous;
    for my $current (@numbers) {
        last unless $current;    # false entry terminates the scan
        return if defined $previous && $current <= $previous;
        $previous = $current;
    }

    return 1;
}
# execute_tests( tests => \@test_files, out => \*FH )
# Run the tests like runtests() but without the final report.  Returns
# ( \%tot, \%failedtests, \%todo_passed ) -- see the POD below for the
# meaning of each key.
sub execute_tests {
    my %args = @_;

    my $harness = _new_harness( \%args );
    my $aggregate = TAP::Parser::Aggregator->new();

    my %tot = (
        bonus => 0,
        max => 0,
        ok => 0,
        bad => 0,
        good => 0,
        files => 0,
        tests => 0,
        sub_skipped => 0,
        todo => 0,
        skipped => 0,
        bench => undef,
    );

    # Install a callback so we get to see any plans the
    # harness executes.
    $harness->callback(
        made_parser => sub {
            my $parser = shift;
            $parser->callback(
                plan => sub {
                    my $plan = shift;

                    # a whole-file "1..0 # SKIP" plan counts as a skipped file
                    if ( $plan->directive eq 'SKIP' ) {
                        $tot{skipped}++;
                    }
                }
            );
        }
    );

    # NOTE(review): presumably preserves legacy '.'-in-@INC for children
    local $ENV{PERL_USE_UNSAFE_INC} = 1 if not exists $ENV{PERL_USE_UNSAFE_INC};

    _aggregate( $harness, $aggregate, @{ $args{tests} } );

    $tot{bench} = $aggregate->elapsed;
    my @tests = $aggregate->descriptions;

    # TODO: Work out the circumstances under which the files
    # and tests totals can differ.
    $tot{files} = $tot{tests} = scalar @tests;

    my %failedtests = ();
    my %todo_passed = ();

    # Fold each parser's per-file results into the old-style totals.
    for my $test (@tests) {
        my ($parser) = $aggregate->parsers($test);

        my @failed = $parser->failed;
        my $wstat = $parser->wait;
        my $estat = $parser->exit;
        my $planned = $parser->tests_planned;
        my @errors = $parser->parse_errors;
        my $passed = $parser->passed;
        my $actual_passed = $parser->actual_passed;

        my $ok_seq = _check_sequence( $parser->actual_passed );

        # Duplicate exit, wait status semantics of old version
        $estat ||= '' unless $wstat;
        $wstat ||= '';

        $tot{max} += ( $planned || 0 );
        $tot{bonus} += $parser->todo_passed;
        $tot{ok} += $passed > $actual_passed ? $passed : $actual_passed;
        $tot{sub_skipped} += $parser->skipped;
        $tot{todo} += $parser->todo;

        # A file is "bad" if any test failed, it exited non-zero, or its
        # TAP could not be parsed cleanly.
        if ( @failed || $estat || @errors ) {
            $tot{bad}++;

            # '??' stands in for values we cannot determine (no plan seen,
            # or test numbers out of sequence)
            my $huh_planned = $planned ? undef : '??';
            my $huh_errors = $ok_seq ? undef : '??';

            $failedtests{$test} = {
                'canon' => $huh_planned
                  || $huh_errors
                  || _canon(@failed)
                  || '??',
                'estat' => $estat,
                'failed' => $huh_planned
                  || $huh_errors
                  || scalar @failed,
                'max' => $huh_planned || $planned,
                'name' => $test,
                'wstat' => $wstat
            };
        }
        else {
            $tot{good}++;
        }

        # Record TODO tests that unexpectedly passed, per file.
        my @todo = $parser->todo_passed;
        if (@todo) {
            $todo_passed{$test} = {
                'canon' => _canon(@todo),
                'estat' => $estat,
                'failed' => scalar @todo,
                'max' => scalar $parser->todo,
                'name' => $test,
                'wstat' => $wstat
            };
        }
    }

    return ( \%tot, \%failedtests, \%todo_passed );
}
=head2 execute_tests( tests => \@test_files, out => \*FH )
Runs all the given C<@test_files> (just like C<runtests()>) but
doesn't generate the final report. During testing, progress
information will be written to the currently selected output
filehandle (usually C<STDOUT>), or to the filehandle given by the
C<out> parameter. The I<out> is optional.
Returns a list of two values, C<$total> and C<$failed>, describing the
results. C<$total> is a hash ref summary of all the tests run. Its
keys and values are this:
bonus Number of individual todo tests unexpectedly passed
max Number of individual tests ran
ok Number of individual tests passed
sub_skipped Number of individual tests skipped
todo Number of individual todo tests
files Number of test files ran
good Number of test files passed
bad Number of test files failed
tests Number of test files originally given
skipped Number of test files skipped
If C<< $total->{bad} == 0 >> and C<< $total->{max} > 0 >>, you've
got a successful test.
C<$failed> is a hash ref of all the test scripts that failed. Each key
is the name of a test script, each value is another hash representing
how that script failed. Its keys are these:
name Name of the test which failed
estat Script's exit value
wstat Script's wait status
max Number of individual tests
failed Number which failed
canon List of tests which failed (as string).
C<$failed> should be empty if everything passed.
=cut
1;
__END__
=head1 EXPORT
C<&runtests> is exported by C<Test::Harness> by default.
C<&execute_tests>, C<$verbose>, C<$switches> and C<$debug> are
exported upon request.
=head1 ENVIRONMENT VARIABLES THAT TAP::HARNESS::COMPATIBLE SETS
C<Test::Harness> sets these before executing the individual tests.
=over 4
=item C<HARNESS_ACTIVE>
This is set to a true value. It allows the tests to determine if they
are being executed through the harness or by any other means.
=item C<HARNESS_VERSION>
This is the version of C<Test::Harness>.
=back
=head1 ENVIRONMENT VARIABLES THAT AFFECT TEST::HARNESS
=over 4
=item C<HARNESS_PERL_SWITCHES>
Setting this adds perl command line switches to each test file run.
For example, C<HARNESS_PERL_SWITCHES=-T> will turn on taint mode.
C<HARNESS_PERL_SWITCHES=-MDevel::Cover> will run C<Devel::Cover> for
each test.
C<-w> is always set. You can turn this off in the test with C<BEGIN {
$^W = 0 }>.
=item C<HARNESS_TIMER>
Setting this to true will make the harness display the number of
milliseconds each test took. You can also use F<prove>'s C<--timer>
switch.
=item C<HARNESS_VERBOSE>
If true, C<Test::Harness> will output the verbose results of running
its tests. Setting C<$Test::Harness::verbose> will override this,
or you can use the C<-v> switch in the F<prove> utility.
=item C<HARNESS_OPTIONS>
Provide additional options to the harness. Currently supported options are:
=over
=item C<< j<n> >>
Run <n> (default 9) parallel jobs.
=item C<< c >>
Try to color output. See L<TAP::Formatter::Base/"new">.
=item C<< a<file.tgz> >>
Will use L<TAP::Harness::Archive> as the harness class, and save the TAP to
C<file.tgz>
=item C<< fPackage-With-Dashes >>
Set the formatter_class of the harness being run. Since the C<HARNESS_OPTIONS>
is separated by C<:>, we use C<-> instead.
=back
Multiple options may be separated by colons:
HARNESS_OPTIONS=j9:c make test
=item C<HARNESS_SUBCLASS>
Specifies a TAP::Harness subclass to be used in place of TAP::Harness.
=item C<HARNESS_SUMMARY_COLOR_SUCCESS>
Determines the L<Term::ANSIColor> for the summary in case it is successful.
This color defaults to C<'green'>.
=item C<HARNESS_SUMMARY_COLOR_FAIL>
Determines the L<Term::ANSIColor> for the summary in case it fails.
This color defaults to C<'red'>.
=back
=head1 Taint Mode
Normally when a Perl program is run in taint mode the contents of the
C<PERL5LIB> environment variable do not appear in C<@INC>.
Because C<PERL5LIB> is often used during testing to add build
directories to C<@INC> C<Test::Harness> passes the names of any
directories found in C<PERL5LIB> as -I switches. The net effect of this
is that C<PERL5LIB> is honoured even in taint mode.
=head1 SEE ALSO
L<TAP::Harness>
=head1 BUGS
Please report any bugs or feature requests to
C<bug-test-harness at rt.cpan.org>, or through the web interface at
L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Test-Harness>. I will be
notified, and then you'll automatically be notified of progress on your bug
as I make changes.
=head1 AUTHORS
Andy Armstrong C<< <andy@hexten.net> >>
L<Test::Harness> 2.64 (maintained by Andy Lester and on which this
module is based) has this attribution:
Either Tim Bunce or Andreas Koenig, we don't know. What we know for
sure is, that it was inspired by Larry Wall's F<TEST> script that came
with perl distributions for ages. Numerous anonymous contributors
exist. Andreas Koenig held the torch for many years, and then
Michael G Schwern.
=head1 LICENCE AND COPYRIGHT
Copyright (c) 2007-2011, Andy Armstrong C<< <andy@hexten.net> >>. All rights reserved.
This module is free software; you can redistribute it and/or
modify it under the same terms as Perl itself. See L<perlartistic>.

View File

@@ -0,0 +1,450 @@
use strict;
use warnings;
package Test::Kwalitee; # git description: v1.27-5-ge8333c9
# vim: set ts=8 sts=4 sw=4 tw=115 et :
# ABSTRACT: Test the Kwalitee of a distribution before you release it
# KEYWORDS: testing tests kwalitee CPANTS quality lint errors critic
our $VERSION = '1.28';
use Cwd ();
use Test::Builder 0.88;
use Module::CPANTS::Analyse 0.92;
use parent 'Exporter';
our @EXPORT_OK = qw(kwalitee_ok);
my $Test;
BEGIN { $Test = Test::Builder->new }
# import()
# Supports two calling conventions:
#   - an odd number of import arguments (e.g. 'kwalitee_ok') is a normal
#     Exporter-style request, delegated to export_to_level();
#   - an even number (including none) triggers the historical interface:
#     run kwalitee_ok() immediately at import time, honouring an optional
#     'tests => [...]' option, then call done_testing.
sub import
{
    my ($class, @args) = @_;

    # back-compatibility mode!
    if (@args % 2 == 0)
    {
        # bump the level so failures are reported at the caller's use line
        $Test->level($Test->level + 1);
        my %args = @args;
        my $result = kwalitee_ok(@{$args{tests}});
        $Test->done_testing;
        return $result;
    }

    # otherwise, do what a regular import would do...
    $class->export_to_level(1, @_);
}
# kwalitee_ok( @tests )
# Run the CPANTS kwalitee metrics against the current directory, emitting
# one ok/not-ok per metric via Test::Builder.  Plain names in @tests select
# a subset to run; a leading '-' excludes a metric.  Returns true when
# every metric that ran passed.
sub kwalitee_ok
{
    my (@tests) = @_;

    # Guard against accidentally running in end-user installs; see the
    # comment below for why.
    warn "These tests should not be running unless AUTHOR_TESTING=1 and/or RELEASE_TESTING=1!\n"
        # this setting is internal and for this distribution only - there is
        # no reason for you to need to circumvent this check in any other context.
        # Please DO NOT enable this test to run for users, as it can fail
        # unexpectedly as parts of the toolchain changes!
        unless $ENV{_KWALITEE_NO_WARN} or $ENV{AUTHOR_TESTING} or $ENV{RELEASE_TESTING}
            or (caller)[1] =~ m{^(?:\.[/\\])?xt\b}
            or ((caller)[0]->isa(__PACKAGE__) and (caller(1))[1] =~ m{^(?:\.[/\\])?xt\b});

    # split @tests into "run only these" and "skip these" (leading '-')
    my @run_tests = grep { /^[^-]/ } @tests;
    my @skip_tests = map { s/^-//; $_ } grep { /^-/ } @tests;

    # These don't really work unless you have a tarball, so skip them
    push @skip_tests, qw(extractable extracts_nicely no_generated_files
        has_proper_version has_version manifest_matches_dist);

    # MCA has a patch to add 'needs_tarball', 'no_build' as flags
    my @skip_flags = qw(is_extra is_experimental needs_db);

    my $basedir = Cwd::cwd;
    my $analyzer = Module::CPANTS::Analyse->new({
        distdir => $basedir,
        dist => $basedir,
        # for debugging..
        opts => { no_capture => 1 },
    });

    my $ok = 1;
    for my $generator (@{ $analyzer->mck->generators })
    {
        $generator->analyse($analyzer);
        for my $indicator (sort { $a->{name} cmp $b->{name} } @{ $generator->kwalitee_indicators })
        {
            # honour skip flags, then the run list, then the skip list
            next if grep { $indicator->{$_} } @skip_flags;
            next if @run_tests and not grep { $indicator->{name} eq $_ } @run_tests;
            next if grep { $indicator->{name} eq $_ } @skip_tests;

            my $result = _run_indicator($analyzer->d, $indicator);
            $ok &&= $result;
        }
    }
    return $ok;
}
# _run_indicator( $dist, $metric )
# Run one kwalitee metric's code ref against the analysed dist data and
# emit an ok(); on failure also emit the metric's error, any available
# details, and its remedy via diag().  Returns the boolean test result.
sub _run_indicator
{
    my ($dist, $metric) = @_;
    my $subname = $metric->{name};
    my $ok = 1;

    # report from one frame up so failures point at our caller
    $Test->level($Test->level + 1);
    if (not $Test->ok( $metric->{code}->($dist), $subname))
    {
        $ok = 0;
        $Test->diag('Error: ', $metric->{error});

        # NOTE: this is poking into the analyse structures; we really should
        # have a formal API for accessing this.

        # attempt to print all the extra information we have
        my @details;
        push @details, $metric->{details}->($dist)
            if $metric->{details} and ref $metric->{details} eq 'CODE';
        push @details,
            (ref $dist->{error}{$subname}
                ? @{$dist->{error}{$subname}}
                : $dist->{error}{$subname})
            if defined $dist->{error} and defined $dist->{error}{$subname};

        $Test->diag("Details:\n", join("\n", @details)) if @details;
        $Test->diag('Remedy: ', $metric->{remedy});
    }
    $Test->level($Test->level - 1);
    return $ok;
}
1;
__END__
=pod
=encoding UTF-8
=head1 NAME
Test::Kwalitee - Test the Kwalitee of a distribution before you release it
=head1 VERSION
version 1.28
=head1 SYNOPSIS
In a separate test file:
use Test::More;
use strict;
use warnings;
BEGIN {
plan skip_all => 'these tests are for release candidate testing'
unless $ENV{RELEASE_TESTING};
}
use Test::Kwalitee 'kwalitee_ok';
kwalitee_ok();
done_testing;
=head1 DESCRIPTION
=for stopwords CPANTS
Kwalitee is an automatically-measurable gauge of how good your software is.
That's very different from quality, which a computer really can't measure in a
general sense. (If you can, you've solved a hard problem in computer science.)
In the world of the CPAN, the CPANTS project (CPAN Testing Service; also a
funny acronym on its own) measures Kwalitee with several metrics. If you plan
to release a distribution to the CPAN -- or even within your own organization
-- testing its Kwalitee before creating a release can help you improve your
quality as well.
C<Test::Kwalitee> and a short test file will do this for you automatically.
=head1 USAGE
Create a test file as shown in the synopsis. Run it. It will run all of the
potential Kwalitee tests on the current distribution, if possible. If any
fail, it will report those as regular diagnostics.
If you ship this test, it will not run for anyone else, because of the
C<RELEASE_TESTING> guard. (You can omit this guard if you move the test to
xt/release/, which is not run automatically by other users.)
=head1 FUNCTIONS
=head2 kwalitee_ok
With no arguments, runs all standard metrics.
To run only a handful of tests, pass their name(s) to the C<kwalitee_ok>
function:
kwalitee_ok(qw( use_strict has_tests ));
To disable a test, pass its name with a leading minus (C<->):
kwalitee_ok(qw( -use_strict has_readme ));
=head1 BACK-COMPATIBILITY MODE
Previous versions of this module ran tests directly via the C<import> sub, like so:
use Test::Kwalitee;
# and that's it!
...but this is problematic if you need to perform some setup first, as you
would need to do that in a C<BEGIN> block, or manually call C<import>. This is
messy!
However, this calling path is still available, e.g.:
use Test::Kwalitee tests => [ qw( use_strict has_tests ) ];
=head1 METRICS
The list of each available metric currently available on your
system can be obtained with the C<kwalitee-metrics> command (with
descriptions, if you pass C<--verbose> or C<-v>, but
as of L<Module::CPANTS::Analyse> 0.97_03, the tests include:
=over 4
=item *
has_abstract_in_pod
Does the main module have a C<=head1 NAME> section with a short description of the distribution?
=item *
has_buildtool
Does the distribution have a build tool file?
=for stopwords changelog
=item *
has_changelog
Does the distribution have a changelog?
=item *
has_humanreadable_license
Is there a C<LICENSE> section in documentation, and/or a F<LICENSE> file
present?
=item *
has_license_in_source_file
Is there license information in any of the source files?
=item *
has_manifest
Does the distribution have a F<MANIFEST>?
=item *
has_meta_yml
Does the distribution have a F<META.yml> file?
=item *
has_readme
Does the distribution have a F<README> file?
=item *
has_tests
Does the distribution have tests?
=item *
manifest_matches_dist
Do the F<MANIFEST> and distribution contents match?
=item *
meta_json_conforms_to_known_spec
Does META.json conform to the recognised META.json specification?
(For specs see L<CPAN::Meta::Spec>)
=item *
meta_json_is_parsable
Can the F<META.json> be parsed?
=item *
meta_yml_conforms_to_known_spec
=for stopwords recognised
Does META.yml conform to any recognised META.yml specification?
(For specs see
L<http://module-build.sourceforge.net/META-spec-current.html>)
=item *
meta_yml_is_parsable
Can the F<META.yml> be parsed?
=item *
no_broken_auto_install
Is the distribution using an old version of L<Module::Install>? Versions of
L<Module::Install> prior to 0.89 do not detect correctly that C<CPAN>/C<CPANPLUS>
shell is used.
=item *
no_broken_module_install
Does the distribution use an obsolete version of L<Module::Install>?
Versions of L<Module::Install> prior to 0.61 might not work on some systems at
all. Additionally if the F<Makefile.PL> uses the C<auto_install()>
feature, you need at least version 0.64. Also, 1.04 is known to be broken.
=item *
no_symlinks
Does the distribution have no symlinks?
=item *
use_strict
Does the distribution files all use strict?
=back
=head1 ACKNOWLEDGEMENTS
=for stopwords Klausner Dolan
With thanks to CPANTS and Thomas Klausner, as well as test tester Chris Dolan.
=head1 SEE ALSO
=over 4
=item *
L<kwalitee-metrics> (in this distribution)
=item *
L<Module::CPANTS::Analyse>
=item *
L<App::CPANTS::Lint>
=item *
L<Test::Kwalitee::Extra>
=item *
L<Dist::Zilla::Plugin::Test::Kwalitee>
=item *
L<Dist::Zilla::Plugin::Test::Kwalitee::Extra>
=item *
L<Dist::Zilla::App::Command::kwalitee>
=back
=head1 SUPPORT
Bugs may be submitted through L<the RT bug tracker|https://rt.cpan.org/Public/Dist/Display.html?Name=Test-Kwalitee>
(or L<bug-Test-Kwalitee@rt.cpan.org|mailto:bug-Test-Kwalitee@rt.cpan.org>).
There is also a mailing list available for users of this distribution, at
L<http://lists.perl.org/list/perl-qa.html>.
There is also an irc channel available for users of this distribution, at
L<C<#perl-qa> on C<irc.perl.org>|irc://irc.perl.org/#perl-qa>.
=head1 AUTHORS
=over 4
=item *
chromatic <chromatic@wgz.org>
=item *
Karen Etheridge <ether@cpan.org>
=back
=head1 CONTRIBUTORS
=for stopwords David Steinbrunner Gavin Sherlock Kenichi Ishigaki Nathan Haigh Zoffix Znet Daniel Perrett
=over 4
=item *
David Steinbrunner <dsteinbrunner@pobox.com>
=item *
Gavin Sherlock <sherlock@cpan.org>
=item *
Kenichi Ishigaki <ishigaki@cpan.org>
=item *
Nathan Haigh <nathanhaigh@ukonline.co.uk>
=item *
Zoffix Znet <cpan@zoffix.com>
=item *
Daniel Perrett <perrettdl@googlemail.com>
=back
=head1 COPYRIGHT AND LICENSE
This software is copyright (c) 2005 by chromatic.
This is free software; you can redistribute it and/or modify it under
the same terms as the Perl 5 programming language system itself.
=cut

View File

@@ -0,0 +1,339 @@
package Test::LeakTrace;
use 5.008_001;
use strict;
use warnings;
our $VERSION = '0.17';
use XSLoader;
XSLoader::load(__PACKAGE__, $VERSION);
use Test::Builder::Module;
our @ISA = qw(Test::Builder::Module);
use Exporter qw(import); # use Exporter::import for backward compatibility
our @EXPORT = qw(
leaktrace leaked_refs leaked_info leaked_count
no_leaks_ok leaks_cmp_ok
count_sv
);
our %EXPORT_TAGS = (
all => \@EXPORT,
test => [qw(no_leaks_ok leaks_cmp_ok)],
util => [qw(leaktrace leaked_refs leaked_info leaked_count count_sv)],
);
# _do_leaktrace( \&block, $name, $need_stateinfo, $mode )
# Common driver for the public functions.  $name is used only in the
# void-context warning; $need_stateinfo asks the XS tracer to record
# file/line info; a defined $mode selects the reporting style and also
# suppresses the void-context warning.
sub _do_leaktrace{
    my($block, $name, $need_stateinfo, $mode) = @_;

    # warn when the caller discards the result and no report mode was given
    if(!defined($mode) && !defined wantarray){
        warnings::warnif void => "Useless use of $name() in void context";
    }

    # leaked_count only needs a cheap before/after SV census, not the
    # full XS tracing machinery
    if($name eq 'leaked_count') {
        my $start;
        $start = count_sv();
        $block->();
        return count_sv() - $start;
    }

    # neutralize any __DIE__ hook so exceptions from $block propagate cleanly
    local $SIG{__DIE__} = 'DEFAULT';

    _start($need_stateinfo);
    eval{
        $block->();
    };
    if($@){
        _finish(-silent);    # tear tracing down quietly before re-throwing
        die $@;
    }
    return _finish($mode);
}
# leaked_refs { BLOCK }
# Execute BLOCK and return the list of SVs that leaked out of it.
sub leaked_refs(&){
    my $code = shift;
    return _do_leaktrace($code, 'leaked_refs', 0);
}
# leaked_info { BLOCK }
# Execute BLOCK and return [$ref, $file, $line] records for every leaked
# SV (state info enabled).
sub leaked_info(&){
    my($block) = @_;
    # Fix: pass this function's own name.  The original passed
    # 'leaked_refs' here, so the void-context warning emitted by
    # _do_leaktrace named the wrong function.
    return _do_leaktrace($block, 'leaked_info', 1);
}
# leaked_count { BLOCK }
# Execute BLOCK and return the number of leaked SVs.  The assignment
# forces scalar context on the driver call, as the original's scalar()
# did.
sub leaked_count(&){
    my $code = shift;
    my $count = _do_leaktrace($code, 'leaked_count', 0);
    return $count;
}
# leaktrace { BLOCK } ?$mode
# Execute BLOCK and report leaked SVs to STDERR; $mode defaults to
# -simple.  Returns nothing.
sub leaktrace(&;$){
    my($block, $mode) = @_;
    $mode = -simple if !defined $mode;
    _do_leaktrace($block, 'leaktrace', 1, $mode);
    return;
}
# leaks_cmp_ok { BLOCK } $cmp_op, $expected, ?$description
# Test::Builder assertion comparing the block's leaked-SV count against
# $expected using $cmp_op.  On failure, re-runs the block under -verbose
# tracing and forwards the captured trace via diag().
sub leaks_cmp_ok(&$$;$){
    my($block, $cmp_op, $expected, $description) = @_;

    my $Test = __PACKAGE__->builder;

    # tracing needs the stock runops loop; under Devel::Cover and friends
    # just emit a passing "skipped" result
    if(!_runops_installed()){
        my $mod = exists $INC{'Devel/Cover.pm'} ? 'Devel::Cover' : 'strange runops routines';
        return $Test->ok(1, "skipped (under $mod)");
    }

    # calls to prepare cache in $block
    $block->();

    my $got = _do_leaktrace($block, 'leaked_count', 0);

    my $desc = sprintf 'leaks %s %-2s %s', $got, $cmp_op, $expected;
    if(defined $description){
        $description .= " ($desc)";
    }
    else{
        $description = $desc;
    }

    my $result = $Test->cmp_ok($got, $cmp_op, $expected, $description);

    # on failure, capture a verbose trace into $content and diag() it
    if(!$result){
        open local(*STDERR), '>', \(my $content = '');
        $block->(); # calls it again because opening *STDERR changes the run-time environment
        _do_leaktrace($block, 'leaktrace', 1, -verbose);
        $Test->diag($content);
    }

    return $result;
}
# no_leaks_ok { BLOCK } ?$description
# Asserts BLOCK leaks no SVs; implemented as leaks_cmp_ok { } '<=', 0.
sub no_leaks_ok(&;$){
    # ($block, $description)
    splice @_, 1, 0, ('<=', 0); # ($block, '<=', 0, $description);
    # goto &sub tail-calls leaks_cmp_ok with the modified @_, keeping this
    # frame out of the caller() stack (see perlfunc "goto").
    goto &leaks_cmp_ok;
}
1;
__END__
=for stopwords sv gfx
=head1 NAME
Test::LeakTrace - Traces memory leaks
=head1 VERSION
This document describes Test::LeakTrace version 0.17.
=head1 SYNOPSIS
use Test::LeakTrace;
# simple report
leaktrace{
# ...
};
# verbose output
leaktrace{
# ...
} -verbose;
# with callback
leaktrace{
# ...
} sub {
my($ref, $file, $line) = @_;
warn "leaked $ref from $file line\n";
};
my @refs = leaked_refs{
# ...
};
my @info = leaked_info{
# ...
};
my $count = leaked_count{
# ...
};
# standard test interface
use Test::LeakTrace;
no_leaks_ok{
# ...
} 'no memory leaks';
leaks_cmp_ok{
# ...
} '<', 10;
=head1 DESCRIPTION
C<Test::LeakTrace> provides several functions that trace memory leaks.
This module scans arenas, the memory allocation system,
so it can detect any leaked SVs in given blocks.
B<Leaked SVs> are SVs which are not released after the end of the scope
they have been created. These SVs include global variables and internal caches.
For example, if you call a method in a tracing block, perl might prepare a cache
for the method. Thus, to trace true leaks, C<no_leaks_ok()> and C<leaks_cmp_ok()>
executes a block more than once.
=head1 INTERFACE
=head2 Exported functions
=head3 C<< leaked_info { BLOCK } >>
Executes I<BLOCK> and returns a list of leaked SVs and places where the SVs
come from, i.e. C<< [$ref, $file, $line] >>.
=head3 C<< leaked_refs { BLOCK } >>
Executes I<BLOCK> and returns a list of leaked SVs.
=head3 C<< leaked_count { BLOCK } >>
Executes I<BLOCK> and returns the number of leaked SVs.
=head3 C<< leaktrace { BLOCK } ?($mode | \&callback) >>
Executes I<BLOCK> and reports leaked SVs to C<*STDERR>.
Defined I<$mode>s are:
=over 4
=item -simple
Default. Reports the leaked SV identity (type and address), file name and line number.
=item -sv_dump
In addition to B<-simple>, dumps the sv content using C<sv_dump()>,
which also implements C<Devel::Peek::Dump()>.
=item -lines
In addition to B<-simple>, prints suspicious source lines.
=item -verbose
Both B<-sv_dump> and B<-lines>.
=back
=head3 C<< no_leaks_ok { BLOCK } ?$description >>
Tests that I<BLOCK> does not leaks SVs. This is a test function
using C<Test::Builder>.
Note that I<BLOCK> is called more than once. This is because
I<BLOCK> might prepare caches which are not memory leaks.
=head3 C<< leaks_cmp_ok { BLOCK } $cmp_op, $number, ?$description >>
Tests that I<BLOCK> leaks a specific number of SVs. This is a test
function using C<Test::Builder>.
Note that I<BLOCK> is called more than once. This is because
I<BLOCK> might prepare caches which are not memory leaks.
=head3 C<< count_sv() >>
Counts all the SVs in the arena.
=head2 Script interface
Like C<Devel::LeakTrace> C<Test::LeakTrace::Script> is provided for whole scripts.
The arguments of C<use Test::LeakTrace::Script> directive is the same as C<leaktrace()>.
$ TEST_LEAKTRACE=-sv_dump perl -MTest::LeakTrace::Script script.pl
$ perl -MTest::LeakTrace::Script=-verbose script.pl
#!perl
# ...
use Test::LeakTrace::Script sub{
my($ref, $file, $line) = @_;
# ...
};
# ...
=head1 EXAMPLES
=head2 Testing modules
Here is a test script template that checks memory leaks.
#!perl -w
use strict;
use constant HAS_LEAKTRACE => eval{ require Test::LeakTrace };
use Test::More HAS_LEAKTRACE ? (tests => 1) : (skip_all => 'require Test::LeakTrace');
use Test::LeakTrace;
use Some::Module;
leaks_cmp_ok{
my $o = Some::Module->new();
$o->something();
$o->something_else();
} '<', 1;
=head1 DEPENDENCIES
Perl 5.8.1 or later, and a C compiler.
=head1 CAVEATS
C<Test::LeakTrace> does not work with C<Devel::Cover> and modules which install
their own C<runops> routines, or the perl executor. So if the test functions of
this module detect strange C<runops> routines, they do nothing and report okay.
=head1 BUGS
No bugs have been reported.
Please report any bugs or feature requests to the author.
=head1 SEE ALSO
L<Devel::LeakTrace>.
L<Devel::LeakTrace::Fast>.
L<Test::TraceObject>.
L<Test::Weak>.
For guts:
L<perlguts>.
L<perlhack>.
F<sv.c>.
=head1 AUTHOR
Goro Fuji(gfx) E<lt>gfuji(at)cpan.orgE<gt>.
=head1 LICENSE AND COPYRIGHT
Copyright (c) 2009-2010, Goro Fuji(gfx). All rights reserved.
This library is free software; you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut

View File

@@ -0,0 +1,246 @@
=encoding utf-8
=head1 NAME
Test::LeakTrace::JA - メモリリークを追跡する
=head1 VERSION
This document describes Test::LeakTrace version 0.17.
=head1 SYNOPSIS
use Test::LeakTrace;
# simple report
leaktrace{
# ...
};
# verbose output
leaktrace{
# ...
} -verbose;
# with callback
leaktrace{
# ...
} sub {
my($ref, $file, $line) = @_;
warn "leaked $ref from $file line $line\n";
};
my @refs = leaked_refs{
# ...
};
my @info = leaked_info{
# ...
};
my $count = leaked_count{
# ...
};
# standard test interface
use Test::LeakTrace;
no_leaks_ok{
# ...
} "description";
leaks_cmp_ok{
# ...
} '<', 10;
=head1 DESCRIPTION
PerlのGCはリファレンスカウンタを用いたものなのでオブジェクトが開放されるタイミングが明確であることや体感速度が高速であることなど数々の利点があります。
その一方で循環参照を開放できないことCレベルでの操作でミスしやすいなど問題点がいくつかあります。それらの問題点のほとんどはメモリリークに関することですからメモリリークを追跡することは非常に重要な課題です。
C<Test::LeakTrce>はメモリリークを追跡するためのいくつかのユーティリティとC<Test::Builder>ベースのテスト関数を提供します。このモジュールはPerlのメモリアロケーションシステムであるアリーナを走査するためSVに関することであれば与えられたコードのどんなメモリリークでも検出できます。つまりPerlレベルでの循環参照を始めとしてXSモジュールやPerl自身のバグによるメモリリークを追跡することができます。
ここでB<リーク>とは特定のスコープ内で新たに作成されてそのスコープ終了後にも残っている値を意味します。これは新たに作成されたグローバルな値やPerlが暗黙のうちに作成するキャッシュの値も含みます。たとえばリーク追跡を行っている最中に新たに名前つきサブルーチンを定義すればそれはリークとみなされます。また継承したメソッドを呼び出したりオブジェクトを作成したりするだけで様々なキャッシュが生成されリークが報告される可能性があります。
=head1 INTERFACE
=head2 Exported functions
=head3 C<< leaked_info { BLOCK } >>
I<BLOCK>を実行し,追跡結果をリストで返します。
結果はリークした値のリファレンスファイル名行番号の三要素を持つ配列つまりC<< [$ref, $file, $line] >>のリストとなっています。
なおこの関数はPerl内部で使用する値を返す可能性があります。そのような内部用の値を変更するとPerl実行環境に致命的な影響を与える可能性があるので注意してください。また配列やハッシュの要素としてリファレンスではない配列やハッシュそれ自体が含まれる可能性があります。そのような値は通常Perlレベルで操作することができません。たとえばC<Data::Dumper>などで出力することはできません。
=head3 C<< leaked_refs { BLOCK } >>
I<BLOCK>を実行しリークしたSVのリファレンスのリストを返します。
C<< map{ $_->[0] } leaked_info{ BLOCK } >>と同じですが,より高速です。
=head3 C<< leaked_count { BLOCK } >>
I<BLOCK>を実行しリークしたSVのリファレンスの個数を返します。
C<leaked_info()>とC<leaked_refs()>もスカラコンテキストでは個数を返しますが,
C<leaked_count()>はコンテキストに依存しません。
=head3 C<< leaktrace { BLOCK } ?($mode | \&callback) >>
I<BLOCK>を実行しその中で起きたメモリリークをC<*STDERR>に報告します。
メモリリークの報告はI<$mode>で指定したモードに従います。
受け付けるI<$mode>は以下の通りです:
=over 4
=item -simple
デフォルトのモードです。リークしたSVの型とアドレスファイル名行番号を報告します。
=item -sv_dump
B<-simple>に加えてC<sv_dump()>でSVの中身をダンプします。
これはC<Devel::Peek::Dump()>の出力とほぼ同じです。
=item -lines
B<-simple>に加えて,リークしていると見られる行の周辺を出力します。
=item -verbose
B<-simple>とB<-sv_dump>とB<-lines>の全てを出力します。
=back
より細かな制御のためにコールバックを指定することもできます。
I<\&callback>はリークしたSV毎に呼び出されその引数はリークしたSVのリファレンスファイル名行番号の3つです。
=head3 C<< no_leaks_ok { BLOCK } ?$description >>
I<BLOCK>にメモリリークがないことテストします。
これはC<Test::Builder>ベースのテスト関数です。
なおI<BLOCK>は複数回実行されます。これは,初回の実行でキャッシュを用意する可能性を考慮するためです。
=head3 C<< leaks_cmp_ok { BLOCK } $cmp_op, $count, ?$description >>
I<BLOCK>のメモリリーク数と特定の数値を比較するテストを行います。
これはC<Test::Builder>ベースのテスト関数です。
なおI<BLOCK>は複数回実行されます。これは,初回の実行でキャッシュを用意する可能性を考慮するためです。
=head2 Script interface
C<Devel::LeakTrace>と同様にスクリプトのリーク追跡のためにC<Test::LeakTrace::Script>が提供されます。C<use Test::LeakTrace::Script>宣言の引数はC<leaktrace()>と同じです。
$ TEST_LEAKTRACE=-sv_dump perl -MTest::LeakTrace::Script script.pl
$ perl -MTest::LeakTrace::Script=-verbose script.pl
#!perl
# ...
use Test::LeakTrace::Script sub{
my($ref, $file, $line) = @_;
# ...
};
# ...
=head1 EXAMPLES
=head2 Testing modules
以下はモジュールのメモリリークをチェックするテストスクリプトのテンプレートです。
#!perl -w
use strict;
use constant HAS_LEAKTRACE => eval{ require Test::LeakTrace };
use Test::More HAS_LEAKTRACE ? (tests => 1) : (skip_all => 'require Test::LeakTrace');
use Test::LeakTrace;
use Some::Module;
leaks_cmp_ok{
my $o = Some::Module->new();
$o->something();
$o->something_else();
} '<', 1;
=head1 GUTS
C<Test::LeakTrace>はアリーナを走査します。アリーナとはPerlが作成するSVのためのメモリアロケーションシステムでありF<sv.c>で実装されています。
アリーナの走査にはF<sv.c>にあるC<S_visit()>のコードを元にしたマクロを用いています。
さてアリーナを走査すればメモリリークの検出そのものは簡単にできるように思えます。まずコードブロックを実行する前に一度アリーナを走査し全てのSVに「使用済み」の印を付けておきます。次にコードブロック実行後にもう一度アリーナを走査し使用済みの印がついていないSVがあればそれはコードブロック内で作成され開放されなかったSVだと考えます。あとはそれを報告するだけです。実際にはSVに対して使用済みの印を付けるスペースがないためインサイドアウト法を応用して外部のコンテナに使用済みの印を保存します。
これを仮にPerlコードで書くと以下のようになります。
my %used_sv;
foreach my $sv(@ARENA){
$used_sv{$sv}++;
}
$block->();
my @leaked
foreach my $sv(@ARENA){
if(not exists $used_sv{$sv}){
push @leaked, $sv;
}
}
say 'leaked count: ', scalar @leaked;
リークしたSVを得るだけならこの方法で十分です。実際C<leaked_refs()>とC<leaked_count()>はこのような方法でリークしたSVやその個数を調べています。
しかしリークしたSVのステートメントの情報つまりファイル名や行番号を得るためにはこれだけでは不十分です。Perl 5.10以降にはSVが作成されたときのステートメント情報を追跡する機能があるのですがこの機能を利用するためにはコンパイラオプションとしてにC<-DDEBUG_LEAKING_SCALARS>を与えてPerlをビルドしなければなりません。
そこでC<Test::LeakTrace>では拡張可能なC<PL_runops>を利用してPerl VMがOPコードを実行する1ステートメント毎にアリーナを走査しステートメント情報を記録します。これは1ステートメント毎にマークスイープのような処理を行うのに等しく非常に時間が掛かります。しかしPerlを特殊な条件の下でビルドする必要もなくバージョンに依存した機能もほとんど使用しないため多くの環境で動かすことができます。
またC<no_leaks_ok()>のようなテスト関数はまずC<leaked_count()>でリークしたSVの個数を得てから必要に応じてリークした位置を特定するためにC<leaktrace()>を実行するため,テストが成功する限りは時間の掛かる追跡処理はしません。
=head1 DEPENDENCIES
Perl 5.8.1 or later, and a C compiler.
=head1 CAVEATS
C<Test::LeakTrace>はC<Devel::Cover>と一緒に動かすことはできません。
したがってC<Devel::Cover>の元で動いていることが検出されると,テスト関数は何も行わずにテストをパスさせます。
=head1 BUGS
No bugs have been reported.
Please report any bugs or feature requests to the author.
=head1 SEE ALSO
L<Devel::LeakTrace>.
L<Devel::LeakTrace::Fast>.
L<Test::TraceObject>.
L<Test::Weak>.
For guts:
L<perlguts>.
L<perlhack>.
L<sv.c>.
=head1 AUTHOR
Goro Fuji E<lt>gfuji(at)cpan.orgE<gt>.
=head1 LICENSE AND COPYRIGHT
Copyright (c) 2009, Goro Fuji. Some rights reserved.
This library is free software; you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut

View File

@@ -0,0 +1,75 @@
# Script-wide leak tracing: starts Test::LeakTrace when compilation
# finishes and reports leaks for the whole script at interpreter exit.
package Test::LeakTrace::Script;
use strict;
use warnings;
use Test::LeakTrace ();
# Reporting mode; may be preset via the TEST_LEAKTRACE environment
# variable and overridden by an import argument.
my $Mode = $ENV{TEST_LEAKTRACE};
# import(?$mode_or_callback) - records the reporting mode or callback
# given on the "use" line (same arguments as leaktrace()).
sub import{
shift;
$Mode = shift if @_;
}
no warnings 'void';
# Begin tracing as soon as compilation finishes.
INIT{
Test::LeakTrace::_start(1);
}
# At shutdown, report leaks using the chosen mode. Note that -simple is
# the negated-bareword idiom yielding the string "-simple".
END{
$Mode = -simple unless defined $Mode;
Test::LeakTrace::_finish($Mode);
return;
}
1;
__END__
=head1 NAME
Test::LeakTrace::Script - A LeakTrace interface for whole scripts
=head1 SYNOPSIS
#!perl -w
use Test::LeakTrace::Script sub{
my($svref, $file, $line) = @_;
warn "leaked $svref from $file line $line.\n";
};
=head1 DESCRIPTION
This is an interface to C<Test::LeakTrace> for whole scripts.
=head1 INTERFACE
=head2 Command line interface
$ perl -MTest::LeakTrace::Script script.pl
$ perl -MTest::LeakTrace::Script=-verbose script.pl
$ TEST_LEAKTRACE=-lines script.pl
=head1 ENVIRONMENT VARIABLES
=head2 TEST_LEAKTRACE=mode
=head3 -simple (DEFAULT)
=head3 -sv_dump
=head3 -lines
=head3 -verbose
=head1 SEE ALSO
L<Test::LeakTrace>.
=cut

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,79 @@
package Test::More::UTF8;
use warnings;
use strict;
use Test::More ();
use Carp;
=head1 NAME
Test::More::UTF8 - Enhancing Test::More for UTF8-based projects
=cut
our $VERSION = '0.05';
=head1 SYNOPSIS
use Test::More;
use Test::More::UTF8;
# now we can easily use flagged strings without warnings like "Wide character in print ..."
is("\x{410}","\x{420}"); # got a failure message without warnings
=head1 LIMITATIONS
This module have reason only for perl 5.8 and higher
=head1 FEATURES
This module also switch on by default utf8 pragma. To disable this, add "-utf8" option
use Test::More::UTF8 qw(-utf8);
By default binmode ':utf8' will be done on all output handles: failure_output, todo_output, output. It is possible to choose only some of them
use Test::More::UTF8 qw(failure); # enable :utf8 only on failure_output
use Test::More::UTF8 qw(todo); # enable :utf8 only on todo_output
use Test::More::UTF8 qw(out); # enable :utf8 only on output
=cut
# Configure UTF-8 friendly output for Test::More.
#
# Accepted import arguments:
#   failure / failure_output - apply :utf8 to the failure diagnostics handle
#   todo / todo_output       - apply :utf8 to the TODO handle
#   out / output             - apply :utf8 to the main TAP output handle
#   -utf8 (or -utf)          - do NOT enable the utf8 source pragma
# With no handle options given, all three handles are switched to :utf8.
# Croaks on anything unrecognized.
sub import {
    my $class = shift;
    my %opt = map { ($_ => 1) } @_;

    my @handles;
    push @handles, 'failure_output'
        if delete $opt{failure} or delete $opt{failure_output};
    push @handles, 'todo_output'
        if delete $opt{todo} or delete $opt{todo_output};
    push @handles, 'output'
        if delete $opt{out} or delete $opt{output};

    my $enable_pragma = 1;
    $enable_pragma = 0 if delete $opt{-utf8} or delete $opt{-utf};

    if (%opt) {
        croak "Unsupported options to $class: " . join ', ', keys %opt;
    }

    @handles = qw(failure_output todo_output output) unless @handles;
    binmode Test::More->builder->$_, ':utf8' for @handles;

    if ($enable_pragma) {
        # Turn on the utf8 pragma in the caller's compiling scope.
        require utf8;
        @_ = ('utf8');
        goto &utf8::import;
    }
    return;
}
=head1 AUTHOR
Mons Anderson, <mons@cpan.org>
=head1 BUGS
None known
=head1 COPYRIGHT & LICENSE
Copyright 2009 Mons Anderson, all rights reserved.
This program is free software; you can redistribute it and/or modify it
under the same terms as Perl itself.
=cut
1; # End of Test::More::UTF8

View File

@@ -0,0 +1,334 @@
package Test::Needs;
use strict;
use warnings;
no warnings 'once';
our $VERSION = '0.002006';
$VERSION =~ tr/_//d;
# Constant subs (empty prototype) so the workaround checks below fold
# away at compile time on perls that do not need them.
BEGIN {
# Perls before 5.11 (except the 5.9.4..5.10.0 dev range) leak %^H
# across require.
*_WORK_AROUND_HINT_LEAKAGE
= "$]" < 5.011 && !("$]" >= 5.009004 && "$]" < 5.010001)
? sub(){1} : sub(){0};
# Perls before 5.9 leave stale %INC entries after a failed require.
*_WORK_AROUND_BROKEN_MODULE_STATE
= "$]" < 5.009
? sub(){1} : sub(){0};
}
our @EXPORT = qw(test_needs);
# _try_require($module) - attempt to load $module.
# Returns true if it loaded, false if it is simply missing; re-throws
# the error if the module exists but fails to compile.
sub _try_require {
# Shield the hint hash on perls that leak %^H across require.
local %^H
if _WORK_AROUND_HINT_LEAKAGE;
my ($module) = @_;
# Map Foo::Bar (or old-style Foo'Bar) to its relative .pm path.
(my $file = "$module.pm") =~ s{::|'}{/}g;
my $err;
{
local $@;
eval { require $file }
or $err = $@;
}
if (defined $err) {
# Ancient perls cache the failed require in %INC; clear it so a
# later attempt can retry.
delete $INC{$file}
if _WORK_AROUND_BROKEN_MODULE_STATE;
# Only "missing module" counts as a skip condition; genuine
# compile errors must propagate.
die $err
unless $err =~ /\ACan't locate \Q$file\E/;
return !1;
}
!0;
}
# Die with the message reported from the first caller outside this
# package -- a lightweight Carp::croak without the Carp dependency.
sub _croak {
    my $msg = join q{}, @_;
    my $frame = 1;
    while (my ($pkg, $file, $line) = caller($frame)) {
        $frame++;
        if (!$pkg->isa(__PACKAGE__)) {
            die "$msg at $file line $line.\n";
        }
    }
    # No external frame found; let perl append its own location.
    die $msg;
}
# True when $module->VERSION($version) succeeds, i.e. the loaded module
# satisfies the requested version.
sub _try_version {
    my ($mod, $want) = @_;
    local $@;
    my $ok = eval { $mod->VERSION($want); 1 };
    return $ok ? !!1 : !!0;
}
# Normalize a version -- plain number, dotted-decimal (optionally with a
# leading "v"), or a real v-string -- into a numeric x.yyyzzz form
# comparable against $]. Croaks on anything unrecognizable.
sub _numify_version {
    my $v = $_[0];
    return 0
        if !$v;
    return sprintf '%.6f', $v
        if $v =~ /^[0-9]+\.[0-9]+$/;
    if ($v =~ /^v?([0-9]+(?:\.[0-9]+)+)$/) {
        my @parts = ((split /\./, $1), 0, 0)[0 .. 2];
        return sprintf '%d.%03d%03d', @parts;
    }
    if ($v =~ /^(\x05)(.*)$/s) {
        # A perl v-string: the first byte is chr(5) and the remaining
        # bytes encode the minor/patch components.
        return sprintf '%d.%03d%03d', map { ord } $1, split //, $2;
    }
    _croak(qq{version "$v" does not look like a number});
}
# _find_missing(@specs) - evaluate module/version requirements.
# Accepts the same argument forms as test_needs() (names, hashrefs,
# arrayrefs; see _pairs). Returns a "Need ..." summary string listing
# every unsatisfied requirement, or undef when everything is available.
sub _find_missing {
my @bad = map {
my ($module, $version) = @$_;
# The pseudo-module "perl" compares against the running interpreter.
$module eq 'perl' ? do {
$version = _numify_version($version);
"$]" < $version ? (sprintf "perl %s (have %.6f)", $version, $]) : ()
}
# Reject anything that does not look like a valid module name.
: $module =~ /^\d|[^\w:]|:::|[^:]:[^:]|^:|:$/
? _croak sprintf qq{"%s" does not look like a module name}, $module
# Module loads: only a failed version check is reported.
: _try_require($module) ? (
defined $version && !_try_version($module, $version)
? "$module $version (have ".(defined $module->VERSION ? $module->VERSION : 'undef').')'
: ()
)
# Module missing entirely.
: $version ? "$module $version"
: $module;
}
_pairs(@_);
@bad ? "Need " . join(', ', @bad) : undef;
}
# use Test::Needs @specs;
# Runs test_needs() on any import arguments, then aliases everything in
# @EXPORT into the caller's namespace.
sub import {
    my $class  = shift;
    my $target = caller;
    if (@_) {
        # Keep Test::Builder failure locations pointing at the real caller.
        local $Test::Builder::Level = ($Test::Builder::Level || 0) + 1;
        test_needs(@_);
    }
    no strict 'refs';
    *{"${target}::${_}"} = \&{"${class}::${_}"}
        for @{"${class}::EXPORT"};
}
# test_needs @specs;
# Checks the requirements at runtime; skips the test (or fails, when
# RELEASE_TESTING is set) if anything is missing. No-op otherwise.
sub test_needs {
    my $unmet = _find_missing(@_);
    local $Test::Builder::Level = ($Test::Builder::Level || 0) + 1;
    return unless $unmet;
    if ($ENV{RELEASE_TESTING}) {
        _fail("$unmet due to RELEASE_TESTING");
    }
    else {
        _skip($unmet);
    }
}
# Thin wrappers selecting the skip vs. fail behavior of _fail_or_skip.
sub _skip { my ($msg) = @_; _fail_or_skip($msg, 0) }
sub _fail { my ($msg) = @_; _fail_or_skip($msg, 1) }
# Normalize argument specs into [module, version] pairs:
#   hashref  -> one pair per key, sorted by module name
#   arrayref -> consecutive (module, version) pairs
#   scalar   -> a single-element pair (no version)
sub _pairs {
    my @out;
    for my $spec (@_) {
        if (ref $spec eq 'HASH') {
            push @out, map { [$_ => $spec->{$_}] } sort keys %$spec;
        }
        elsif (ref $spec eq 'ARRAY') {
            push @out,
                map { [@{$spec}[$_ * 2, $_ * 2 + 1]] } 0 .. int($#$spec / 2);
        }
        else {
            push @out, [$spec];
        }
    }
    return @out;
}
# _fail_or_skip($message, $fail)
# Emit the skip (or, under RELEASE_TESTING, fail) result using whichever
# test framework is already loaded -- Test2, Test::Builder, or raw TAP --
# then terminate the run (exits, or throws inside a subtest).
#
# Fix: the Test::Builder branch checked Test::Builer->can('note') (a
# misspelled class name), which is always false, so the message was
# always printed raw even on builders that support note().
sub _fail_or_skip {
    my ($message, $fail) = @_;
    if ($INC{'Test2/API.pm'}) {
        my $ctx = Test2::API::context();
        my $hub = $ctx->hub;
        if ($fail) {
            $ctx->ok(0, "Test::Needs modules available", [$message]);
        }
        else {
            my $plan  = $hub->plan;
            my $tests = $hub->count;
            if ($plan || $tests) {
                # A plan (or earlier tests) exists: pad with skips
                # instead of declaring a new skip_all plan.
                my $skips
                    = $plan && $plan ne 'NO PLAN' ? $plan - $tests : 1;
                $ctx->skip("Test::Needs modules not available") for 1 .. $skips;
                $ctx->note($message);
            }
            else {
                $ctx->plan(0, 'SKIP', $message);
            }
        }
        $ctx->done_testing;
        # Older Test2 did not release the context from done_testing.
        $ctx->release if $Test2::API::VERSION < 1.302053;
        $ctx->send_event('+'._t2_terminate_event());
    }
    elsif ($INC{'Test/Builder.pm'}) {
        my $tb = Test::Builder->new;
        # Older Test::Builder lacks has_plan; emulate it via
        # expected_tests / current_test.
        my $has_plan = Test::Builder->can('has_plan') ? 'has_plan'
            : sub { $_[0]->expected_tests || eval { $_[0]->current_test($_[0]->current_test); 'no_plan' } };
        if ($fail) {
            $tb->plan(tests => 1)
                unless $tb->$has_plan;
            $tb->ok(0, "Test::Needs modules available");
            $tb->diag($message);
        }
        else {
            my $plan  = $tb->$has_plan;
            my $tests = $tb->current_test;
            if ($plan || $tests) {
                my $skips
                    = $plan && $plan ne 'no_plan' ? $plan - $tests : 1;
                $tb->skip("Test::Needs modules not available")
                    for 1 .. $skips;
                # Was: Test::Builer->can('note') -- typo made this
                # always fall through to the raw print.
                Test::Builder->can('note') ? $tb->note($message) : print "# $message\n";
            }
            else {
                $tb->skip_all($message);
            }
        }
        $tb->done_testing
            if Test::Builder->can('done_testing');
        # Inside a subtest, throw the magic exception instead of exiting.
        die bless {} => 'Test::Builder::Exception'
            if Test::Builder->can('parent') && $tb->parent;
    }
    else {
        # No framework loaded: emit raw TAP ourselves.
        if ($fail) {
            print "1..1\n";
            print "not ok 1 - Test::Needs modules available\n";
            print STDERR "# $message\n";
            exit 1;
        }
        else {
            print "1..0 # SKIP $message\n";
        }
    }
    exit 0;
}
# Cached class name of the lazily-built terminate event.
my $terminate_event;
# _t2_terminate_event() - build (once) and return the name of a private
# Test2::Event subclass whose terminate() ends the run with exit code 0
# and which produces no output. Defined via string eval so Test2::Event
# is only required when Test2 is actually in use.
sub _t2_terminate_event () {
return $terminate_event
if $terminate_event;
local $@;
my $file = __FILE__;
$terminate_event = eval <<"END_CODE" or die "$@";
package # hide
Test::Needs::Event::Terminate;
use Test2::Event ();
our \@ISA = qw(Test2::Event);
sub no_display { 1 }
sub terminate { 0 }
\$INC{'Test/Needs/Event/Terminate.pm'} = \$file;
__PACKAGE__;
END_CODE
}
1;
__END__
=pod
=encoding utf-8
=head1 NAME
Test::Needs - Skip tests when modules not available
=head1 SYNOPSIS
# need one module
use Test::Needs 'Some::Module';
# need multiple modules
use Test::Needs 'Some::Module', 'Some::Other::Module';
# need a given version of a module
use Test::Needs {
'Some::Module' => '1.005',
};
# check later
use Test::Needs;
test_needs 'Some::Module';
# skips remainder of subtest
use Test::More;
use Test::Needs;
subtest 'my subtest' => sub {
test_needs 'Some::Module';
...
};
# check perl version
use Test::Needs { perl => 5.020 };
=head1 DESCRIPTION
Skip test scripts if modules are not available. The requested modules will be
loaded, and optionally have their versions checked. If the module is missing,
the test script will be skipped. Modules that are found but fail to compile
will exit with an error rather than skip.
If used in a subtest, the remainder of the subtest will be skipped.
Skipping will work even if some tests have already been run, or if a plan has
been declared.
Versions are checked via a C<< $module->VERSION($wanted_version) >> call.
Versions must be provided in a format that will be accepted. No extra
processing is done on them.
If C<perl> is used as a module, the version is checked against the running perl
version (L<$]|perlvar/$]>). The version can be specified as a number,
dotted-decimal string, v-string, or version object.
If the C<RELEASE_TESTING> environment variable is set, the tests will fail
rather than skip. Subtests will be aborted, but the test script will continue
running after that point.
=head1 EXPORTS
=head2 test_needs
Has the same interface as when using Test::Needs in a C<use>.
=head1 SEE ALSO
=over 4
=item L<Test::Requires>
A similar module, with some important differences. L<Test::Requires> will act
as a C<use> statement (despite its name), calling the import sub. Under
C<RELEASE_TESTING>, it will BAIL_OUT if a module fails to load rather than
using a normal test fail. It also doesn't distinguish between missing modules
and broken modules.
=item L<Test2::Require::Module>
Part of the L<Test2> ecosystem. Only supports running as a C<use> command to
skip an entire plan.
=item L<Test2::Require::Perl>
Part of the L<Test2> ecosystem. Only supports running as a C<use> command to
skip an entire plan. Checks perl versions.
=item L<Test::If>
Acts as a C<use> statement. Only supports running as a C<use> command to skip
an entire plan. Can skip based on subref results.
=back
=head1 AUTHORS
haarg - Graham Knop (cpan:HAARG) <haarg@haarg.org>
=head1 CONTRIBUTORS
None so far.
=head1 COPYRIGHT AND LICENSE
Copyright (c) 2016 the Test::Needs L</AUTHORS> and L</CONTRIBUTORS>
as listed above.
This library is free software and may be distributed under the same terms
as perl itself. See L<http://dev.perl.org/licenses/>.
=cut

View File

@@ -0,0 +1,336 @@
package Test::NoWarnings;
use 5.006;
use strict;
use warnings;
use Carp ();
use Exporter ();
use Test::Builder ();
use Test::NoWarnings::Warning ();
use vars qw( $VERSION @EXPORT_OK @ISA $do_end_test );
BEGIN {
$VERSION = '1.04';
@ISA = 'Exporter';
@EXPORT_OK = qw(
clear_warnings
had_no_warnings
warnings
);
# Do we add the warning test at the end?
$do_end_test = 0;
}
# Shared Test::Builder instance used to report the final test.
my $TEST = Test::Builder->new;
# Remember the original process so forked children skip the END test.
my $PID = $$;
# Captured Test::NoWarnings::Warning objects, filled by the trap below.
my @WARNINGS = ();
# True when the ':early' import tag asked for immediate diagnostics.
my $EARLY = 0;
# Install the global warning trap as soon as the module is compiled.
$SIG{__WARN__} = make_catcher(\@WARNINGS);
# Enable the automatic end-of-run warning check and forward the
# remaining arguments to Exporter. The ':early' tag additionally turns
# on immediate diagnostics as each warning is captured.
sub import {
    $do_end_test = 1;
    my @exports = @_;
    if ( grep { $_ eq ':early' } @exports ) {
        @exports = grep { $_ ne ':early' } @exports;
        $EARLY = 1;
    }
    @_ = @exports;
    goto &Exporter::import;
}
# the END block must be after the "use Test::Builder" to make sure it runs
# before Test::Builder's end block
# only run the test if there have been other tests
END {
# $do_end_test is only set by import(), so a bare require skips this.
had_no_warnings() if $do_end_test;
}
# make_warning($msg) - build a fully populated Warning object for one
# captured warning: message, test position, stack trace, and Carp dump.
sub make_warning {
# Disarm the trap so nothing below re-enters the capture machinery.
local $SIG{__WARN__};
my $msg = shift;
my $warning = Test::NoWarnings::Warning->new;
$warning->setMessage($msg);
$warning->fillTest($TEST);
$warning->fillTrace(__PACKAGE__);
# Mark this package internal so Carp's trace starts at user code.
$Carp::Internal{__PACKAGE__.""}++;
local $Carp::CarpLevel = $Carp::CarpLevel + 1;
$warning->fillCarp($msg);
$Carp::Internal{__PACKAGE__.""}--;
return $warning;
}
# make_catcher(\@array) - return a handler suitable for $SIG{__WARN__}.
# Each warning is converted to a Warning object and pushed onto @$array;
# under the ':early' tag its details are also shown immediately.
sub make_catcher {
my $array = shift;
return sub {
my $msg = shift;
# Generate the warning
$Carp::Internal{__PACKAGE__.""}++;
push(@$array, make_warning($msg));
$Carp::Internal{__PACKAGE__.""}--;
# Show the diag early rather than at the end
if ( $EARLY ) {
$TEST->diag( $array->[-1]->toString );
}
return $msg;
};
}
# Run the "no warnings" test: passes when nothing was captured, fails
# with the collected diagnostics otherwise. Returns the test result
# (1/0). Does nothing in forked children (returns 0).
sub had_no_warnings {
    return 0 if $$ != $PID;
    local $SIG{__WARN__};
    my $name = shift || "no warnings";
    my ($ok, $diag);
    if (@WARNINGS) {
        $ok   = 0;
        $diag = "There were " . scalar(@WARNINGS) . " warning(s)\n";
        # With :early the details were already shown as they happened.
        $diag .= join "----------\n", map { $_->toString } @WARNINGS
            unless $EARLY;
    }
    else {
        $ok = 1;
    }
    $TEST->ok($ok, $name) || $TEST->diag($diag);
    return $ok;
}
# Forget all captured warnings; a later had_no_warnings() will pass.
sub clear_warnings {
local $SIG{__WARN__};
@WARNINGS = ();
}
# Return the captured Test::NoWarnings::Warning objects so far.
sub warnings {
local $SIG{__WARN__};
return @WARNINGS;
}
# Get or (when given an argument) set the Test::Builder instance used
# for reporting.
sub builder {
local $SIG{__WARN__};
if ( @_ ) {
$TEST = shift;
}
return $TEST;
}
1;
__END__
=pod
=head1 NAME
Test::NoWarnings - Make sure you didn't emit any warnings while testing
=head1 SYNOPSIS
For scripts that have no plan
use Test::NoWarnings;
that's it, you don't need to do anything else
For scripts that look like
use Test::More tests => x;
change to
use Test::More tests => x + 1;
use Test::NoWarnings;
=head1 DESCRIPTION
In general, your tests shouldn't produce warnings. This module causes any
warnings to be captured and stored. It automatically adds an extra test that
will run when your script ends to check that there were no warnings. If
there were any warnings, the test will give a "not ok" and diagnostics of
where, when and what the warning was, including a stack trace of what was
going on when it occurred.
If some of your tests B<are supposed to> produce warnings then you should be
capturing and checking them with L<Test::Warn>, that way L<Test::NoWarnings>
will not see them and so not complain.
The test is run by an C<END> block in Test::NoWarnings. It will not be run
when any forked children exit.
=head1 USAGE
Simply by using the module, you automatically get an extra test at the end
of your script that checks that no warnings were emitted. So just stick
use Test::NoWarnings;
at the top of your script and continue as normal.
If you want more control you can invoke the test manually at any time with
C<had_no_warnings>.
The warnings your test has generated so far are stored in an array. You can
look inside and clear this whenever you want with C<warnings()> and
C<clear_warnings>, however, if you are doing this sort of thing then you
probably want to use L<Test::Warn> in combination with L<Test::NoWarnings>.
=head2 use vs require
You will almost always want to do
use Test::NoWarnings
If you do a C<require> rather than a C<use>, then there will be no automatic
test at the end of your script.
=head2 Output
If warning is captured during your test then the details will output as part
of the diagnostics. You will get:
=over 2
=item o
the number and name of the test that was executed just before the warning
(if no test had been executed these will be 0 and '')
=item o
the message passed to C<warn>,
=item o
a full dump of the stack when warn was called, courtesy of the C<Carp>
module
=back
By default, all warning messages will be emitted in one block at the end of
your test script.
=head2 The :early pragma
One common complaint from people using Test::NoWarnings is that all of the
warnings are emitted in one go at the end. While this is the safest and
most correct time to emit these diagnostics, it can make debugging these
warnings difficult.
As of Test::NoWarnings 1.04 you can provide an experimental C<:early> pragma
when loading the module to force warnings to be thrown via diag at the time
that they actually occur.
use Test::NoWarnings ':early';
As this will cause the diag to be emitted against the previous test and not
the one in which the warning actually occurred it is recommended that the
pragma be turned on only for debugging and left off when not needed.
=head1 FUNCTIONS
=head2 had_no_warnings
This checks that there have been no warnings emitted by your test scripts.
Usually you will not call this explicitly as it is called automatically when
your script finishes.
=head2 clear_warnings
This will clear the array of warnings that have been captured. If the array
is empty then a call to C<had_no_warnings()> will produce a pass result.
=head2 warnings
This will return the array of warnings captured so far. Each element of this
array is an object containing information about the warning. The following
methods are available on these object.
=over 2
=item *
$warn-E<gt>getMessage
Get the message that would been printed by the warning.
=item *
$warn-E<gt>getCarp
Get a stack trace of what was going on when the warning happened, this stack
trace is just a string generated by the L<Carp> module.
=item *
$warn-E<gt>getTrace
Get a stack trace object generated by the L<Devel::StackTrace> module. This
will return undef if L<Devel::StackTrace> is not installed.
=item *
$warn-E<gt>getTest
Get the number of the test that executed before the warning was emitted.
=item *
$warn-E<gt>getTestName
Get the name of the test that executed before the warning was emitted.
=back
=head1 PITFALLS
When counting your tests for the plan, don't forget to include the test that
runs automatically when your script ends.
=head1 SUPPORT
Bugs should be reported via the CPAN bug tracker at
L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Test-NoWarnings>
For other issues, contact the author.
=head1 HISTORY
This was previously known as L<Test::Warn::None>
=head1 SEE ALSO
L<Test::Builder>, L<Test::Warn>
=head1 AUTHORS
Fergal Daly E<lt>fergal@esatclear.ieE<gt>
Adam Kennedy E<lt>adamk@cpan.orgE<gt>
=head1 COPYRIGHT
Copyright 2003 - 2007 Fergal Daly.
Some parts copyright 2010 - 2011 Adam Kennedy.
This program is free software and comes with no warranty. It is distributed
under the LGPL license
See the file F<LGPL> included in this distribution or
F<http://www.fsf.org/licenses/licenses.html>.
=cut

View File

@@ -0,0 +1,78 @@
package Test::NoWarnings::Warning;

# Value object describing one captured warning: its message, a Carp
# backtrace, an optional Devel::StackTrace, and the number and name of
# the test that completed just before the warning fired.

use 5.006;
use strict;
use Carp ();
use vars qw{$VERSION};

BEGIN {
    $VERSION = '1.04';
    # Stack-trace support is optional; silently absent if not installed.
    eval "require Devel::StackTrace";
}

# Constructor: any key/value pairs become initial object state.
sub new {
    my ($class, %self) = @_;
    return bless {%self}, $class;
}

sub getTrace {
    my $self = shift;
    return $self->{Trace};
}

# Capture a Devel::StackTrace (when available), hiding this package and
# any additional classes supplied by the caller.
sub fillTrace {
    my $self = shift;
    if ($Devel::StackTrace::VERSION) {
        $self->{Trace} = Devel::StackTrace->new(
            ignore_class => [__PACKAGE__, @_],
        );
    }
}

sub getCarp {
    my $self = shift;
    return $self->{Carp};
}

# Record a Carp::longmess backtrace for $msg, keeping this package out
# of the reported call stack.
sub fillCarp {
    my ($self, $msg) = @_;
    $Carp::Internal{ __PACKAGE__ . "" }++;
    local $Carp::CarpLevel = $Carp::CarpLevel + 1;
    $self->{Carp} = Carp::longmess($msg);
    $Carp::Internal{ __PACKAGE__ . "" }--;
}

sub getMessage {
    my $self = shift;
    return $self->{Message};
}

sub setMessage {
    my ($self, $msg) = @_;
    $self->{Message} = $msg;
}

# Record the number and name of the most recently completed test from
# the given Test::Builder instance (0 and "" when none have run).
sub fillTest {
    my ($self, $builder) = @_;
    my $last = $builder->current_test;
    $self->{Test} = $last;
    my @details = $builder->details;
    $self->{TestName} = $last ? $details[$last - 1]->{name} : "";
}

sub getTest {
    my $self = shift;
    return $self->{Test};
}

sub getTestName {
    my $self = shift;
    return $self->{TestName};
}

# Human-readable rendering used in failure diagnostics.
sub toString {
    my $self = shift;
    return <<EOM;
Previous test $self->{Test} '$self->{TestName}'
$self->{Carp}
EOM
}

1;

View File

@@ -0,0 +1,309 @@
package Test::Pod;
use strict;
use warnings;
=head1 NAME
Test::Pod - check for POD errors in files
=head1 VERSION
Version 1.52
=cut
our $VERSION = '1.52';
=head1 SYNOPSIS
C<Test::Pod> lets you check the validity of a POD file, and report
its results in standard C<Test::Simple> fashion.
use Test::Pod tests => $num_tests;
pod_file_ok( $file, "Valid POD file" );
Module authors can include the following in a F<t/pod.t> file and
have C<Test::Pod> automatically find and check all POD files in a
module distribution:
use Test::More;
eval "use Test::Pod 1.00";
plan skip_all => "Test::Pod 1.00 required for testing POD" if $@;
all_pod_files_ok();
You can also specify a list of files to check, using the
C<all_pod_files()> function supplied:
use strict;
use Test::More;
eval "use Test::Pod 1.00";
plan skip_all => "Test::Pod 1.00 required for testing POD" if $@;
my @poddirs = qw( blib script );
all_pod_files_ok( all_pod_files( @poddirs ) );
Or even (if you're running under L<Apache::Test>):
use strict;
use Test::More;
eval "use Test::Pod 1.00";
plan skip_all => "Test::Pod 1.00 required for testing POD" if $@;
my @poddirs = qw( blib script );
use File::Spec::Functions qw( catdir updir );
all_pod_files_ok(
all_pod_files( map { catdir updir, $_ } @poddirs )
);
=head1 DESCRIPTION
Check POD files for errors or warnings in a test file, using
C<Pod::Simple> to do the heavy lifting.
=cut
use Test::Builder;
use Pod::Simple;
# Directory names that hold VCS/build metadata and must never be
# scanned for POD files (value names the tool the directory belongs to).
our %ignore_dirs = (
'.bzr' => 'Bazaar',
'.git' => 'Git',
'.hg' => 'Mercurial',
'.pc' => 'quilt',
'.svn' => 'Subversion',
CVS => 'CVS',
RCS => 'RCS',
SCCS => 'SCCS',
_darcs => 'darcs',
_sgbak => 'Vault/Fortress',
);
# Shared Test::Builder instance used by all the test functions below.
my $Test = Test::Builder->new;
# Export the public test functions into the caller's namespace and pass
# any remaining arguments (e.g. tests => $n) to Test::Builder's plan().
sub import {
    my $class  = shift;
    my $target = caller;
    no strict 'refs';
    for my $name (qw( pod_file_ok all_pod_files all_pod_files_ok )) {
        *{"${target}::${name}"} = \&{$name};
    }
    $Test->exported_to($target);
    $Test->plan(@_);
}
# Hook for subclasses to layer extra checks on top of the basic POD
# parse. The base implementation passes the result through unchanged;
# ($errata, $file) in @_ are provided for subclasses to inspect.
sub _additional_test_pod_specific_checks {
    my $ok = shift;
    return $ok;
}
=head1 FUNCTIONS
=head2 pod_file_ok( FILENAME[, TESTNAME ] )
C<pod_file_ok()> will okay the test if the POD parses correctly. Certain
conditions are not reported yet, such as a file with no pod in it at all.
When it fails, C<pod_file_ok()> will show any pod checking errors as
diagnostics.
The optional second argument TESTNAME is the name of the test. If it
is omitted, C<pod_file_ok()> chooses a default test name "POD test
for FILENAME".
=cut
# pod_file_ok($file, ?$name) - parse $file with Pod::Simple and emit a
# single ok/not-ok. Fails with per-line diagnostics when errata are
# found; a missing file fails immediately. Returns the test result.
sub pod_file_ok {
my $file = shift;
my $name = @_ ? shift : "POD test for $file";
if ( !-f $file ) {
$Test->ok( 0, $name );
$Test->diag( "$file does not exist" );
return;
}
my $checker = Pod::Simple->new;
$checker->output_string( \my $trash ); # Ignore any output
$checker->parse_file( $file );
my $ok = !$checker->any_errata_seen;
# Subclass hook; the ||= also guarantees the errata hash exists for
# the diagnostics loop below.
$ok = _additional_test_pod_specific_checks( $ok, ($checker->{errata}||={}), $file );
# A file with no POD at all still passes, but is labelled as such.
$name .= ' (no pod)' if !$checker->content_seen;
$Test->ok( $ok, $name );
if ( !$ok ) {
# errata maps line number => arrayref of error strings.
my $lines = $checker->{errata};
for my $line ( sort { $a<=>$b } keys %$lines ) {
my $errors = $lines->{$line};
$Test->diag( "$file ($line): $_" ) for @$errors;
}
}
return $ok;
} # pod_file_ok
=head2 all_pod_files_ok( [@entries] )
Checks all the files under C<@entries> for valid POD. It runs
L<all_pod_files()> on directories and assumes everything else to be a file to
be tested. It calls the C<plan()> function for you (one test for each file),
so you can't have already called C<plan>.
If C<@entries> is empty or not passed, the function finds all POD files in
files in the F<blib> directory if it exists, or the F<lib> directory if not.
A POD file matches the conditions specified below in L</all_pod_files>.
If you're testing a module, just make a F<t/pod.t>:
use Test::More;
eval "use Test::Pod 1.00";
plan skip_all => "Test::Pod 1.00 required for testing POD" if $@;
all_pod_files_ok();
Returns true if all pod files are ok, or false if any fail.
=cut
# Plan one test per POD file found under @entries (defaulting to blib/
# or lib/) and run pod_file_ok on each; directories are expanded via
# all_pod_files. Skips everything when no files are found. Returns true
# when every file passed, false otherwise.
sub all_pod_files_ok {
    my @entries = @_ ? @_ : _starting_points();
    my @files = map { -d $_ ? all_pod_files($_) : $_ } @entries;
    unless (@files) {
        $Test->skip_all("No files found in (@entries)\n");
        return 1;
    }
    $Test->plan(tests => scalar @files);
    my $all_ok = 1;
    for my $file (@files) {
        undef $all_ok unless pod_file_ok($file);
    }
    return $all_ok;
}
=head2 all_pod_files( [@dirs] )
X<all_pod_files>
Returns a list of all the POD files in I<@dirs> and in directories below. If
no directories are passed, it defaults to F<blib> if F<blib> exists, or else
F<lib> if not. Skips any files in F<CVS>, F<.svn>, F<.git> and similar
directories. See C<%Test::Pod::ignore_dirs> for a list of them.
A POD file is:
=over 4
=item * Any file that ends in F<.pl>, F<.PL>, F<.pm>, F<.pod>, F<.psgi> or F<.t>.
=item * Any file that has a first line with a shebang and "perl" on it.
=item * Any file that ends in F<.bat> and has a first line with "--*-Perl-*--" on it.
=back
The order of the files returned is machine-dependent. If you want them
sorted, you'll have to sort them yourself.
=cut
# all_pod_files(@dirs) - recursively collect POD-bearing files (see
# _is_perl) under @dirs, defaulting to blib/ or lib/. Directories named
# in %ignore_dirs are pruned. Order is filesystem-dependent.
sub all_pod_files {
my @pod;
require File::Find;
File::Find::find({
# Prune ignored directory names (but keep same-named plain files).
# NOTE(review): relies on File::Spec having been loaded elsewhere
# (e.g. by File::Find) -- confirm.
preprocess => sub { grep {
!exists $ignore_dirs{$_}
|| !-d File::Spec->catfile($File::Find::dir, $_)
} @_ },
wanted => sub { -f $_ && _is_perl($_) && push @pod, $File::Find::name },
no_chdir => 1,
}, @_ ? @_ : _starting_points());
return @pod;
}
# Default search root: prefer a built blib/ tree, else the lib/ source.
sub _starting_points {
    return -e 'blib' ? 'blib' : 'lib';
}
# Decide whether $file should be checked for POD: a well-known Perl
# extension, a first line with a perl shebang, or a .bat file carrying
# the "--*-Perl-*--" marker. Returns 1 or an empty list.
sub _is_perl {
    my ($file) = @_;

    # Well-known Perl suffixes need no inspection.
    return 1 if $file =~ /[.](?:PL|p(?:[lm]|od|sgi)|t)$/;

    # Otherwise peek at the first line; unreadable files are rejected.
    open my $fh, '<', $file or return;
    my $first_line = <$fh>;
    close $fh;
    return unless $first_line;

    # A shebang mentioning perl...
    return 1 if $first_line =~ /^#!.*perl/;

    # ...or a Windows batch wrapper with the Perl marker comment.
    return 1 if $file =~ /[.]bat$/i and $first_line =~ /--[*]-Perl-[*]--/;

    return;
}
=head1 SUPPORT
This module is managed in an open L<GitHub
repository|http://github.com/perl-pod/test-pod/>. Feel free to fork and
contribute, or to clone L<git://github.com/perl-pod/test-pod.git> and send
patches!
Found a bug? Please L<post|http://github.com/perl-pod/test-pod/issues> or
L<email|mailto:bug-test-pod@rt.cpan.org> a report!
=head1 AUTHORS
=over
=item David E. Wheeler <david@justatheory.com>
Current maintainer.
=item Andy Lester C<< <andy at petdance.com> >>
Maintainer emeritus.
=item brian d foy
Original author.
=back
=head1 ACKNOWLEDGEMENTS
Thanks brian d foy for the original code, and to these folks for contributions:
=over
=item * Andy Lester
=item * David E. Wheeler
=item * Paul Miller
=item * Peter Edwards
=item * Luca Ferrari
=back
=head1 COPYRIGHT AND LICENSE
Copyright 2006-2010, Andy Lester; 2010-2015 David E. Wheeler. Some Rights
Reserved.
This module is free software; you can redistribute it and/or modify it under
the same terms as Perl itself.
=cut
1;

View File

@@ -0,0 +1,318 @@
package Test::Pod::Coverage;
=head1 NAME
Test::Pod::Coverage - Check for pod coverage in your distribution.
=head1 VERSION
Version 1.10
=cut
our $VERSION = "1.10";
=head1 SYNOPSIS
In one of your dist's test files (eg C<t/pod-coverage.t>):
use Test::Pod::Coverage tests=>1;
pod_coverage_ok( "Foo::Bar", "Foo::Bar is covered" );
=head1 DESCRIPTION
Test::Pod::Coverage is used to create a test for your distribution,
to ensure that all relevant files in your distribution are appropriately
documented in pod.
Can also be called with L<Pod::Coverage> parms.
use Test::Pod::Coverage tests=>1;
pod_coverage_ok(
"Foo::Bar",
{ also_private => [ qr/^[A-Z_]+$/ ], },
"Foo::Bar, with all-caps functions as privates",
);
The L<Pod::Coverage> parms are also useful for subclasses that don't
re-document the parent class's methods. Here's an example from
L<Mail::SRS>.
pod_coverage_ok( "Mail::SRS" ); # No exceptions
# Define the three overridden methods.
my $trustme = { trustme => [qr/^(new|parse|compile)$/] };
pod_coverage_ok( "Mail::SRS::DB", $trustme );
pod_coverage_ok( "Mail::SRS::Guarded", $trustme );
pod_coverage_ok( "Mail::SRS::Reversable", $trustme );
pod_coverage_ok( "Mail::SRS::Shortcut", $trustme );
Alternately, you could use L<Pod::Coverage::CountParents>, which always allows
a subclass to reimplement its parents' methods without redocumenting them. For
example:
my $trustparents = { coverage_class => 'Pod::Coverage::CountParents' };
pod_coverage_ok( "IO::Handle::Frayed", $trustparents );
(The C<coverage_class> parameter is not passed to the coverage class with other
parameters.)
If you want POD coverage for your module, but don't want to make
Test::Pod::Coverage a prerequisite for installing, create the following
as your F<t/pod-coverage.t> file:
use Test::More;
eval "use Test::Pod::Coverage";
plan skip_all => "Test::Pod::Coverage required for testing pod coverage" if $@;
plan tests => 1;
pod_coverage_ok( "Pod::Master::Html");
Finally, Module authors can include the following in a F<t/pod-coverage.t>
file and have C<Test::Pod::Coverage> automatically find and check all
modules in the module distribution:
use Test::More;
eval "use Test::Pod::Coverage 1.00";
plan skip_all => "Test::Pod::Coverage 1.00 required for testing POD coverage" if $@;
all_pod_coverage_ok();
=cut
use 5.006;
use strict;
use warnings;
use Pod::Coverage;
use Test::Builder;
my $Test = Test::Builder->new;
# Install the three public functions into the caller's namespace and
# forward any remaining import arguments to Test::Builder::plan().
sub import {
    my $class  = shift;
    my $caller = caller;

    {
        no strict 'refs';
        for my $func (qw( pod_coverage_ok all_pod_coverage_ok all_modules )) {
            *{"${caller}::$func"} = \&{$func};
        }
    }

    $Test->exported_to($caller);
    $Test->plan(@_);
}
=head1 FUNCTIONS
All functions listed below are exported to the calling namespace.
=head2 all_pod_coverage_ok( [$parms, ] $msg )
Checks that the POD code in all modules in the distro have proper POD
coverage.
If the I<$parms> hashref is passed in, it's passed into the
C<Pod::Coverage> object that the function uses. Check the
L<Pod::Coverage> manual for what those can be.
The exception is the C<coverage_class> parameter, which specifies a class to
use for coverage testing. It defaults to C<Pod::Coverage>.
=cut
# Plan one test per module found in the distribution and run
# pod_coverage_ok() on each.  When no modules are found, a single
# passing test is emitted instead.  Returns overall success.
sub all_pod_coverage_ok {
    my $parms = ( @_ && ref $_[0] eq "HASH" ) ? shift : {};
    my $msg   = shift;

    my @modules = all_modules();
    unless (@modules) {
        $Test->plan( tests => 1 );
        $Test->ok( 1, "No modules found." );
        return 1;
    }

    $Test->plan( tests => scalar @modules );

    my $all_ok = 1;
    for my $module (@modules) {
        my $this_msg = defined $msg ? $msg : "Pod coverage on $module";
        $all_ok = 0 unless pod_coverage_ok( $module, $parms, $this_msg );
    }

    return $all_ok;
}
=head2 pod_coverage_ok( $module, [$parms, ] $msg )
Checks that the POD code in I<$module> has proper POD coverage.
If the I<$parms> hashref is passed in, it's passed into the
C<Pod::Coverage> object that the function uses. Check the
L<Pod::Coverage> manual for what those can be.
The exception is the C<coverage_class> parameter, which specifies a class to
use for coverage testing. It defaults to C<Pod::Coverage>.
=cut
# Check POD coverage of $module and report through Test::Builder.
# An optional leading hashref supplies Pod::Coverage parms; the
# coverage_class parm selects the checker class (default Pod::Coverage)
# and is not passed through.  Returns the boolean test outcome.
sub pod_coverage_ok {
    my $module = shift;
    my %parms  = ( @_ && ref $_[0] eq "HASH" ) ? %{ (shift) } : ();
    my $msg    = @_ ? shift : "Pod coverage on $module";

    my $pc_class = ( delete $parms{coverage_class} ) || 'Pod::Coverage';
    # String eval is required here: the class name is only known at runtime.
    eval "require $pc_class" or die $@;

    my $pc     = $pc_class->new( package => $module, %parms );
    my $rating = $pc->coverage;

    my $ok;
    if ( !defined $rating ) {
        # No symbols at all: pass only when the reason is that there are
        # no public symbols to document.
        my $why       = $pc->why_unrated;
        my $nopublics = ( $why =~ "no public symbols defined" );
        my $verbose   = $ENV{HARNESS_VERBOSE} || 0;
        $ok = $nopublics;
        $Test->ok( $ok, $msg );
        $Test->diag("$module: $why") unless $nopublics && !$verbose;
    }
    else {
        $ok = ( $rating == 1 );
        $Test->ok( $ok, $msg );
        unless ($ok) {
            my @naked  = sort $pc->naked;
            my $plural = @naked == 1 ? "" : "s";
            $Test->diag(
                sprintf( "Coverage for %s is %3.1f%%, with %d naked subroutine$plural:",
                    $module, $rating * 100, scalar @naked )
            );
            $Test->diag("\t$_") for @naked;
        }
    }

    return $ok;
}
=head2 all_modules( [@dirs] )
Returns a list of all modules in I<$dir> and in directories below. If
no directories are passed, it defaults to F<blib> if F<blib> exists,
or F<lib> if not.
Note that the modules are as "Foo::Bar", not "Foo/Bar.pm".
The order of the files returned is machine-dependent. If you want them
sorted, you'll have to sort them yourself.
=cut
# Breadth-first scan of the given directories (default:
# _starting_points()) for .pm files, returning their package-style
# names ("Foo::Bar", not "Foo/Bar.pm").  A leading starter-directory
# component and a leading "lib" component are stripped; CVS/.svn
# directories are skipped; each path component is untainted.
# Dies on a path component that cannot be untainted.
sub all_modules {
    my @starters = @_ ? @_ : _starting_points();
    my %starters = map { $_ => 1 } @starters;

    my @queue = @starters;
    my @modules;

    while (@queue) {
        my $file = shift @queue;

        if ( -d $file ) {
            # Lexical dirhandle instead of the old `local *DH` bareword glob.
            opendir my $dh, $file or next;
            my @newfiles = readdir $dh;
            closedir $dh;

            @newfiles = File::Spec->no_upwards(@newfiles);
            @newfiles = grep { $_ ne "CVS" && $_ ne ".svn" } @newfiles;

            push @queue, map {"$file/$_"} @newfiles;
        }

        if ( -f $file ) {
            next unless $file =~ /\.pm$/;

            my @parts = File::Spec->splitdir($file);
            shift @parts if @parts && exists $starters{ $parts[0] };
            shift @parts if @parts && $parts[0] eq "lib";
            $parts[-1] =~ s/\.pm$// if @parts;

            # Untaint each component; only word-ish characters are allowed.
            for (@parts) {
                if ( /^([a-zA-Z0-9_\.\-]+)$/ && ( $_ eq $1 ) ) {
                    $_ = $1;    # Untaint the original
                }
                else {
                    die qq{Invalid and untaintable filename "$file"!};
                }
            }

            push @modules, join( "::", @parts );
        }
    }    # while

    return @modules;
}
# Default scan root: the build tree when present, else the source lib.
sub _starting_points {
    return -e 'blib' ? 'blib' : 'lib';
}
=head1 BUGS
Please report any bugs or feature requests to
C<bug-test-pod-coverage at rt.cpan.org>, or through the web interface at
L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Test-Pod-Coverage>.
I will be notified, and then you'll automatically be notified of progress on
your bug as I make changes.
=head1 SUPPORT
You can find documentation for this module with the perldoc command.
perldoc Test::Pod::Coverage
You can also look for information at:
=over 4
=item * AnnoCPAN: Annotated CPAN documentation
L<http://annocpan.org/dist/Test-Pod-Coverage>
=item * CPAN Ratings
L<http://cpanratings.perl.org/d/Test-Pod-Coverage>
=item * RT: CPAN's request tracker
L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=Test-Pod-Coverage>
=item * Search CPAN
L<http://search.cpan.org/dist/Test-Pod-Coverage>
=back
=head1 REPOSITORY
L<https://github.com/neilbowers/Test-Pod-Coverage>
=head1 AUTHOR
Written by Andy Lester, C<< <andy at petdance.com> >>.
=head1 ACKNOWLEDGEMENTS
Thanks to Ricardo Signes for patches, and Richard Clamp for
writing Pod::Coverage.
=head1 COPYRIGHT & LICENSE
Copyright 2006, Andy Lester, All Rights Reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the Artistic License version 2.0.
See http://dev.perl.org/licenses/ for more information
=cut
1;

View File

@@ -0,0 +1,220 @@
package Test::Simple;
use 5.006;
use strict;
our $VERSION = '1.302183';
use Test::Builder::Module;
our @ISA = qw(Test::Builder::Module);
our @EXPORT = qw(ok);
my $CLASS = __PACKAGE__;
=head1 NAME
Test::Simple - Basic utilities for writing tests.
=head1 SYNOPSIS
use Test::Simple tests => 1;
ok( $foo eq $bar, 'foo is bar' );
=head1 DESCRIPTION
** If you are unfamiliar with testing B<read L<Test::Tutorial> first!> **
This is an extremely simple, extremely basic module for writing tests
suitable for CPAN modules and other pursuits. If you wish to do more
complicated testing, use the Test::More module (a drop-in replacement
for this one).
The basic unit of Perl testing is the ok. For each thing you want to
test your program will print out an "ok" or "not ok" to indicate pass
or fail. You do this with the C<ok()> function (see below).
The only other constraint is you must pre-declare how many tests you
plan to run. This is in case something goes horribly wrong during the
test and your test program aborts, or skips a test or whatever. You
do this like so:
use Test::Simple tests => 23;
You must have a plan.
=over 4
=item B<ok>
ok( $foo eq $bar, $name );
ok( $foo eq $bar );
C<ok()> is given an expression (in this case C<$foo eq $bar>). If it's
true, the test passed. If it's false, it didn't. That's about it.
C<ok()> prints out either "ok" or "not ok" along with a test number (it
keeps track of that for you).
# This produces "ok 1 - Hell not yet frozen over" (or not ok)
ok( get_temperature($hell) > 0, 'Hell not yet frozen over' );
If you provide a $name, that will be printed along with the "ok/not
ok" to make it easier to find your test when it fails (just search for
the name). It also makes it easier for the next guy to understand
what your test is for. It's highly recommended you use test names.
All tests are run in scalar context. So this:
ok( @stuff, 'I have some stuff' );
will do what you mean (fail if stuff is empty)
=cut
# Report one pass/fail result through the shared Test::Builder object.
# The prototype is part of the long-standing public interface.
sub ok ($;$) {    ## no critic (Subroutines::ProhibitSubroutinePrototypes)
    my $builder = $CLASS->builder;
    return $builder->ok(@_);
}
=back
Test::Simple will start by printing number of tests run in the form
"1..M" (so "1..5" means you're going to run 5 tests). This strange
format lets L<Test::Harness> know how many tests you plan on running in
case something goes horribly wrong.
If all your tests passed, Test::Simple will exit with zero (which is
normal). If anything failed it will exit with how many failed. If
you run less (or more) tests than you planned, the missing (or extras)
will be considered failures. If no tests were ever run Test::Simple
will throw a warning and exit with 255. If the test died, even after
having successfully completed all its tests, it will still be
considered a failure and will exit with 255.
So the exit codes are...
0 all tests successful
255 test died or all passed but wrong # of tests run
any other number how many failed (including missing or extras)
If you fail more than 254 tests, it will be reported as 254.
This module is by no means trying to be a complete testing system.
It's just to get you started. Once you're off the ground its
recommended you look at L<Test::More>.
=head1 EXAMPLE
Here's an example of a simple .t file for the fictional Film module.
use Test::Simple tests => 5;
use Film; # What you're testing.
my $btaste = Film->new({ Title => 'Bad Taste',
Director => 'Peter Jackson',
Rating => 'R',
NumExplodingSheep => 1
});
ok( defined($btaste) && ref $btaste eq 'Film', 'new() works' );
ok( $btaste->Title eq 'Bad Taste', 'Title() get' );
ok( $btaste->Director eq 'Peter Jackson', 'Director() get' );
ok( $btaste->Rating eq 'R', 'Rating() get' );
ok( $btaste->NumExplodingSheep == 1, 'NumExplodingSheep() get' );
It will produce output like this:
1..5
ok 1 - new() works
ok 2 - Title() get
ok 3 - Director() get
not ok 4 - Rating() get
# Failed test 'Rating() get'
# in t/film.t at line 14.
ok 5 - NumExplodingSheep() get
# Looks like you failed 1 tests of 5
Indicating the Film::Rating() method is broken.
=head1 CAVEATS
Test::Simple will only report a maximum of 254 failures in its exit
code. If this is a problem, you probably have a huge test script.
Split it into multiple files. (Otherwise blame the Unix folks for
using an unsigned short integer as the exit status).
Because VMS's exit codes are much, much different than the rest of the
universe, and perl does horrible mangling to them that gets in my way,
it works like this on VMS.
0 SS$_NORMAL all tests successful
4 SS$_ABORT something went wrong
Unfortunately, I can't differentiate any further.
=head1 NOTES
Test::Simple is B<explicitly> tested all the way back to perl 5.6.0.
Test::Simple is thread-safe in perl 5.8.1 and up.
=head1 HISTORY
This module was conceived while talking with Tony Bowden in his
kitchen one night about the problems I was having writing some really
complicated feature into the new Testing module. He observed that the
main problem is not dealing with these edge cases but that people hate
to write tests B<at all>. What was needed was a dead simple module
that took all the hard work out of testing and was really, really easy
to learn. Paul Johnson simultaneously had this idea (unfortunately,
he wasn't in Tony's kitchen). This is it.
=head1 SEE ALSO
=over 4
=item L<Test::More>
More testing functions! Once you outgrow Test::Simple, look at
L<Test::More>. Test::Simple is 100% forward compatible with L<Test::More>
(i.e. you can just use L<Test::More> instead of Test::Simple in your
programs and things will still work).
=back
Look in L<Test::More>'s SEE ALSO for more testing modules.
=head1 AUTHORS
Idea by Tony Bowden and Paul Johnson, code by Michael G Schwern
E<lt>schwern@pobox.comE<gt>, wardrobe by Calvin Klein.
=head1 MAINTAINERS
=over 4
=item Chad Granum E<lt>exodist@cpan.orgE<gt>
=back
=head1 COPYRIGHT
Copyright 2001-2008 by Michael G Schwern E<lt>schwern@pobox.comE<gt>.
This program is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.
See F<http://www.perl.com/perl/misc/Artistic.html>
=cut
1;

View File

@@ -0,0 +1,695 @@
use strict;
package Test::Tester;
# If Test::Builder has already created its new() by the time we load,
# our override of Test::Builder::new (installed in import()) comes too
# late to intercept the singleton -- warn the user.
BEGIN
{
if (*Test::Builder::new{CODE})
{
warn "You should load Test::Tester before Test::Builder (or anything that loads Test::Builder)"
}
}
use Test::Builder;
use Test::Tester::CaptureRunner;
use Test::Tester::Delegate;
require Exporter;
use vars qw( @ISA @EXPORT );
our $VERSION = '1.302183';
@EXPORT = qw( run_tests check_tests check_test cmp_results show_space );
@ISA = qw( Exporter );
# File-scoped collaborators: $Test reports OUR results; $Capture records
# the results of the tests under test; $Delegator impersonates the
# Test::Builder singleton and forwards to whichever of the two is active.
my $Test = Test::Builder->new;
my $Capture = Test::Tester::Capture->new;
my $Delegator = Test::Tester::Delegate->new;
$Delegator->{Object} = $Test;
my $runner = Test::Tester::CaptureRunner->new;
# True => escape non-printable characters when showing diag differences.
my $want_space = $ENV{TESTTESTERSPACE};
# Turn on escaped rendering of tricky characters in diag comparisons
# (see the SPACES AND TABS section of the POD).
sub show_space
{
$want_space = 1;
}
# Optional colourised diag output: TESTTESTERCOLOUR (or ...COLOR) holds
# "foreground,background" colour names; requires Term::ANSIColor and,
# on Windows, Win32::Console::ANSI.  Both stay empty when unset or the
# modules are unavailable.
my $colour = '';
my $reset = '';
if (my $want_colour = $ENV{TESTTESTERCOLOUR} || $ENV{TESTTESTERCOLOR})
{
if (eval { require Term::ANSIColor; 1 })
{
eval { require Win32::Console::ANSI } if 'MSWin32' eq $^O; # support color on windows platforms
my ($f, $b) = split(",", $want_colour);
$colour = Term::ANSIColor::color($f).Term::ANSIColor::color("on_$b");
$reset = Term::ANSIColor::color("reset");
}
}
# Replacement for Test::Builder::new(): always hands back the Delegate
# so method calls can be rerouted to the Capture during run_tests().
sub new_new
{
return $Delegator;
}
# Return a fresh Test::Tester::Capture object -- the "hard way" API
# (see HOW TO USE (THE HARD WAY) in the POD).
sub capture
{
return Test::Tester::Capture->new;
}
# Experimental: switch to a runner that captures output via filehandles
# and hand back the real builder.
sub fh
{
# experiment with capturing output, I don't like it
$runner = Test::Tester::FHRunner->new;
return $Test;
}
# Walk up the call stack looking for Test::Tester::run_tests and return
# how many frames up it sits (one past the matching frame).  If
# run_tests is not on the stack, the full stack depth plus one is
# returned instead.
sub find_run_tests
{
    my $depth = 1;

    while ( my ($sub) = ( caller($depth) )[3] ) {
        $depth++;
        return $depth if $sub eq "Test::Tester::run_tests";
    }

    return $depth;
}
# Run the test-containing sub with the Delegate temporarily pointed at
# the Capture object, so every Test::Builder call inside is recorded
# instead of emitted.  Returns any premature diagnostics followed by
# the list of captured result hashes.
sub run_tests
{
# local() on the hash element restores $Test automatically when this
# sub exits, even if the tests die.
local($Delegator->{Object}) = $Capture;
$runner->run_tests(@_);
return ($runner->get_premature, $runner->get_results);
}
# Single-test convenience wrapper around check_tests(): wraps the one
# expected-result hash in an array ref, then tail-calls check_tests via
# goto so $Test::Builder::Level bookkeeping stays correct.
sub check_test
{
my $test = shift;
my $expect = shift;
my $name = shift;
$name = "" unless defined($name);
@_ = ($test, [$expect], $name);
goto &check_tests;
}
# Run \&$test via run_tests(), assert it completed without dying and
# produced no premature diagnostics, then compare each captured result
# against the corresponding hash in @$expects.  Returns what run_tests
# returned so callers can inspect the results further.
sub check_tests
{
my $test = shift;
my $expects = shift;
my $name = shift;
$name = "" unless defined($name);
# eval so a dying test becomes a failing "completed" assertion rather
# than aborting the whole script.
my ($prem, @results) = eval { run_tests($test, $name) };
$Test->ok(! $@, "Test '$name' completed") || $Test->diag($@);
$Test->ok(! length($prem), "Test '$name' no premature diagnostication") ||
$Test->diag("Before any testing anything, your tests said\n$prem");
# Bump Level so comparison failures are reported against our caller.
local $Test::Builder::Level = $Test::Builder::Level + 1;
cmp_results(\@results, $expects, $name);
return ($prem, @results);
}
# Compare one named field of a captured result against the expectation,
# but only when the expectation actually supplies that field.
sub cmp_field
{
    my ($result, $expect, $field, $desc) = @_;

    return unless defined $expect->{$field};

    $Test->is_eq($result->{$field}, $expect->{$field},
        "$desc compare $field");
}
# Compare a single captured result hash against an expectation hash,
# reporting differences through $Test.  Fields (ok, actual_ok, type,
# reason, name) are only compared when present in the expectation.
# depth defaults to 1 and is skipped when explicitly undef.  diag may
# be a plain string or a qr// pattern; on mismatch both strings are
# rendered (quoted or escaped per $want_space) for easy comparison.
sub cmp_result
{
    my ($result, $expect, $name) = @_;

    my $sub_name = $result->{name};
    # Bug fix: the guard used to test defined($name), leaving $sub_name
    # undef (and warning) for results that carry no name.
    $sub_name = "" unless defined($sub_name);

    my $desc = "subtest '$sub_name' of '$name'";

    {
        local $Test::Builder::Level = $Test::Builder::Level + 1;

        cmp_field($result, $expect, "ok", $desc);
        cmp_field($result, $expect, "actual_ok", $desc);
        cmp_field($result, $expect, "type", $desc);
        cmp_field($result, $expect, "reason", $desc);
        cmp_field($result, $expect, "name", $desc);
    }

    # if we got no depth then default to 1
    my $depth = 1;
    if (exists $expect->{depth})
    {
        $depth = $expect->{depth};
    }

    # if depth was explicitly undef then don't test it
    if (defined $depth)
    {
        $Test->is_eq($result->{depth}, $depth, "checking depth") ||
            $Test->diag('You need to change $Test::Builder::Level');
    }

    if (defined(my $exp = $expect->{diag}))
    {
        my $got = '';
        if (ref $exp eq 'Regexp') {
            if (not $Test->like($result->{diag}, $exp,
                "subtest '$sub_name' of '$name' compare diag"))
            {
                $got = $result->{diag};
            }
        } else {
            # if there actually is some diag then put a \n on the end if it's not
            # there already
            $exp .= "\n" if (length($exp) and $exp !~ /\n$/);
            if (not $Test->ok($result->{diag} eq $exp,
                "subtest '$sub_name' of '$name' compare diag"))
            {
                $got = $result->{diag};
            }
        }

        if ($got) {
            my $glen = length($got);
            my $elen = length($exp);
            # Render both strings line by line: fully escaped when
            # show_space() is on, otherwise quoted so trailing
            # whitespace is visible.
            for ($got, $exp)
            {
                my @lines = split("\n", $_);
                $_ = join("\n", map {
                    if ($want_space)
                    {
                        $_ = $colour.escape($_).$reset;
                    }
                    else
                    {
                        "'$colour$_$reset'"
                    }
                } @lines);
            }

            $Test->diag(<<EOM);
Got diag ($glen bytes):
$got
Expected diag ($elen bytes):
$exp
EOM
        }
    }
}
# Return $str with every character outside the visible ASCII range
# (other than newline) rewritten as \x{hh}, so invisible differences
# between diagnostics become visible.
sub escape
{
    my $str = shift;

    my @rendered = map {
        my $code = ord($_);
        ( ($code > 32 and $code < 125) or $code == 10 )
            ? $_
            : sprintf('\x{%x}', $code);
    } split("", $str);

    return join("", @rendered);
}
# Check that the number of captured results matches the number of
# expectations, then compare each pair with cmp_result().
sub cmp_results
{
    my ($results, $expects, $name) = @_;

    $Test->is_num(scalar @$results, scalar @$expects,
        "Test '$name' result count");

    for my $i (0 .. $#{$expects})
    {
        local $Test::Builder::Level = $Test::Builder::Level + 1;
        cmp_result($results->[$i], $expects->[$i], $name);
    }
}
######## nicked from Test::More
# Set the test plan for the caller and honour an optional
# "import => [...]" pair in the arguments, re-exporting the listed
# symbols into the caller's namespace.  (Lifted from Test::More.)
sub plan {
my(@plan) = @_;
my $caller = caller;
$Test->exported_to($caller);
my @imports = ();
foreach my $idx (0..$#plan) {
if( $plan[$idx] eq 'import' ) {
# Splice the import pair out so Test::Builder::plan never sees it.
my($tag, $imports) = splice @plan, $idx, 2;
@imports = @$imports;
last;
}
}
$Test->plan(@plan);
__PACKAGE__->_export_to_level(1, __PACKAGE__, @imports);
}
# Loading Test::Tester hijacks Test::Builder->new so it returns our
# Delegate; test modules loaded afterwards then talk to the Delegate.
# Tail-calls plan() via goto so caller() inside plan() still sees the
# user's package.
sub import {
my($class) = shift;
{
no warnings 'redefine';
*Test::Builder::new = \&new_new;
}
goto &plan;
}
# Compatibility shim for older Exporter versions: export the given
# symbols to the package $level frames up the call stack.  The third
# argument is redundant and ignored.
sub _export_to_level
{
    my ($pkg, $level, undef, @symbols) = @_;

    my $callpkg = caller($level);
    $pkg->export($callpkg, @symbols);
}
############
1;
__END__
=head1 NAME
Test::Tester - Ease testing test modules built with Test::Builder
=head1 SYNOPSIS
use Test::Tester tests => 6;
use Test::MyStyle;
check_test(
sub {
is_mystyle_eq("this", "that", "not eq");
},
{
ok => 0, # expect this to fail
name => "not eq",
diag => "Expected: 'this'\nGot: 'that'",
}
);
or
use Test::Tester tests => 6;
use Test::MyStyle;
check_test(
sub {
is_mystyle_qr("this", "that", "not matching");
},
{
ok => 0, # expect this to fail
name => "not matching",
diag => qr/Expected: 'this'\s+Got: 'that'/,
}
);
or
use Test::Tester;
use Test::More tests => 3;
use Test::MyStyle;
my ($premature, @results) = run_tests(
sub {
is_database_alive("dbname");
}
);
# now use Test::More::like to check the diagnostic output
like($results[0]->{diag}, "/^Database ping took \\d+ seconds$"/, "diag");
=head1 DESCRIPTION
If you have written a test module based on Test::Builder then Test::Tester
allows you to test it with the minimum of effort.
=head1 HOW TO USE (THE EASY WAY)
From version 0.08 Test::Tester no longer requires you to included anything
special in your test modules. All you need to do is
use Test::Tester;
in your test script B<before> any other Test::Builder based modules and away
you go.
Other modules based on Test::Builder can be used to help with the
testing. In fact you can even use functions from your module to test
other functions from the same module (while this is possible it is
probably not a good idea, if your module has bugs, then
using it to test itself may give the wrong answers).
The easiest way to test is to do something like
check_test(
sub { is_mystyle_eq("this", "that", "not eq") },
{
ok => 0, # we expect the test to fail
name => "not eq",
diag => "Expected: 'this'\nGot: 'that'",
}
);
this will execute the is_mystyle_eq test, capturing its results and
checking that they are what was expected.
You may need to examine the test results in a more flexible way, for
example, the diagnostic output may be quite long or complex or it may involve
something that you cannot predict in advance like a timestamp. In this case
you can get direct access to the test results:
my ($premature, @results) = run_tests(
sub {
is_database_alive("dbname");
}
);
like($result[0]->{diag}, "/^Database ping took \\d+ seconds$"/, "diag");
or
check_test(
sub { is_mystyle_qr("this", "that", "not matching") },
{
ok => 0, # we expect the test to fail
name => "not matching",
diag => qr/Expected: 'this'\s+Got: 'that'/,
}
);
We cannot predict how long the database ping will take so we use
Test::More's like() test to check that the diagnostic string is of the right
form.
=head1 HOW TO USE (THE HARD WAY)
I<This is here for backwards compatibility only>
Make your module use the Test::Tester::Capture object instead of the
Test::Builder one. How to do this depends on your module but assuming that
your module holds the Test::Builder object in $Test and that all your test
routines access it through $Test then providing a function something like this
sub set_builder
{
$Test = shift;
}
should allow your test scripts to do
Test::YourModule::set_builder(Test::Tester->capture);
and after that any tests inside your module will be captured.
=head1 TEST RESULTS
The result of each test is captured in a hash. These hashes are the same as
the hashes returned by Test::Builder->details but with a couple of extra
fields.
These fields are documented in L<Test::Builder> in the details() function
=over 2
=item ok
Did the test pass?
=item actual_ok
Did the test really pass? That is, did the pass come from
Test::Builder->ok() or did it pass because it was a TODO test?
=item name
The name supplied for the test.
=item type
What kind of test? Possibilities include, skip, todo etc. See
L<Test::Builder> for more details.
=item reason
The reason for the skip, todo etc. See L<Test::Builder> for more details.
=back
These fields are exclusive to Test::Tester.
=over 2
=item diag
Any diagnostics that were output for the test. This only includes
diagnostics output B<after> the test result is declared.
Note that Test::Builder ensures that any diagnostics end in a \n and
in earlier versions of Test::Tester it was essential that you have
the final \n in your expected diagnostics. From version 0.10 onward,
Test::Tester will add the \n if you forgot it. It will not add a \n if
you are expecting no diagnostics. See below for help tracking down
hard to find space and tab related problems.
=item depth
This allows you to check that your test module is setting the correct value
for $Test::Builder::Level and thus giving the correct file and line number
when a test fails. It is calculated by looking at caller() and
$Test::Builder::Level. It should count how many subroutines there are before
jumping into the function you are testing. So for example in
run_tests( sub { my_test_function("a", "b") } );
the depth should be 1 and in
sub deeper { my_test_function("a", "b") }
run_tests(sub { deeper() });
depth should be 2, that is 1 for the sub {} and one for deeper(). This
might seem a little complex but if your tests look like the simple
examples in this doc then you don't need to worry as the depth will
always be 1 and that's what Test::Tester expects by default.
B<Note>: if you do not specify a value for depth in check_test() then it
automatically compares it against 1, if you really want to skip the depth
test then pass in undef.
B<Note>: depth will not be correctly calculated for tests that run from a
signal handler or an END block or anywhere else that hides the call stack.
=back
Some of Test::Tester's functions return arrays of these hashes, just
like Test::Builder->details. That is, the hash for the first test will
be array element 1 (not 0). Element 0 will not be a hash it will be a
string which contains any diagnostic output that came before the first
test. This should usually be empty, if it's not, it means something
output diagnostics before any test results showed up.
=head1 SPACES AND TABS
Appearances can be deceptive, especially when it comes to emptiness. If you
are scratching your head trying to work out why Test::Tester is saying that
your diagnostics are wrong when they look perfectly right then the answer is
probably whitespace. From version 0.10 on, Test::Tester surrounds the
expected and got diag values with single quotes to make it easier to spot
trailing whitespace. So in this example
# Got diag (5 bytes):
# 'abcd '
# Expected diag (4 bytes):
# 'abcd'
it is quite clear that there is a space at the end of the first string.
Another way to solve this problem is to use colour and inverse video on an
ANSI terminal; see COLOUR below if you want this.
Unfortunately this is sometimes not enough, neither colour nor quotes will
help you with problems involving tabs, other non-printing characters and
certain kinds of problems inherent in Unicode. To deal with this, you can
switch Test::Tester into a mode whereby all "tricky" characters are shown as
\{xx}. Tricky characters are those with ASCII code less than 33 or higher
than 126. This makes the output more difficult to read but much easier to
find subtle differences between strings. To turn on this mode either call
C<show_space()> in your test script or set the C<TESTTESTERSPACE> environment
variable to be a true value. The example above would then look like
# Got diag (5 bytes):
# abcd\x{20}
# Expected diag (4 bytes):
# abcd
=head1 COLOUR
If you prefer to use colour as a means of finding tricky whitespace
characters then you can set the C<TESTTESTERCOLOUR> environment variable to a
comma separated pair of colours, the first for the foreground, the second
for the background. For example "white,red" will print white text on a red
background. This requires the Term::ANSIColor module. You can specify any
colour that would be acceptable to the Term::ANSIColor::color function.
If you spell colour differently, that's no problem. The C<TESTTESTERCOLOR>
variable also works (if both are set then the British spelling wins out).
=head1 EXPORTED FUNCTIONS
=head3 ($premature, @results) = run_tests(\&test_sub)
\&test_sub is a reference to a subroutine.
run_tests runs the subroutine in $test_sub and captures the results of any
tests inside it. You can run more than 1 test inside this subroutine if you
like.
$premature is a string containing any diagnostic output from before
the first test.
@results is an array of test result hashes.
=head3 cmp_result(\%result, \%expect, $name)
\%result is a ref to a test result hash.
\%expect is a ref to a hash of expected values for the test result.
cmp_result compares the result with the expected values. If any differences
are found it outputs diagnostics. You may leave out any field from the
expected result and cmp_result will not do the comparison of that field.
=head3 cmp_results(\@results, \@expects, $name)
\@results is a ref to an array of test results.
\@expects is a ref to an array of hash refs.
cmp_results checks that the results match the expected results and if any
differences are found it outputs diagnostics. It first checks that the
number of elements in \@results and \@expects is the same. Then it goes
through each result checking it against the expected result as in
cmp_result() above.
=head3 ($premature, @results) = check_tests(\&test_sub, \@expects, $name)
\&test_sub is a reference to a subroutine.
\@expect is a ref to an array of hash refs which are expected test results.
check_tests combines run_tests and cmp_tests into a single call. It also
checks if the tests died at any stage.
It returns the same values as run_tests, so you can further examine the test
results if you need to.
=head3 ($premature, @results) = check_test(\&test_sub, \%expect, $name)
\&test_sub is a reference to a subroutine.
\%expect is a ref to an hash of expected values for the test result.
check_test is a wrapper around check_tests. It combines run_tests and
cmp_tests into a single call, checking if the test died. It assumes
that only a single test is run inside \&test_sub and include a test to
make sure this is true.
It returns the same values as run_tests, so you can further examine the test
results if you need to.
=head3 show_space()
Turn on the escaping of characters as described in the SPACES AND TABS
section.
=head1 HOW IT WORKS
Normally, a test module (let's call it Test:MyStyle) calls
Test::Builder->new to get the Test::Builder object. Test::MyStyle calls
methods on this object to record information about test results. When
Test::Tester is loaded, it replaces Test::Builder's new() method with one
which returns a Test::Tester::Delegate object. Most of the time this object
behaves as the real Test::Builder object. Any methods that are called are
delegated to the real Test::Builder object so everything works perfectly.
However once we go into test mode, the method calls are no longer passed to
the real Test::Builder object, instead they go to the Test::Tester::Capture
object. This object seems exactly like the real Test::Builder object,
except, instead of outputting test results and diagnostics, it just records
all the information for later analysis.
=head1 CAVEATS
Support for calling Test::Builder->note is minimal. It's implemented
as an empty stub, so modules that use it will not crash but the calls
are not recorded for testing purposes like the others. Patches
welcome.
=head1 SEE ALSO
L<Test::Builder> the source of testing goodness. L<Test::Builder::Tester>
for an alternative approach to the problem tackled by Test::Tester -
captures the strings output by Test::Builder. This means you cannot get
separate access to the individual pieces of information and you must predict
B<exactly> what your test will output.
=head1 AUTHOR
This module is copyright 2005 Fergal Daly <fergal@esatclear.ie>, some parts
are based on other people's work.
Plan handling lifted from Test::More. written by Michael G Schwern
<schwern@pobox.com>.
Test::Tester::Capture is a cut down and hacked up version of Test::Builder.
Test::Builder was written by chromatic <chromatic@wgz.org> and Michael G
Schwern <schwern@pobox.com>.
=head1 LICENSE
Under the same license as Perl itself
See http://www.perl.com/perl/misc/Artistic.html
=cut

View File

@@ -0,0 +1,241 @@
use strict;
package Test::Tester::Capture;
our $VERSION = '1.302183';
use Test::Builder;
use vars qw( @ISA );
@ISA = qw( Test::Builder );
# Make Test::Tester::Capture thread-safe for ithreads.
BEGIN {
use Config;
# NOTE(review): share() and lock() are unconditional no-op stubs here
# even though Config is loaded; the threads::shared wiring (presumably
# guarded by $Config{useithreads}) appears to have been stripped --
# confirm against the upstream Test-Simple source.
*share = sub { 0 };
*lock = sub { 0 };
}
my $Curr_Test = 0; share($Curr_Test);
my @Test_Results = (); share(@Test_Results);
my $Prem_Diag = {diag => ""}; share($Curr_Test);
sub new
{
    # Constructor: returns a fresh, empty blessed hash.
    #
    # Test::Tester::Capture::new used to just return __PACKAGE__
    # because Test::Builder::new enforced its singleton nature by
    # returning __PACKAGE__. That has since changed; Test::Builder::new now
    # returns a blessed hash and, around version 0.78, Test::Builder::todo
    # started wanting to modify $self. To cope with this, we now return
    # a blessed hash. This is a short-term hack; the correct thing to do
    # is to detect which style of Test::Builder we're dealing with and
    # act appropriately.
    my $class = shift;
    return bless {}, $class;
}
# Record a single test result without emitting any TAP output.
# Mirrors Test::Builder::ok(): stores ok/actual_ok, the (escaped) name,
# TODO type/reason, failure diagnostics and caller depth into
# @Test_Results for later inspection via details().
# Returns the boolean outcome of the test, like Test::Builder::ok().
sub ok {
    my($self, $test, $name) = @_;
    my $ctx = $self->ctx;

    # $test might contain an object which we don't want to accidentally
    # store, so we turn it into a boolean.
    $test = $test ? 1 : 0;

    lock $Curr_Test;
    $Curr_Test++;

    my($pack, $file, $line) = $self->caller;

    my $todo = $self->todo();

    my $result = {};
    share($result);

    # Inside a TODO block a failed test still counts as "ok" at the
    # harness level; actual_ok keeps the raw boolean.
    unless( $test ) {
        @$result{ 'ok', 'actual_ok' } = ( ( $todo ? 1 : 0 ), 0 );
    }
    else {
        @$result{ 'ok', 'actual_ok' } = ( 1, $test );
    }

    if( defined $name ) {
        $name =~ s|#|\\#|g;    # # in a name can confuse Test::Harness.
        $result->{name} = $name;
    }
    else {
        $result->{name} = '';
    }

    if( $todo ) {
        my $what_todo = $todo;
        $result->{reason} = $what_todo;
        $result->{type}   = 'todo';
    }
    else {
        $result->{reason} = '';
        $result->{type}   = '';
    }

    $Test_Results[$Curr_Test-1] = $result;

    unless( $test ) {
        my $msg = $todo ? "Failed (TODO)" : "Failed";
        $result->{fail_diag} = (" $msg test ($file at line $line)\n");
    }

    # _level/_depth record where we were called from so that
    # Test::Tester::CaptureRunner::get_results() can compute nesting depth.
    $result->{diag} = "";
    $result->{_level} = $Test::Builder::Level;
    $result->{_depth} = Test::Tester::find_run_tests();

    $ctx->release;
    return $test ? 1 : 0;
}
# Record a skipped test without emitting any TAP output.  The result
# hashref (type 'skip', ok and actual_ok both true) is stored for later
# inspection via details().  Always returns 1, like Test::Builder::skip.
sub skip {
    my ($self, $reason) = @_;
    $reason ||= '';

    my $ctx = $self->ctx;

    lock($Curr_Test);
    $Curr_Test++;

    my %record;
    share(%record);
    %record = (
        'ok'      => 1,
        actual_ok => 1,
        name      => '',
        type      => 'skip',
        reason    => $reason,
        diag      => "",
        _level    => $Test::Builder::Level,
        _depth    => Test::Tester::find_run_tests(),
    );
    $Test_Results[ $Curr_Test - 1 ] = \%record;

    $ctx->release;
    return 1;
}
# Record a TODO-skipped test without emitting any TAP output.  Identical
# to skip() except the stored type is 'todo_skip' and actual_ok is 0.
# Always returns 1, like Test::Builder::todo_skip.
sub todo_skip {
    my ($self, $reason) = @_;
    $reason ||= '';

    my $ctx = $self->ctx;

    lock($Curr_Test);
    $Curr_Test++;

    my %record;
    share(%record);
    %record = (
        'ok'      => 1,
        actual_ok => 0,
        name      => '',
        type      => 'todo_skip',
        reason    => $reason,
        diag      => "",
        _level    => $Test::Builder::Level,
        _depth    => Test::Tester::find_run_tests(),
    );
    $Test_Results[ $Curr_Test - 1 ] = \%record;

    $ctx->release;
    return 1;
}
# Capture diagnostics instead of printing them.  Messages are appended
# to the most recent test's {diag} slot, or to the premature-diagnostics
# buffer if no test has run yet.  Always returns 0, like
# Test::Builder::diag.
sub diag {
    my ($self, @messages) = @_;
    return unless @messages;

    # Prevent printing headers when compiling (i.e. -c)
    return if $^C;

    my $ctx = $self->ctx;

    @messages = map { defined $_ ? $_ : 'undef' } @messages;
    push @messages, "\n" unless $messages[-1] =~ /\n\Z/;

    my $target = $Curr_Test ? $Test_Results[ $Curr_Test - 1 ] : $Prem_Diag;
    $target->{diag} .= join('', @messages);

    $ctx->release;
    return 0;
}
# Return the list of captured result hashrefs, one per test, in the
# order the tests ran.
sub details {
    return @Test_Results;
}
# Stub. Feel free to send me a patch to implement this.
# note() calls are accepted but deliberately not captured (see the
# CAVEATS section of the Test::Tester documentation).
sub note {
}
# Delegate straight to Test::Builder::explain(); no capturing is needed
# because explain() only formats values and produces no TAP output.
sub explain {
    return Test::Builder::explain(@_);
}
# Return any diagnostics that were captured before the first test ran.
sub premature
{
    my $early_output = $Prem_Diag->{diag};
    return $early_output;
}
# Read-only accessor for the current test number.  Unlike
# Test::Builder::current_test(), assignment is not supported.
sub current_test
{
    die "Don't try to change the test number!" if @_ > 1;
    return $Curr_Test;
}
# Wipe all captured state (test counter, results, premature diagnostics)
# so a fresh capture run can begin.
sub reset
{
    ($Curr_Test, @Test_Results) = (0);
    $Prem_Diag = {diag => ""};
}

1;
__END__
=head1 NAME
Test::Tester::Capture - Help testing test modules built with Test::Builder
=head1 DESCRIPTION
This is a subclass of Test::Builder that overrides many of the methods so
that they don't output anything. It also keeps track of its own set of test
results so that you can use Test::Builder based modules to perform tests on
other Test::Builder based modules.
=head1 AUTHOR
Most of the code here was lifted straight from Test::Builder and then had
chunks removed by Fergal Daly <fergal@esatclear.ie>.
=head1 LICENSE
Under the same license as Perl itself
See http://www.perl.com/perl/misc/Artistic.html
=cut

View File

@@ -0,0 +1,79 @@
# $Header: /home/fergal/my/cvs/Test-Tester/lib/Test/Tester/CaptureRunner.pm,v 1.3 2003/03/05 01:07:55 fergal Exp $
use strict;
package Test::Tester::CaptureRunner;
our $VERSION = '1.302183';
use Test::Tester::Capture;
require Exporter;
# Construct a new runner; no state is needed until run_tests() records
# the starting $Test::Builder::Level.
sub new
{
    my $class = shift;
    return bless {}, $class;
}
# Reset the capture object, remember the current $Test::Builder::Level so
# get_results() can compute call depths later, then run the supplied
# coderef of tests.
sub run_tests
{
    my ($self, $test_code) = @_;

    capture()->reset;

    $self->{StartLevel} = $Test::Builder::Level;
    $test_code->();
}
# Return the captured results, annotating each hashref with its call
# depth relative to where run_tests() was invoked.
sub get_results
{
    my $self = shift;
    my @results = capture()->details;

    my $start = $self->{StartLevel};
    foreach my $res (@results)
    {
        # Results that already carry a depth are left untouched.
        next if defined $res->{depth};
        # The constant 3 compensates for the stack frames added by
        # run_tests() and the capture machinery itself.
        my $depth = $res->{_depth} - $res->{_level} - $start - 3;
        # print "my $depth = $res->{_depth} - $res->{_level} - $start - 1\n";
        $res->{depth} = $depth;
    }

    return @results;
}
# Return any diagnostics that were emitted before the first test ran.
sub get_premature
{
    my $early_diag = capture()->premature;
    return $early_diag;
}
# Return the capture object.  Test::Tester::Capture::new hands back a
# fresh empty hash each time, but all captured state lives in
# package-level variables, so every instance views the same results.
sub capture
{
    return Test::Tester::Capture->new;
}

# BUGFIX: the module previously fell through to __END__ without returning
# a true value, which would make require()/use() of this file fail.
1;
__END__
=head1 NAME
Test::Tester::CaptureRunner - Help testing test modules built with Test::Builder
=head1 DESCRIPTION
This stuff is needed to allow me to play with other ways of monitoring the
test results.
=head1 AUTHOR
Copyright 2003 by Fergal Daly <fergal@esatclear.ie>.
=head1 LICENSE
Under the same license as Perl itself
See http://www.perl.com/perl/misc/Artistic.html
=cut

View File

@@ -0,0 +1,45 @@
use strict;
use warnings;
package Test::Tester::Delegate;
our $VERSION = '1.302183';
use Scalar::Util();
use vars '$AUTOLOAD';
# Construct a delegate wrapping $obj.  AUTOLOAD and can() below forward
# every method call to the object stored under the 'Object' key.
sub new
{
    my $pkg = shift;

    my $obj = shift;
    my $self = bless {}, $pkg;

    # BUGFIX: $obj was previously shifted off and then discarded, leaving
    # {Object} undefined so every delegated call (AUTOLOAD/can) would die
    # on an undefined value.
    $self->{Object} = $obj;

    return $self;
}
# Forward any unknown method call to the wrapped object.
# goto &$ref replaces the current stack frame entirely, so caller()
# inside the real method sees the delegate's caller — important for
# Test::Builder's $Level-based caller tracking.
sub AUTOLOAD
{
    # Extract the bare method name from the fully qualified $AUTOLOAD.
    my ($sub) = $AUTOLOAD =~ /.*::(.*?)$/;

    # Never forward destruction to the wrapped object.
    return if $sub eq "DESTROY";

    my $obj = $_[0]->{Object};

    my $ref = $obj->can($sub);
    # Replace the delegate with the wrapped object in @_ before jumping.
    shift(@_);
    unshift(@_, $obj);
    goto &$ref;
}
# Report what the *wrapped* object can do when called on an instance;
# fall back to the normal UNIVERSAL::can for class-level calls.
sub can {
    my $this = shift;
    my ($sub) = @_;

    return $this->{Object}->can($sub) if Scalar::Util::blessed($this);
    return $this->SUPER::can(@_);
}

1;

View File

@@ -0,0 +1,618 @@
=head1 NAME
Test::Tutorial - A tutorial about writing really basic tests
=head1 DESCRIPTION
I<AHHHHHHH!!!! NOT TESTING! Anything but testing!
Beat me, whip me, send me to Detroit, but don't make
me write tests!>
I<*sob*>
I<Besides, I don't know how to write the damned things.>
Is this you? Is writing tests right up there with writing
documentation and having your fingernails pulled out? Did you open up
a test and read
######## We start with some black magic
and decide that's quite enough for you?
It's ok. That's all gone now. We've done all the black magic for
you. And here are the tricks...
=head2 Nuts and bolts of testing.
Here's the most basic test program.
#!/usr/bin/perl -w
print "1..1\n";
print 1 + 1 == 2 ? "ok 1\n" : "not ok 1\n";
Because 1 + 1 is 2, it prints:
1..1
ok 1
What this says is: C<1..1> "I'm going to run one test." [1] C<ok 1>
"The first test passed". And that's about all the magic there is to
testing. Your basic unit of testing is the I<ok>. For each thing you
test, an C<ok> is printed. Simple. L<Test::Harness> interprets your test
results to determine if you succeeded or failed (more on that later).
Writing all these print statements rapidly gets tedious. Fortunately,
there's L<Test::Simple>. It has one function, C<ok()>.
#!/usr/bin/perl -w
use Test::Simple tests => 1;
ok( 1 + 1 == 2 );
That does the same thing as the previous code. C<ok()> is the backbone
of Perl testing, and we'll be using it instead of roll-your-own from
here on. If C<ok()> gets a true value, the test passes. False, it
fails.
#!/usr/bin/perl -w
use Test::Simple tests => 2;
ok( 1 + 1 == 2 );
ok( 2 + 2 == 5 );
From that comes:
1..2
ok 1
not ok 2
# Failed test (test.pl at line 5)
# Looks like you failed 1 tests of 2.
C<1..2> "I'm going to run two tests." This number is a I<plan>. It helps to
ensure your test program ran all the way through and didn't die or skip some
tests. C<ok 1> "The first test passed." C<not ok 2> "The second test failed".
Test::Simple helpfully prints out some extra commentary about your tests.
It's not scary. Come, hold my hand. We're going to give an example
of testing a module. For our example, we'll be testing a date
library, L<Date::ICal>. It's on CPAN, so download a copy and follow
along. [2]
=head2 Where to start?
This is the hardest part of testing, where do you start? People often get
overwhelmed at the apparent enormity of the task of testing a whole module.
The best place to start is at the beginning. L<Date::ICal> is an
object-oriented module, and that means you start by making an object. Test
C<new()>.
#!/usr/bin/perl -w
# assume these two lines are in all subsequent examples
use strict;
use warnings;
use Test::Simple tests => 2;
use Date::ICal;
my $ical = Date::ICal->new; # create an object
ok( defined $ical ); # check that we got something
ok( $ical->isa('Date::ICal') ); # and it's the right class
Run that and you should get:
1..2
ok 1
ok 2
Congratulations! You've written your first useful test.
=head2 Names
That output isn't terribly descriptive, is it? When you have two tests you can
figure out which one is #2, but what if you have 102 tests?
Each test can be given a little descriptive name as the second
argument to C<ok()>.
use Test::Simple tests => 2;
ok( defined $ical, 'new() returned something' );
ok( $ical->isa('Date::ICal'), " and it's the right class" );
Now you'll see:
1..2
ok 1 - new() returned something
ok 2 - and it's the right class
=head2 Test the manual
The simplest way to build up a decent testing suite is to just test what
the manual says it does. [3] Let's pull something out of the
L<Date::ICal/SYNOPSIS> and test that all its bits work.
#!/usr/bin/perl -w
use Test::Simple tests => 8;
use Date::ICal;
$ical = Date::ICal->new( year => 1964, month => 10, day => 16,
hour => 16, min => 12, sec => 47,
tz => '0530' );
ok( defined $ical, 'new() returned something' );
ok( $ical->isa('Date::ICal'), " and it's the right class" );
ok( $ical->sec == 47, ' sec()' );
ok( $ical->min == 12, ' min()' );
ok( $ical->hour == 16, ' hour()' );
ok( $ical->day == 17, ' day()' );
ok( $ical->month == 10, ' month()' );
ok( $ical->year == 1964, ' year()' );
Run that and you get:
1..8
ok 1 - new() returned something
ok 2 - and it's the right class
ok 3 - sec()
ok 4 - min()
ok 5 - hour()
not ok 6 - day()
# Failed test (- at line 16)
ok 7 - month()
ok 8 - year()
# Looks like you failed 1 tests of 8.
Whoops, a failure! [4] L<Test::Simple> helpfully lets us know on what line the
failure occurred, but not much else. We were supposed to get 17, but we
didn't. What did we get?? Dunno. You could re-run the test in the debugger
or throw in some print statements to find out.
Instead, switch from L<Test::Simple> to L<Test::More>. L<Test::More>
does everything L<Test::Simple> does, and more! In fact, L<Test::More> does
things I<exactly> the way L<Test::Simple> does. You can literally swap
L<Test::Simple> out and put L<Test::More> in its place. That's just what
we're going to do.
L<Test::More> does more than L<Test::Simple>. The most important difference at
this point is it provides more informative ways to say "ok". Although you can
write almost any test with a generic C<ok()>, it can't tell you what went
wrong. The C<is()> function lets us declare that something is supposed to be
the same as something else:
use Test::More tests => 8;
use Date::ICal;
$ical = Date::ICal->new( year => 1964, month => 10, day => 16,
hour => 16, min => 12, sec => 47,
tz => '0530' );
ok( defined $ical, 'new() returned something' );
ok( $ical->isa('Date::ICal'), " and it's the right class" );
is( $ical->sec, 47, ' sec()' );
is( $ical->min, 12, ' min()' );
is( $ical->hour, 16, ' hour()' );
is( $ical->day, 17, ' day()' );
is( $ical->month, 10, ' month()' );
is( $ical->year, 1964, ' year()' );
"Is C<< $ical->sec >> 47?" "Is C<< $ical->min >> 12?" With C<is()> in place,
you get more information:
1..8
ok 1 - new() returned something
ok 2 - and it's the right class
ok 3 - sec()
ok 4 - min()
ok 5 - hour()
not ok 6 - day()
# Failed test (- at line 16)
# got: '16'
# expected: '17'
ok 7 - month()
ok 8 - year()
# Looks like you failed 1 tests of 8.
Aha. C<< $ical->day >> returned 16, but we expected 17. A
quick check shows that the code is working fine, we made a mistake
when writing the tests. Change it to:
is( $ical->day, 16, ' day()' );
... and everything works.
Any time you're doing a "this equals that" sort of test, use C<is()>.
It even works on arrays. The test is always in scalar context, so you
can test how many elements are in an array this way. [5]
is( @foo, 5, 'foo has 5 elements' );
=head2 Sometimes the tests are wrong
This brings up a very important lesson. Code has bugs. Tests are
code. Ergo, tests have bugs. A failing test could mean a bug in the
code, but don't discount the possibility that the test is wrong.
On the flip side, don't be tempted to prematurely declare a test
incorrect just because you're having trouble finding the bug.
Invalidating a test isn't something to be taken lightly, and don't use
it as a cop out to avoid work.
=head2 Testing lots of values
We're going to be wanting to test a lot of dates here, trying to trick
the code with lots of different edge cases. Does it work before 1970?
After 2038? Before 1904? Do years after 10,000 give it trouble?
Does it get leap years right? We could keep repeating the code above,
or we could set up a little try/expect loop.
use Test::More tests => 32;
use Date::ICal;
my %ICal_Dates = (
# An ICal string And the year, month, day
# hour, minute and second we expect.
'19971024T120000' => # from the docs.
[ 1997, 10, 24, 12, 0, 0 ],
'20390123T232832' => # after the Unix epoch
[ 2039, 1, 23, 23, 28, 32 ],
'19671225T000000' => # before the Unix epoch
[ 1967, 12, 25, 0, 0, 0 ],
'18990505T232323' => # before the MacOS epoch
[ 1899, 5, 5, 23, 23, 23 ],
);
while( my($ical_str, $expect) = each %ICal_Dates ) {
my $ical = Date::ICal->new( ical => $ical_str );
ok( defined $ical, "new(ical => '$ical_str')" );
ok( $ical->isa('Date::ICal'), " and it's the right class" );
is( $ical->year, $expect->[0], ' year()' );
is( $ical->month, $expect->[1], ' month()' );
is( $ical->day, $expect->[2], ' day()' );
is( $ical->hour, $expect->[3], ' hour()' );
is( $ical->min, $expect->[4], ' min()' );
is( $ical->sec, $expect->[5], ' sec()' );
}
Now we can test bunches of dates by just adding them to
C<%ICal_Dates>. Now that it's less work to test with more dates, you'll
be inclined to just throw more in as you think of them.
Only problem is, every time we add to that we have to keep adjusting
the C<< use Test::More tests => ## >> line. That can rapidly get
annoying. There are ways to make this work better.
First, we can calculate the plan dynamically using the C<plan()>
function.
use Test::More;
use Date::ICal;
my %ICal_Dates = (
...same as before...
);
# For each key in the hash we're running 8 tests.
plan tests => keys(%ICal_Dates) * 8;
...and then your tests...
To be even more flexible, use C<done_testing>. This means we're just
running some tests, don't know how many. [6]
use Test::More; # instead of tests => 32
... # tests here
done_testing(); # reached the end safely
If you don't specify a plan, L<Test::More> expects to see C<done_testing()>
before your program exits. It will warn you if you forget it. You can give
C<done_testing()> an optional number of tests you expected to run, and if the
number ran differs, L<Test::More> will give you another kind of warning.
=head2 Informative names
Take a look at the line:
ok( defined $ical, "new(ical => '$ical_str')" );
We've added more detail about what we're testing and the ICal string
itself we're trying out to the name. So you get results like:
ok 25 - new(ical => '19971024T120000')
ok 26 - and it's the right class
ok 27 - year()
ok 28 - month()
ok 29 - day()
ok 30 - hour()
ok 31 - min()
ok 32 - sec()
If something in there fails, you'll know which one it was and that
will make tracking down the problem easier. Try to put a bit of
debugging information into the test names.
Describe what the tests test, to make debugging a failed test easier
for you or for the next person who runs your test.
=head2 Skipping tests
Poking around in the existing L<Date::ICal> tests, I found this in
F<t/01sanity.t> [7]
#!/usr/bin/perl -w
use Test::More tests => 7;
use Date::ICal;
# Make sure epoch time is being handled sanely.
my $t1 = Date::ICal->new( epoch => 0 );
is( $t1->epoch, 0, "Epoch time of 0" );
# XXX This will only work on unix systems.
is( $t1->ical, '19700101Z', " epoch to ical" );
is( $t1->year, 1970, " year()" );
is( $t1->month, 1, " month()" );
is( $t1->day, 1, " day()" );
# like the tests above, but starting with ical instead of epoch
my $t2 = Date::ICal->new( ical => '19700101Z' );
is( $t2->ical, '19700101Z', "Start of epoch in ICal notation" );
is( $t2->epoch, 0, " and back to ICal" );
The beginning of the epoch is different on most non-Unix operating systems [8].
Even though Perl smooths out the differences for the most part, certain ports
do it differently. MacPerl is one off the top of my head. [9] Rather than
putting a comment in the test and hoping someone will read the test while
debugging the failure, we can explicitly say it's never going to work and skip
the test.
use Test::More tests => 7;
use Date::ICal;
# Make sure epoch time is being handled sanely.
my $t1 = Date::ICal->new( epoch => 0 );
is( $t1->epoch, 0, "Epoch time of 0" );
SKIP: {
skip('epoch to ICal not working on MacOS', 6)
if $^O eq 'MacOS';
is( $t1->ical, '19700101Z', " epoch to ical" );
is( $t1->year, 1970, " year()" );
is( $t1->month, 1, " month()" );
is( $t1->day, 1, " day()" );
# like the tests above, but starting with ical instead of epoch
my $t2 = Date::ICal->new( ical => '19700101Z' );
is( $t2->ical, '19700101Z', "Start of epoch in ICal notation" );
is( $t2->epoch, 0, " and back to ICal" );
}
A little bit of magic happens here. When running on anything but MacOS, all
the tests run normally. But when on MacOS, C<skip()> causes the entire
contents of the SKIP block to be jumped over. It never runs. Instead,
C<skip()> prints special output that tells L<Test::Harness> that the tests have
been skipped.
1..7
ok 1 - Epoch time of 0
ok 2 # skip epoch to ICal not working on MacOS
ok 3 # skip epoch to ICal not working on MacOS
ok 4 # skip epoch to ICal not working on MacOS
ok 5 # skip epoch to ICal not working on MacOS
ok 6 # skip epoch to ICal not working on MacOS
ok 7 # skip epoch to ICal not working on MacOS
This means your tests won't fail on MacOS. This means fewer emails
from MacPerl users telling you about failing tests that you know will
never work. You've got to be careful with skip tests. These are for
tests which don't work and I<never will>. It is not for skipping
genuine bugs (we'll get to that in a moment).
The tests are wholly and completely skipped. [10] This will work.
SKIP: {
skip("I don't wanna die!");
die, die, die, die, die;
}
=head2 Todo tests
While thumbing through the L<Date::ICal> man page, I came across this:
ical
$ical_string = $ical->ical;
Retrieves, or sets, the date on the object, using any
valid ICal date/time string.
"Retrieves or sets". Hmmm. I didn't see a test for using C<ical()> to set
the date in the Date::ICal test suite. So I wrote one:
use Test::More tests => 1;
use Date::ICal;
my $ical = Date::ICal->new;
$ical->ical('20201231Z');
is( $ical->ical, '20201231Z', 'Setting via ical()' );
Run that. I saw:
1..1
not ok 1 - Setting via ical()
# Failed test (- at line 6)
# got: '20010814T233649Z'
# expected: '20201231Z'
# Looks like you failed 1 tests of 1.
Whoops! Looks like it's unimplemented. Assume you don't have the time to fix
this. [11] Normally, you'd just comment out the test and put a note in a todo
list somewhere. Instead, explicitly state "this test will fail" by wrapping it
in a C<TODO> block:
use Test::More tests => 1;
TODO: {
local $TODO = 'ical($ical) not yet implemented';
my $ical = Date::ICal->new;
$ical->ical('20201231Z');
is( $ical->ical, '20201231Z', 'Setting via ical()' );
}
Now when you run, it's a little different:
1..1
not ok 1 - Setting via ical() # TODO ical($ical) not yet implemented
# got: '20010822T201551Z'
# expected: '20201231Z'
L<Test::More> doesn't say "Looks like you failed 1 tests of 1". That '#
TODO' tells L<Test::Harness> "this is supposed to fail" and it treats a
failure as a successful test. You can write tests even before
you've fixed the underlying code.
If a TODO test passes, L<Test::Harness> will report it "UNEXPECTEDLY
SUCCEEDED". When that happens, remove the TODO block with C<local $TODO> and
turn it into a real test.
=head2 Testing with taint mode.
Taint mode is a funny thing. It's the globalest of all global
features. Once you turn it on, it affects I<all> code in your program
and I<all> modules used (and all the modules they use). If a single
piece of code isn't taint clean, the whole thing explodes. With that
in mind, it's very important to ensure your module works under taint
mode.
It's very simple to have your tests run under taint mode. Just throw
a C<-T> into the C<#!> line. L<Test::Harness> will read the switches
in C<#!> and use them to run your tests.
#!/usr/bin/perl -Tw
...test normally here...
When you say C<make test> it will run with taint mode on.
=head1 FOOTNOTES
=over 4
=item 1
The first number doesn't really mean anything, but it has to be 1.
It's the second number that's important.
=item 2
For those following along at home, I'm using version 1.31. It has
some bugs, which is good -- we'll uncover them with our tests.
=item 3
You can actually take this one step further and test the manual
itself. Have a look at L<Test::Inline> (formerly L<Pod::Tests>).
=item 4
Yes, there's a mistake in the test suite. What! Me, contrived?
=item 5
We'll get to testing the contents of lists later.
=item 6
But what happens if your test program dies halfway through?! Since we
didn't say how many tests we're going to run, how can we know it
failed? No problem, L<Test::More> employs some magic to catch that death
and turn the test into a failure, even if every test passed up to that
point.
=item 7
I cleaned it up a little.
=item 8
Most Operating Systems record time as the number of seconds since a
certain date. This date is the beginning of the epoch. Unix's starts
at midnight January 1st, 1970 GMT.
=item 9
MacOS's epoch is midnight January 1st, 1904. VMS's is midnight,
November 17th, 1858, but vmsperl emulates the Unix epoch so it's not a
problem.
=item 10
As long as the code inside the SKIP block at least compiles. Please
don't ask how. No, it's not a filter.
=item 11
Do NOT be tempted to use TODO tests as a way to avoid fixing simple
bugs!
=back
=head1 AUTHORS
Michael G Schwern E<lt>schwern@pobox.comE<gt> and the perl-qa dancers!
=head1 MAINTAINERS
=over 4
=item Chad Granum E<lt>exodist@cpan.orgE<gt>
=back
=head1 COPYRIGHT
Copyright 2001 by Michael G Schwern E<lt>schwern@pobox.comE<gt>.
This documentation is free; you can redistribute it and/or modify it
under the same terms as Perl itself.
Irrespective of its distribution, all code examples in these files
are hereby placed into the public domain. You are permitted and
encouraged to use this code in your own programs for fun
or for profit as you see fit. A simple comment in the code giving
credit would be courteous but is not required.
=cut

View File

@@ -0,0 +1,161 @@
package Test::UseAllModules;

use strict;
use warnings;
use ExtUtils::Manifest qw( maniread );

our $VERSION = '0.17';

use Exporter;
our @ISA    = qw/Exporter/;
our @EXPORT = qw/all_uses_ok/;

use Test::More;

# Pattern mapping a MANIFEST path to a module name; import() rewrites it
# (as a plain string) when extra directories are supplied via "under".
my $RULE = qr{^lib/(.+)\.pm$};
# Exporter hook: handles "use Test::UseAllModules under => qw(dirs...)".
# Extra directories are stripped of trailing slashes, deduped, folded
# into the MANIFEST-matching rule and unshifted onto @INC.
sub import {
    shift->export_to_level(1);

    # The leading word 'under' is purely decorative.
    shift if @_ && $_[0] eq 'under';

    my @dirs = ('lib', @_);

    my %seen;
    # NOTE: the s||| inside map mutates the aliased elements of @dirs in
    # place; harmless here because @dirs is immediately overwritten.
    @dirs = grep { !$seen{$_}++ } map { s|/+$||; $_ } @dirs;

    # $RULE becomes a plain string here (it starts life as a qr//); it is
    # only ever used inside m|$RULE|, so both forms work.
    $RULE = '^(?:'.(join '|', @dirs).')/(.+)\.pm\s*$';

    unshift @INC, @dirs;
}
# Read MANIFEST and return the list of module names to test, skipping
# any that match the caller-supplied exception names or regex rules.
sub _get_module_list {
    # The leading word 'except' is purely decorative.
    shift if @_ && $_[0] eq 'except';
    my @exceptions = @_;
    my @modules;

    my $manifest = maniread();

    READ:
    foreach my $file (keys %{ $manifest }) {
        if (my ($module) = $file =~ m|$RULE|) {
            # Turn a path fragment (Foo/Bar) into a module name (Foo::Bar).
            $module =~ s|/|::|g;

            # Exceptions may be literal module names or regex rules.
            foreach my $rule (@exceptions) {
                next READ if $module eq $rule || $module =~ /$rule/;
            }

            push @modules, $module;
        }
    }
    return @modules;
}
# True if the running test script has already declared a plan of its own.
sub _planned {
    my $builder = Test::More->builder;
    return $builder->has_plan;
}
# Run use_ok() for every module listed in MANIFEST (minus exceptions
# passed through to _get_module_list).  Declares its own plan — or
# skip_all — unless the caller already planned.  BAIL_OUT on any load
# failure, since one missing dependency cascades into many identical
# failures.
sub all_uses_ok {
    unless (-f 'MANIFEST') {
        plan skip_all => 'no MANIFEST' unless _planned();
        return;
    }

    my @modules = _get_module_list(@_);

    unless (@modules) {
        plan skip_all => 'no .pm files are found under the lib directory' unless _planned();
        return;
    }

    plan tests => scalar @modules unless _planned();

    my @failed;
    foreach my $module (@modules) {
        use_ok($module) or push @failed, $module;
    }

    BAIL_OUT( 'failed: ' . (join ',', @failed) ) if @failed;
}

1;
__END__
=head1 NAME
Test::UseAllModules - do use_ok() for all the MANIFESTed modules
=head1 SYNOPSIS
# basic usage
use strict;
use Test::UseAllModules;
BEGIN { all_uses_ok(); }
# if you also want to test modules under t/lib
use strict;
use Test::UseAllModules under => qw(lib t/lib);
BEGIN { all_uses_ok(); }
# if you have modules that'll fail use_ok() for themselves
use strict;
use Test::UseAllModules;
BEGIN {
all_uses_ok except => qw(
Some::Dependent::Module
Another::Dependent::Module
^Yet::Another::Dependent::.* # you can use regex
)
}
=head1 DESCRIPTION
I'm sick of writing 00_load.t (or something like that) that'll do use_ok() for every module I write. I'm sicker of updating 00_load.t when I add another file to the distro. This module reads MANIFEST to find modules to be tested and does use_ok() for each of them. Now all you have to do is update MANIFEST. You don't have to modify the test any more (hopefully).
=head1 EXPORTED FUNCTION
=head2 all_uses_ok
Does Test::More's use_ok() for every module found in MANIFEST. If you have modules you don't want to test, give those modules or some regex rules as the argument. The word 'except' is ignored as shown above.
As of 0.11, you can also test modules under arbitrary directories by providing a directory list at the loading time (the word 'under' is ignored as shown above). Modules under the lib directory are always tested.
=head1 PROTECTED FUNCTION
=head2 _get_module_list
Returns module paths to test. This function will not be exported. If you want to use this (see below), you always need to call it by the full qualified name.
=head1 NOTES
As of 0.03, this module calls BAIL_OUT of Test::More if any of the use_ok tests should fail. (Thus the following tests will be ignored. Missing or unloadable modules cause a lot of errors of the same kind.)
As of 0.12, you can add extra tests before/after all_uses_ok() if you explicitly declare test plan like this.
use strict;
use warnings;
use Test::More;
use Test::UseAllModules;
use Test::NoWarnings;
plan tests => Test::UseAllModules::_get_module_list() + 1;
all_uses_ok();
# and extra nowarnings test
=head1 SEE ALSO
There're several modules like this on the CPAN now. L<Test::Compile> and a bit confusing L<Test::LoadAllModules> try to find modules to test by traversing directories. I'm not a big fan of them as they tend to find temporary or unrelated modules as well, but they may be handier especially if you're too lazy to update MANIFEST every time.
=head1 AUTHOR
Kenichi Ishigaki, E<lt>ishigaki@cpan.orgE<gt>
=head1 COPYRIGHT AND LICENSE
Copyright (C) 2006 by Kenichi Ishigaki
This library is free software; you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut

View File

@@ -0,0 +1,507 @@
use strict;
use warnings;
package Test::Warnings; # git description: v0.029-2-g97d1c9f
# vim: set ts=8 sts=4 sw=4 tw=115 et :
# ABSTRACT: Test for warnings and the lack of them
# KEYWORDS: testing tests warnings

our $VERSION = '0.030';

use parent 'Exporter';
use Test::Builder;

our @EXPORT_OK = qw(
    allow_warnings allowing_warnings
    had_no_warnings
    warnings warning
);
our %EXPORT_TAGS = ( all => \@EXPORT_OK );

# Module-wide state, shared between the __WARN__ handler, the
# done_testing monkeypatch and the END block.
my $warnings_allowed;           # true while warnings are explicitly permitted
my $forbidden_warnings_found;   # count of unexpected warnings seen so far
my $done_testing_called;        # set once the end-of-test check has run
my $no_end_test;                # import option: skip the automatic end test
my $fail_on_warning;            # import option: fail immediately on a warning
my $report_warnings;            # import option: list warnings at the end
my @collected_warnings;         # stashed warning text for :report_warnings
# Exporter hook: peel off the behaviour-changing options
# (:no_end_test, :fail_on_warning, :report_warnings) before handing the
# remaining names to Exporter.
sub import
{
    my $class = shift @_;

    my %names; @names{@_} = ();

    # END block will check for this status
    $no_end_test = exists $names{':no_end_test'};

    # __WARN__ handler will check for this status
    $fail_on_warning = exists $names{':fail_on_warning'};

    # Collect and report warnings at the end
    $report_warnings = exists $names{':report_warnings'};

    delete @names{qw(:no_end_test :fail_on_warning :report_warnings)};
    __PACKAGE__->export_to_level(1, $class, keys %names);
}
# for testing this module only!
my $tb;

# Getter/setter for the Test::Builder instance this module reports
# through.  With no argument, returns (lazily creating) the builder;
# with an argument, installs it as the builder to use from now on.
sub _builder(;$)
{
    return $tb ||= Test::Builder->new unless @_;
    $tb = shift;
}
# Install a global __WARN__ handler, chaining to any pre-existing one.
my $_orig_warn_handler = $SIG{__WARN__};
$SIG{__WARN__} = sub {
    if ($warnings_allowed)
    {
        # Warnings are currently permitted: surface them as TAP notes.
        Test::Builder->new->note($_[0]);
    }
    else
    {
        $forbidden_warnings_found++;

        push @collected_warnings, $_[0] if $report_warnings;

        # TODO: this doesn't handle blessed coderefs... does anyone care?
        goto &$_orig_warn_handler if $_orig_warn_handler
            and ( (ref $_orig_warn_handler eq 'CODE')
                or ($_orig_warn_handler ne 'DEFAULT'
                    and $_orig_warn_handler ne 'IGNORE'
                    and defined &$_orig_warn_handler));

        # No previous handler: emulate perl's default behaviour, using
        # carp semantics for messages without a trailing newline.
        if ($_[0] =~ /\n$/) {
            warn $_[0];
        } else {
            require Carp;
            Carp::carp($_[0]);
        }

        _builder->ok(0, 'unexpected warning') if $fail_on_warning;
    }
};
# With a block: run it and return every warning it emitted.
# With no arguments: return the string 'warnings', so that a manual
# warnings->import in the namespace this was exported into resolves to
# the pragma rather than to this sub.
sub warnings(;&)
{
    return 'warnings' unless @_;

    my ($block) = @_;
    my @caught;
    {
        local $SIG{__WARN__} = sub { push @caught, $_[0] };
        $block->();
    }
    return @caught;
}
# Run the block and return its single warning, or an arrayref of all
# warnings when there were zero or several (the arrayref then fails the
# caller's string/regex comparison with a useful dump).
sub warning(&)
{
    my @caught = &warnings(@_);
    return @caught == 1 ? $caught[0] : \@caught;
}
# If this Test::Builder supports done_testing, wrap it so the warning
# check runs *before* the final test count is computed; otherwise the
# extra test would break the tally declared via done_testing(N).
if (Test::Builder->can('done_testing'))
{
    # monkeypatch Test::Builder::done_testing:
    # check for any forbidden warnings, and record that we have done so
    # so we do not check again via END
    no strict 'refs';
    my $orig = *{'Test::Builder::done_testing'}{CODE};
    no warnings 'redefine';
    *{'Test::Builder::done_testing'} = sub {
        # only do this at the end of all tests, not at the end of a subtest
        my $builder = _builder;
        my $in_subtest_sub = $builder->can('in_subtest');
        if (not $no_end_test
            and not ($in_subtest_sub ? $builder->$in_subtest_sub : $builder->parent))
        {
            # +3 skips this wrapper plus done_testing and its caller, so
            # failure diagnostics point at the user's test file.
            local $Test::Builder::Level = $Test::Builder::Level + 3;
            had_no_warnings('no (unexpected) warnings (via done_testing)');

            $done_testing_called = 1;
        }

        $orig->(@_);
    };
}
# Fallback: run the final warning check from END if done_testing never
# did it (i.e. the test used an explicit plan, or no plan at all).
END {
    if (not $no_end_test
        and not $done_testing_called
        # skip this if there is no plan and no tests have been run (e.g.
        # compilation tests of this module!)
        and (_builder->expected_tests or _builder->current_test > 0)
    )
    {
        local $Test::Builder::Level = $Test::Builder::Level + 1;
        had_no_warnings('no (unexpected) warnings (via END block)');
    }
}
# setter
# Turn warning tolerance on (no argument, or a true argument) or back
# off (a defined false argument).
sub allow_warnings(;$)
{
    # NOTE(review): the previous expression `@_ || defined $_[0] ? $_[0] : 1`
    # parsed as `(@_ || defined $_[0]) ? $_[0] : 1`, leaving the defined()
    # test dead code (when @_ is empty, $_[0] is necessarily undef).  This
    # form has identical behaviour without the misleading dead branch.
    $warnings_allowed = @_ ? $_[0] : 1;
}
# getter: report whether warnings are currently being allowed.
sub allowing_warnings() { return $warnings_allowed; }
# call at any time to assert no (unexpected) warnings so far
# Emits one ok/not-ok; under :report_warnings a failing check is
# followed by a numbered diag listing of every collected warning.
sub had_no_warnings(;$)
{
    my $test_name = shift || 'no (unexpected) warnings';
    _builder->ok(!$forbidden_warnings_found, $test_name);

    if ($report_warnings and $forbidden_warnings_found) {
        _builder->diag("Got the following unexpected warnings:");
        my $index = 0;
        for my $warning (@collected_warnings) {
            ++$index;
            _builder->diag("  $index: $warning");
        }
    }
}

1;
__END__
=pod
=encoding UTF-8
=head1 NAME
Test::Warnings - Test for warnings and the lack of them
=head1 VERSION
version 0.030
=head1 SYNOPSIS
use Test::More;
use Test::Warnings;
pass('yay!');
done_testing;
emits TAP:
ok 1 - yay!
ok 2 - no (unexpected) warnings (via done_testing)
1..2
and:
use Test::More tests => 3;
use Test::Warnings 0.005 ':all';
pass('yay!');
like(warning { warn "oh noes!" }, qr/^oh noes/, 'we warned');
emits TAP:
ok 1 - yay!
ok 2 - we warned
ok 3 - no (unexpected) warnings (via END block)
1..3
=head1 DESCRIPTION
If you've ever tried to use L<Test::NoWarnings> to confirm there are no warnings
generated by your tests, combined with the convenience of C<done_testing> to
not have to declare a
L<test count|Test::More/I love it-when-a-plan-comes-together>,
you'll have discovered that these two features do not play well together,
as the test count will be calculated I<before> the warnings test is run,
resulting in a TAP error. (See C<examples/test_nowarnings.pl> in this
distribution for a demonstration.)
This module is intended to be used as a drop-in replacement for
L<Test::NoWarnings>: it also adds an extra test, but runs this test I<before>
C<done_testing> calculates the test count, rather than after. It does this by
hooking into C<done_testing> as well as via an C<END> block. You can declare
a plan, or not, and things will still Just Work.
It is actually equivalent to:
use Test::NoWarnings 1.04 ':early';
as warnings are still printed normally as they occur. You are safe, and
enthusiastically encouraged, to perform a global search-replace of the above
with C<use Test::Warnings;> whether or not your tests have a plan.
It can also be used as a replacement for L<Test::Warn>, if you wish to test
the content of expected warnings; read on to find out how.
=head1 FUNCTIONS
The following functions are available for import (not included by default; you
can also get all of them by importing the tag C<:all>):
=head2 C<< allow_warnings([bool]) >> - EXPERIMENTAL - MAY BE REMOVED
When passed a true value, or no value at all, subsequent warnings will not
result in a test failure; when passed a false value, subsequent warnings will
result in a test failure. Initial value is C<false>.
When warnings are allowed, any warnings will instead be emitted via
L<Test::Builder::note|Test::Builder/Output>.
=head2 C<allowing_warnings> - EXPERIMENTAL - MAY BE REMOVED
Returns whether we are currently allowing warnings (set by C<allow_warnings>
as described above).
=head2 C<< had_no_warnings(<optional test name>) >>
Tests whether there have been any warnings so far, not preceded by an
C<allowing_warnings> call. It is run
automatically at the end of all tests, but can also be called manually at any
time, as often as desired.
=head2 C<< warnings( { code } ) >>
Given a code block, runs the block and returns a list of all the
(not previously allowed via C<allow_warnings>) warnings issued within. This
lets you test for the presence of warnings that you not only would I<allow>,
but I<must> be issued. Testing functions are not provided; given the strings
returned, you can test these yourself using your favourite testing functions,
such as L<Test::More::is|Test::More/is> or L<Test::Deep::cmp_deeply|Test::Deep/cmp_deeply>.
You can use this construct as a replacement for
L<Test::Warn::warnings_are|Test::Warn/warnings_are>:
is_deeply(
[ warnings { ... } ],
[
'warning message 1',
'warning message 2',
],
'got expected warnings',
);
or, to replace L<Test::Warn::warnings_like|Test::Warn/warnings_like>:
cmp_deeply(
[ warnings { ... } ],
bag( # ordering of messages doesn't matter
re(qr/warning message 1/),
re(qr/warning message 2/),
),
'got expected warnings (in any order)',
);
Warnings generated by this code block are I<NOT> propagated further. However,
since they are returned from this function with their filename and line
numbers intact, you can re-issue them yourself immediately after calling
C<warnings(...)>, if desired.
Note that C<use Test::Warnings 'warnings'> will give you a C<warnings>
subroutine in your namespace (most likely C<main>, if you're writing a test),
so you (or things you load) can't subsequently do C<< warnings->import >> --
it will result in the error: "Not enough arguments for
Test::Warnings::warnings at ..., near "warnings->import"". To work around
this, either use the fully-qualified form (C<Test::Warnings::warnings>) or make your
calls to the C<warnings> package first.
=head2 C<< warning( { code } ) >>
Same as C<< warnings( { code } ) >>, except a scalar is always returned - the
single warning produced, if there was one, or an arrayref otherwise -- which
can be more convenient to use than C<warnings()> if you are expecting exactly
one warning.
However, you are advised to capture the result from C<warning()> into a temp
variable so you can dump its value if it doesn't contain what you expect.
e.g. with this test:
like(
warning { foo() },
qr/^this is a warning/,
'got a warning from foo()',
);
if you get two warnings (or none) back instead of one, you'll get an
arrayref, which will result in an unhelpful test failure message like:
# Failed test 'got a warning from foo()'
# at t/mytest.t line 10.
# 'ARRAY(0xdeadbeef)'
# doesn't match '(?^:^this is a warning)'
So instead, change your test to:
my $warning = warning { foo() };
like(
$warning,
qr/^this is a warning/,
'got a warning from foo()',
) or diag 'got warning(s): ', explain($warning);
=head1 IMPORT OPTIONS
=head2 C<:all>
Imports all functions listed above
=head2 C<:no_end_test>
Disables the addition of a C<had_no_warnings> test
via C<END> or C<done_testing>
=head2 C<:fail_on_warning>
=for stopwords unexempted
When used, fail immediately when an unexempted warning is generated (as opposed to waiting until
L</had_no_warnings> or C<done_testing> is called).
I recommend you only turn this option on when debugging a test, to see where a surprise warning is coming from,
and rely on the end-of-tests check otherwise.
=head2 C<:report_warnings>
When used, C<had_no_warnings()> will print all the unexempted warning content, in case it had been suppressed
earlier by other captures (such as L<Test::Output/stderr_like> or L<Capture::Tiny/capture>).
=head1 CAVEATS
=for stopwords smartmatch TODO irc
Sometimes new warnings can appear in Perl that should B<not> block
installation -- for example, smartmatch was recently deprecated in
perl 5.17.11, so now any distribution that uses smartmatch and also
tests for warnings cannot be installed under 5.18.0. You might want to
consider only making warnings fail tests in an author environment -- you can
do this with the L<if> pragma:
use if $ENV{AUTHOR_TESTING} || $ENV{RELEASE_TESTING}, 'Test::Warnings';
In future versions of this module, when interfaces are added to test the
content of warnings, there will likely be additional sugar available to
indicate that warnings should be checked only in author tests (or TODO when
not in author testing), but will still provide exported subs. Comments are
enthusiastically solicited - drop me an email, write up an RT ticket, or come
by C<#perl-qa> on irc!
=for stopwords Achtung
B<Achtung!> This is not a great idea:
sub warning_like(&$;$) {
my ($code, $pattern, $name) = @_;
like( &warning($code), $pattern, $name );
}
warning_like( { ... }, qr/foo/, 'foo appears in the warning' );
If the code in the C<{ ... }> is going to warn with a stack trace with the
arguments to each subroutine in its call stack (for example via C<Carp::cluck>),
the test name, "foo appears in the warning" will itself be matched by the
regex (see F<examples/warning_like.t>). Instead, write this:
like( warning { ... }, qr/foo/, 'foo appears in the warning' );
=head1 TO DO (or: POSSIBLE FEATURES COMING IN FUTURE RELEASES)
=over
=item * C<< allow_warnings(qr/.../) >> - allow some warnings and not others
=for stopwords subtest subtests
=item * more sophisticated handling in subtests - if we save some state on the
L<Test::Builder> object itself, we can allow warnings in a subtest and then
the state will revert when the subtest ends, as well as check for warnings at
the end of every subtest via C<done_testing>.
=item * sugar for making failures TODO when testing outside an author
environment
=back
=head1 SEE ALSO
=for stopwords YANWT
=over 4
=item *
L<Test::NoWarnings>
=item *
L<Test::FailWarnings>
=item *
L<blogs.perl.org: YANWT (Yet Another No-Warnings Tester)|http://blogs.perl.org/users/ether/2013/03/yanwt-yet-another-no-warnings-tester.html>
=item *
L<strictures> - which makes all warnings fatal in tests, hence lessening the need for special warning testing
=item *
L<Test::Warn>
=item *
L<Test::Fatal>
=back
=head1 SUPPORT
Bugs may be submitted through L<the RT bug tracker|https://rt.cpan.org/Public/Dist/Display.html?Name=Test-Warnings>
(or L<bug-Test-Warnings@rt.cpan.org|mailto:bug-Test-Warnings@rt.cpan.org>).
There is also a mailing list available for users of this distribution, at
L<http://lists.perl.org/list/perl-qa.html>.
There is also an irc channel available for users of this distribution, at
L<C<#perl> on C<irc.perl.org>|irc://irc.perl.org/#perl>.
I am also usually active on irc, as 'ether' at C<irc.perl.org>.
=head1 AUTHOR
Karen Etheridge <ether@cpan.org>
=head1 CONTRIBUTORS
=for stopwords Graham Knop A. Sinan Unur Leon Timmermans Tina Mueller
=over 4
=item *
Graham Knop <haarg@haarg.org>
=item *
A. Sinan Unur <nanis@cpan.org>
=item *
Leon Timmermans <fawaka@gmail.com>
=item *
Tina Mueller <cpan2@tinita.de>
=back
=head1 COPYRIGHT AND LICENCE
This software is copyright (c) 2013 by Karen Etheridge.
This is free software; you can redistribute it and/or modify it under
the same terms as the Perl 5 programming language system itself.
=cut

View File

@@ -0,0 +1,64 @@
package Test::use::ok;
# Stub package carrying the distribution's version; the C<use ok 'Module'>
# syntax documented below is presumably provided by a companion C<ok>
# package — confirm against the rest of the distribution.
use 5.005;
# NOTE: the declared minimum is perl 5.005, so `use warnings` (5.6+) is
# deliberately not added here.
use strict;
our $VERSION = '1.302183';
# Return an explicit true value so `require` succeeds regardless of the
# last statement above.
1;
__END__
=head1 NAME
Test::use::ok - Alternative to Test::More::use_ok
=head1 SYNOPSIS
use ok 'Some::Module';
=head1 DESCRIPTION
According to the B<Test::More> documentation, it is recommended to run
C<use_ok()> inside a C<BEGIN> block, so functions are exported at
compile-time and prototypes are properly honored.
That is, instead of writing this:
use_ok( 'Some::Module' );
use_ok( 'Other::Module' );
One should write this:
BEGIN { use_ok( 'Some::Module' ); }
BEGIN { use_ok( 'Other::Module' ); }
However, people often either forget to add C<BEGIN>, or mistakenly group
C<use_ok> with other tests in a single C<BEGIN> block, which can create subtle
differences in execution order.
With this module, simply change all C<use_ok> in test scripts to C<use ok>,
and they will be executed at C<BEGIN> time. The explicit space after C<use>
makes it clear that this is a single compile-time action.
=head1 SEE ALSO
L<Test::More>
=head1 MAINTAINER
=over 4
=item Chad Granum E<lt>exodist@cpan.orgE<gt>
=back
=encoding utf8
=head1 CC0 1.0 Universal
To the extent possible under law, 唐鳳 has waived all copyright and related
or neighboring rights to L<Test-use-ok>.
This work is published from Taiwan.
L<http://creativecommons.org/publicdomain/zero/1.0>
=cut