#!/bin/sh
# -*-Perl-*-  (for Emacs)    vim:set filetype=perl:  (for vim)
#======================================================================#
# Run the right perl version:
if [ -x /usr/local/bin/perl ]; then
  perl=/usr/local/bin/perl
elif [ -x /usr/bin/perl ]; then
  perl=/usr/bin/perl
else
  perl=`which perl| sed 's/.*aliased to *//'`
fi
exec $perl -x -S $0 "$@"     # -x: start from the following line
#======================================================================#
#! /Good_Path/perl -w
# line 17

# Name:   pc_auto-test
# Description:
#   Test compilation and results on a set of sample directories to
#   validate the pencil code. Uses Perl because I started to do this with
#   the Test/Test::Harness module and because we need to do pattern
#   matching to extract the relevant output lines.
# To Do:
#   - Add an option to exclude those tests that use MPI
#   - Remove directory from /tmp when encountering Ctrl-C
#   - Treat `svn update' as separate stage that can be `OK' or not

use strict;

BEGIN {
    # Make sure ${PENCIL_HOME}/lib/perl is in the Perl path
    if (-d "$ENV{PENCIL_HOME}/lib/perl") {
        unshift @INC, "$ENV{PENCIL_HOME}/lib/perl";
    } else {
        if ($0 =~ m!(.*[/\\])!) { unshift @INC, "$1../lib/perl"; }
    }
}

use Pencil::Util;
Pencil::Util::use_pencil_perl_modules(
    'Pencil::ReadmeReader',
    'Test::NumericFileComparator',
    'Test::ScriptTester',
    'Test::Parse',
) or die;

use constant NOQUIET => 0;
use constant QUIET   => 1;
use constant LOCK_FAILED => 128;  # return status if we can't get lock

use Cwd;
use File::Copy;
use File::Compare;
use POSIX qw(floor strftime);
use Getopt::Long;
use Carp;                         # for croak() in interpreter_map_from()
use vars qw{ %failed $test_status };  # do we really need global vars here?
use IPC::Open2;
use Cwd qw(abs_path);

# ---------------------------------------------------------------------- #
# Test the following subdirs of samples/ .
# Tests are grouped by `level':
#   0 is the absolute minimum,
#   1 are the most important tests,
#   2 is the standard program for nightly tests,
#   3 adds the remaining tests under samples.
# [Note: until we get closer to having all tests running, we will use
#  levels beyond 3 to distinguish the different degrees of failures]
# Within a level, tests should be roughly sorted by total run time, such
# that the slowest tests are run last.
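# Levels can be combined on the command line, e.g. `--level=1+2' or
# `--level=0-2' (parsed by get_sampdirs() below); `--max-level=LEV' is
# shorthand for running all levels from 0 up to LEV.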
my $default_level = 2; my %tests = ( 0 => [qw( no-modules most-modules )], 1 => [qw( backwards-compatible 1d-tests/implicit_diffusion 2d-tests/chemistry_GrayScott 2d-tests/shearwave-dust-par dust-vortex damped_alfven_waves corona helical-MHDturb )], 2 => [qw( 0d-tests/heating_ionize 0d-tests/heating_noionize 1d-tests/H2_flamespeed 1d-tests/horndeski_choiceIII 1d-tests/rel-shock 1d-tests/inflationary-B-genesis 1d-tests/axionSU2back chiral-diffusion cosmicray 1d-tests/sod_10 1d-tests/ambipolar_diffusion GravitationalWaves polymer/forced_3dturbulence testfield_z random_uu_particles 1d-tests/solar-atmosphere-temperature kin-dynamo meissner conv-slab-noequi 2d-tests/selfgravitating-shearwave 2d-tests/streaming_instability/single-species 2d-tests/streaming_instability/multi-species 2d-tests/globaldisc 2d-tests/cylindrical_gdisk 2d-tests/planet-disk-fargo spherical-globaldisk spherical-globaldisk-mhd spherical-gdisk-planet-thermo 0d-tests/solar_system mdwarf sedimentation MRI-turbulence_hyper dust_turb_globaldisk geodynamo 2d-tests/cylinder_deposition 2d-tests/selfgravdisk-deadzone-dust 2d-tests/spherical_viscous_ring 2d-tests/field-loop-fargo baroclinic 1d-tests/sedimentation-noneq-blocks 1d-tests/alphadisk implicit_resistivity debris-photoelectric-fluid 2d-tests/debris-photoeletric-streaming 2d-tests/Kelvin-Helmholtz 2d-tests/selfgravdisk-planet-dust cylindrical-globaldisk-dzone 2d-tests/selfgravdisk-logspirals 2d-tests/turbulent_potential 2d-tests/WhiteDwarfDisk 2d-tests/cylinder_ogrid_thermo_chem 2d-tests/shallow-water 2d-tests/torque_migration_noniso sink-particles 2d-tests/jupiter-atmosphere-cyclogenesis convective-overstability-particles streaming-instability-sink 1d-tests/hyperdiffusion 2d-tests/eccentricity-decay interstellar )], # Level 3 for now contains all non-canonical tests that run # successfully 3 => [qw( 0d-tests/heating_ionize_planck 1d-tests/conduction 1d-tests/jeans-drag-dustpar-x 1d-tests/jeans-x 1d-tests/sod_10s 1d-tests/toomre-x 2d-tests/bubble 2d-tests/conv-slab-MLT 2d-tests/2d_methane_flame 2d-tests/boussinesq_convection 2d-tests/chiral_dynamo cartesian-convection-kramers-chimax cartesian-convection-kramers-chimax_cp_2 continuous-forcing-from-file conv-slab conv-slab_cp_2 interlocked-fluxrings conv-slab-flat coronae_heatflux_boris power_xy/complex power_xy/integrate_shell power_xy/integrate_shell_z testfield_nonlin_z spherical-convection spherical-convection-corona small-scale-dynamo_slope-limited-diffusion meanfield_special_e_tensor_GCD dust-accretion superparticle_condensation )], # Level 4 for now contains overlong tests (e.g. 
test that # compile, start and then run -- but take too long time) 4 => [qw( forced-boundary sound-spherical-bufferzone 2d-tests/selfgrav-shearwave-dust-par )], # Level 5 for now contains all non-canonical tests that run, # but produce wrong results 5 => [qw( superparticle-coagulation superparticle-condensation-coagulation hexagon MHD-ABCforcing taylor-couette potential_field 1d-tests/sod_100 1d-tests/sod_10y geodynamo-noequi 1d-tests/sod_1000 1d-tests/sod_100s 1d-tests/sod_10sy 1d-tests/sod_10sz 2d-tests/GMSW1976 1d-tests/sod_1000s 1d-tests/sod_10sho taylor-couette-cyl 0d-tests/chemistry_aped 2d-tests/GMSW1976_cpne1 0d-tests/coag_kernel_lin 2d-tests/dynamical_alpha 2d-tests/potential_field 2d-tests/spherical_gdisk solar-atmosphere-magnetic MRI-turbulence_hyper_strict 0d-tests/heating_ionize_temp 1d-tests/solar-atmosphere 0d-tests/reactive_particles )], # Level 6 for now contains all non-canonical tests that # start, but don't run successfully 6 => [qw( 0d-tests/coagulation-fragmentation )], # Level 7 for now contains all non-canonical tests that # compile, but don't start sucessfully 7 => [qw( interstellar_ion turb-cylindrical-disk 2d-tests/A3+chi11+Ra1e5 )], # Level 8 for now contains all non-canonical tests that don't # compile sucessfully 8 => [qw( multigrid 2d-tests/radiation_doppler )], ); # ---------------------------------------------------------------------- # my $base_URL = 'https://github.com/pencil-code/pencil-code/'; # repository URL my $ntests = 0; # total number of tests run my $failed = 0; my $created_datadir = 0; my $remove_lock = 0; my $failure_message = ''; my ($current_revision, $current_hash); my $reference_out = 'reference.out'; # automatically changed to # reference.out.double below for # double precision runs my $user = $ENV{USER} || $ENV{LOGNAME} || 'unknown'; my $tmpdir = "/tmp/pencil-tmp-$user-$$"; my $lockdir = '/tmp'; # put / check for lock file here my $lockfile = "pencil-auto-test-$user.pid"; my $headerpat = '^-*\s*(-+\s*[A-Za-z0-9_]+)+-*\s*$'; # regexp for header line my $noheader = "Couldn't find header line"; my $cfloat = '([+-]?)(?=\d|\.\d)\d*(\.\d*)?([Ee]([+-]?\d+))?'; # regexp # for C float my $ieee_float = "(?:$cfloat|[+-]?(?:NaN|Inf))"; # C float | ±NaN | ±Inf my $res; my ($t_comp, $t_start, $t_run, $t_res, $t_script) = (1, 1, 1, 1, 1); # test everything by default my (%opts); # variables written by GetOptions (my $cmdname = $0) =~ s{.*/}{}; my $help = 0; my $itest = 1; my $indi_fmt = '%-17s'; # allow that much space for indicators like # `[double]' or `[KEEP_RELOADING]' my $skip_end_block = 0; my $usage = "Usage: $cmdname [options] [dir1 [dir2 [..]]] Test compilation and results on a set of sample directories (or on the list of directories given in the command line) to validate the pencil code. Uses Perl because we need to do pattern matching to extract the relevant output lines. If you don't have a data directory, $cmdname creates a link to $tmpdir -- both link and directory are removed after each run. 
Options: -h, --help \tShow usage overview -v, --version \tPrint version number -C, --clean \tClean up (pc_build --cleanall) before compiling -c, --compile-only \tTest compilation only -o, --exec-opts= \tPass cmd line options in quotes such as partition \tand/or billing account to execute mpi example: \t --exec-opts='--account=projectA -p test' -n, --norun \tTest compilation and starting only -f, --results-from-file \tDon't run, read results from data/timeseries.dat -r, --reload \tKeep reloading parameters (using KEEP_RELOADING) -l, --list \tList test directories (don't run any tests) -L, --list-all \tList even test dirs lacking reference.out --logfile=FILE \tAppend one-line summary to the given file -t, --time \tPrint CPU time used -T, --summarize-times \tPrint timings line for pencil-test and statistics -i, --infofile= \tAfter last test, print content of -D, --pencil-home= \tSet PENCIL_HOME directory to -N, --nice= \tRun tests with nice value --no-pencil-check \tSuppress pencil consistency check, irrespective \tof &run_pars.lpencil_check --notify \tTell us (audibly and visually) when test is done. --parallel \tAllow parallel compilation (passes -j to make) -j, --jobs= \tPass through -j option to make -d, --datadir \tUse mkdir if data/ is needed, don't ln -s /tmp -s, --short \tPrint short directory names only -p, --postproc=PP \tApply postprocessor PP to output from make -F, --file= \tOnly run tests for samples that use module 'xx' (based on 'grep -w') -a, --all-dirs \tRecurse through all subdirectories of samples/ --here \tRecurse through all subdirectories in current directory --level=LEV \tRun only tests from level LEV --max-level=LEV \tRun all tests below with level <= LEV (default: 2) --script-tests=TYPES\tRun script tests matching TYPES, a comma-separated \tlist of types 'type1,type2,...', or a map \t(comma-separated list of colon-separated \tassociations 'type1:interp1,type2:interp2,...'. \tUse 'DEFAULT' to get all configured types with \ttheir default interpreters --time-limit=limit \tLimit time for each phase --auto-clean \tAutomatic clean & recompile, if compilation fails --config-files= \tUse the given (a comma-separated list) as \tconfiguration files, rather than trying to find a \tconfig file based on a host ID. --host-id= \tUse the given as host ID. --list-maintainers \tPrint maintainer emails for failed tests --rsh=RSH-PROG \tUse the specified program as to get a remote shell --nodelist=NODE1,.. \tUse a forked remote shell to parallelize --no-summary \tDo not summarize at the end --no-lock \tIgnore and don't write lock file --local-lock \tLet multiple auto-tests run in different working copies (default: off) --debug \tPrint lots of debugging output --fast \tShortcut for the option FFLAGS+=-O0 of pc_build --log-dir= \tDirectory to store the logfiles (default: .) --previous-dir=\tDirectory for logfiles of the previous run (default: off) --bisect \tTry to bisect to find offending commit (useful for automatic tests) --keep-fail-info-on-success \tDo not delete failure information on successful test (for bisecting) --overwrite-fail-info \tUpdate failure information even if it already exists (for bisecting) Examples: pc_auto-test # run standard program, recycling .o files pc_auto-test -C # run standard program, compiling from scratch pc_auto-test --auto-clean --log-dir=/my/test/ --previous-dir=/my/test-previous/ # run with logfiles, make clean on error pc_auto-test . 
                               # run test in current directory
  pc_auto-test -l              # list dirs in standard program
  pc_auto-test --time-limit=2m # run standard program, killing (for each test)
                               # any remaining start.x or run.x processes
                               # after 2 minutes.
                               # Mainly useful for run.x, but we also limit
                               # start.x to avoid hour-long runaway processes
                               # The format of the time specification is
                               # documented in bin/reaper
  pc_auto-test -a              # run tests in all subdirs of samples/ with a
                               # reference.out{,.double} file
  pc_auto-test --level=1+2     # run all level 1 and level 2 tests
  pc_auto-test -l --max-level=1  # list test dirs in levels 0 and 1
  pc_auto-test -al             # list all dirs 'pc_auto-test -a' would run in
  pc_auto-test -aL             # list all run dirs under samples/, even those without
                               # reference data
  pc_auto-test --script-tests  # standard program, plus all supported
                               # script tests (python and idl)
  pc_auto-test --script-tests=python
                               # standard program, plus python script
                               # tests
  pc_auto-test --script-tests='python:python3,idl:bin/gdl'
                               # explicitly specify interpreters
";

## Process command line options
eval {
    Getopt::Long::config("bundling"); # make single-letter opts. case-sensitive
};
GetOptions(\%opts,
           qw( -h   --help
               -v   --version
                    --debug
               -C   --clean
               -c   --compile-only
               -n   --norun
               -f   --results-from-file
               -r   --reload
               -o   --exec-opts=s
               -l   --list
               -L   --list-all
                    --logfile=s
               -t   --time
               -T   --summarize-times
               -i=s --infofile=s
               -D=s --pencil-home=s
               -N=s --nice=s
                    --no-pencil-check
                    --notify
                    --parallel
               -j=n --jobs=n
               -d   --datadir
               -s   --short
               -p=s --postproc=s
               -F=s --file=s
               -a   --all-dirs
                    --here
                    --level=s
                    --max-level=s
                    --script-tests=s
                    --time-limit=s
                    --auto-clean
                    --config-files=s
                    --host-id=s
                    --list-maintainers
                    --rsh=s
                    --nodelist=s
                    --no-summary
                    --nolock --no-lock
                    --local-lock
                    --fast
                    --log-dir=s
                    --previous-dir=s
                    --bisect
                    --keep-fail-info-on-success
                    --overwrite-fail-info
             )
          ) or $help=1, die "Aborting.\n";

if ($opts{'h'} || $opts{'help'}) {
    $help=1;
    die "$usage\n";
}
if ($opts{'v'} || $opts{'version'}) {
    $help=1;
    die '$Id$ ' . "\n";
}

if ($opts{'c'} || $opts{'compile-only'}) { $t_start = $t_run = $t_res = $t_script = 0 };
if ($opts{'n'} || $opts{'norun'}) { $t_run=0 };
my $res_from_file = ($opts{'f'} || $opts{'results-from-file'} || 0 );
if ($res_from_file) { $t_comp=$t_start=$t_run=0 };

my $debug          = ( $opts{'debug'} || 0 );
my $clean          = ($opts{'C'} || $opts{'clean'} || 0 );
my $reload         = ($opts{'r'} || $opts{'reload'} || 0 );
my $exec_opts      = ($opts{'o'} || $opts{'exec-opts'} || '' );
my $list_only      = ($opts{'l'} || $opts{'list'} || 0 );
my $list_all       = ($opts{'L'} || $opts{'list-all'} || 0 );
my $logfile        = ( $opts{'logfile'} || '' );
my $infofile       = ($opts{'i'} || $opts{'infofile'} || '' );
my $time           = ($opts{'t'} || $opts{'time'} || 0 );
my $summarize_t    = ($opts{'T'} || $opts{'summarize-times'} || 0 );
my $pencil_home    = ($opts{'D'} || $opts{'pencil-home'} || $ENV{PENCIL_HOME});
my $niceval        = ($opts{'N'} || $opts{'nice'} || 0 );
my $parallel       = ( $opts{'parallel'} || 0 );
my $makejobs       = ($opts{'j'} || $opts{'jobs'} || '' );
my $mkdatadir      = ($opts{'d'} || $opts{'datadir'} || 0 );
my $short          = ($opts{'s'} || $opts{'short'} || 0 );
my $file           = ($opts{'F'} || $opts{'file'} || '' );
my $postproc       = ($opts{'p'} || $opts{'postproc'} || '' );
my $deep           = ($opts{'a'} || $opts{'all-dirs'} || 0 );
my $here           = ( $opts{'here'} || 0 );
my $level          = ( $opts{'level'} );
my $max_level      = ( $opts{'max-level'} );
my $script_tests   = ( $opts{'script-tests'} );
my $time_limit     = ( $opts{'time-limit'} || '' );
my $auto_clean     = ( $opts{'auto-clean'} || 0 );
my $config_files   = ( $opts{'config-files'} || '' );
my $host_id        = ( $opts{'host-id'} || '' );
my $list_maint     = ( $opts{'list-maintainers'} || 0 );
my $rsh            = ( $opts{'rsh'} || 'ssh');
my $nodelist       = ( $opts{'nodelist'} || 0 );
my $nosummary      = ( $opts{'no-summary'} || 0 );
my $suppress_check = ( $opts{'no-pencil-check'} || 0 );
my $notify         = ( $opts{'notify'} || '' );
my $nolock         = ( $opts{'nolock'} || $opts{'no-lock'} || 0 );
my $local_lock     = ( $opts{'local-lock'} || 0 );
my $fast           = ( $opts{'fast'} || '' );
my $log_dir        = ( $opts{'log-dir'} || '' );
my $previous_dir   = ( $opts{'previous-dir'} || '' );
my $bisect         = ( $opts{'bisect'} || 0 );
my $keep_fail      = ( $opts{'keep-fail-info-on-success'} || 0 );
my $overwrite_fail = ( $opts{'overwrite-fail-info'} || 0 );

if (!$pencil_home) {
    die "ERROR: PENCIL_HOME unknown, please use -D or environment variable!\n";
}

my %interpreter_map;
if (defined($script_tests)) {
    %interpreter_map = interpreter_map_from($script_tests);
    $t_script = 1;
} else {
    $t_script = 0;
}

my @exit_status = ('setting up directory', 'cleaning up', 'compilation',
                   'starting', 'running', 'results');

$list_only = 1 if ($list_all);

my $timeout_command_found = check_timeout_found();
if ($timeout_command_found && !$nolock) {
    # The global lock is on by default because when the time limit is reached,
    # reaper kills processes based on their name. If we are using the `timeout`
    # utility, that concern does not apply, and one only needs local_lock.
    $local_lock = 1;
}
if ($local_lock) {
    $lockdir = $pencil_home;
}
$lockfile = $lockdir."/pencil-auto-test-$user.pid";

# If relative paths have been specified, convert to absolute paths. Otherwise,
# $log_dir is treated as relative to the run directory (not the directory where
# pc_auto-test was invoked).
if ($log_dir ne '') { $log_dir = abs_path($log_dir); } if ($previous_dir ne '') { $previous_dir = abs_path($previous_dir); } my $makeopts = ''; if ($makejobs) { $makeopts = "--jobs=$makejobs"; } if ($parallel) { $makeopts = "--parallel"; } my @pc_dump_config = qw/pc_dump_config/; my @pc_bisect = qw/pc_bisect/; my @pc_build = qw/pc_build/; my @pc_run = qw/pc_run/; if ($config_files) { push @pc_dump_config, "--config-files=$config_files"; push @pc_bisect, "--config-files=$config_files"; push @pc_build, "--config-files=$config_files"; push @pc_run, "--config-files=$config_files"; } if ($exec_opts) { push @pc_dump_config, "--exec-opts='$exec_opts'"; push @pc_run, "--exec-opts='$exec_opts'"; } if ($host_id) { push @pc_dump_config, "--host-id=$host_id"; push @pc_bisect, "--host-id=$host_id"; push @pc_build, "--host-id=$host_id"; push @pc_run, "--host-id=$host_id"; } if ($pencil_home) { push @pc_bisect, "--pencil-home='$pencil_home'"; } if ($makejobs) { push @pc_bisect, "--jobs=$makejobs"; } if ($parallel) { push @pc_bisect, "--parallel"; } if ($log_dir) { push @pc_bisect, "--log-dir='$log_dir'"; } if ($fast) { push @pc_bisect, "--fast"; } if ($debug) { push @pc_bisect, "--debug"; } my $host = `hostname`; chomp($host); my $nice = ""; if ($niceval != 0) { $nice = "\\nice -n $niceval" }; # Note: Csh's builtin 'nice' cmd has a different calling syntax, so we # need to use their real nice cmd here. my @testdirs; # full names of all dirs to run tests in my %time_summary; # hash to accumulate timing summary # within each run # Make a backup of the previous logfile directory if ($log_dir && $previous_dir) { $log_dir =~ s|/+$||s; $previous_dir =~ s|/+$||s; my $exclude = ""; my $sub_dir = substr $previous_dir, 0, length($log_dir); if ($sub_dir eq $log_dir) { $sub_dir = substr $previous_dir, length($log_dir)+1; $exclude = '--exclude "'.$sub_dir.'"'; } system('rsync -auq --del '.$exclude.' "'.$log_dir.'/" "'.$previous_dir.'/"'); } ## Debugging output: if ($debug) { my $dummy = $Test::ScriptTester::DEBUG; # suppress 'used only once' # warning $Test::ScriptTester::DEBUG = 1; eval { use Data::Dumper }; # Need eval here to calm down compiler print STDERR Dumper(\%opts); print STDERR "(\$t_comp, \$t_start, \$t_run, \$t_res, \$t_script)" . " = ($t_comp, $t_start, $t_run, $t_res, $t_script)\n"; print STDERR "(\$debug,\$help,\$short) " . " = ($debug,$help,$short)\n"; } # $t_comp .. $t_script form a hierarchy: We can't test running without # compiling and starting $t_start &&= $t_comp; $t_run &&= $t_start; $t_res &&= ($t_run || $res_from_file); $t_script &&= $t_res; # Check for lock file and write one unless ($nolock or $list_only) { handle_lock_file() or exit(LOCK_FAILED); } # Remember current directory my $cwd = cwd(); # pc_update_autotest system("$pencil_home/bin/pc_update_autotest.sh &"); # Make sure we are in the top directory and have the right PATH my $topdir = "$pencil_home"; if (!$here) { chdir $topdir; } $ENV{PATH} .= ":$pencil_home/bin"; # Don't follow these boring subdirectories when recursing through whole tree: my $prunedirs = '^(data|src|CVS|RCS|_darcs|.hg|.svn)$'; # Autoflush stdout: $| = 1; @ARGV = grep { /./ } @ARGV; # Eliminate empty @ARGV list (needed under # OSF1, no clue why) if ($list_only && ! $short) { print "Test directories:\n" } if ($summarize_t) { write_timing_summary_header(); } my %t_global = ( 'compile' => 0, 'start+run' => 0, ); my $t0_global = get_time(); # remember start time if (@ARGV) { ## Process dirs given in cmd line @ARGV = map { s{^(?!/)}{$cwd/}; $_ } @ARGV; # Make '.' 
and similar work $ntests = @ARGV; for my $d (@ARGV) { test_rundir("$d", $makeopts); } if ((scalar @ARGV)==1) { if (%failed) { # Do some bizarre chicken sacrifice voodoo # [wd 29-Jan-2007:] Tried to sanitize this without knowing what # it was supposed to do # Assuming that the exit code is supposed to indicate the # worst phase that failed (i.e., 2 means setting up directory # failed, ..., 7 means results are wrong, 8 means something # unexpected failed, see @exit_status above): my $exit_code=1; # Loop through exit codes, starting with most serious one: foreach my $status (@exit_status) { # Exit if any of the exit codes stored in %failed matches # $status: if (grep /^$status$/, values %failed) { exit $exit_code; } else { $exit_code++; } } exit $exit_code; # Still here? Then exit now. } } } else { ## No dirs in cmd line -> do standard program if ($deep) { # Scan samples/ tree recursively find_test_dirs("$topdir/samples"); # populates @testdirs } elsif ($here) { # Scan the current directory for autotests find_test_dirs("."); } else { # Use predefined directories my @sampdirs; if (defined($level)) { @sampdirs = get_sampdirs($level); } else { $max_level = $default_level unless defined($max_level); @sampdirs = get_sampdirs("0-".$max_level); } @testdirs = ( map { "$topdir/samples/$_" } @sampdirs, ); } if ($file ne "") { my @testdirs_tmp = (); for my $d (@testdirs) { my $filecheck = `grep -w "$file" $d/src/Makefile.local | wc -l`; if ($filecheck > 0) { push(@testdirs_tmp,($d)); } } @testdirs = @testdirs_tmp; } $ntests = @testdirs; if ($nodelist) { # Run tests in parallel my @nodes=split /,/ , $nodelist; my $passthru_opts = ''; if ($clean) { $passthru_opts .= '-C '; } if ($makejobs) { $passthru_opts .= " --jobs=$makejobs"; } if ($parallel) { $passthru_opts .= " --parallel"; } if ($opts{'c'} || $opts{'compile-only'}) { $passthru_opts .= ' -c'; } do_tests_in_parallel(\@testdirs, \@nodes, $passthru_opts); } else { # Run tests consecutively for my $d (@testdirs) { test_rundir("$d", $makeopts); } } } # ---------------------------------------------------------------------- # sub check_timeout_found { # Check if the timeout command exists on the system. return 0; #Kishore: disabling the use of timeout for now as it causes problems on Norlx51 return (`which timeout` ne ""); } # ---------------------------------------------------------------------- # sub get_sampdirs { # Return list of sample dirs that belong to $level . my $level = shift; if ($level =~ /^\s*(\d+)\s*\-+\s*(\d+)\s*$/s) { my $upper = $2; $level = $1; if ($2 < $1) { $level = $2; $upper = $1; } if ($upper > $level) { for (my $pos = $level + 1; $pos <= $upper; $pos++) { $level .= ','.$pos; } } } my @requested_levels = split (/[+,; ]+/s, $level); my @dirs; foreach my $requested (sort {$a <=> $b} @requested_levels) { my @levels = grep { $_ == $requested } keys %tests; foreach my $lev (sort {$a <=> $b} @levels) { push @dirs, @{$tests{$lev}}; } } return @dirs; } # ---------------------------------------------------------------------- # sub handle_lock_file { # Try to aquire a lock and write a lock file containing our pid. # Return true if that succeeded, false otherwise. 
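# The lock file stores the PID of the auto-test that owns it; get_lock()
# below treats a lock as stale if `ps -p <pid> -o args=` no longer shows a
# running auto-test under that PID.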
    my $got_lock = get_lock();
    print STDERR "handle_lock_file: \$got_lock = $got_lock\n" if ($debug);

    # Write our own lock file
    if ($got_lock) {
        print STDERR "Writing <$$> to lock file $lockfile\n" if ($debug);
        write_to_file($lockfile, "$$", 0, 1);
        $remove_lock = 1;       # clean up lock at END
    } else {
        $failed++;
        $failure_message .= "Couldn't get lock file\n";
    }

    return $got_lock;
}
# ---------------------------------------------------------------------- #
sub get_lock {
# Check for existing lock file and return false if there is one, or
# write one and return true.
    if (-e $lockfile) {         # lock file exists -> get and check pid
        if (-z $lockfile) {     # empty lock file (happened at least once)
            print "Removing empty lock file\n";
            unlink $lockfile or die "Cannot unlink <$lockfile>\n";
            return 1;
        }
        open(LOCK, "<", $lockfile) or die "Cannot read <$lockfile>";
        my $pid = <LOCK>;
        close LOCK;
        $pid =~ s/^\s*([0-9]+)\s*$/$1/
            or die "$lockfile contains garbage instead of pid: <$pid>\n";
        my $pid_cmd = `ps -p $pid -o args=`; # Who runs with pid from lockfile?
        if ($pid_cmd =~ /^\S*$/) {           # no process for this pid
            print "Removing stale lock file (no process with pid $pid)\n";
            unlink $lockfile or die "Cannot unlink <$lockfile>\n";
            return 1;
        } elsif ($pid_cmd =~ /auto-test\s*$/) { # pid belongs to auto-test
            print "Failed getting lock (lock file is $lockfile):\n"
                . "Another auto test is running as pid $pid\n"
                . "Use 'pc_auto-test --nolock' to ignore the lock file\n";
            return 0;
        } else {                # pid belongs to other process
            print "Failed getting lock (lock file is <$lockfile>):\n"
                . "Another process runs as pid $pid (this is weird...)\n"
                . "Use 'pc_auto-test --nolock' to ignore the lock file\n";
            return 0;
        }
    } else {
        print STDERR "handle_lock_file: Found no lock file\n" if ($debug);
        return 1;
    }
}
# ---------------------------------------------------------------------- #
sub do_tests_in_parallel {
# Distribute auto tests over nodes contained in $nodelist
    my $jobs          = shift;
    my $nodelist      = shift;
    my $passthru_opts = shift;

    my @queue = @$jobs;
    my %nodes;
    for my $node (@$nodelist) {
        $nodes{$node}{'busy'} = 0;
        $nodes{$node}{'pid'}  = 0;
    }
    my $running = 0;
    my $queued  = scalar @queue;
    while ((scalar @queue) || $running) {
        if (scalar @queue) {
            foreach my $node (keys %nodes) {
                next if $nodes{$node}{'busy'};
                my $job = shift @queue;
                $nodes{$node}{'busy'} = $job;
                $running++;
                if (! ($nodes{$node}{'pid'} = fork)) {
                    my $nodeb = $node;
                    exec "$rsh $node pc_auto-test --no-summary $passthru_opts $job | sed 's/^/$nodeb : /'";
                    # didn't get here
                }
            }
        }
        if ($running) {
            my $pid = wait;
            if ($pid != -1) {
                my $result = $?;
                $running--;
                foreach my $node (keys %nodes) {
                    if ($nodes{$node}{'pid'} == $pid) {
                        my $exit_code = ($result >> 8);
                        $failed{$nodes{$node}{'busy'}} = $exit_status[$exit_code-1]
                            if ($exit_code);
                        $nodes{$node}{'busy'} = 0;
                        $nodes{$node}{'pid'}  = 0;
                        last;
                    }
                }
            }
        }
    }
}
# ---------------------------------------------------------------------- #
sub get_log_link {
# Create the HTML code for a link to a logfile.
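# Maps the phase name to the short name of its logfile ('compilation' ->
# 'build', 'starting' -> 'start', 'running' -> 'run'); any other phase
# yields an empty string, i.e. no link is generated.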
my $dir = shift; my $name = shift; if ($name eq 'running') { $name = 'run'; } elsif ($name eq 'starting') { $name = 'start'; } elsif ($name eq 'compilation') { $name = 'build'; } else { return (''); } return (' [log]'); } # ---------------------------------------------------------------------- # sub my_ok { # Similar to Test's and Test::Harness' ok() function: consider success if # first two argumets are equal, otherwise report a problem and print the # third argument (which should normally contain the output from the shell # calls that we are normally testing here). # Args #4 and #5 are the current run directory and the phase (compilation, # starting, running) we are in. my $arg1 = shift; my $arg2 = shift; my $mesg = (shift || ""); chomp($mesg); my $dir = shift; my $phase = shift; my $quiet = (shift || 0); my $dt = shift; my $timestr; if (defined($dt) && $time) { $timestr = s_to_hms($dt,7); } else { $timestr = ''; } $dir =~ s|^.*/samples/||s; my $log_link = ''; if ($log_dir) { if (!-e $log_dir.'/'.$dir) { system ('mkdir -p "'.$log_dir.'/'.$dir.'"'); } if ($phase eq 'compilation') { # store full output of the build open (FH, '>', $log_dir.'/'.$dir.'/build.log') or die "\n".$!."\n on '".$log_dir."/".$dir."/build.log'\n"; print FH $mesg; close (FH); } $log_link = get_log_link($dir, $phase); if ($log_link) { # suppress error output if we have a logfile $mesg = ''; } } # Allow for calls like 'ok(0)' or 'ok(1)': if (!defined($arg2)) { $arg2 = $arg1 ? $arg1 : 1; } if ($arg1 eq $arg2) { print " ok $timestr$log_link\n" unless ($quiet); if ($phase eq 'results') { mark_success ($dir); } } else { print " not ok: $timestr$log_link\n$mesg\n"; $test_status = 1; # report only first failure: $failed{$dir} = $phase unless defined($failed{$dir}); mark_failed ($dir); } # Record time usage in an easily greppable format if ($summarize_t) { record_time($phase, $dt); } } # ---------------------------------------------------------------------- # sub record_time { # Store a time value so we can summarize times when done with this # directory my ($phase, $time) = @_; if (defined($time)) { $time_summary{$phase} = $time; } } # ---------------------------------------------------------------------- # sub find_test_dirs { # Traverse the samples/ directory tree and find all run directories eval { require File::Find }; if ($@) { die "Could not load File::Find (required for the --deep and --here option):\n" . "$@\n"; } import File::Find qw( find ); my @rootdirs = @_; find( { wanted => \&FF_wanted, follow => 1, follow_skip => 2, }, @rootdirs); # $File::Find::prune = 1; } # ---------------------------------------------------------------------- # sub FF_wanted { # 'wanted' function for File::Find, i.e. callback function that is called # from each file or directory found. 
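# Inside this callback $_ holds the basename of the current entry while
# $File::Find::name holds its full path; setting $File::Find::prune
# prevents File::Find from descending into the current directory.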
my $name = $File::Find::name; # Suppress spurious 'used only once' warning: my $dummy = "$File::Find::prune $File::Find::name"; if (-d $name) { if ($_ =~ /$prunedirs/) { # Don't recurse into well-known boring dirs: $File::Find::prune = 1; } else { # Directory, but not boring: if ( $list_all || -r "$name/reference.out" || -r "$name/reference.out.double" ) { push @testdirs, $name; } } } } # ---------------------------------------------------------------------- # sub check_cmd_result { # Check result of command my $error = shift; my $arg = shift; my $msg = shift; my $dir = (shift || ""); my $phase = (shift || ""); my $quiet = (shift || 0); # flag for suppressing the 'ok' my $dt = shift; if (($phase eq 'starting') || ($phase eq 'running')) { # Check for missing MPI option "--oversubscribe" if ($msg =~ /use the [\-]*oversubscribe option/is) { my_ok(0, 1, "Not enough CPU cores! Try 'mpiexec' with the '--oversubscribe' option.", $dir, $phase); return; } # Check for a LOCK file that is in the way if ($msg =~ /^\s*pc_run:\s+found LOCK file([\s.]|$)/im) { my_ok(0, 1, "There is a LOCK file in the way! Please check if it can be removed.", $dir, $phase); return; } # Check for broken pencil check or floating-point exception if ($msg =~ /^\s*pencil_consistency_check:.*?\s+failed([\s,.]|$)/im) { my_ok(0, 1, "Pencil consistency check failed.", $dir, $phase); return; } if ($msg =~ /floating[\s\-]point exception([\s:.]|$)/is) { my_ok(0, 1, "Floating-point exception.", $dir, $phase); return; } # Check for Fortran runtime error if ($msg =~ /^\s*Fortran runtime error[\s:.]*(.*)$/im) { my_ok(0, 1, "Fortran runtime error: ".$1, $dir, $phase); return; } } my_ok($error, $arg, $msg, $dir, $phase, $quiet, $dt); # Give information on floating-point exceptions if ($msg =~ /floating[\s\-]point exceptions recorded:[^\n]*\n+(.*?)(?=\n\S)/is) { my $exceptions = $1; # $exceptions =~ s/\s+share_SN_parameters:.*$//is; # temporal workaround, uncomment only if needed $exceptions =~ s/^\s+/\t* /gm; print "\tFloating-point exceptions:\n".$exceptions."\n"; } } # ---------------------------------------------------------------------- # sub test_shell_cmd { # As the name says... my $cmd = shift; my $dir = (shift || ""); my $phase = (shift || ""); my $quiet = (shift || 0); # flag for suppressing the 'ok' my $t_ref = shift; print STDERR "\nRunning `$nice $cmd`\n" if ($debug); $! = 0; # reset my $res = `$nice $cmd 2>&1`; my ($cmdstatus, $cmdmesg) = ($?, $!); if ($auto_clean && ($phase eq 'compilation') && ($cmdstatus != 0)) { # Retry compilation after cleaning test_shell_cmd("@pc_build --cleanall", $dir, "cleaning up", QUIET); return "ERROR running @pc_build --cleanall" if ($test_status); print STDERR "\nRetrying `$nice $cmd`\n" if ($debug); $! = 0; # reset $res = `$nice $cmd 2>&1`; ($cmdstatus, $cmdmesg) = ($?, $!); } $res = '' unless defined($res); chomp($res); if ($res ne '') { $res .= ' '; } $res .= "['$nice $cmd' reported: $cmdmesg]" if ($cmdstatus != 0); print STDERR " ..done \n" if ($debug); if (($phase eq 'compilation') && ($postproc ne '')) { print STDERR "Postprocessing output..\n" if ($debug); $res = postprocess($res); } my $dt; $dt = get_time()-$t_ref if (defined($t_ref)); # otherwise leave undefined check_cmd_result($cmdstatus, 0, $res, $dir, $phase, $quiet, $dt); # $? 
= 0 means success return $res; # Return output so we can analyze it if desired } # ---------------------------------------------------------------------- # sub test_timelimited_shell_cmd { # Test shell command, but run it through reaper to limit total time it is # allowed to take my $cmd = shift; my $maxtime = shift; my $procname = shift; my @rest = @_; my $time_limit; if (Scalar::Util::looks_like_number($maxtime)) { $time_limit = $maxtime; } else { #$failed++; #dirty hack to make sure an exception in the line below is detected as a failing test. $time_limit = Test::Parse::parse_time($maxtime); #$failed--; } if ($timeout_command_found) { return test_shell_cmd("timeout -v --kill-after=10s --preserve-status $time_limit $cmd", @rest); } else { return test_shell_cmd("reaper -t '$maxtime,$procname' '$cmd'", @rest); } } # ---------------------------------------------------------------------- # sub test_perl_cmd { # Execute a Perl function. # The first argument is a ref to a function that returns its status as # ($error_count, $message). # Return error message. my $cmd = shift; return test_timelimited_perl_cmd($cmd, 0, @_); my $dir = (shift || ""); my $phase = (shift || ""); my $quiet = (shift || 0); # flag for suppressing the 'ok' my $t_ref = shift; print STDERR "\nRunning Perl command\n" if ($debug); my ($error_count, $mesg) = &$cmd(); $res = $error_count ? "[$mesg]" : ''; print STDERR " ..done \n" if ($debug); my $dt; $dt = get_time()-$t_ref if (defined($t_ref)); # otherwise leave undefined my_ok($error_count, 0, $res, $dir, $phase, $quiet, $dt); return $res; # Return output so we can analyze it if desired } # ---------------------------------------------------------------------- # sub test_timelimited_perl_cmd { # Execute a Perl function within a limited time. # The first argument is a ref to a function that returns its status as # ($error_count, $message). # Return error message. my $cmd = shift; my $time_limit_str = shift; my $dir = (shift || ""); my $phase = (shift || ""); my $quiet = (shift || 0); # flag for suppressing the 'ok' my $t_ref = shift; print STDERR "\nRunning Perl command\n" if ($debug); my $time_limit; if (Scalar::Util::looks_like_number($time_limit_str)) { $time_limit = $time_limit_str; } else { $time_limit = Test::Parse::parse_time($time_limit_str); } my ($error_count, $res) = (0, ''); eval { local $SIG{ALRM} = sub { die "timed out\n" }; # NB: \n is required here alarm $time_limit if $time_limit != 0; # TODO: use HiRes::ualarm() my ($good, $bad, $output) = call_sub_catch_output($cmd); alarm 0; # clear alarm $error_count += $bad; $res .= $output if $bad; print STDERR "good: $good, bad: $bad, errors: $error_count\n" if $debug; }; if ($@) { die unless $@ eq "timed out\n"; # propagate unexpected errors $error_count++; $res .= ' -- ' if defined $res; $res .= 'Timed out'; } print STDERR " ..done \n" if ($debug); my $dt; $dt = get_time() - $t_ref if defined $t_ref; # otherwise leave undefined my_ok($error_count, 0, $res, $dir, $phase, $quiet, $dt); return $res; # Return output so we can analyze it if desired } # ---------------------------------------------------------------------- # sub call_sub_catch_output { # Call the given $cmd, intercepting output to stdout and stderr. # Return the return value of &$cmd() and a string containing the output. my $cmd = shift; # We need to explicitly mark the end of conversation over the pipe, or # else the reader hangs (why does it not realize that the pipe is # closed?). 
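# Protocol: the child runs &$cmd with its STDOUT/STDERR captured through
# the piped open('-|'), and passes the return values of &$cmd back to the
# parent over a separate pipe, one value per line, terminated by the
# $end_marker line defined below.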
my $end_marker = '## DONE ##'; my $ipc_prefix = 'IPC: '; my $output; my $comm; my @retval; pipe my $read_retval_handle, my $write_retval_handle; my $child_id = open($comm, '-|'); unless (defined $child_id) { die "Cannot fork: $!"; } if ($child_id) { # parent --> read output from child print STDERR "Parent here\n" if $debug; while (<$comm>) { $output .= $_; } close $comm || warn "child exited $?"; while (<$read_retval_handle>) { chomp; last if $_ eq $end_marker; push @retval, $_; } close $read_retval_handle; } else { # child --> run &$cmd print STDERR "Child here\n" if $debug; $skip_end_block = 1; # don't remove lock, etc. yet open(STDERR, ">& STDOUT"); # also redirect STDERR to $comm @retval = &$cmd(); # Send @retval to parent for my $value (@retval) { print $write_retval_handle "$value\n"; } print $write_retval_handle "$end_marker\n"; close $write_retval_handle; exit; } return @retval, $output; } # ---------------------------------------------------------------------- # sub test_compile { # Test compilation only if ($t_comp) { my $dir = shift; my @makeopts = (shift); # gets one string ("" or "-j N") my $t_ref = shift; # start time print " Compiling.. "; # First pc_setupsrc, then make clean (if requested). Must be done # in this order as pc_setupsrc links the Makefile to the run directory. test_shell_cmd("pc_setupsrc --pencil-home $pencil_home", $dir, "setting up directory", QUIET); return 'ERROR calling pc_setupsrc' if ($test_status); if ($clean) { test_shell_cmd("@pc_build --cleanall", $dir, "cleaning up", QUIET); return "ERROR running @pc_build --cleanall" if ($test_status); } # Compile in double precision if we have data for that if (-r 'print.in.double' && -r 'reference.out.double') { push @makeopts, 'REAL_PRECISION=double'; $reference_out = 'reference.out.double'; printf "$indi_fmt ", '[double]'; } else { $reference_out = 'reference.out'; printf "$indi_fmt ", ''; } if ($fast) { push @makeopts, 'FFLAGS+=-O0'; } print STDERR "\@makeopts = @makeopts\n" if ($debug); test_shell_cmd("@pc_build @makeopts", $dir, "compilation", NOQUIET, $t_ref); } } # ---------------------------------------------------------------------- # sub test_start { # Test starting only my $dir = shift; my $t_ref = shift; # start time if ($t_start) { print " Starting.. "; printf "$indi_fmt ", ''; # Start the initialization my $start_cmd = "@pc_run"; if ($log_dir && not( -l $log_dir )) { my $sub_dir = $dir; $sub_dir =~ s|^.*/samples/||s; $start_cmd .= ' --log-dir="'.$log_dir."/".$sub_dir.'"'; } $start_cmd .= " --log start"; if ($time_limit) { $res = test_timelimited_shell_cmd( $start_cmd, $time_limit, 'start.x', $dir, "starting", NOQUIET, $t_ref ); } else { $res = test_shell_cmd( $start_cmd, $dir, "starting", NOQUIET, $t_ref ); } } } # ---------------------------------------------------------------------- # sub test_run { # Test running only (does not analyze output, but returns it) my $dir = shift; my $t_ref = shift; # start time if ($t_run) { print " Running.. 
"; # Handle KEEP_RELOADING file (our -r/--reload option) my $reload_file = "$dir/KEEP_RELOADING"; if (-e $reload_file) { warn "Found file $reload_file -- deleting\n"; unlink $reload_file or die "Cannot unlink $reload_file!\n"; } if ($reload) { write_to_file($reload_file, ''); printf "$indi_fmt ", '[KEEP_RELOADING]'; } else { printf "$indi_fmt ", ''; } # Run the Code my $run_cmd = "@pc_run"; if ($log_dir && not( -l $log_dir )) { my $sub_dir = $dir; $sub_dir =~ s|^.*/samples/||s; $run_cmd .= ' --log-dir="'.$log_dir."/".$sub_dir.'"'; } $run_cmd .= " --log run"; if ($suppress_check) { $run_cmd .= " --no-pencil-check"; } if ($time_limit) { $res = test_timelimited_shell_cmd( $run_cmd, $time_limit, 'run.x', $dir, "running", NOQUIET, $t_ref ); } else { $res = test_shell_cmd( $run_cmd, $dir, "running", NOQUIET, $t_ref ); } # Clean up if (-e $reload_file) { unlink $reload_file or die "Cannot unlink $reload_file!\n"; } } } # ---------------------------------------------------------------------- # sub test_results { # Analyze results from code my $dir = shift; if ($t_res) { my ($rdmsg,$diagn); print " Validating results.. "; printf "$indi_fmt ", ''; my $ts_file = 'data/time_series.dat'; if (! -r $reference_out) { my_ok(0, 1, "File $reference_out missing or unreadable", $dir, "results"); return; } if (! -r $ts_file) { my_ok(0, 1, "File $ts_file missing or unreadable", $dir, "results"); return; } my $comparator; eval { $comparator = Test::NumericFileComparator->new($reference_out); }; if ($@) { my_ok(0, 1, join ("\n",$@), $dir, "results"); return; } my @message; eval { @message = $comparator->compare($ts_file); }; if ($@) { my_ok(0, 1, join ("\n",$@), $dir, "results"); return; } if (@message) { my $msg = join( "\n ", "Files $reference_out, $ts_file differ:", @message ); $msg .= "\n"; $msg .= join(" \n", 'Expected:', read_lines($reference_out)); $msg .= "\n"; $msg .= join(" \n", 'Got:', read_lines($ts_file)); my_ok(0, 1, $msg, $dir, "results"); return; } # additional "reference_XYZ.out" versus "data/XYZ.dat" checks my @refs = glob ("reference_*.out"); my ($ref_file, $cmp_file, @ref_lines, @cmp_lines, $ref_num, $cmp_num, @ref_values, @cmp_values, $ref_nv, $cmp_nv, $row, $col, $ref, $cmp); foreach $ref_file (@refs) { if ($ref_file !~ /^reference_(.*?)\.out$/s) { next; } $cmp_file = "data/".$1.".dat"; if (!-r $ref_file || !-r $cmp_file) { next; } # data file does not exist $cmp = compare ($ref_file, $cmp_file); if ($cmp <= 0) { next; } # same content or error while reading @ref_lines = read_values ($ref_file); @cmp_lines = read_values ($cmp_file); $ref_num = $#ref_lines; $cmp_num = $#cmp_lines; if ($ref_num != $cmp_num) { my_ok(0, 1, "number of lines in reference and data of \"".$cmp_file."\" is different (".$ref_num." != ".$cmp_num.")", $dir, "results"); return; } for ($row = 0; $row <= $ref_num; $row++) { eval ('@ref_values = '.$ref_lines[$row]); eval ('@cmp_values = '.$cmp_lines[$row]); $ref_nv = $#ref_values; $cmp_nv = $#cmp_values; if ($ref_nv != $cmp_nv) { my $diff = "< ".$ref_lines[$row]."\n"."> ".$cmp_lines[$row]; my_ok(0, 1, "number of reference and data columns at line ".($row+1)." of \"".$cmp_file."\" is different (".$ref_nv." != ".$cmp_nv.")\n".$diff, $dir, "results"); return; } for ($col = 0; $col <= $ref_nv; $col++) { $ref = $ref_values[$col]; $cmp = $cmp_values[$col]; if ($ref != $cmp) { my $diff = "< ".$ref_lines[$row]."\n"."> ".$cmp_lines[$row]; my_ok(0, 1, "value in column ".($col+1)." at line ".($row+1)." of \"".$cmp_file."\" does not match the reference (".$ref." 
!= ".$cmp.")\n".$diff, $dir, "results");
                        return;
                    }
                }
            }
        }
        my_ok(0, 0, "", $dir, "results");
    }
}
# ---------------------------------------------------------------------- #
sub test_scripts {
# Run test scripts if available
    my $dir   = shift;
    my $t_ref = shift;          # start time

    return unless $t_script;

    my $tester = Test::ScriptTester->new([$dir], \%interpreter_map);
    my @tests = $tester->list_tests();
    return unless @tests;

    if ($debug) {
        print "\nRunning test scripts:\n";
        for my $t (@tests) {
            my ($test_dir, $test_ref) = @{$t};
            my ($test_file, $test_type) = @{$test_ref};
            print " -> $test_dir/$test_file ($test_type)\n";
        }
    }

    print " Running script tests.. ";
    printf "$indi_fmt ", '';

    my $test_cmd = sub { $tester->run_tests(@tests); };
    if ($time_limit) {
        print "\nRunning Perl cmd with time limit $time_limit\n" if $debug;
        test_timelimited_perl_cmd(
            $test_cmd, $time_limit, $dir, "running script tests", NOQUIET, $t_ref
        );
    } else {
        print "\nRunning Perl cmd\n" if $debug;
        test_perl_cmd(
            $test_cmd, $dir, "running script tests", NOQUIET, $t_ref
        );
    }
}
# ---------------------------------------------------------------------- #
sub interpreter_map_from {
# Create the appropriate script tester.
# $types_arg is either
#   - 'DEFAULT': use default interpreters
#   - a comma-separated list of types and key-value pairs, e.g.
#     'python,idl:/usr/bin/gdl': use the default interpreter for python
#     tests and gdl for idl tests.
    my ($types_arg) = @_;

    my %default_interpreters = Test::ScriptTester::get_default_interpreters();

    my %interpreters;
    if ($types_arg eq 'DEFAULT') {
        %interpreters = %default_interpreters;
    } else {
        foreach my $type_spec (split(',', $types_arg)) {
            my $regex = qr{
                ^ \s *          # leading whitespace
                ( [^:] * )
                (?:             # don't group
                    \s * : \s * # colon with optional whitespace
                    ( .*? )
                )?
                \s * $          # trailing whitespace
            }x;
            $type_spec =~ m/$regex/;
            my ($type, $interpreter) = ($1, $2);
            if (! defined $interpreter) {
                $interpreter = $default_interpreters{$type};
            }
            if (! defined $interpreter) {
                croak("Cannot find interpreter for $type\n");
            }
            $interpreters{$type} = $interpreter;
        }
    }
    return %interpreters;
}
# ---------------------------------------------------------------------- #
sub read_lines {
# Read file and return list of non-empty lines
    my $file = shift;

    my @lines = ();
    my $msg = "";
    {
        local $/ = undef;       # read in whole file
        if (open (REF, "< $file")) {
            @lines = grep { /\S/ } split(/[\n\r]+/, <REF>);
            # Remove leading comment sign from header line:
            $lines[0] =~ s/^(\s*)#/$1 /;
        } else {
            $msg = "Couldn't open $file";
        }
    }
    $_[0] = $msg;
    @lines;
}
# ---------------------------------------------------------------------- #
sub read_values {
# Read file and return list of non-empty values
    my $file = shift;

    my $msg = "";
    my $data = join ("\n", read_lines ($file));
    $data =~ s/[\t\r\f]+/ /gs;
    $data =~ s/^ +//gm;
    $data =~ s/ +$//gm;
    $data =~ s/ +/,/gs;
    $data = "(".$data.")";
    $data =~ s/\n\s*/)\n(/gs;
    return (split ("\n", $data));
}
# ---------------------------------------------------------------------- #
sub test_rundir {
# Full program for one run directory: Test compilation, starting, running,
# results, scripts, and cleanup
    my $dir      = shift;
    my $makeopts = shift;

    my $t0 = get_time();
    $test_status = 0;           # so far, everything is OK
    %time_summary = ();

    # Indicate current run directory in process name (for ps)
    my $shortdir = $dir;
    $shortdir =~ s{^$topdir/}{};   # remove common directory prefix
    $shortdir =~ s{^samples/}{};   # remove leading 'samples/'
    $0 =~ s/^\s*(\[.*?\])?\s*/[$shortdir] /;

    chdir $cwd;

    # Go to directory and identify it
    if (!
-d $dir) { print STDERR "No such directory: $dir\n"; return; } chdir $dir; my $cwd = cwd(); if ($short) { my $full_cwd = $cwd; $cwd =~ s{.*pencil[^/]*/}{}i; unless ($cwd) { $cwd = $full_cwd; } } if ($list_only) { print "$cwd\n"; return; } print "\n$cwd:"; print " ($itest/$ntests)" if ($ntests>1); print "\n"; $itest++; # Make sure we have everything we need if (! defined(-e 'src/run.f90')) { # has 'pc_setupsrc' been run yet? my $res = `pc_setupsrc --pencil-home $pencil_home 2>&1`; if ($?) { print " Problems running pc_setupsrc:\n", $res; } } # Remove orphaned or broken data directory links if (-l "data" && !-e "data") { unlink ("data"); } # 1. Test compilation test_compile($dir, $makeopts, $t0); return if ($test_status); my $t1 = get_time(); $t_global{'compile'} += ($t1-$t0); # 2. Ensure we have a data directory and test starting... if ($t_start) { # no need for a data directory unless we start # the code $created_datadir = check_for_datadir(); test_start($dir, $t1); goto cleanup if ($test_status); } # 3. Test running, if compiling and starting was OK my $t2 = get_time(); test_run($dir, $t2); return if ($test_status); my $t3 = get_time(); $t_global{'start+run'} += ($t3-$t1); # 4. Check the output test_results($dir); # 5. Run scripts test_scripts($dir); # Clean up if necessary cleanup: if ($created_datadir) { system("rm", "data"); system("rm", "-r", "$tmpdir"); } system("rm", "-f", "pc_commands.log"); system("rm", "-f", "src/.build-history"); system("rm", "-f", "src/.buildinfo"); system("rm", "-f", "src/.buildtime"); system("rm", "-f", "src/revision.txt"); system("rm", "-f", "src/pre_and_post_processing/*.x"); # system("rm", "-f", "src/*.x"); # we want to have the final binaries in the Travis cache to save the linking stage, if possible. # Summarize timings in human-readable form my $t4 = get_time(); if ($time) { print " Time used: ", s_to_hms(time_diff($t0,$t3), 44), " = ", s_to_hms(time_diff($t0,$t1)), " + ", s_to_hms(time_diff($t1,$t3)), "\n"; } # Write timings in greppable form if (%time_summary) { record_time('total', time_diff($t0,$t3)); for my $phase (qw( compilation starting running total )) { write_timing_summary($dir, $phase, \%time_summary); } } } # ---------------------------------------------------------------------- # sub write_timing_summary_header { # Write comment to make timing summary files more comprehensible printf "%s %s\n", '#TIMING ', '# --------------------------------------------------'; printf "%s %s\n", '#TIMING ', '# Date: ' . localtime() . ", host: $host"; printf "%s %-45s %-12s %-12s %-7s\n", '#TIMING ', '# directory', 'phase', 'time-stamp', 'seconds'; } # ---------------------------------------------------------------------- # sub write_timing_summary { # Write timings summary in greppable form my ($dir, $phase, $summary_ref) = @_; my $time = $summary_ref->{$phase}; if (defined($phase) && defined($time)) { printf "%s %-45s %-12s %12.1f %7.3f\n", '#TIMING ', compactify(quote($dir)), ($phase), time_stamp(), $time; } } # ---------------------------------------------------------------------- # sub check_for_datadir { # Make sure we have an appropriate data directory my $created_dir = 0; if (! -d 'data') { if ($mkdatadir) { if (-l 'data') { print " data is file or empty link;\n" . " removing and generating data directory \n"; system("rm", "data"); } else { print " No data directory; generating data directory\n"; } system("mkdir -p data"); } else { if (-l 'data') { print " data is file or empty link;\n" . 
" removing and generating data -> $tmpdir\n"; system("rm", "data"); } else { if ($debug) { print " No data directory; generating data -> $tmpdir\n"; } } system("mkdir -p $tmpdir; ln -s $tmpdir data"); $created_dir = 1; } } return $created_dir; } # ---------------------------------------------------------------------- # sub time_diff { # Return difference of times if both args are defined, undef otherwise my $t1 = shift; my $t2 = shift; if (defined($t1) && defined($t2)) { return $t2-$t1; } else { return undef; } } # ---------------------------------------------------------------------- # sub s_to_hms { # Convert no. of seconds to [ddd][hh:]mm:ss string my $secs = shift; my $width = (shift || 0); my $string; # Not much to do if arg is undef: if (! defined($secs)) { $string = 'undef'; } else { my $ss = $secs % 60; my $mm = floor($secs/60) % 60; my $hh = floor($secs/3600) % 24; my $dd = floor($secs/86400); $string = sprintf("%02d:%02d", $mm,$ss); if ($hh) { $string = sprintf("%02d:", $hh) . $string }; if ($dd) { $string = sprintf("%dd", $dd) . $string }; } if (length($string) < $width) { $string = (" " x ($width-length($string))) . $string; }; return $string; } # ---------------------------------------------------------------------- # sub min { # Numerical minimum my ($a, $b) = @_; if ($a+0 < $b+0) { $a; } else { $b; } } # ---------------------------------------------------------------------- # sub quote { # Quote most non-word characters in a string, but not a few common ones my ($text) = @_; $text = "\Q$text\E"; $text =~ s{\\(/|-|_)}{$1}g; # unquote some chars return $text; } # ---------------------------------------------------------------------- # sub compactify { # Remove uninteresting parts from a run directory my ($text) = @_; $text =~ s{/+\\\.$}{}; # drop trailing '/.' from path names $text =~ s|.*$pencil_home/||; $text =~ s{.*/pencil-(code|auto-test)/}{}; return $text; } # ---------------------------------------------------------------------- # sub time_stamp { # Return a time stamp based on unix time (epoch 1970/1/1) return get_time() - get_unix_epoch_start(); } # ---------------------------------------------------------------------- # sub get_unix_epoch_start { # Get the time value for 1970/01/01 0:00 UTC -- even on operating systems # that use a different epoch. # On Unix, this function returns 0. 
    my $t0;
    {
        require POSIX;
        my $old_TZ = $ENV{TZ};
        $ENV{TZ} = 'UCT';
        POSIX::tzset();
        $t0 = POSIX::mktime(0, 0, 0, 1, 0, 70);
        if (defined $old_TZ) {
            $ENV{TZ} = $old_TZ;
        } else {
            delete $ENV{TZ};
        }
    }
    return $t0;
}
# ---------------------------------------------------------------------- #
sub get_time {
# Wrapper around time() that uses Time::HiRes if possible
    my $time = 0;
    eval {
        require Time::HiRes;
        $time = Time::HiRes::time();
    };
    return $time || time();
}
# ---------------------------------------------------------------------- #
sub hi_res_sleep {
# Sleep $duration seconds, with sub-second accuracy
    my ($duration) = @_;

    eval {
        require Time::HiRes;
        Time::HiRes::usleep($duration * 1e6);
        return;
    };
    select(undef, undef, undef, $duration);
    return;
}
# ---------------------------------------------------------------------- #
sub postprocess {
# Clean compiler output with postprocessor
    my $text = shift;

    print STDERR " PP: Cleaning compiler output with <$postproc>\n" if ($debug);

    # open read/write pipe to postprocessor (see 'man perlipc')
    my $pid = open2(\*POSTPROCD, \*UNPROCD, "$postproc 2>&1");
    die "Couldn't start $postproc" unless defined($pid);

    # fork() a child to avoid deadlock if $text is large
    my $child_id;
    if ($child_id=fork()) {     # parent --> read output
        close UNPROCD;          # important
        local $/ = undef;       # get it all at once
        $text = <POSTPROCD>;
        close POSTPROCD;        # not necessary, I guess
        waitpid($child_id,0);   # wait for child to finish to avoid zombie
        wait();                 # bizarre enough there is another zombie..
    } else {                    # child --> write to postproc
        die "Cannot fork: $!" unless defined($child_id);
        $nosummary = 1;         # don't waffle when exiting
        close POSTPROCD;        # apparently not necessary
        print UNPROCD $text;
        close UNPROCD;
        exit 1;                 # irrelevant
    }

    print STDERR " PP: done postprocessing\n" if ($debug);
    $text;
}
# ---------------------------------------------------------------------- #
sub write_to_file {
# Write TEXT to file FILE, aborting if file exists (unless OVERWRITE is
# set), and write a helpful header (unless NOHEADER is set)
    my $file      = shift;
    my $text      = shift || '';
    my $overwrite = shift || 0;
    my $noheader  = shift || 0;

    my $header = !$noheader;    # Avoid iterated negation (=spinning head)

    if (-e $file && !$overwrite) {
        die "File $file exists!\n";
    } else {
        open(FILE,"> $file") or die "Cannot open file $file for writing\n";
        if ($header) {
            my $shortfile = $file;
            $shortfile =~ s{/\./}{/}g;  # remove './' path components
            $shortfile =~ s{^.*?([^/]*/?[^/]*)$}{$1}g;  # keep only last path component
            print FILE "$shortfile\n";
            print FILE "-" x length($shortfile), "\n";
            print FILE "Created: ", scalar localtime(), "\n\n";
            print FILE "This file was automatically generated by pc_auto-test," .
" so think twice before\nyou modify it.\n\n"; } chomp($text); print FILE "$text\n" if (length($text)); close FILE or die "Cannot close file $file\n"; } } # ---------------------------------------------------------------------- # sub get_path { # Generate path for marking files my $dir = shift; my $file = shift; my $path = ""; if ($dir) { $path = $dir."/"; } if ($log_dir) { $path = $log_dir."/".$path; } elsif ($pencil_home) { $path = cwd()."/"; } return $path.$file; } # ---------------------------------------------------------------------- # sub get_SVN_revision { # Extract SVN revision from file my $file = shift; my $revision = `head -1 "$file"`; $revision =~ s/^SVN: +//is; chomp $revision; return $revision; } # ---------------------------------------------------------------------- # sub get_GIT_commit { # Extract GIT commit from file my $file = shift; my $commit = `tail -1 "$file"`; $commit =~ s/^GIT: +//is; chomp $commit; return $commit; } # ---------------------------------------------------------------------- # sub mark_failed { # Save revision of first failed auto-test my $dir = shift; my $last = get_path ($dir, "FIRST_FAILED"); # Mark failure only if not previously failed - or if explicity requested! if (!-e $last || $overwrite_fail) { write_revision ($last); } my $readme = ""; my $source = $pencil_home.'/samples/'.$dir.'/README'; if ($log_dir && (-e $source)) { # copy sample's README file into web directory my $destination = get_path ($dir, "README"); if (copy ($source, $destination)) { $readme = ' [README]'; } } my $last_succeeded = get_path ($dir, "LAST_SUCCEEDED"); if (-e $last_succeeded) { print "===> Test still succeeded with revision:\n"; print "SVN: ".get_SVN_revision ($last_succeeded)."\n"; print "GIT: ".get_GIT_commit ($last_succeeded)."\n"; print scalar localtime `stat -L --format="%Y" "$last_succeeded"`; print "\n"; } if (-e $last) { print "===> Test failed at latest with revision:\n"; print "SVN: ".get_SVN_revision ($last)."\n"; my $commit = get_GIT_commit ($last); if ($log_dir) { print 'GIT: '.$commit."\n"; } else { print "GIT: ".$commit."\n"; } print scalar localtime `stat -L --format="%Y" "$last"`; print "\n"; } if ((-e $last_succeeded) && (-e $last)) { get_current_revision(); if (defined ($current_revision)) { my $works = get_SVN_revision ($last_succeeded); my $fails = get_SVN_revision ($last); if ($works < $fails - 1) { if ($bisect) { print "===> Bisecting automatically to find the offending commit...\n"; my $bisect_command = "@pc_bisect --no-lock '$dir' $works $fails"; if ($debug) { print "$bisect_command\n"; } system ($bisect_command); if ($?) { die "ERROR while bisecting: $? 
- $!\n"; } $works = get_SVN_revision ($last_succeeded); $fails = get_SVN_revision ($last); my $offending = get_GIT_commit ($last); print "===> Found the offending commit:\n"; print "SVN: $fails\n"; if ($log_dir) { print 'GIT: '.$offending."\n"; } else { print "GIT: ".$offending."\n"; } } else { print "===> Need to bisect manually to find the offending commit!\n"; } } if ($works == $fails - 1) { my $author = `svn info "$pencil_home" --revision $fails --show-item last-changed-author`; chomp $author; print "===> This is the offending commit from: $author$readme\n"; } elsif ($readme) { print "===> Please also see the $readme\n"; } } else { print "===> Please use SVN checkout for an automatic analysis!\n"; } } elsif ($readme) { print "===> Please also see the $readme\n"; } } # ---------------------------------------------------------------------- # sub mark_success { # Save revision of last succeeded auto-test my $dir = shift; if (!$keep_fail) { unlink get_path ($dir, "FIRST_FAILED"); } write_revision (get_path ($dir, "LAST_SUCCEEDED")); } # ---------------------------------------------------------------------- # sub get_current_revision { # Get current revision only if not previously obtained $current_hash = ""; if (!defined ($current_revision)) { # Get current SVN revision and GIT hash if (-d "$pencil_home/.svn") { # SVN revision $current_revision = `svn info "$pencil_home" --show-item revision`; if (!$current_revision) { $current_revision = "Problem obtaining SVN revision!"; } else { # GIT hash my $tries = 0; while (!$current_hash && ($tries < 4)) { if ($tries > 0) { sleep (3*$tries); } $tries++; $current_hash = `wget -q "https://pencil-code.org/translate.php?output=plain&revision=$current_revision" -O -`; chomp ($current_hash); } } if (!$current_hash) { $current_hash = "No network connection to pencil-code.org!"; } } elsif (-e "$pencil_home/.git") { # GIT hash $current_hash = `git log -1 --format=format:"%H"`; $current_revision = "please use SVN checkout!"; } else { warn "Marking of succeeded or failed auto-tests not possible without SVN or GIT!\n"; return; } chomp ($current_revision, $current_hash); } } # ---------------------------------------------------------------------- # sub write_revision { # Write revision information of auto-test my $file = shift; get_current_revision(); # Write revision information to file open (LAST, ">$file") or die "Error while writing '$file' file: $!\n"; print LAST "SVN: ".$current_revision."\n"; print LAST "GIT: ".$current_hash."\n"; close (LAST) or die "Error while closing '$file' file: $!\n" } # ---------------------------------------------------------------------- # sub git_version { # Return the current version of the code. my $rev = `$pencil_home/utils/pc_identify_revision --one-line`; chomp $rev; return $rev; } # ---------------------------------------------------------------------- # sub get_maintainer_list { # Return comma-separated list of maintaners of directories listed in # %failed, avoiding duplicate entries. my %maintainers; foreach my $dir (keys %failed) { foreach my $maint (get_maintainers("$dir/README")) { $maintainers{$maint}++; } } return join(',', grep /./, keys %maintainers); } # ---------------------------------------------------------------------- # sub get_maintainers { # Given a run directory, return list of maintainers extracted from the # README file. 
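# Example (hypothetical address): a README line
#   Maintainer: Jane Doe <jane.doe@example.org>
# is returned as 'jane:doe/example:org' ('@' -> '/', '.' -> ':'), so that
# addresses shown on the public test summary cannot be harvested directly.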
    my $readme = shift;

    unless (-r $readme) {
        warn "Cannot open $readme\n";
        return ();
    }

    my @emails = ();
    my $reader = Pencil::ReadmeReader->new($readme);
    foreach my $maint ($reader->get_content('Maintainer')) {
        $maint =~ s{(\[at\]|\@)}{/};    # obfuscate '@' (may end up on the web)
        $maint =~ s{(\[dot\]|\.)}{:}g;  # obfuscate '.'
        $maint =~ s{^\s*(.*?)\s*$}{$1}; # strip surrounding whitespace
        if ($maint =~ /<([^>]*)>/) {
            $maint = $1;
        }
        if ($maint =~ m{[-.:a-zA-Z_0-9]+/[-:a-zA-Z_0-9]+}) {
            push @emails, $maint;
        }
    }
    return @emails;
}
# ---------------------------------------------------------------------- #

## Summarize results
END {
    exit 0 if $skip_end_block;

    unless ($help || $nosummary || $list_only) {
        print "\n" . "-" x 70 . "\n";

        # Print failure header that can be identified by pencil-test
        if ($failed || %failed) {
            print "### auto-test failed ###\n";
        }

        # Failed outside individual tests (e.g. other auto test is running)
        if ($failed) {
            print "$failure_message";
        }

        # Failed during some of the individual tests
        if (%failed) {
            print "Failed ", scalar(keys %failed), " test(s) out of $ntests:\n";
            foreach my $dir (sort (keys %failed)) {
                print " $dir (".$failed{$dir}.")\n";
            }
        } else {
            if ($ntests == 1) {
                print "Test succeeded.\n";
            } elsif ($ntests > 1) {
                print "All $ntests tests succeeded.\n";
                # pc_update_validated
                # system("$pencil_home/bin/pc_update_validated.sh &");
            } elsif (($ntests < 1) && ! $failed) {
                print "There was no test to run???\n";
            }
        }

        # Print timing numbers
        if ($time) {
            my @t = times();
            print "\nCPU time (including compilation): ",
                s_to_hms($t[2], 7) . "u ",
                s_to_hms($t[3] ) . "s\n";
            my $t1_global = time();     # end time
            print "Total wall-clock time: ",
                s_to_hms(time_diff($t0_global,$t1_global), 7),
                " = ",
                s_to_hms($t_global{'compile'}),
                " + ",
                s_to_hms($t_global{'start+run'}),
                "\n";
        }

        # Log test status to log file
        if ($logfile) {
            open(my $log, ">>", $logfile)
                or warn "Cannot open $logfile for writing";
            print $log strftime("%Y-%m-%d_%H:%M:%S_%Z ", localtime);
            print $log sprintf("%-22s : ", git_version());
            # if (%failed) {
            #     my $failed_count = scalar(keys %failed);
            #     print $log sprintf("%d/%d failed: ", $failed_count, $ntests);
            #     my $first = 1;
            #     while (my ($dir, $phase) = each %failed) {
            #         if ($first) {
            #             $first = 0;
            #         } else {
            #             print $log ", ";
            #         }
            #         print $log sprintf("%s (%s)", compactify($dir), $phase);
            #     }
            # } else {
            #     print $log sprintf("All %d tests succeeded ", $ntests);
            # }
            print $log "\n";
            close($log);
        }

        # Print info from file
        if (-r $infofile) {
            print "\n------------ Other info: ------------\n";
            open(INFO,"< $infofile") or warn "Couldn't open $infofile\n";
            print while (<INFO>);
            close(INFO);
        }

        # List maintainers of failed tests
        if ($list_maint && %failed) {
            print 'Maintainers of failed tests: ', get_maintainer_list(), "\n";
        }

        print "\n" . "-" x 70 . "\n";

        # List config settings in case of a failure
        if (%failed) {
            print "The following configuration has been used:\n";
            system(@pc_dump_config);
            printf "Perl version v%vd\n", $^V;
            get_current_revision();
            print "SVN: ".$current_revision."\n";
            print "GIT: ".$current_hash."\n";
        }
    }

    # Clean up generated directories
    if ($created_datadir) {
        if (-d $tmpdir) { system("rm", "-r", "$tmpdir"); }
    }

    # Remove lock file
    if ($remove_lock) {
        unlink $lockfile or warn "Couldn't remove lockfile <$lockfile>\n";
    }

    # Tell user we are done
    Pencil::Util::notify('with auto test') if $notify;

    # Exit with appropriate status
    if ($failed || %failed) {
        exit 1;
    } else {
        exit 0;
    }
}

# End of file pc_auto-test