* Infrastructure for automatic and manual testing of NixOS VMs.

  lib/build-vms.nix contains a function `buildVirtualNetwork' that
  takes a specification of a network of machines (as an attribute set
  of NixOS machine configurations) and builds a script that starts
  each configuration in a separate QEMU/KVM VM and connects them
  together in a virtual network.  This script can be run manually to
  test the VMs interactively.  There is also a function `runTests'
  that starts the virtual network inside a derivation and then
  executes a test specification that tells the VMs to do certain
  things (e.g., have one VM send an HTTP request to a webserver on
  another VM).  The tests are written in Perl (for now).

  tests/subversion.nix shows a simple example, namely a network of two
  machines: a webserver that runs the Subversion subservice, and a
  client.  Apache, Subversion and a few other packages are built with
  coverage analysis instrumentation.  For instance,

    $ nix-build tests/subversion.nix -A vms
    $ ./result/bin/run-vms

  starts two QEMU/KVM instances.  When they have finished booting, the
  webserver can be accessed from the host through
  http://localhost:8081/.

  It also has a small test suite:

    $ nix-build tests/subversion.nix -A report

  This runs the VMs in a derivation, executes the tests, and then
  produces a distributed code coverage analysis report (i.e., it shows
  the combined coverage on both machines).

  The Perl test driver program is in lib/test-driver.  It executes
  commands on the guest machines by connecting to a root shell running
  on port 514 (provided by modules/testing/test-instrumentation.nix).
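
  Roughly, the driver wraps each VM in a Machine object, and the test
  specification is evaluated with one Perl variable per machine in
  scope.  The Machine interface can also be used on its own; a hedged
  sketch, assuming a VM start script produced by
  `buildVirtualNetwork' (the exact path and script name below are
  made up and may differ):

    use Machine;

    # The script name must match run-<hostname>-vm (hypothetical path).
    my $machine = Machine->new("./result/vms/webserver/bin/run-webserver-vm");

    $machine->waitForJob("httpd");                # wait for an Upstart job to be running
    my $out = $machine->mustSucceed("uname -a");  # run a command, die on non-zero exit
    print STDERR $out;
    $machine->shutdown;                           # power off and wait for the VM to exit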

  The VMs are connected together in a virtual network using QEMU's
  multicast feature.  This isn't very secure.  At the very least,
  other processes on the same machine can listen to or send packets on
  the virtual network.  On the plus side, we don't need to be root to
  set up a multicast virtual network, so we can do it from a
  derivation.  Maybe we can use VDE instead.
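
  Because eth1 carries the inter-VM network while eth0 is reserved
  for the test driver, a test can also simulate a network partition
  with the `block'/`unblock' helpers from Machine.pm.  A small sketch
  (machine names and the reachability check are made up):

    $server->block;                              # take eth1 down; other VMs can no longer reach it
    $client->mustFail("ping -c 1 -w 2 server");  # hypothetical reachability check
    $server->unblock;                            # bring eth1 back up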

  (Moved from the vario repository.)

svn path=/nixos/trunk/; revision=16899
Eelco Dolstra, 2009-08-31 14:25:12 +00:00
commit 27a8e656bc (parent a7001d34d7)
4 changed files with 575 additions and 0 deletions

lib/build-vms.nix (new file)
@@ -0,0 +1,180 @@
{ nixos ? /etc/nixos/nixos
, nixpkgs ? /etc/nixos/nixpkgs
}:

let pkgs = import nixpkgs { config = {}; }; in

with pkgs;

rec {

  inherit pkgs;

  # Build a virtual network from an attribute set `{ machine1 =
  # config1; ... machineN = configN; }', where `machineX' is the
  # hostname and `configX' is a NixOS system configuration. The
  # result is a script that starts a QEMU instance for each virtual
  # machine. Each machine is given an arbitrary IP address in the
  # virtual network.
  buildVirtualNetwork =
    { nodes }:

    let nodes_ = lib.mapAttrs (n: buildVM nodes_) (assignIPAddresses nodes); in

    stdenv.mkDerivation {
      name = "vms";
      buildCommand =
        ''
          ensureDir $out/vms
          ${
            lib.concatMapStrings (vm:
              ''
                ln -sn ${vm.config.system.build.vm} $out/vms/${vm.config.networking.hostName}
              ''
            ) (lib.attrValues nodes_)
          }
          ensureDir $out/bin
          cat > $out/bin/run-vms <<EOF
          #! ${stdenv.shell}
          port=8080
          for i in $out/vms/*; do
            port2=\$((port++))
            echo "forwarding localhost:\$port2 to \$(basename \$i):80"
            QEMU_OPTS="-redir tcp:\$port2::80 -net nic,vlan=1 -net socket,vlan=1,mcast=232.0.1.1:1234" \$i/bin/run-*-vm &
          done
          EOF
          chmod +x $out/bin/run-vms
        ''; # */
    };

  buildVM =
    nodes: configuration:

    import "${nixos}/lib/eval-config.nix" {
      inherit nixpkgs;
      modules = [ configuration ];
      extraArgs = { inherit nodes; };
      /* !!! bug in the module/option handling: this ignores the
         config from assignIPAddresses. Too much magic.
      configuration = {
        imports = [configuration "${nixos}/modules/virtualisation/qemu-vm.nix"];
        config = {
          # We don't need the manual in a test VM, and leaving it out
          # speeds up evaluation quite a bit.
          services.nixosManual.enable = false;
        };
      };
      */
    };

  # Given an attribute set { machine1 = config1; ... machineN =
  # configN; }, sequentially assign IP addresses in the 192.168.1.0/24
  # range to each machine, and set the hostname to the attribute name.
  assignIPAddresses = nodes:

    let
      machines = lib.attrNames nodes;

      machinesWithIP = zip machines
        (map (n: "192.168.1.${toString n}") (lib.range 1 254));

      # Generate a /etc/hosts file.
      hosts = lib.concatMapStrings (m: "${m.second} ${m.first}\n") machinesWithIP;

      nodes_ = map (m: lib.nameValuePair m.first
        { imports = [
            (lib.getAttr m.first nodes)
            "${nixos}/modules/virtualisation/qemu-vm.nix" # !!!
            "${nixos}/modules/testing/test-instrumentation.nix" # !!! should only get added for automated test runs
          ];
          config =
            { networking.hostName = m.first;
              networking.interfaces =
                [ { name = "eth1";
                    ipAddress = m.second;
                  }
                ];
              networking.extraHosts = hosts;
              services.nixosManual.enable = false; # !!!
            };
        }
      ) machinesWithIP;

    in lib.listToAttrs nodes_;

  # Zip two lists together. Should be moved to pkgs.lib.
  zip = xs: ys:
    if xs != [] && ys != [] then
      [ {first = lib.head xs; second = lib.head ys;} ]
      ++ zip (lib.tail xs) (lib.tail ys)
    else [];

  # Run an automated test suite in the given virtual network.
  # `network' must be the result of a call to the
  # `buildVirtualNetwork' function. `tests' is a Perl fragment
  # describing the tests.
  runTests = network: tests:
    stdenv.mkDerivation {
      name = "vm-test-run";
      inherit tests;
      buildCommand =
        ''
          mkdir $out
          cp ${./test-driver/Machine.pm} Machine.pm
          ${perl}/bin/perl ${./test-driver/test-driver.pl} ${network}/vms/*/bin/run-*-vm
          for i in */coverage-data; do
            ensureDir $out/coverage-data
            mv $i $out/coverage-data/$(dirname $i)
          done
        ''; # */
    };

  # Generate a coverage report from the coverage data produced by
  # runTests.
  makeReport = x: runCommand "report" { buildInputs = [rsync]; }
    ''
      for d in ${x}/coverage-data/*; do
        echo "doing $d"
        ensureDir $TMPDIR/gcov/
        for i in $(cd $d/nix/store && ls); do
          if ! test -e $TMPDIR/gcov/nix/store/$i; then
            echo "copying $i"
            mkdir -p $TMPDIR/gcov/$(echo $i | cut -c34-)
            rsync -rv /nix/store/$i/.coverage/ $TMPDIR/gcov/$(echo $i | cut -c34-)
          fi
        done
        chmod -R u+w $TMPDIR/gcov
        find $TMPDIR/gcov -name "*.gcda" -exec rm {} \;
        for i in $(cd $d/nix/store && ls); do
          rsync -rv $d/nix/store/$i/.coverage/ $TMPDIR/gcov/$(echo $i | cut -c34-)
        done
        find $TMPDIR/gcov -name "*.gcda" -exec chmod 644 {} \;
        echo "producing info..."
        ${pkgs.lcov}/bin/geninfo --ignore-errors source,gcov $TMPDIR/gcov --output-file $TMPDIR/app.info
        cat $TMPDIR/app.info >> $TMPDIR/full.info
      done

      ${pkgs.lcov}/bin/lcov --remove $TMPDIR/full.info "/nix/store/*" > $TMPDIR/out.info

      echo "making report..."
      ensureDir $out/coverage
      ${pkgs.lcov}/bin/genhtml --show-details $TMPDIR/out.info -o $out/coverage
    ''; # */
}

lib/test-driver/Machine.pm (new file)
@@ -0,0 +1,242 @@
package Machine;
use strict;
use Socket;
use IO::Handle;
use POSIX qw(dup2);

# Stuff our PID in the multicast address/port to prevent collisions
# with other NixOS VM networks.
my $mcastAddr = "232.18.1." . ($$ >> 8) . ":" . (64000 + ($$ & 0xff));
print STDERR "using multicast address $mcastAddr\n";

sub new {
    my ($class, $vmScript) = @_;
    $vmScript =~ /run-(.*)-vm$/ or die;
    my $name = $1;
    my $tmpDir = $ENV{'TMPDIR'} || "/tmp";
    my $self = {
        script => $vmScript,
        name => $name,
        booted => 0,
        pid => 0,
        connected => 0,
        socket => undef,
        stateDir => "$tmpDir/$name",
    };
    mkdir $self->{stateDir}, 0700;
    bless $self, $class;
    return $self;
}

sub log {
    my ($self, $msg) = @_;
    chomp $msg;
    print STDERR $self->{name}, ": $msg\n";
}

sub name {
    my ($self) = @_;
    return $self->{name};
}

sub start {
    my ($self) = @_;
    return if $self->{booted};
    $self->log("starting vm");
    my $pid = fork();
    die if $pid == -1;
    if ($pid == 0) {
        my $name = $self->{name};
        open LOG, "| sed --unbuffered 's|^|$name console: |'" or die;
        dup2(fileno(LOG), fileno(STDOUT));
        dup2(fileno(LOG), fileno(STDERR));
        $ENV{TMPDIR} = $self->{stateDir};
        $ENV{QEMU_OPTS} = "-nographic -redir tcp:65535::514 -net nic,vlan=1 -net socket,vlan=1,mcast=$mcastAddr";
        $ENV{QEMU_KERNEL_PARAMS} = "console=ttyS0 panic=1 hostTmpDir=$ENV{TMPDIR}";
        chdir $self->{stateDir} or die;
        exec $self->{script};
        die;
    }
    $self->log("vm running as pid $pid");
    $self->{pid} = $pid;
    $self->{booted} = 1;
}

sub connect {
    my ($self) = @_;
    return if $self->{connected};
    $self->start;
    while (1) {
        last if -e ($self->{stateDir} . "/running");
        sleep 1;
    }
    while (1) {
        $self->log("trying to connect");
        my $socket = new IO::Handle;
        $self->{socket} = $socket;
        socket($socket, PF_UNIX, SOCK_STREAM, 0) or die;
        connect($socket, sockaddr_un($self->{stateDir} . "/65535.socket")) or die;
        $socket->autoflush(1);
        print $socket "echo hello\n" or next;
        flush $socket;
        my $line = readline($socket);
        chomp $line;
        last if $line eq "hello";
        sleep 1;
    }
    $self->log("connected");
    $self->{connected} = 1;
    print { $self->{socket} } "PATH=/var/run/current-system/sw/bin:/var/run/current-system/sw/sbin:\$PATH\n";
    print { $self->{socket} } "export GCOV_PREFIX=/tmp/coverage-data\n";
    print { $self->{socket} } "cd /tmp\n";
    # !!! Should make sure the commands above don't produce output, otherwise we're out of sync.
}

sub waitForShutdown {
    my ($self) = @_;
    return unless $self->{booted};
    waitpid $self->{pid}, 0;
    $self->{pid} = 0;
    $self->{booted} = 0;
}

sub execute {
    my ($self, $command) = @_;
    $self->connect;
    $self->log("running command: $command");
    print { $self->{socket} } ("($command); echo '|!=EOF' \$?\n");
    my $out = "";
    while (1) {
        my $line = readline($self->{socket}) or die "connection to VM lost unexpectedly";
        #$self->log("got line: $line");
        if ($line =~ /^(.*)\|\!\=EOF\s+(\d+)$/) {
            $out .= $1;
            $self->log("exit status $2");
            return ($2, $out);
        }
        $out .= $line;
    }
}

sub mustSucceed {
    my ($self, $command) = @_;
    my ($status, $out) = $self->execute($command);
    if ($status != 0) {
        $self->log("output: $out");
        die "command `$command' did not succeed (exit code $status)";
    }
    return $out;
}

sub mustFail {
    my ($self, $command) = @_;
    my ($status, $out) = $self->execute($command);
    die "command `$command' unexpectedly succeeded"
        if $status == 0;
}

# Wait for an Upstart job to reach the "running" state.
sub waitForJob {
    my ($self, $jobName) = @_;
    while (1) {
        my ($status, $out) = $self->execute("initctl status $jobName");
        return if $out =~ /\(start\)\s+running/;
        sleep 1;
        # !!! need a timeout
    }
}

sub stopJob {
    my ($self, $jobName) = @_;
    $self->execute("initctl stop $jobName");
    while (1) {
        my ($status, $out) = $self->execute("initctl status $jobName");
        return if $out =~ /\(stop\)\s+waiting/;
        sleep 1;
        # !!! need a timeout
    }
}

# Wait until the machine is listening on the given TCP port.
sub waitForOpenPort {
    my ($self, $port) = @_;
    while (1) {
        my ($status, $out) = $self->execute("nc -z localhost $port");
        return if $status == 0;
        sleep 1;
    }
}

# Wait until the machine is not listening on the given TCP port.
sub waitForClosedPort {
    my ($self, $port) = @_;
    while (1) {
        my ($status, $out) = $self->execute("nc -z localhost $port");
        return if $status != 0;
        sleep 1;
    }
}

sub shutdown {
    my ($self) = @_;
    return unless $self->{booted};
    $self->execute("poweroff -f &");
    $self->waitForShutdown;
}

# Make the machine unreachable by shutting down eth1 (the multicast
# interface used to talk to the other VMs). We keep eth0 up so that
# the test driver can continue to talk to the machine.
sub block {
    my ($self) = @_;
    $self->mustSucceed("ifconfig eth1 down");
}

# Make the machine reachable.
sub unblock {
    my ($self) = @_;
    $self->mustSucceed("ifconfig eth1 up");
}

1;

lib/test-driver/test-driver.pl (new file)
@@ -0,0 +1,41 @@
use strict;
use Machine;

$SIG{PIPE} = 'IGNORE'; # because Unix domain sockets may die unexpectedly

my %vms;
my $context = "";

foreach my $vmScript (@ARGV) {
    my $vm = Machine->new($vmScript);
    $vms{$vm->name} = $vm;
    $context .= "my \$" . $vm->name . " = \$vms{'" . $vm->name . "'}; ";
}

sub startAll {
    $_->start foreach values %vms;
}

sub runTests {
    eval "$context $ENV{tests}";
    die $@ if $@;
}

END {
    foreach my $vm (values %vms) {
        if ($vm->{pid}) {
            print STDERR "killing ", $vm->{name}, " (pid ", $vm->{pid}, ")\n";
            kill 9, $vm->{pid};
        }
    }
}

runTests;

print STDERR "DONE\n";

tests/subversion.nix (new file)
@@ -0,0 +1,112 @@
with import ../lib/build-vms.nix {};

let

  # Build some packages with coverage instrumentation.
  overrides = pkgs:
    let
      do = pkg: pkg.override (args: {
        stdenv = pkgs.addCoverageInstrumentation args.stdenv;
      });
    in
      rec {
        apr = do pkgs.apr;
        aprutil = do pkgs.aprutil;
        apacheHttpd = do pkgs.apacheHttpd;
        mod_python = do pkgs.mod_python;
        subversion = do pkgs.subversion;
      };

in

rec {

  nodes =
    { webserver =
        { config, pkgs, ... }:
        {
          services.httpd.enable = true;
          services.httpd.adminAddr = "e.dolstra@tudelft.nl";
          services.httpd.extraSubservices =
            [ { serviceType = "subversion";
                urlPrefix = "";
                dataDir = "/data/subversion";
                userCreationDomain = "192.168.0.0/16";
              }
            ];
          nixpkgs.config.packageOverrides = overrides;
        };

      client =
        { config, pkgs, ... }:
        {
          environment.systemPackages = [ pkgs.subversion ];
          nixpkgs.config.packageOverrides = overrides;
        };
    };

  vms = buildVirtualNetwork { inherit nodes; };

  test = runTests vms
    ''
      startAll;

      $webserver->waitForOpenPort(80);

      my $out = $client->mustSucceed("svn --version");
      print STDERR "GOT: $out";

      my $out = $client->mustSucceed("curl --fail http://webserver/");
      print STDERR "GOT: $out";

      # Create a new user through the web interface.
      $client->mustSucceed("curl --fail -F username=alice -F fullname='Alice Lastname' -F address=alice\@example.org -F password=foobar -F password_again=foobar http://webserver/repoman/adduser");

      # Let Alice create a new repository.
      $client->mustSucceed("curl --fail -u alice:foobar --form repo=xyzzy --form description=Xyzzy http://webserver/repoman/create");

      $client->mustSucceed("curl --fail http://webserver/") =~ /alice/ or die;

      # Let Alice do a checkout.
      my $svnFlags = "--non-interactive --username alice --password foobar";
      $client->mustSucceed("svn co $svnFlags http://webserver/repos/xyzzy wc");
      $client->mustSucceed("echo hello > wc/world");
      $client->mustSucceed("svn add wc/world");
      $client->mustSucceed("svn ci $svnFlags -m 'Added world.' wc/world");

      # Create a new user on the server through the create-user.pl script.
      $webserver->execute("svn-server-create-user.pl bob bob\@example.org Bob");
      $webserver->mustSucceed("svn-server-resetpw.pl bob fnord");
      $client->mustSucceed("curl --fail http://webserver/") =~ /bob/ or die;

      # Bob should not have access to the repo.
      my $svnFlagsBob = "--non-interactive --username bob --password fnord";
      $client->mustFail("svn co $svnFlagsBob http://webserver/repos/xyzzy wc2");

      # Bob should not be able to change the ACLs of the repo.
      # !!! Repoman should really return a 403 here.
      $client->execute("curl --fail -u bob:fnord -F description=Xyzzy -F readers=alice,bob -F writers=alice -F watchers= -F tardirs= http://webserver/repoman/update/xyzzy")
          =~ /not authorised/ or die;

      # Give Bob access.
      $client->mustSucceed("curl --fail -u alice:foobar -F description=Xyzzy -F readers=alice,bob -F writers=alice -F watchers= -F tardirs= http://webserver/repoman/update/xyzzy");

      # So now his checkout should succeed.
      $client->mustSucceed("svn co $svnFlagsBob http://webserver/repos/xyzzy wc2");

      # Test ViewVC and WebSVN
      $client->mustSucceed("curl --fail -u alice:foobar http://webserver/viewvc/xyzzy");
      $client->mustSucceed("curl --fail -u alice:foobar http://webserver/websvn/xyzzy");
      $client->mustSucceed("curl --fail -u alice:foobar http://webserver/repos-xml/xyzzy");

      # Stop Apache to gather all the coverage data.
      $webserver->stopJob("httpd");
      $webserver->execute("sleep 5"); # !!!
    '';

  report = makeReport test;

}