author		Stefan Fritsch <sf@cvs.openbsd.org>	2017-08-15 17:16:40 +0000
committer	Stefan Fritsch <sf@cvs.openbsd.org>	2017-08-15 17:16:40 +0000
commit		dc276708bcc35bc582745eaa184bfdedc38fa730 (patch)
tree		eeaa548ae17cd7c3c640349c725ec4647bfe32e8
parent		8942e02de8fbd40e040eeac71ced80c338eb49d1 (diff)
Add test cases for msdosfs bug
Add a test that writes a big file, reads it back, and compares the result;
this detects the bug that was briefly in msdosfs. Also add tests that create
lots of files in a directory, once in the root directory and once in a
subdirectory, because the two are handled completely differently in msdosfs.
The disk images used by these tests need to be enlarged.

ok bluhm@
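At its core, the new file_write test is a slurp-style round trip. A minimal
sketch of the idea, assuming File::Slurp is installed (path illustrative,
error handling omitted):

	use File::Slurp;
	my $data = join '', map { "$_\n" } 1 .. 100000;	# roughly 500K of text
	write_file('/mnt/regress-fileops/file', $data);	# single slurp-style write
	die "readback mismatch" if read_file('/mnt/regress-fileops/file') ne $data;

The many_files tests do the same per-file round trip for hundreds of small
files, once in the filesystem root and once in a subdirectory.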
-rw-r--r--	regress/sys/fileops/Makefile.inc	|  26
-rw-r--r--	regress/sys/fileops/fileops2.pl		| 101
2 files changed, 123 insertions(+), 4 deletions(-)
diff --git a/regress/sys/fileops/Makefile.inc b/regress/sys/fileops/Makefile.inc
index 410817a63b9..d6d2c05781e 100644
--- a/regress/sys/fileops/Makefile.inc
+++ b/regress/sys/fileops/Makefile.inc
@@ -1,6 +1,10 @@
-# $OpenBSD: Makefile.inc,v 1.2 2017/05/29 13:49:40 bluhm Exp $
+# $OpenBSD: Makefile.inc,v 1.3 2017/08/15 17:16:39 sf Exp $
-TESTS= create read mmap
+PERL_REQUIRE != perl -e 'eval { require File::Slurp } or print $$@'
+
+TESTS1= create read mmap
+TESTS2= many_files_root many_files_subdir file_write
+TESTS= ${TESTS1} ${TESTS2}
FILEOPS_MNT= /mnt/regress-fileops
FILEOPS_PROG= ${.OBJDIR}/../fileops
@@ -12,7 +16,7 @@ CLEANFILES= diskimage stamp-*
.PHONY: disk mount unconfig clean
disk: unconfig
- dd if=/dev/urandom of=diskimage bs=1M count=32
+ dd if=/dev/urandom of=diskimage bs=1M count=64
vnconfig vnd0 diskimage
${NEWFS} /dev/rvnd0c
@@ -34,7 +38,7 @@ stamp-setup:
${.OBJDIR}/../fileops:
${.MAKE} -C ${.CURDIR}/.. fileops
-.for t in ${TESTS}
+.for t in ${TESTS1}
REGRESS_TARGETS+= run-regress-${t}
run-regress-${t}: stamp-setup ${.OBJDIR}/../fileops
@echo '\n======== $@ ========'
@@ -42,6 +46,20 @@ run-regress-${t}: stamp-setup ${.OBJDIR}/../fileops
${FILEOPS_PROG} ${t} ${FILEOPS_MNT}/file
.endfor
+.for t in ${TESTS2}
+REGRESS_TARGETS+= run-regress-${t}
+.if ! empty(PERL_REQUIRE)
+run-regress-${t}:
+ @echo "${PERL_REQUIRE}"
+ @echo "Install the p5-File-Slurp package to run the fileops ${t} subtest"
+ @echo SKIPPED
+.else
+run-regress-${t}: stamp-setup
+ @echo '\n======== $@ ========'
+ perl ${.CURDIR}/../fileops2.pl ${t} ${FILEOPS_MNT}
+.endif
+.endfor
+
REGRESS_TARGETS+= run-regress-cleanup
run-regress-cleanup:
@echo '\n======== $@ ========'
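The PERL_REQUIRE probe above is the usual bsd.regress.mk idiom for a soft
dependency: the != assignment runs the command while the Makefile is parsed,
so the variable is empty when the module loads and holds the eval error
otherwise, and the .if ! empty(PERL_REQUIRE) branch replaces the affected
targets with SKIPPED stubs. The probe itself is a one-line Perl check (module
name as in the diff; any module works the same way):

	# prints nothing when File::Slurp is available
	eval { require File::Slurp } or print $@;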
diff --git a/regress/sys/fileops/fileops2.pl b/regress/sys/fileops/fileops2.pl
new file mode 100644
index 00000000000..7136ad978a9
--- /dev/null
+++ b/regress/sys/fileops/fileops2.pl
@@ -0,0 +1,101 @@
+#!/usr/bin/perl
+
+use warnings;
+use strict;
+
+
+# File::Slurp does I/O differently from a simple print $fh and $foo = <$fh>.
+# Only with File::Slurp is the issue introduced by denode.h,v 1.31 and
+# msdosfs_vnops.c,v 1.114 reproducible via file_write_test().
+# XXX One should do some ktrace analysis and rewrite the test to do exactly
+# XXX what is required to trigger the issue.
+use File::Slurp;
+
+my $test = $ARGV[0] or usage();
+my $basedir = $ARGV[1] or usage();
+
+if ($test eq 'many_files_root') {
+ many_files_test($basedir, 500);
+} elsif ($test eq 'many_files_subdir') {
+ my $dir = "$basedir/subdir";
+ mkdir($dir) or die "could not create $dir: $!";
+ -d $dir or die "mkdir($dir) did not work?";
+ many_files_test("$dir", 2000);
+} elsif ($test eq 'file_write') {
+ file_write_test("$basedir/file");
+} else {
+ usage();
+}
+
+exit 0;
+
+### subroutines
+
+# create lots of files in a dir and check that they can be read back
+sub many_files_test {
+ my $dir = shift;
+ my $nfiles = shift;
+
+
+ for my $i (1 .. $nfiles) {
+ write_file("$dir/$i", "x$i\n")
+ or die "Could not write file $dir/$i: $!";
+ }
+
+ foreach my $i (1 .. $nfiles) {
+ my $file = "$dir/$i";
+ my $content = read_file($file);
+ defined $content or die "could not read $file: $!";
+ if ($content ne "x$i\n") {
+ die "$file has wrong content: '$content' instead of 'x$i\n'";
+ }
+ unlink($file) or die "could not unlink $file";
+ }
+ foreach my $i (1 .. $nfiles) {
+ my $file = "$dir/$i";
+ if (-e $file) {
+ die "$file still exists?";
+ }
+ }
+}
+
+# create one ~ 500K file and check that reading gives the same contents
+sub file_write_test {
+ my $file = shift;
+ my $content = '';
+
+ for my $i (1 .. 100000) {
+ $content .= "$i\n";
+ }
+ write_file($file, $content) or die "Could not write $file: $!";
+
+ my $got = read_file($file) or die "Could not read $file: $!";
+ if (length $got != length $content) {
+ die "File $file is " . length $got . " bytes, expected "
+ . length $content;
+ }
+
+ if ($got ne $content) {
+ my $i = 0;
+ do {
+ if (substr($got, $i, 1) ne substr($content, $i, 1)) {
+ die "Got wrong content at pos $i in $file";
+ }
+ $i++;
+ } while ($i < length($got));
+ die "BUG";
+ }
+
+ unlink $file or die "can't delete $file: $!";
+}
+
+sub usage {
+ die << "EOF";
+usage: $0 <test> <mount-point>
+
+ test can be one of:
+ file_write
+ many_files_root
+ many_files_subdir
+EOF
+}
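Run by hand, the script takes the same two arguments the Makefile passes it,
a test name and a mount point; the mount point is assumed to exist and be
writable, as the stamp-setup target arranges:

	perl fileops2.pl file_write /mnt/regress-fileops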